Merge tag 'batman-adv-for-davem' of git://git.open-mesh.org/linux-merge
authorDavid S. Miller <davem@davemloft.net>
Sun, 31 May 2015 08:07:06 +0000 (01:07 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sun, 31 May 2015 08:07:06 +0000 (01:07 -0700)
Antonio Quartulli says:

====================
Included changes:
- checkpatch fixes
- code cleanup
- debugfs component is now compiled only if DEBUG_FS is selected
- update copyright years
- disable not-so-user-safe features by default
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
148 files changed:
Documentation/devicetree/bindings/net/ipq806x-dwmac.txt [new file with mode: 0644]
Documentation/networking/ieee802154.txt
drivers/bluetooth/Kconfig
drivers/bluetooth/Makefile
drivers/bluetooth/btbcm.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btrtl.c [new file with mode: 0644]
drivers/bluetooth/btrtl.h [new file with mode: 0644]
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/bluetooth/hci_bcsp.c
drivers/clk/qcom/gcc-ipq806x.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx5/Kconfig
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/en.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_main.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/flow_table.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/transobj.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/vport.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/wq.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/wq.h [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig
drivers/net/ieee802154/Makefile
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/at86rf230.h [new file with mode: 0644]
drivers/net/ieee802154/atusb.c [new file with mode: 0644]
drivers/net/ieee802154/atusb.h [new file with mode: 0644]
drivers/net/ieee802154/cc2520.c
drivers/net/ieee802154/fakelb.c
drivers/net/ieee802154/mrf24j40.c
include/dt-bindings/clock/qcom,gcc-ipq806x.h
include/dt-bindings/reset/qcom,gcc-ipq806x.h
include/linux/bpf.h
include/linux/if_vlan.h
include/linux/mlx4/device.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/flow_table.h [new file with mode: 0644]
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/stmmac.h
include/net/cfg802154.h
include/net/ieee802154_netdev.h
include/net/mac802154.h
include/net/netfilter/nf_tables.h
include/net/netns/nftables.h
include/net/nl802154.h
include/uapi/linux/bpf.h
include/uapi/linux/ethtool.h
include/uapi/linux/netfilter/nf_tables.h
kernel/bpf/arraymap.c
kernel/bpf/syscall.c
net/bluetooth/hci_core.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/core/filter.c
net/core/netevent.c
net/ieee802154/6lowpan/core.c
net/ieee802154/6lowpan/tx.c
net/ieee802154/core.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ieee802154/rdev-ops.h
net/ieee802154/socket.c
net/ieee802154/trace.h
net/ipv4/netfilter/ip_tables.c
net/ipv4/sysctl_net_ipv4.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/raw.c
net/mac802154/Kconfig
net/mac802154/cfg.c
net/mac802154/driver-ops.h
net/mac802154/ieee802154_i.h
net/mac802154/iface.c
net/mac802154/mac_cmd.c
net/mac802154/main.c
net/mac802154/mib.c
net/mac802154/rx.c
net/mac802154/util.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_netdev.c [new file with mode: 0644]
net/tipc/socket.c

diff --git a/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt
new file mode 100644 (file)
index 0000000..6d7ab4e
--- /dev/null
@@ -0,0 +1,35 @@
+* IPQ806x DWMAC Ethernet controller
+
+The device inherits all the properties of the dwmac/stmmac devices
+described in the file net/stmmac.txt with the following changes.
+
+Required properties:
+
+- compatible: should be "qcom,ipq806x-gmac" along with "snps,dwmac"
+             and any applicable more detailed version number
+             described in net/stmmac.txt
+
+- qcom,nss-common: should contain a phandle to a syscon device mapping the
+                  nss-common registers.
+
+- qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the
+                  qsgmii-csr registers.
+
+Example:
+
+       gmac: ethernet@37000000 {
+               device_type = "network";
+               compatible = "qcom,ipq806x-gmac";
+               reg = <0x37000000 0x200000>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "macirq";
+
+               qcom,nss-common = <&nss_common>;
+               qcom,qsgmii-csr = <&qsgmii_csr>;
+
+               clocks = <&gcc GMAC_CORE1_CLK>;
+               clock-names = "stmmaceth";
+
+               resets = <&gcc GMAC_CORE1_RESET>;
+               reset-names = "stmmaceth";
+       };
index 22bbc7225f8ed599e8c8b51653c017a6c4845c53..1700756af0570bf75b6422600beb22c935072575 100644 (file)
@@ -30,8 +30,8 @@ int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
 
 The address family, socket addresses etc. are defined in the
 include/net/af_ieee802154.h header or in the special header
-in our userspace package (see either linux-zigbee sourceforge download page
-or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee).
+in the userspace package (see either http://wpan.cakelab.org/ or the
+git tree at https://github.com/linux-wpan/wpan-tools).
 
 One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
 
@@ -49,15 +49,6 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
 Those types of devices require different approach to be hooked into Linux kernel.
 
 
-MLME - MAC Level Management
-============================
-
-Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands.
-See the include/net/nl802154.h header. Our userspace tools package
-(see above) provides CLI configuration utility for radio interfaces and simple
-coordinator for IEEE 802.15.4 networks as an example users of MLME protocol.
-
-
 HardMAC
 =======
 
@@ -75,8 +66,6 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields
 assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional.
 All other fields are required.
 
-We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c
-
 
 SoftMAC
 =======
@@ -89,7 +78,8 @@ stack interface for network sniffers (e.g. WireShark).
 
 This layer is going to be extended soon.
 
-See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
+See header include/net/mac802154.h and several drivers in
+drivers/net/ieee802154/.
 
 
 Device drivers API
@@ -114,18 +104,17 @@ Moreover IEEE 802.15.4 device operations structure should be filled.
 Fake drivers
 ============
 
-In addition there are two drivers available which simulate real devices with
-HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver)
-interfaces. This option provides possibility to test and debug stack without
-usage of real hardware.
+In addition there is a driver available which simulates a real device with
+a SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
+makes it possible to test and debug the stack without real hardware.
 
-See sources in drivers/ieee802154 folder for more details.
+See sources in drivers/net/ieee802154 folder for more details.
 
 
 6LoWPAN Linux implementation
 ============================
 
-The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80
 octets of actual MAC payload once security is turned on, on a wireless link
 with a link throughput of 250 kbps or less.  The 6LoWPAN adaptation format
 [RFC4944] was specified to carry IPv6 datagrams over such constrained links,
@@ -140,7 +129,8 @@ In September 2011 the standard update was published - [RFC6282].
 It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
 used in this Linux implementation.
 
-All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.*
+All the code related to 6lowpan can be found in net/6lowpan/*
+and net/ieee802154/6lowpan/*
 
 To setup 6lowpan interface you need (busybox release > 1.17.0):
 1. Add IEEE802.15.4 interface and initialize PANid;
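As a companion to the documentation above, a minimal sketch (not part of
this patch) of opening an IEEE 802.15.4 datagram socket; the PF_IEEE802154
value comes from the kernel's include/linux/socket.h, the rest is ordinary
socket boilerplate:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef PF_IEEE802154
#define PF_IEEE802154 36        /* value from include/linux/socket.h */
#endif

int main(void)
{
        /* SOCK_DGRAM carries MAC payloads; SOCK_RAW would pass raw
         * frames towards the device xmit function, as noted above.
         */
        int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);

        if (sd < 0) {
                fprintf(stderr, "socket: %s\n", strerror(errno));
                return 1;       /* kernel lacks IEEE 802.15.4 support? */
        }
        close(sd);
        return 0;
}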
index ed5c2738bea20efcf0c6ed0a920ad9faf5b1fd67..2e777071e1dcb8bd544a16aedd58ca28ad2f43bc 100644 (file)
@@ -9,6 +9,10 @@ config BT_BCM
        tristate
        select FW_LOADER
 
+config BT_RTL
+       tristate
+       select FW_LOADER
+
 config BT_HCIBTUSB
        tristate "HCI USB driver"
        depends on USB
@@ -32,6 +36,17 @@ config BT_HCIBTUSB_BCM
 
          Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIBTUSB_RTL
+       bool "Realtek protocol support"
+       depends on BT_HCIBTUSB
+       select BT_RTL
+       default y
+       help
+         The Realtek protocol support enables firmware and configuration
+         download support for Realtek Bluetooth controllers.
+
+         Say Y here to compile support for Realtek protocol.
+
 config BT_HCIBTSDIO
        tristate "HCI SDIO driver"
        depends on MMC
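For reference, a .config fragment exercising the new options (a sketch;
BT_RTL itself is not user-visible and is pulled in via "select BT_RTL"):

CONFIG_BT=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_RTL=y
# CONFIG_BT_RTL=m follows automatically from the select above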
index dd0d9c40b99914817f76728d600fb359dbcac007..f40e194e7080183e999ebb5ac8381130cb7a4b54 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_BT_MRVL)         += btmrvl.o
 obj-$(CONFIG_BT_MRVL_SDIO)     += btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)                += btwilink.o
 obj-$(CONFIG_BT_BCM)           += btbcm.o
+obj-$(CONFIG_BT_RTL)           += btrtl.o
 
 btmrvl-y                       := btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)      += btmrvl_debugfs.o
index 4bba86677adc64553fe8415d9b6812bfb3e1449d..728fce38a5a24cd6fb142f77b7a7178508d8e0c1 100644 (file)
@@ -55,12 +55,6 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
        }
 
        bda = (struct hci_rp_read_bd_addr *)skb->data;
-       if (bda->status) {
-               BT_ERR("%s: BCM: Device address result failed (%02x)",
-                      hdev->name, bda->status);
-               kfree_skb(skb);
-               return -bt_to_errno(bda->status);
-       }
 
        /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
         * with no configured address.
index 2d43d4279b0092d8cfe5f225e6574e7d4ccea67e..828f2f8d1568c8c50962dee7d8e77fcfcd669972 100644 (file)
@@ -53,12 +53,6 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
        }
 
        bda = (struct hci_rp_read_bd_addr *)skb->data;
-       if (bda->status) {
-               BT_ERR("%s: Intel device address result failed (%02x)",
-                      hdev->name, bda->status);
-               kfree_skb(skb);
-               return -bt_to_errno(bda->status);
-       }
 
        /* For some Intel based controllers, the default Bluetooth device
         * address 00:03:19:9E:8B:00 can be found. These controllers are
index 01d6da577eeb0713127f57c264e6345b5055781d..b9a811900f6ab534087e17f3726c840a8afdb34e 100644 (file)
@@ -1217,7 +1217,7 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv)
        unsigned int reg, reg_start, reg_end;
        enum rdwr_status stat;
        u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr;
-       u8 dump_num, idx, i, read_reg, doneflag = 0;
+       u8 dump_num = 0, idx, i, read_reg, doneflag = 0;
        u32 memory_size, fw_dump_len = 0;
 
        /* dump sdio register first */
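The "= 0" matters because an error path can reach the code that loops over
dump_num descriptors before the register read that fills it in, leaving the
loop bounded by an indeterminate value. A standalone sketch of that hazard
(not the driver code itself):

#include <stdio.h>

static int read_dump_count(int *count)
{
        return -1;              /* pretend the register read failed */
}

int main(void)
{
        int dump_num = 0;       /* without "= 0" this is indeterminate */
        int idx;

        if (read_dump_count(&dump_num) < 0)
                goto done;      /* error path skips the assignment */
        printf("register read ok\n");
done:
        for (idx = 0; idx < dump_num; idx++)    /* still reads dump_num */
                printf("descriptor %d\n", idx);
        return 0;
}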
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
new file mode 100644 (file)
index 0000000..8428893
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ *  Bluetooth support for Realtek devices
+ *
+ *  Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/usb.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btrtl.h"
+
+#define VERSION "0.1"
+
+#define RTL_EPATCH_SIGNATURE   "Realtech"
+#define RTL_ROM_LMP_3499       0x3499
+#define RTL_ROM_LMP_8723A      0x1200
+#define RTL_ROM_LMP_8723B      0x8723
+#define RTL_ROM_LMP_8821A      0x8821
+#define RTL_ROM_LMP_8761A      0x8761
+
+static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
+{
+       struct rtl_rom_version_evt *rom_version;
+       struct sk_buff *skb;
+
+       /* Read RTL ROM version command */
+       skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Read ROM version failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*rom_version)) {
+               BT_ERR("%s: RTL version event length mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       rom_version = (struct rtl_rom_version_evt *)skb->data;
+       BT_INFO("%s: rom_version status=%x version=%x",
+               hdev->name, rom_version->status, rom_version->version);
+
+       *version = rom_version->version;
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+                                  const struct firmware *fw,
+                                  unsigned char **_buf)
+{
+       const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+       struct rtl_epatch_header *epatch_info;
+       unsigned char *buf;
+       int i, ret, len;
+       size_t min_size;
+       u8 opcode, length, data, rom_version = 0;
+       int project_id = -1;
+       const unsigned char *fwptr, *chip_id_base;
+       const unsigned char *patch_length_base, *patch_offset_base;
+       u32 patch_offset = 0;
+       u16 patch_length, num_patches;
+       const u16 project_id_to_lmp_subver[] = {
+               RTL_ROM_LMP_8723A,
+               RTL_ROM_LMP_8723B,
+               RTL_ROM_LMP_8821A,
+               RTL_ROM_LMP_8761A
+       };
+
+       ret = rtl_read_rom_version(hdev, &rom_version);
+       if (ret)
+               return ret;
+
+       min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       fwptr = fw->data + fw->size - sizeof(extension_sig);
+       if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
+               BT_ERR("%s: extension section signature mismatch", hdev->name);
+               return -EINVAL;
+       }
+
+       /* Loop from the end of the firmware parsing instructions, until
+        * we find an instruction that identifies the "project ID" for the
+        * hardware supported by this firmware file.
+        * Once we have that, we double-check that the project_id is suitable
+        * for the hardware we are working with.
+        */
+       while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+               opcode = *--fwptr;
+               length = *--fwptr;
+               data = *--fwptr;
+
+               BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
+
+               if (opcode == 0xff) /* EOF */
+                       break;
+
+               if (length == 0) {
+                       BT_ERR("%s: found instruction with length 0",
+                              hdev->name);
+                       return -EINVAL;
+               }
+
+               if (opcode == 0 && length == 1) {
+                       project_id = data;
+                       break;
+               }
+
+               fwptr -= length;
+       }
+
+       if (project_id < 0) {
+               BT_ERR("%s: failed to find version instruction", hdev->name);
+               return -EINVAL;
+       }
+
+       if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+               BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+               return -EINVAL;
+       }
+
+       if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+               BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
+                      project_id_to_lmp_subver[project_id], lmp_subver);
+               return -EINVAL;
+       }
+
+       epatch_info = (struct rtl_epatch_header *)fw->data;
+       if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+               BT_ERR("%s: bad EPATCH signature", hdev->name);
+               return -EINVAL;
+       }
+
+       num_patches = le16_to_cpu(epatch_info->num_patches);
+       BT_DBG("fw_version=%x, num_patches=%d",
+              le32_to_cpu(epatch_info->fw_version), num_patches);
+
+       /* After the rtl_epatch_header there is a funky patch metadata section.
+        * Assuming 2 patches, the layout is:
+        * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
+        *
+        * Find the right patch for this chip.
+        */
+       min_size += 8 * num_patches;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+       patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
+       patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
+       for (i = 0; i < num_patches; i++) {
+               u16 chip_id = get_unaligned_le16(chip_id_base +
+                                                (i * sizeof(u16)));
+               if (chip_id == rom_version + 1) {
+                       patch_length = get_unaligned_le16(patch_length_base +
+                                                         (i * sizeof(u16)));
+                       patch_offset = get_unaligned_le32(patch_offset_base +
+                                                         (i * sizeof(u32)));
+                       break;
+               }
+       }
+
+       if (!patch_offset) {
+               BT_ERR("%s: didn't find patch for chip id %d",
+                      hdev->name, rom_version);
+               return -EINVAL;
+       }
+
+       BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
+       min_size = patch_offset + patch_length;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       /* Copy the firmware into a new buffer and write the version at
+        * the end.
+        */
+       len = patch_length;
+       buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+       *_buf = buf;
+       return len;
+}
+
+static int rtl_download_firmware(struct hci_dev *hdev,
+                                const unsigned char *data, int fw_len)
+{
+       struct rtl_download_cmd *dl_cmd;
+       int frag_num = fw_len / RTL_FRAG_LEN + 1;
+       int frag_len = RTL_FRAG_LEN;
+       int ret = 0;
+       int i;
+
+       dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+       if (!dl_cmd)
+               return -ENOMEM;
+
+       for (i = 0; i < frag_num; i++) {
+               struct sk_buff *skb;
+
+               BT_DBG("download fw (%d/%d)", i, frag_num);
+
+               dl_cmd->index = i;
+               if (i == (frag_num - 1)) {
+                       dl_cmd->index |= 0x80; /* data end */
+                       frag_len = fw_len % RTL_FRAG_LEN;
+               }
+               memcpy(dl_cmd->data, data, frag_len);
+
+               /* Send download command */
+               skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
+                                    HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb)) {
+                       BT_ERR("%s: download fw command failed (%ld)",
+                              hdev->name, PTR_ERR(skb));
+                       ret = -PTR_ERR(skb);
+                       goto out;
+               }
+
+               if (skb->len != sizeof(struct rtl_download_response)) {
+                       BT_ERR("%s: download fw event length mismatch",
+                              hdev->name);
+                       kfree_skb(skb);
+                       ret = -EIO;
+                       goto out;
+               }
+
+               kfree_skb(skb);
+               data += RTL_FRAG_LEN;
+       }
+
+out:
+       kfree(dl_cmd);
+       return ret;
+}
+
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+{
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+       ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
+       if (ret < 0) {
+               BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
+               return ret;
+       }
+
+       if (fw->size < 8) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Check that the firmware doesn't have the epatch signature
+        * (which is only for RTL8723B and newer).
+        */
+       if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
+               BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = rtl_download_firmware(hdev, fw->data, fw->size);
+
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
+                               const char *fw_name)
+{
+       unsigned char *fw_data = NULL;
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+       ret = request_firmware(&fw, fw_name, &hdev->dev);
+       if (ret < 0) {
+               BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
+               return ret;
+       }
+
+       ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+       if (ret < 0)
+               goto out;
+
+       ret = rtl_download_firmware(hdev, fw_data, ret);
+       kfree(fw_data);
+       if (ret < 0)
+               goto out;
+
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return skb;
+       }
+
+       if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+                      hdev->name);
+               kfree_skb(skb);
+               return ERR_PTR(-EIO);
+       }
+
+       return skb;
+}
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       struct hci_rp_read_local_version *resp;
+       u16 lmp_subver;
+
+       skb = btrtl_read_local_version(hdev);
+       if (IS_ERR(skb))
+               return -PTR_ERR(skb);
+
+       resp = (struct hci_rp_read_local_version *)skb->data;
+       BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+               "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
+               resp->lmp_ver, resp->lmp_subver);
+
+       lmp_subver = le16_to_cpu(resp->lmp_subver);
+       kfree_skb(skb);
+
+       /* Match a set of subver values that correspond to stock firmware,
+        * which is not compatible with standard btusb.
+        * If matched, upload an alternative firmware that does conform to
+        * standard btusb. Once that firmware is uploaded, the subver changes
+        * to a different value.
+        */
+       switch (lmp_subver) {
+       case RTL_ROM_LMP_8723A:
+       case RTL_ROM_LMP_3499:
+               return btrtl_setup_rtl8723a(hdev);
+       case RTL_ROM_LMP_8723B:
+               return btrtl_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8723b_fw.bin");
+       case RTL_ROM_LMP_8821A:
+               return btrtl_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8821a_fw.bin");
+       case RTL_ROM_LMP_8761A:
+               return btrtl_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8761a_fw.bin");
+       default:
+               BT_INFO("rtl: assuming no firmware upload needed.");
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+
+MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
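The backward walk over the firmware's extension section is the least obvious
part of the new parser. A standalone sketch with invented bytes (not a real
firmware image) of how the <data, length, opcode> triples are consumed
back-to-front, starting just before the extension signature:

#include <stdio.h>

int main(void)
{
        /* File order: data=0x01, length=0x01, opcode=0x00, then the
         * extension section signature { 0x51, 0x04, 0xfd, 0x77 }.
         */
        const unsigned char tail[] = { 0x01, 0x01, 0x00,
                                       0x51, 0x04, 0xfd, 0x77 };
        const unsigned char *p = tail + sizeof(tail) - 4; /* signature */
        int project_id = -1;

        while (p > tail) {
                unsigned char opcode = *--p;
                unsigned char length = *--p;
                unsigned char data = *--p;

                if (opcode == 0xff)             /* EOF instruction */
                        break;
                if (opcode == 0x00 && length == 1) {
                        project_id = data;      /* 1 = RTL_ROM_LMP_8723B */
                        break;
                }
                p -= length;                    /* skip other payloads */
        }
        printf("project id: %d\n", project_id); /* prints 1 */
        return 0;
}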
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
new file mode 100644 (file)
index 0000000..38ffe48
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ *  Bluetooth support for Realtek devices
+ *
+ *  Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#define RTL_FRAG_LEN 252
+
+struct rtl_download_cmd {
+       __u8 index;
+       __u8 data[RTL_FRAG_LEN];
+} __packed;
+
+struct rtl_download_response {
+       __u8 status;
+       __u8 index;
+} __packed;
+
+struct rtl_rom_version_evt {
+       __u8 status;
+       __u8 version;
+} __packed;
+
+struct rtl_epatch_header {
+       __u8 signature[8];
+       __le32 fw_version;
+       __le16 num_patches;
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_RTL)
+
+int btrtl_setup_realtek(struct hci_dev *hdev);
+
+#else
+
+static inline int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
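The #else stub keeps call sites free of #ifdef clutter: with CONFIG_BT_RTL
disabled, btrtl_setup_realtek() still compiles and simply reports
-EOPNOTSUPP. A standalone illustration of the same pattern:

#include <stdio.h>
#include <errno.h>

#define HAVE_FEATURE 1          /* stands in for IS_ENABLED(CONFIG_BT_RTL) */

#if HAVE_FEATURE
static int feature_setup(void)
{
        return 0;               /* the real implementation lives here */
}
#else
static inline int feature_setup(void)
{
        return -EOPNOTSUPP;     /* same shape as the btrtl.h stub */
}
#endif

int main(void)
{
        printf("feature_setup() = %d\n", feature_setup());
        return 0;
}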
index 3c10d4dfe9a790e6e34f12022b1bcc2321ac648c..94c6c048130fe61be8173248bdb769eb7d50f84b 100644 (file)
 
 #include "btintel.h"
 #include "btbcm.h"
+#include "btrtl.h"
 
 #define VERSION "0.8"
 
 static bool disable_scofix;
 static bool force_scofix;
 
-static bool reset = 1;
+static bool reset = true;
 
 static struct usb_driver btusb_driver;
 
@@ -330,6 +331,7 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_FIRMWARE_LOADED  7
 #define BTUSB_FIRMWARE_FAILED  8
 #define BTUSB_BOOTING          9
+#define BTUSB_RESET_RESUME     10
 
 struct btusb_data {
        struct hci_dev       *hdev;
@@ -1372,378 +1374,6 @@ static int btusb_setup_csr(struct hci_dev *hdev)
        return ret;
 }
 
-#define RTL_FRAG_LEN 252
-
-struct rtl_download_cmd {
-       __u8 index;
-       __u8 data[RTL_FRAG_LEN];
-} __packed;
-
-struct rtl_download_response {
-       __u8 status;
-       __u8 index;
-} __packed;
-
-struct rtl_rom_version_evt {
-       __u8 status;
-       __u8 version;
-} __packed;
-
-struct rtl_epatch_header {
-       __u8 signature[8];
-       __le32 fw_version;
-       __le16 num_patches;
-} __packed;
-
-#define RTL_EPATCH_SIGNATURE   "Realtech"
-#define RTL_ROM_LMP_3499       0x3499
-#define RTL_ROM_LMP_8723A      0x1200
-#define RTL_ROM_LMP_8723B      0x8723
-#define RTL_ROM_LMP_8821A      0x8821
-#define RTL_ROM_LMP_8761A      0x8761
-
-static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
-{
-       struct rtl_rom_version_evt *rom_version;
-       struct sk_buff *skb;
-       int ret;
-
-       /* Read RTL ROM version command */
-       skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s: Read ROM version failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return PTR_ERR(skb);
-       }
-
-       if (skb->len != sizeof(*rom_version)) {
-               BT_ERR("%s: RTL version event length mismatch", hdev->name);
-               kfree_skb(skb);
-               return -EIO;
-       }
-
-       rom_version = (struct rtl_rom_version_evt *)skb->data;
-       BT_INFO("%s: rom_version status=%x version=%x",
-               hdev->name, rom_version->status, rom_version->version);
-
-       ret = rom_version->status;
-       if (ret == 0)
-               *version = rom_version->version;
-
-       kfree_skb(skb);
-       return ret;
-}
-
-static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
-                                  const struct firmware *fw,
-                                  unsigned char **_buf)
-{
-       const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
-       struct rtl_epatch_header *epatch_info;
-       unsigned char *buf;
-       int i, ret, len;
-       size_t min_size;
-       u8 opcode, length, data, rom_version = 0;
-       int project_id = -1;
-       const unsigned char *fwptr, *chip_id_base;
-       const unsigned char *patch_length_base, *patch_offset_base;
-       u32 patch_offset = 0;
-       u16 patch_length, num_patches;
-       const u16 project_id_to_lmp_subver[] = {
-               RTL_ROM_LMP_8723A,
-               RTL_ROM_LMP_8723B,
-               RTL_ROM_LMP_8821A,
-               RTL_ROM_LMP_8761A
-       };
-
-       ret = rtl_read_rom_version(hdev, &rom_version);
-       if (ret)
-               return -bt_to_errno(ret);
-
-       min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
-       if (fw->size < min_size)
-               return -EINVAL;
-
-       fwptr = fw->data + fw->size - sizeof(extension_sig);
-       if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
-               BT_ERR("%s: extension section signature mismatch", hdev->name);
-               return -EINVAL;
-       }
-
-       /* Loop from the end of the firmware parsing instructions, until
-        * we find an instruction that identifies the "project ID" for the
-        * hardware supported by this firmware file.
-        * Once we have that, we double-check that the project_id is suitable
-        * for the hardware we are working with.
-        */
-       while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
-               opcode = *--fwptr;
-               length = *--fwptr;
-               data = *--fwptr;
-
-               BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
-
-               if (opcode == 0xff) /* EOF */
-                       break;
-
-               if (length == 0) {
-                       BT_ERR("%s: found instruction with length 0",
-                              hdev->name);
-                       return -EINVAL;
-               }
-
-               if (opcode == 0 && length == 1) {
-                       project_id = data;
-                       break;
-               }
-
-               fwptr -= length;
-       }
-
-       if (project_id < 0) {
-               BT_ERR("%s: failed to find version instruction", hdev->name);
-               return -EINVAL;
-       }
-
-       if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
-               BT_ERR("%s: unknown project id %d", hdev->name, project_id);
-               return -EINVAL;
-       }
-
-       if (lmp_subver != project_id_to_lmp_subver[project_id]) {
-               BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
-                      project_id_to_lmp_subver[project_id], lmp_subver);
-               return -EINVAL;
-       }
-
-       epatch_info = (struct rtl_epatch_header *)fw->data;
-       if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
-               BT_ERR("%s: bad EPATCH signature", hdev->name);
-               return -EINVAL;
-       }
-
-       num_patches = le16_to_cpu(epatch_info->num_patches);
-       BT_DBG("fw_version=%x, num_patches=%d",
-              le32_to_cpu(epatch_info->fw_version), num_patches);
-
-       /* After the rtl_epatch_header there is a funky patch metadata section.
-        * Assuming 2 patches, the layout is:
-        * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
-        *
-        * Find the right patch for this chip.
-        */
-       min_size += 8 * num_patches;
-       if (fw->size < min_size)
-               return -EINVAL;
-
-       chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
-       patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
-       patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
-       for (i = 0; i < num_patches; i++) {
-               u16 chip_id = get_unaligned_le16(chip_id_base +
-                                                (i * sizeof(u16)));
-               if (chip_id == rom_version + 1) {
-                       patch_length = get_unaligned_le16(patch_length_base +
-                                                         (i * sizeof(u16)));
-                       patch_offset = get_unaligned_le32(patch_offset_base +
-                                                         (i * sizeof(u32)));
-                       break;
-               }
-       }
-
-       if (!patch_offset) {
-               BT_ERR("%s: didn't find patch for chip id %d",
-                      hdev->name, rom_version);
-               return -EINVAL;
-       }
-
-       BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
-       min_size = patch_offset + patch_length;
-       if (fw->size < min_size)
-               return -EINVAL;
-
-       /* Copy the firmware into a new buffer and write the version at
-        * the end.
-        */
-       len = patch_length;
-       buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
-
-       *_buf = buf;
-       return len;
-}
-
-static int rtl_download_firmware(struct hci_dev *hdev,
-                                const unsigned char *data, int fw_len)
-{
-       struct rtl_download_cmd *dl_cmd;
-       int frag_num = fw_len / RTL_FRAG_LEN + 1;
-       int frag_len = RTL_FRAG_LEN;
-       int ret = 0;
-       int i;
-
-       dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
-       if (!dl_cmd)
-               return -ENOMEM;
-
-       for (i = 0; i < frag_num; i++) {
-               struct rtl_download_response *dl_resp;
-               struct sk_buff *skb;
-
-               BT_DBG("download fw (%d/%d)", i, frag_num);
-
-               dl_cmd->index = i;
-               if (i == (frag_num - 1)) {
-                       dl_cmd->index |= 0x80; /* data end */
-                       frag_len = fw_len % RTL_FRAG_LEN;
-               }
-               memcpy(dl_cmd->data, data, frag_len);
-
-               /* Send download command */
-               skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
-                                    HCI_INIT_TIMEOUT);
-               if (IS_ERR(skb)) {
-                       BT_ERR("%s: download fw command failed (%ld)",
-                              hdev->name, PTR_ERR(skb));
-                       ret = -PTR_ERR(skb);
-                       goto out;
-               }
-
-               if (skb->len != sizeof(*dl_resp)) {
-                       BT_ERR("%s: download fw event length mismatch",
-                              hdev->name);
-                       kfree_skb(skb);
-                       ret = -EIO;
-                       goto out;
-               }
-
-               dl_resp = (struct rtl_download_response *)skb->data;
-               if (dl_resp->status != 0) {
-                       kfree_skb(skb);
-                       ret = bt_to_errno(dl_resp->status);
-                       goto out;
-               }
-
-               kfree_skb(skb);
-               data += RTL_FRAG_LEN;
-       }
-
-out:
-       kfree(dl_cmd);
-       return ret;
-}
-
-static int btusb_setup_rtl8723a(struct hci_dev *hdev)
-{
-       struct btusb_data *data = dev_get_drvdata(&hdev->dev);
-       struct usb_device *udev = interface_to_usbdev(data->intf);
-       const struct firmware *fw;
-       int ret;
-
-       BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
-       ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
-       if (ret < 0) {
-               BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
-               return ret;
-       }
-
-       if (fw->size < 8) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* Check that the firmware doesn't have the epatch signature
-        * (which is only for RTL8723B and newer).
-        */
-       if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
-               BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
-       release_firmware(fw);
-       return ret;
-}
-
-static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
-                               const char *fw_name)
-{
-       struct btusb_data *data = dev_get_drvdata(&hdev->dev);
-       struct usb_device *udev = interface_to_usbdev(data->intf);
-       unsigned char *fw_data = NULL;
-       const struct firmware *fw;
-       int ret;
-
-       BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
-       ret = request_firmware(&fw, fw_name, &udev->dev);
-       if (ret < 0) {
-               BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
-               return ret;
-       }
-
-       ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
-       if (ret < 0)
-               goto out;
-
-       ret = rtl_download_firmware(hdev, fw_data, ret);
-       kfree(fw_data);
-       if (ret < 0)
-               goto out;
-
-out:
-       release_firmware(fw);
-       return ret;
-}
-
-static int btusb_setup_realtek(struct hci_dev *hdev)
-{
-       struct sk_buff *skb;
-       struct hci_rp_read_local_version *resp;
-       u16 lmp_subver;
-
-       skb = btusb_read_local_version(hdev);
-       if (IS_ERR(skb))
-               return -PTR_ERR(skb);
-
-       resp = (struct hci_rp_read_local_version *)skb->data;
-       BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
-               "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
-               resp->lmp_ver, resp->lmp_subver);
-
-       lmp_subver = le16_to_cpu(resp->lmp_subver);
-       kfree_skb(skb);
-
-       /* Match a set of subver values that correspond to stock firmware,
-        * which is not compatible with standard btusb.
-        * If matched, upload an alternative firmware that does conform to
-        * standard btusb. Once that firmware is uploaded, the subver changes
-        * to a different value.
-        */
-       switch (lmp_subver) {
-       case RTL_ROM_LMP_8723A:
-       case RTL_ROM_LMP_3499:
-               return btusb_setup_rtl8723a(hdev);
-       case RTL_ROM_LMP_8723B:
-               return btusb_setup_rtl8723b(hdev, lmp_subver,
-                                           "rtl_bt/rtl8723b_fw.bin");
-       case RTL_ROM_LMP_8821A:
-               return btusb_setup_rtl8723b(hdev, lmp_subver,
-                                           "rtl_bt/rtl8821a_fw.bin");
-       case RTL_ROM_LMP_8761A:
-               return btusb_setup_rtl8723b(hdev, lmp_subver,
-                                           "rtl_bt/rtl8761a_fw.bin");
-       default:
-               BT_INFO("rtl: assuming no firmware upload needed.");
-               return 0;
-       }
-}
-
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
                                                       struct intel_version *ver)
 {
@@ -1951,12 +1581,6 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        }
 
        ver = (struct intel_version *)skb->data;
-       if (ver->status) {
-               BT_ERR("%s Intel fw version event failed (%02x)", hdev->name,
-                      ver->status);
-               kfree_skb(skb);
-               return -bt_to_errno(ver->status);
-       }
 
        BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
                hdev->name, ver->hw_platform, ver->hw_variant,
@@ -2004,15 +1628,6 @@ static int btusb_setup_intel(struct hci_dev *hdev)
                return PTR_ERR(skb);
        }
 
-       if (skb->data[0]) {
-               u8 evt_status = skb->data[0];
-
-               BT_ERR("%s enable Intel manufacturer mode event failed (%02x)",
-                      hdev->name, evt_status);
-               kfree_skb(skb);
-               release_firmware(fw);
-               return -bt_to_errno(evt_status);
-       }
        kfree_skb(skb);
 
        disable_patch = 1;
@@ -2358,13 +1973,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        }
 
        ver = (struct intel_version *)skb->data;
-       if (ver->status) {
-               BT_ERR("%s: Intel version command failure (%02x)",
-                      hdev->name, ver->status);
-               err = -bt_to_errno(ver->status);
-               kfree_skb(skb);
-               return err;
-       }
 
        /* The hardware platform number has a fixed value of 0x37 and
         * for now only accept this single value.
@@ -2439,13 +2047,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        }
 
        params = (struct intel_boot_params *)skb->data;
-       if (params->status) {
-               BT_ERR("%s: Intel boot parameters command failure (%02x)",
-                      hdev->name, params->status);
-               err = -bt_to_errno(params->status);
-               kfree_skb(skb);
-               return err;
-       }
 
        BT_INFO("%s: Device revision is %u", hdev->name,
                le16_to_cpu(params->dev_revid));
@@ -2678,13 +2279,6 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
                return;
        }
 
-       if (skb->data[0] != 0x00) {
-               BT_ERR("%s: Exception info command failure (%02x)",
-                      hdev->name, skb->data[0]);
-               kfree_skb(skb);
-               return;
-       }
-
        BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
 
        kfree_skb(skb);
@@ -2792,6 +2386,7 @@ struct qca_device_info {
 static const struct qca_device_info qca_devices_table[] = {
        { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
        { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
+       { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */
        { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
        { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
        { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
@@ -3175,8 +2770,17 @@ static int btusb_probe(struct usb_interface *intf,
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
        }
 
-       if (id->driver_info & BTUSB_REALTEK)
-               hdev->setup = btusb_setup_realtek;
+#ifdef CONFIG_BT_HCIBTUSB_RTL
+       if (id->driver_info & BTUSB_REALTEK) {
+               hdev->setup = btrtl_setup_realtek;
+
+               /* Realtek devices lose their updated firmware over suspend,
+                * but the USB hub doesn't notice any status change.
+                * Explicitly request a device reset on resume.
+                */
+               set_bit(BTUSB_RESET_RESUME, &data->flags);
+       }
+#endif
 
        if (id->driver_info & BTUSB_AMP) {
                /* AMP controllers do not support SCO packets */
@@ -3308,6 +2912,14 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
        btusb_stop_traffic(data);
        usb_kill_anchored_urbs(&data->tx_anchor);
 
+       /* Optionally request a device reset on resume, but only when
+        * wakeups are disabled. If wakeups are enabled we assume the
+        * device will stay powered up throughout suspend.
+        */
+       if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
+           !device_may_wakeup(&data->udev->dev))
+               data->udev->reset_resume = 1;
+
        return 0;
 }
 
index 55c135b7757a9df84745f05bf27870365cbddd73..7a722df97343ee6f25fb4c02959b8d64bacc4449 100644 (file)
@@ -22,7 +22,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
  */
-#define DEBUG
+
 #include <linux/platform_device.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
index dc8e3d4356a0ff40f1b27ddc63de3e7eefef7a87..fc0056a28b8177cf2be5d0d40c37bd26d38f1aa9 100644 (file)
@@ -47,8 +47,8 @@
 
 #include "hci_uart.h"
 
-static bool txcrc = 1;
-static bool hciextn = 1;
+static bool txcrc = true;
+static bool hciextn = true;
 
 #define BCSP_TXWINSIZE 4
 
index a50936a17376b3654a6449c78554254aea19824a..563969942a1df2bee95ef9164b7f4fce509a2229 100644 (file)
@@ -140,12 +140,47 @@ static struct clk_regmap pll14_vote = {
        },
 };
 
+#define NSS_PLL_RATE(f, _l, _m, _n, i) \
+       {  \
+               .freq = f,  \
+               .l = _l, \
+               .m = _m, \
+               .n = _n, \
+               .ibits = i, \
+       }
+
+static struct pll_freq_tbl pll18_freq_tbl[] = {
+       NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625),
+       NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625),
+};
+
+static struct clk_pll pll18 = {
+       .l_reg = 0x31a4,
+       .m_reg = 0x31a8,
+       .n_reg = 0x31ac,
+       .config_reg = 0x31b4,
+       .mode_reg = 0x31a0,
+       .status_reg = 0x31b8,
+       .status_bit = 16,
+       .post_div_shift = 16,
+       .post_div_width = 1,
+       .freq_tbl = pll18_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll18",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
 enum {
        P_PXO,
        P_PLL8,
        P_PLL3,
        P_PLL0,
        P_CXO,
+       P_PLL14,
+       P_PLL18,
 };
 
 static const struct parent_map gcc_pxo_pll8_map[] = {
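A quick way to audit the pll18_freq_tbl entries above: assuming the 25 MHz
pxo reference crystal used on IPQ806x and the divide-by-2 post divider
implied by post_div_shift/post_div_width, the rate works out to
pxo * (L + M/N) / 2. A standalone check:

#include <stdio.h>

int main(void)
{
        const struct { unsigned long long l, m, n; } tbl[] = {
                { 44,  0,  1 },         /* expect 550000000 */
                { 58, 16, 25 },         /* expect 733000000 */
        };
        unsigned long long pxo = 25000000;      /* assumed reference */
        unsigned int i;

        for (i = 0; i < 2; i++) {
                unsigned long long rate =
                        pxo * (tbl[i].l * tbl[i].n + tbl[i].m) /
                        (tbl[i].n * 2);
                printf("entry %u: %llu Hz\n", i, rate);
        }
        return 0;
}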
@@ -197,6 +232,22 @@ static const char *gcc_pxo_pll8_pll0_map[] = {
        "pll0_vote",
 };
 
+static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
+       { P_PXO, 0 },
+       { P_PLL8, 4 },
+       { P_PLL0, 2 },
+       { P_PLL14, 5 },
+       { P_PLL18, 1 }
+};
+
+static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = {
+       "pxo",
+       "pll8_vote",
+       "pll0_vote",
+       "pll14",
+       "pll18",
+};
+
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
        {  1843200, P_PLL8, 2,  6, 625 },
        {  3686400, P_PLL8, 2, 12, 625 },
@@ -2202,6 +2253,472 @@ static struct clk_branch ebi2_aon_clk = {
        },
 };
 
+static const struct freq_tbl clk_tbl_gmac[] = {
+       { 133000000, P_PLL0, 1,  50, 301 },
+       { 266000000, P_PLL0, 1, 127, 382 },
+       { }
+};
+
+static struct clk_dyn_rcg gmac_core1_src = {
+       .ns_reg[0] = 0x3cac,
+       .ns_reg[1] = 0x3cb0,
+       .md_reg[0] = 0x3ca4,
+       .md_reg[1] = 0x3ca8,
+       .bank_reg = 0x3ca0,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3ca0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core1_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core1_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 4,
+       .hwcg_reg = 0x3cb4,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3cb4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core1_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core1_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_dyn_rcg gmac_core2_src = {
+       .ns_reg[0] = 0x3ccc,
+       .ns_reg[1] = 0x3cd0,
+       .md_reg[0] = 0x3cc4,
+       .md_reg[1] = 0x3cc8,
+       .bank_reg = 0x3ca0,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3cc0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core2_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core2_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 5,
+       .hwcg_reg = 0x3cd4,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3cd4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core2_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core2_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_dyn_rcg gmac_core3_src = {
+       .ns_reg[0] = 0x3cec,
+       .ns_reg[1] = 0x3cf0,
+       .md_reg[0] = 0x3ce4,
+       .md_reg[1] = 0x3ce8,
+       .bank_reg = 0x3ce0,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3ce0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core3_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core3_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 6,
+       .hwcg_reg = 0x3cf4,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3cf4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core3_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core3_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_dyn_rcg gmac_core4_src = {
+       .ns_reg[0] = 0x3d0c,
+       .ns_reg[1] = 0x3d10,
+       .md_reg[0] = 0x3d04,
+       .md_reg[1] = 0x3d08,
+       .bank_reg = 0x3d00,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3d00,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core4_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core4_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 7,
+       .hwcg_reg = 0x3d14,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3d14,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core4_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core4_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_nss_tcm[] = {
+       { 266000000, P_PLL0, 3, 0, 0 },
+       { 400000000, P_PLL0, 2, 0, 0 },
+       { }
+};
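
Each freq_tbl row reads { rate, source, pre_div, m, n }: the selected parent is divided by pre_div and, when n is non-zero, further scaled by the M/N counter. With PLL0 at 800 MHz (its configuration lives elsewhere in this driver), 800 / 3 ≈ 266 MHz and 800 / 2 = 400 MHz, matching the two rows above. The arithmetic, as a sketch that assumes this field layout and pre_div >= 1:

/* Sketch: the output rate implied by one freq_tbl row, assuming the
 * { freq, src, pre_div, m, n } layout above and that n == 0 means the
 * M/N counter is bypassed.
 */
static unsigned long freq_tbl_rate(unsigned long parent_rate,
				   unsigned int pre_div,
				   unsigned int m, unsigned int n)
{
	unsigned long rate = parent_rate / pre_div;	/* pre_div >= 1 */

	if (n)
		rate = rate * m / n;	/* M/N counter engaged */
	return rate;
}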
+
+static struct clk_dyn_rcg nss_tcm_src = {
+       .ns_reg[0] = 0x3dc4,
+       .ns_reg[1] = 0x3dc8,
+       .bank_reg = 0x3dc0,
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_nss_tcm,
+       .clkr = {
+               .enable_reg = 0x3dc0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_tcm_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch nss_tcm_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 14,
+       .clkr = {
+               .enable_reg = 0x3dd0,
+               .enable_mask = BIT(6) | BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_tcm_clk",
+                       .parent_names = (const char *[]){
+                               "nss_tcm_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_nss[] = {
+       { 110000000, P_PLL18, 1, 1, 5 },
+       { 275000000, P_PLL18, 2, 0, 0 },
+       { 550000000, P_PLL18, 1, 0, 0 },
+       { 733000000, P_PLL18, 1, 0, 0 },
+       { }
+};
+
+static struct clk_dyn_rcg ubi32_core1_src_clk = {
+       .ns_reg[0] = 0x3d2c,
+       .ns_reg[1] = 0x3d30,
+       .md_reg[0] = 0x3d24,
+       .md_reg[1] = 0x3d28,
+       .bank_reg = 0x3d20,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_nss,
+       .clkr = {
+               .enable_reg = 0x3d20,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ubi32_core1_src_clk",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               },
+       },
+};
+
+static struct clk_dyn_rcg ubi32_core2_src_clk = {
+       .ns_reg[0] = 0x3d4c,
+       .ns_reg[1] = 0x3d50,
+       .md_reg[0] = 0x3d44,
+       .md_reg[1] = 0x3d48,
+       .bank_reg = 0x3d40,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_nss,
+       .clkr = {
+               .enable_reg = 0x3d40,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ubi32_core2_src_clk",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               },
+       },
+};
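
Both UBI32 sources carry CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, unlike the GMAC sources: a rate request on a consumer clock propagates up into the RCG, and clk_get_rate() re-reads the hardware instead of trusting the framework's cached value, which matters if the rate can change underneath it (for instance by retuning PLL18, as the 550 vs 733 MHz rows of clk_tbl_nss suggest). A consumer-side sketch, where the "nss_core" lookup id is an assumption, not a name from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: thanks to CLK_SET_RATE_PARENT, one clk_set_rate() on the
 * consumer clock retunes the ubi32 RCG.  The "nss_core" id is
 * hypothetical.
 */
static int nss_core_set_freq(struct device *dev, unsigned long hz)
{
	struct clk *c = devm_clk_get(dev, "nss_core");
	int ret;

	if (IS_ERR(c))
		return PTR_ERR(c);

	ret = clk_prepare_enable(c);
	if (ret)
		return ret;

	return clk_set_rate(c, hz);	/* propagates into the RCG */
}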
+
 static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL0] = &pll0.clkr,
        [PLL0_VOTE] = &pll0_vote,
@@ -2211,6 +2728,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
        [PLL14_VOTE] = &pll14_vote,
+       [PLL18] = &pll18.clkr,
        [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
        [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
        [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
@@ -2307,6 +2825,18 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
        [EBI2_CLK] = &ebi2_clk.clkr,
        [EBI2_AON_CLK] = &ebi2_aon_clk.clkr,
+       [GMAC_CORE1_CLK_SRC] = &gmac_core1_src.clkr,
+       [GMAC_CORE1_CLK] = &gmac_core1_clk.clkr,
+       [GMAC_CORE2_CLK_SRC] = &gmac_core2_src.clkr,
+       [GMAC_CORE2_CLK] = &gmac_core2_clk.clkr,
+       [GMAC_CORE3_CLK_SRC] = &gmac_core3_src.clkr,
+       [GMAC_CORE3_CLK] = &gmac_core3_clk.clkr,
+       [GMAC_CORE4_CLK_SRC] = &gmac_core4_src.clkr,
+       [GMAC_CORE4_CLK] = &gmac_core4_clk.clkr,
+       [UBI32_CORE1_CLK_SRC] = &ubi32_core1_src_clk.clkr,
+       [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
+       [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
+       [NSSTCM_CLK] = &nss_tcm_clk.clkr,
 };
 
 static const struct qcom_reset_map gcc_ipq806x_resets[] = {
@@ -2425,6 +2955,48 @@ static const struct qcom_reset_map gcc_ipq806x_resets[] = {
        [USB30_1_PHY_RESET] = { 0x3b58, 0 },
        [NSSFB0_RESET] = { 0x3b60, 6 },
        [NSSFB1_RESET] = { 0x3b60, 7 },
+       [UBI32_CORE1_CLKRST_CLAMP_RESET] = { 0x3d3c, 3 },
+       [UBI32_CORE1_CLAMP_RESET] = { 0x3d3c, 2 },
+       [UBI32_CORE1_AHB_RESET] = { 0x3d3c, 1 },
+       [UBI32_CORE1_AXI_RESET] = { 0x3d3c, 0 },
+       [UBI32_CORE2_CLKRST_CLAMP_RESET] = { 0x3d5c, 3 },
+       [UBI32_CORE2_CLAMP_RESET] = { 0x3d5c, 2 },
+       [UBI32_CORE2_AHB_RESET] = { 0x3d5c, 1 },
+       [UBI32_CORE2_AXI_RESET] = { 0x3d5c, 0 },
+       [GMAC_CORE1_RESET] = { 0x3cbc, 0 },
+       [GMAC_CORE2_RESET] = { 0x3cdc, 0 },
+       [GMAC_CORE3_RESET] = { 0x3cfc, 0 },
+       [GMAC_CORE4_RESET] = { 0x3d1c, 0 },
+       [GMAC_AHB_RESET] = { 0x3e24, 0 },
+       [NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 },
+       [NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 },
+       [NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 },
+       [NSS_CH0_HW_RST_RX_125M_N_RESET] = { 0x3b60, 3 },
+       [NSS_CH0_RST_TX_125M_N_RESET] = { 0x3b60, 4 },
+       [NSS_CH1_RST_RX_CLK_N_RESET] = { 0x3b60, 5 },
+       [NSS_CH1_RST_TX_CLK_N_RESET] = { 0x3b60, 6 },
+       [NSS_CH1_RST_RX_125M_N_RESET] = { 0x3b60, 7 },
+       [NSS_CH1_HW_RST_RX_125M_N_RESET] = { 0x3b60, 8 },
+       [NSS_CH1_RST_TX_125M_N_RESET] = { 0x3b60, 9 },
+       [NSS_CH2_RST_RX_CLK_N_RESET] = { 0x3b60, 10 },
+       [NSS_CH2_RST_TX_CLK_N_RESET] = { 0x3b60, 11 },
+       [NSS_CH2_RST_RX_125M_N_RESET] = { 0x3b60, 12 },
+       [NSS_CH2_HW_RST_RX_125M_N_RESET] = { 0x3b60, 13 },
+       [NSS_CH2_RST_TX_125M_N_RESET] = { 0x3b60, 14 },
+       [NSS_CH3_RST_RX_CLK_N_RESET] = { 0x3b60, 15 },
+       [NSS_CH3_RST_TX_CLK_N_RESET] = { 0x3b60, 16 },
+       [NSS_CH3_RST_RX_125M_N_RESET] = { 0x3b60, 17 },
+       [NSS_CH3_HW_RST_RX_125M_N_RESET] = { 0x3b60, 18 },
+       [NSS_CH3_RST_TX_125M_N_RESET] = { 0x3b60, 19 },
+       [NSS_RST_RX_250M_125M_N_RESET] = { 0x3b60, 20 },
+       [NSS_RST_TX_250M_125M_N_RESET] = { 0x3b60, 21 },
+       [NSS_QSGMII_TXPI_RST_N_RESET] = { 0x3b60, 22 },
+       [NSS_QSGMII_CDR_RST_N_RESET] = { 0x3b60, 23 },
+       [NSS_SGMII2_CDR_RST_N_RESET] = { 0x3b60, 24 },
+       [NSS_SGMII3_CDR_RST_N_RESET] = { 0x3b60, 25 },
+       [NSS_CAL_PRBS_RST_N_RESET] = { 0x3b60, 26 },
+       [NSS_LCKDT_RST_N_RESET] = { 0x3b60, 27 },
+       [NSS_SRDS_N_RESET] = { 0x3b60, 28 },
 };
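
Every new reset entry is just a register/bit pair under the same reset controller. A consumer driver pulses one through the reset API; a minimal sketch, where the "gmac" lookup id and the 10 µs hold time are assumptions, not values from this patch:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Sketch: pulse a block reset (e.g. GMAC_CORE1_RESET) from a consumer.
 * The "gmac" id and the hold time are assumptions.
 */
static int gmac_block_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, "gmac");

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	udelay(10);
	return reset_control_deassert(rst);
}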
 
 static const struct regmap_config gcc_ipq806x_regmap_config = {
@@ -2453,6 +3025,8 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
 {
        struct clk *clk;
        struct device *dev = &pdev->dev;
+       struct regmap *regmap;
+       int ret;
 
        /* Temporary until RPM clocks supported */
        clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000);
@@ -2463,7 +3037,25 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
-       return qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+       ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+       if (ret)
+               return ret;
+
+       regmap = dev_get_regmap(dev, NULL);
+       if (!regmap)
+               return -ENODEV;
+
+       /* Setup PLL18 static bits */
+       regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400);
+       regmap_write(regmap, 0x31b0, 0x3080);
+
+       /* Set GMAC footswitch sleep/wakeup values */
+       regmap_write(regmap, 0x3cb8, 8);
+       regmap_write(regmap, 0x3cd8, 8);
+       regmap_write(regmap, 0x3cf8, 8);
+       regmap_write(regmap, 0x3d18, 8);
+
+       return 0;
 }
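
The probe now finishes with raw fixups through the regmap that qcom_cc_probe() registered. regmap_update_bits() is a read-modify-write confined to the mask, so the 0x31a4 access above rewrites only bits 6 and up of the PLL18 register and leaves the low six bits alone, while the plain regmap_write() calls replace whole registers. Open-coded, the update is equivalent to this sketch (minus locking and cache handling):

#include <linux/regmap.h>

/* Sketch of regmap_update_bits(map, reg, mask, val): only the masked
 * bits change; everything else is preserved.
 */
static int update_bits_open_coded(struct regmap *map, unsigned int reg,
				  unsigned int mask, unsigned int val)
{
	unsigned int old;
	int ret;

	ret = regmap_read(map, reg, &old);
	if (ret)
		return ret;

	return regmap_write(map, reg, (old & ~mask) | (val & mask));
}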
 
 static int gcc_ipq806x_remove(struct platform_device *pdev)
index cc64400d41ace3005c8a878b4c6811b0506726f9..024b0f745035caee392ea24e3a75b1e75fe6a1c9 100644 (file)
@@ -1090,7 +1090,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 
        ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-                          MLX4_CMD_NATIVE);
+                          MLX4_CMD_WRAPPED);
        if (ret == -ENOMEM)
                pr_err("mcg table is full. Fail to register network rule.\n");
        else if (ret == -ENXIO)
@@ -1107,7 +1107,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
        int err;
        err = mlx4_cmd(dev, reg_id, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-                      MLX4_CMD_NATIVE);
+                      MLX4_CMD_WRAPPED);
        if (err)
                pr_err("Fail to detach network rule. registration id = 0x%llx\n",
                       reg_id);
@@ -2041,77 +2041,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
 
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-       char name[80];
-       int eq_per_port = 0;
-       int added_eqs = 0;
-       int total_eqs = 0;
-       int i, j, eq;
-
-       /* Legacy mode or comp_pool is not large enough */
-       if (dev->caps.comp_pool == 0 ||
-           dev->caps.num_ports > dev->caps.comp_pool)
-               return;
-
-       eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
-
-       /* Init eq table */
-       added_eqs = 0;
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-               added_eqs += eq_per_port;
-
-       total_eqs = dev->caps.num_comp_vectors + added_eqs;
+       int i, j, eq = 0, total_eqs = 0;
 
-       ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+       ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+                                 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
        if (!ibdev->eq_table)
                return;
 
-       ibdev->eq_added = added_eqs;
-
-       eq = 0;
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
-               for (j = 0; j < eq_per_port; j++) {
-                       snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-                                i, j, dev->persist->pdev->bus->name);
-                       /* Set IRQ for specific name (per ring) */
-                       if (mlx4_assign_eq(dev, name, NULL,
-                                          &ibdev->eq_table[eq])) {
-                               /* Use legacy (same as mlx4_en driver) */
-                               pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
-                               ibdev->eq_table[eq] =
-                                       (eq % dev->caps.num_comp_vectors);
-                       }
-                       eq++;
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+                    j++, total_eqs++) {
+                       if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
+                               continue;
+                       ibdev->eq_table[eq] = total_eqs;
+                       if (!mlx4_assign_eq(dev, i,
+                                           &ibdev->eq_table[eq]))
+                               eq++;
+                       else
+                               ibdev->eq_table[eq] = -1;
                }
        }
 
-       /* Fill the reset of the vector with legacy EQ */
-       for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
-               ibdev->eq_table[eq++] = i;
+       for (i = eq; i < dev->caps.num_comp_vectors;
+            ibdev->eq_table[i++] = -1)
+               ;
 
        /* Advertise the new number of EQs to clients */
-       ibdev->ib_dev.num_comp_vectors = total_eqs;
+       ibdev->ib_dev.num_comp_vectors = eq;
 }
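
Two idioms in the rewritten allocator are easy to misread: eq only advances when mlx4_assign_eq() succeeds, so a failed slot is overwritten by the next attempt, and the empty-bodied for loop pads every unclaimed tail slot with -1. A more conventional spelling of that tail fill, reusing the function's own variables:

/* Sketch: equivalent to the empty-bodied tail-fill loop above */
for (i = eq; i < dev->caps.num_comp_vectors; i++)
	ibdev->eq_table[i] = -1;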
 
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
        int i;
+       int total_eqs = ibdev->ib_dev.num_comp_vectors;
 
-       /* no additional eqs were added */
+       /* no eqs were allocated */
        if (!ibdev->eq_table)
                return;
 
        /* Reset the advertised EQ number */
-       ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+       ibdev->ib_dev.num_comp_vectors = 0;
 
-       /* Free only the added eqs */
-       for (i = 0; i < ibdev->eq_added; i++) {
-               /* Don't free legacy eqs if used */
-               if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-                       continue;
+       for (i = 0; i < total_eqs; i++)
                mlx4_release_eq(dev, ibdev->eq_table[i]);
-       }
 
        kfree(ibdev->eq_table);
+       ibdev->eq_table = NULL;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
index fce3934372a161680e4e4f2dd9716963e1178790..ef80e6c99a685bf0eb94a269e455746de7c74e49 100644 (file)
@@ -523,7 +523,6 @@ struct mlx4_ib_dev {
        struct mlx4_ib_iboe     iboe;
        int                     counters[MLX4_MAX_PORTS];
        int                    *eq_table;
-       int                     eq_added;
        struct kobject         *iov_parent;
        struct kobject         *ports_parent;
        struct kobject         *dev_ports_parent[MLX4_MFUNC_MAX];
index 10df386c63447c9757fa22bfc97c433b59ea3895..bce263b928211c3e2126b04f6ef00fbc41b33d03 100644 (file)
@@ -1,8 +1,6 @@
 config MLX5_INFINIBAND
        tristate "Mellanox Connect-IB HCA support"
-       depends on NETDEVICES && ETHERNET && PCI
-       select NET_VENDOR_MELLANOX
-       select MLX5_CORE
+       depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
        ---help---
          This driver provides low-level InfiniBand support for
          Mellanox Connect-IB PCI Express host channel adapters (HCAs).
index 2ee6b105197544abb2799e552b129d37eff53906..e2bea9ab93b3b81ece3482963aecf5dec2ef3160 100644 (file)
@@ -590,8 +590,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
        int err;
 
-       err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
-                            PAGE_SIZE * 2, &buf->buf);
+       err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
        if (err)
                return err;
 
@@ -754,7 +753,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                return ERR_PTR(-EINVAL);
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries > dev->mdev->caps.gen.max_cqes)
+       if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
                return ERR_PTR(-EINVAL);
 
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -921,7 +920,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
        int err;
        u32 fsel;
 
-       if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+       if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
                return -ENOSYS;
 
        in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1076,7 +1075,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int uninitialized_var(cqe_size);
        unsigned long flags;
 
-       if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+       if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
                pr_info("Firmware does not support resize CQ\n");
                return -ENOSYS;
        }
@@ -1085,7 +1084,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                return -EINVAL;
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries > dev->mdev->caps.gen.max_cqes + 1)
+       if (entries > (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) + 1)
                return -EINVAL;
 
        if (entries == ibcq->cqe + 1)
index 9cf9a37bb5ff9360303a0ea9197869b5fcfbaefc..f2d9e70818d73d6a38c36924724b25415cb95ced 100644 (file)
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
        packet_error = be16_to_cpu(out_mad->status);
 
-       dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
+       dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
                MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
index 57c9809e8b8774e8aac47806134216ef97c46883..9075649f30fc10505ad1a26899a69b863f71a12f 100644 (file)
@@ -66,15 +66,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
-       struct mlx5_general_caps *gen;
        int err = -ENOMEM;
        int max_rq_sg;
        int max_sq_sg;
-       u64 flags;
 
-       gen = &dev->mdev->caps.gen;
        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
@@ -96,18 +94,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;
-       flags = gen->flags;
-       if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
+
+       if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
-       if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
+       if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-       if (flags & MLX5_DEV_CAP_FLAG_APM)
+       if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
-       if (flags & MLX5_DEV_CAP_FLAG_XRC)
+       if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
-       if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
+       if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
@@ -116,7 +114,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
-       if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
+       if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 
        props->vendor_id           = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
@@ -126,37 +124,38 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
 
        props->max_mr_size         = ~0ull;
-       props->page_size_cap       = gen->min_page_sz;
-       props->max_qp              = 1 << gen->log_max_qp;
-       props->max_qp_wr           = gen->max_wqes;
-       max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-       max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
-               sizeof(struct mlx5_wqe_data_seg);
+       props->page_size_cap       = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+       props->max_qp              = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+       props->max_qp_wr           = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+       max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+                    sizeof(struct mlx5_wqe_data_seg);
+       max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+                    sizeof(struct mlx5_wqe_ctrl_seg)) /
+                    sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
-       props->max_cq              = 1 << gen->log_max_cq;
-       props->max_cqe             = gen->max_cqes - 1;
-       props->max_mr              = 1 << gen->log_max_mkey;
-       props->max_pd              = 1 << gen->log_max_pd;
-       props->max_qp_rd_atom      = 1 << gen->log_max_ra_req_qp;
-       props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
-       props->max_srq             = 1 << gen->log_max_srq;
-       props->max_srq_wr          = gen->max_srq_wqes - 1;
-       props->local_ca_ack_delay  = gen->local_ca_ack_delay;
+       props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+       props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+       props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+       props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+       props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+       props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+       props->max_srq             = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+       props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+       props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
-       props->local_ca_ack_delay  = gen->local_ca_ack_delay;
        props->atomic_cap          = IB_ATOMIC_NONE;
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
-       props->max_mcast_grp       = 1 << gen->log_max_mcg;
-       props->max_mcast_qp_attach = gen->max_qp_mcg;
+       props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+       props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+       if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
 #endif
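
The recurring 1 << MLX5_CAP_GEN(mdev, log_*) pattern is how the new capability layout encodes limits: firmware reports log2 of each maximum and the driver expands it, subtracting one where the field means a largest usable value rather than a count (max_cqe and max_srq_wr above). A worked sketch of that expansion:

#include <linux/types.h>

/* Sketch: expanding a log2-encoded firmware limit.  A reported
 * log_max_qp_sz of 16 advertises 1 << 16 = 65536 WQEs; "largest
 * value" style fields subtract one, as max_cqe does above.
 */
static u32 cap_log2_to_limit(u8 log_val, bool largest_value)
{
	u32 limit = 1U << log_val;

	return largest_value ? limit - 1 : limit;
}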
@@ -172,14 +171,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
-       struct mlx5_general_caps *gen;
        int ext_active_speed;
        int err = -ENOMEM;
 
-       gen = &dev->mdev->caps.gen;
-       if (port < 1 || port > gen->num_ports) {
+       if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
                mlx5_ib_warn(dev, "invalid port number %d\n", port);
                return -EINVAL;
        }
@@ -210,8 +208,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *)(out_mad->data + 20));
        props->gid_tbl_len      = out_mad->data[50];
-       props->max_msg_sz       = 1 << gen->log_max_msg;
-       props->pkey_tbl_len     = gen->port[port - 1].pkey_table_len;
+       props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+       props->pkey_tbl_len     = mdev->port_caps[port - 1].pkey_table_len;
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *)(out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *)(out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
@@ -238,7 +236,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 
        /* If reported active speed is QDR, check if is FDR-10 */
        if (props->active_speed == 4) {
-               if (gen->ext_port_cap[port - 1] &
+               if (mdev->port_caps[port - 1].ext_port_cap &
                    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                        init_query_mad(in_mad);
                        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -392,7 +390,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        struct mlx5_ib_alloc_ucontext_req_v2 req;
        struct mlx5_ib_alloc_ucontext_resp resp;
        struct mlx5_ib_ucontext *context;
-       struct mlx5_general_caps *gen;
        struct mlx5_uuar_info *uuari;
        struct mlx5_uar *uars;
        int gross_uuars;
@@ -403,7 +400,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        int i;
        size_t reqlen;
 
-       gen = &dev->mdev->caps.gen;
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
 
@@ -436,14 +432,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-       resp.qp_tab_size      = 1 << gen->log_max_qp;
-       resp.bf_reg_size      = gen->bf_reg_size;
-       resp.cache_line_size  = L1_CACHE_BYTES;
-       resp.max_sq_desc_sz = gen->max_sq_desc_sz;
-       resp.max_rq_desc_sz = gen->max_rq_desc_sz;
-       resp.max_send_wqebb = gen->max_wqes;
-       resp.max_recv_wr = gen->max_wqes;
-       resp.max_srq_recv_wr = gen->max_srq_wqes;
+       resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+       resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       resp.cache_line_size = L1_CACHE_BYTES;
+       resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+       resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+       resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+       resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+       resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
@@ -493,7 +489,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        mutex_init(&context->db_page_mutex);
 
        resp.tot_uuars = req.total_num_uuars;
-       resp.num_ports = gen->num_ports;
+       resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
        err = ib_copy_to_udata(udata, &resp,
                               sizeof(resp) - sizeof(resp.reserved));
        if (err)
@@ -895,11 +891,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
-       struct mlx5_general_caps *gen;
        int port;
 
-       gen = &dev->mdev->caps.gen;
-       for (port = 1; port <= gen->num_ports; port++)
+       for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
                mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -907,11 +901,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 {
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
-       struct mlx5_general_caps *gen;
        int err = -ENOMEM;
        int port;
 
-       gen = &dev->mdev->caps.gen;
        pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
        if (!pprops)
                goto out;
@@ -926,14 +918,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
                goto out;
        }
 
-       for (port = 1; port <= gen->num_ports; port++) {
+       for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
                err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
                if (err) {
-                       mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
+                       mlx5_ib_warn(dev, "query_port %d failed %d\n",
+                                    port, err);
                        break;
                }
-               gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
-               gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
+               dev->mdev->port_caps[port - 1].pkey_table_len =
+                                               dprops->max_pkeys;
+               dev->mdev->port_caps[port - 1].gid_table_len =
+                                               pprops->gid_tbl_len;
                mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
                            dprops->max_pkeys, pprops->gid_tbl_len);
        }
@@ -1207,8 +1202,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
-       dev->ib_dev.local_dma_lkey      = mdev->caps.gen.reserved_lkey;
-       dev->num_ports          = mdev->caps.gen.num_ports;
+       dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
+       dev->num_ports          = MLX5_CAP_GEN(mdev, num_ports);
        dev->ib_dev.phys_port_cnt     = dev->num_ports;
        dev->ib_dev.num_comp_vectors    =
                dev->mdev->priv.eq_table.num_comp_vectors;
@@ -1286,9 +1281,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
 
-       mlx5_ib_internal_query_odp_caps(dev);
+       mlx5_ib_internal_fill_odp_caps(dev);
 
-       if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
+       if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
index dff1cfcdf476cfed06d8835cd5316d234df09e1e..0c441add04649ae0733170c1f1406e3f5de878bc 100644 (file)
@@ -617,7 +617,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
 
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
                               struct mlx5_ib_pfault *pfault);
 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
@@ -631,9 +631,9 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end);
 
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-       return 0;
+       return;
 }
 
 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)                {}
index 71c5935838649e71a4a2f6b6cc16cb18f9a1cc16..bc9a0de897cb466d62d69dde0330ca96581953ca 100644 (file)
@@ -975,8 +975,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
        struct mlx5_ib_mr *mr;
        int inlen;
        int err;
-       bool pg_cap = !!(dev->mdev->caps.gen.flags &
-                        MLX5_DEV_CAP_FLAG_ON_DMND_PG);
+       bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
 
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
index 5099db08afd2c80c1b9049e3fc71fef4cfbb269b..aa8391e75385016bb11a2526f8660331193b7c4e 100644 (file)
@@ -109,40 +109,33 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
        ib_umem_odp_unmap_dma_pages(umem, start, end);
 }
 
-#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {        \
-       if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)  \
-               ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;       \
-} while (0)
-
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-       int err;
-       struct mlx5_odp_caps hw_caps;
        struct ib_odp_caps *caps = &dev->odp_caps;
 
        memset(caps, 0, sizeof(*caps));
 
-       if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-               return 0;
-
-       err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
-       if (err)
-               goto out;
+       if (!MLX5_CAP_GEN(dev->mdev, pg))
+               return;
 
        caps->general_caps = IB_ODP_SUPPORT;
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
-                              SEND);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              SEND);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              RECV);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              WRITE);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              READ);
-
-out:
-       return err;
+
+       if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+               caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+       return;
 }
 
 static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
index d35f62d4f4c58ecce848cfb2d0544500c62dbfd3..15fd485d1ad99b085cdf971bc8bb28c618c07c4d 100644 (file)
@@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
-       struct mlx5_general_caps *gen;
        int wqe_size;
        int wq_size;
 
-       gen = &dev->mdev->caps.gen;
        /* Sanity check RQ size before proceeding */
-       if (cap->max_recv_wr  > gen->max_wqes)
+       if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
                return -EINVAL;
 
        if (!has_rq) {
@@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
-                       if (wqe_size > gen->max_rq_desc_sz) {
+                       if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
-                                           gen->max_rq_desc_sz);
+                                           MLX5_CAP_GEN(dev->mdev,
+                                                        max_wqe_sz_rq));
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
@@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
 {
-       struct mlx5_general_caps *gen;
        int wqe_size;
        int wq_size;
 
-       gen = &dev->mdev->caps.gen;
        if (!attr->cap.max_send_wr)
                return 0;
 
@@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
        if (wqe_size < 0)
                return wqe_size;
 
-       if (wqe_size > gen->max_sq_desc_sz) {
+       if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-                           wqe_size, gen->max_sq_desc_sz);
+                           wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }
 
@@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-       if (qp->sq.wqe_cnt > gen->max_wqes) {
+       if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-                           qp->sq.wqe_cnt, gen->max_wqes);
+                           qp->sq.wqe_cnt,
+                           1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_ib_create_qp *ucmd)
 {
-       struct mlx5_general_caps *gen;
        int desc_sz = 1 << qp->sq.wqe_shift;
 
-       gen = &dev->mdev->caps.gen;
-       if (desc_sz > gen->max_sq_desc_sz) {
+       if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-                            desc_sz, gen->max_sq_desc_sz);
+                            desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }
 
@@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
        qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-       if (qp->sq.wqe_cnt > gen->max_wqes) {
+       if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-                            qp->sq.wqe_cnt, gen->max_wqes);
+                            qp->sq.wqe_cnt,
+                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -EINVAL;
        }
 
@@ -768,7 +765,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
-       err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+       err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
@@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
 {
        struct mlx5_ib_resources *devr = &dev->devr;
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_create_qp ucmd;
        int inlen = sizeof(*in);
        int err;
 
        mlx5_ib_odp_create_qp(qp);
 
-       gen = &dev->mdev->caps.gen;
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
 
        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-               if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+               if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
                        return -EINVAL;
                } else {
@@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
        if (pd) {
                if (pd->uobject) {
+                       __u32 max_wqes =
+                               1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
                        if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
                            ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
-                       if (ucmd.sq_wqe_count > gen->max_wqes) {
+                       if (ucmd.sq_wqe_count > max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-                                           ucmd.sq_wqe_count, gen->max_wqes);
+                                           ucmd.sq_wqe_count, max_wqes);
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1226,7 +1224,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
 {
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_qp *qp;
        u16 xrcdn = 0;
@@ -1244,12 +1241,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                }
                dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }
-       gen = &dev->mdev->caps.gen;
 
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
-               if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
+               if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
@@ -1356,9 +1352,6 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-       struct mlx5_general_caps *gen;
-
-       gen = &dev->mdev->caps.gen;
        if (rate == IB_RATE_PORT_CURRENT) {
                return 0;
        } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1366,7 +1359,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-                        gen->stat_rate_support))
+                        MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
                        --rate;
        }
 
@@ -1377,10 +1370,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
                         u32 path_flags, const struct ib_qp_attr *attr)
 {
-       struct mlx5_general_caps *gen;
        int err;
 
-       gen = &dev->mdev->caps.gen;
        path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
        path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1391,9 +1382,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
        path->rlid      = cpu_to_be16(ah->dlid);
 
        if (ah->ah_flags & IB_AH_GRH) {
-               if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+               if (ah->grh.sgid_index >=
+                   dev->mdev->port_caps[port - 1].gid_table_len) {
                        pr_err("sgid_index (%u) too large. max is %d\n",
-                              ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+                              ah->grh.sgid_index,
+                              dev->mdev->port_caps[port - 1].gid_table_len);
                        return -EINVAL;
                }
                path->grh_mlid |= 1 << 7;
@@ -1570,7 +1563,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_qp_context *context;
-       struct mlx5_general_caps *gen;
        struct mlx5_modify_qp_mbox_in *in;
        struct mlx5_ib_pd *pd;
        enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1579,7 +1571,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        int mlx5_st;
        int err;
 
-       gen = &dev->mdev->caps.gen;
        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;
@@ -1619,7 +1610,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                        err = -EINVAL;
                        goto out;
                }
-               context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
+               context->mtu_msgmax = (attr->path_mtu << 5) |
+                                     (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
        }
 
        if (attr_mask & IB_QP_DEST_QPN)
@@ -1777,11 +1769,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
-       struct mlx5_general_caps *gen;
        int err = -EINVAL;
        int port;
 
-       gen = &dev->mdev->caps.gen;
        mutex_lock(&qp->mutex);
 
        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1793,21 +1783,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
 
        if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > gen->num_ports))
+           (attr->port_num == 0 ||
+            attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
                goto out;
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-               if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
+               if (attr->pkey_index >=
+                   dev->mdev->port_caps[port - 1].pkey_table_len)
                        goto out;
        }
 
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
+           attr->max_rd_atomic >
+           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
                goto out;
 
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-           attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
+           attr->max_dest_rd_atomic >
+           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
                goto out;
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -3009,7 +3003,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
        ib_ah_attr->port_num      = path->port;
 
        if (ib_ah_attr->port_num == 0 ||
-           ib_ah_attr->port_num > dev->caps.gen.num_ports)
+           ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
                return;
 
        ib_ah_attr->sl = path->sl & 0xf;
@@ -3135,12 +3129,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                          struct ib_udata *udata)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_xrcd *xrcd;
        int err;
 
-       gen = &dev->mdev->caps.gen;
-       if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
+       if (!MLX5_CAP_GEN(dev->mdev, xrc))
                return ERR_PTR(-ENOSYS);
 
        xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
index 02d77a29764d5e1ab925423b64bdb20157fbc781..e8e8e942fa4a96674f9da0a1437d960369c920d6 100644 (file)
@@ -165,7 +165,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                return err;
        }
 
-       if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+       if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
@@ -236,7 +236,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                                  struct ib_udata *udata)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_srq *srq;
        int desc_size;
        int buf_size;
@@ -245,13 +244,13 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        int uninitialized_var(inlen);
        int is_xrc;
        u32 flgs, xrcdn;
+       __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
-       gen = &dev->mdev->caps.gen;
        /* Sanity check SRQ size before proceeding */
-       if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
+       if (init_attr->attr.max_wr >= max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
-                           gen->max_srq_wqes);
+                           max_srq_wqes);
                return ERR_PTR(-EINVAL);
        }
 
index 084a50a555de57af781a7f786ee6c6a911765de2..909ad7a0d48088fcaa8295dfdd3b3617becf8d12 100644 (file)
@@ -524,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
        dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
-                                struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+                                            struct bcm_sysport_cb *cb)
 {
        struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
+       struct sk_buff *skb, *rx_skb;
        dma_addr_t mapping;
-       int ret;
 
-       cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
-       if (!cb->skb) {
+       /* Allocate a new SKB for a new packet */
+       skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+       if (!skb) {
+               priv->mib.alloc_rx_buff_failed++;
                netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
-               return -ENOMEM;
+               return NULL;
        }
 
-       mapping = dma_map_single(kdev, cb->skb->data,
+       mapping = dma_map_single(kdev, skb->data,
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-       ret = dma_mapping_error(kdev, mapping);
-       if (ret) {
+       if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
-               bcm_sysport_free_cb(cb);
+               dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
-               return ret;
+               return NULL;
        }
 
-       dma_unmap_addr_set(cb, dma_addr, mapping);
-       dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+       /* Grab the current SKB on the ring */
+       rx_skb = cb->skb;
+       if (likely(rx_skb))
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
-       priv->rx_bd_assign_index++;
-       priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
-       priv->rx_bd_assign_ptr = priv->rx_bds +
-               (priv->rx_bd_assign_index * DESC_SIZE);
+       /* Put the new SKB on the ring */
+       cb->skb = skb;
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dma_desc_set_addr(priv, cb->bd_addr, mapping);
 
        netif_dbg(priv, rx_status, ndev, "RX refill\n");
 
-       return 0;
+       /* Return the current SKB to the caller */
+       return rx_skb;
 }
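
The rewritten refill never leaves an unmapped hole on the ring: the replacement SKB is allocated and DMA-mapped first, and only then is the full SKB unmapped and handed back; on allocation failure the old SKB stays in place and the caller drops the packet instead. Reduced to its shape, a sketch of that swap discipline:

/* Sketch: commit to taking the old buffer only once its replacement
 * is in place, so every ring slot always owns a usable buffer.
 */
struct ring_slot {
	void *buf;
};

static void *ring_slot_swap(struct ring_slot *s, void *new_buf)
{
	void *old;

	if (!new_buf)		/* alloc failed: keep the old buffer */
		return NULL;

	old = s->buf;		/* may be NULL on the first fill */
	s->buf = new_buf;
	return old;		/* caller consumes or frees it */
}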
 
 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 {
        struct bcm_sysport_cb *cb;
-       int ret = 0;
+       struct sk_buff *skb;
        unsigned int i;
 
        for (i = 0; i < priv->num_rx_bds; i++) {
-               cb = &priv->rx_cbs[priv->rx_bd_assign_index];
-               if (cb->skb)
-                       continue;
-
-               ret = bcm_sysport_rx_refill(priv, cb);
-               if (ret)
-                       break;
+               cb = &priv->rx_cbs[i];
+               skb = bcm_sysport_rx_refill(priv, cb);
+               if (skb)
+                       dev_kfree_skb(skb);
+               if (!cb->skb)
+                       return -ENOMEM;
        }
 
-       return ret;
+       return 0;
 }
 
 /* Poll the hardware for up to budget packets to process */
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                                        unsigned int budget)
 {
-       struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
        unsigned int processed = 0, to_process;
        struct bcm_sysport_cb *cb;
@@ -592,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        unsigned int p_index;
        u16 len, status;
        struct bcm_rsb *rsb;
-       int ret;
 
        /* Determine how much we should process since last call */
        p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -610,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 
        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
-               skb = cb->skb;
+               skb = bcm_sysport_rx_refill(priv, cb);
 
-               processed++;
-               priv->rx_read_ptr++;
-
-               if (priv->rx_read_ptr == priv->num_rx_bds)
-                       priv->rx_read_ptr = 0;
 
                /* We do not have a backing SKB, so we do not have a
                 * corresponding DMA mapping for this incoming packet since
@@ -627,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                        netif_err(priv, rx_err, ndev, "out of memory!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
-                       goto refill;
+                       goto next;
                }
 
-               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
                /* Extract the Receive Status Block prepended */
                rsb = (struct bcm_rsb *)skb->data;
                len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -644,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);
 
+               if (unlikely(len > RX_BUF_LENGTH)) {
+                       netif_err(priv, rx_status, ndev, "oversized packet\n");
+                       ndev->stats.rx_length_errors++;
+                       ndev->stats.rx_errors++;
+                       dev_kfree_skb_any(skb);
+                       goto next;
+               }
+
                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
-                       bcm_sysport_free_cb(cb);
-                       goto refill;
+                       dev_kfree_skb_any(skb);
+                       goto next;
                }
 
                if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -658,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                                ndev->stats.rx_over_errors++;
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
-                       bcm_sysport_free_cb(cb);
-                       goto refill;
+                       dev_kfree_skb_any(skb);
+                       goto next;
                }
 
                skb_put(skb, len);
@@ -686,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                ndev->stats.rx_bytes += len;
 
                napi_gro_receive(&priv->napi, skb);
-refill:
-               ret = bcm_sysport_rx_refill(priv, cb);
-               if (ret)
-                       priv->mib.alloc_rx_buff_failed++;
+next:
+               processed++;
+               priv->rx_read_ptr++;
+
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
        }
 
        return processed;
@@ -1330,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
 
 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
 {
+       struct bcm_sysport_cb *cb;
        u32 reg;
        int ret;
+       int i;
 
        /* Initialize SW view of the RX ring */
        priv->num_rx_bds = NUM_RX_DESC;
        priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
-       priv->rx_bd_assign_ptr = priv->rx_bds;
-       priv->rx_bd_assign_index = 0;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
        priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1347,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
                return -ENOMEM;
        }
 
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = priv->rx_cbs + i;
+               cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+       }
+
        ret = bcm_sysport_alloc_rx_bufs(priv);
        if (ret) {
                netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
index 42a4b4a0bc148f5e508e94bab58ee512ed74d128..f28bf545d7f466527b76a2b2d98f8f9e31602cc0 100644 (file)
@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
 
        /* Receive queue */
        void __iomem            *rx_bds;
-       void __iomem            *rx_bd_assign_ptr;
-       unsigned int            rx_bd_assign_index;
        struct bcm_sysport_cb   *rx_cbs;
        unsigned int            num_rx_bds;
        unsigned int            rx_read_ptr;
index 6365fb4242be88e2aed0c718ec7aa4c2bc250a19..fc3d8e3ee807d439647202a5b086eca67ecadebc 100644 (file)
@@ -4,7 +4,7 @@
 
 config NET_VENDOR_CAVIUM
        tristate "Cavium ethernet drivers"
-       depends on PCI
+       depends on PCI && 64BIT
        ---help---
          Enable support for the Cavium ThunderX Network Interface
          Controller (NIC). The NIC provides the controller and DMA
index 33c35d3b7420fa9ae545aea4ebd5160036914718..aca9cef50d81a521f21aa74a93ebaebd0a27d793 100644 (file)
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
        I40E_FD_STAT_SB,
+       I40E_FD_STAT_ATR_TUNNEL,
        I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id)  \
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+                       (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
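
The per-PF statistic index is plain strided arithmetic: each PF owns I40E_FD_STAT_PF_COUNT consecutive counters, and a stat's global index is pf_id * count + stat. A standalone C check of that layout (enum values mirror the defines above; the printout is only illustrative):

	#include <stdio.h>

	enum fd_stat_idx { FD_STAT_ATR, FD_STAT_SB, FD_STAT_ATR_TUNNEL, FD_STAT_PF_COUNT };

	#define FD_STAT_PF_IDX(pf_id)		((pf_id) * FD_STAT_PF_COUNT)
	#define FD_ATR_TUNNEL_STAT_IDX(pf_id)	(FD_STAT_PF_IDX(pf_id) + FD_STAT_ATR_TUNNEL)

	int main(void)
	{
		/* PF 2 owns indices 6, 7, 8 -> the tunnel counter lands at 8 */
		printf("%d\n", FD_ATR_TUNNEL_STAT_IDX(2));
		return 0;
	}
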
 
 struct i40e_fdir_filter {
        struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
 
        struct hlist_head fdir_filter_list;
        u16 fdir_pf_active_filters;
-       u16 fd_sb_cnt_idx;
-       u16 fd_atr_cnt_idx;
        unsigned long fd_flush_timestamp;
        u32 fd_flush_cnt;
        u32 fd_add_err;
index 4cbaaeb902c47737274010d2070d6fb95c5637ce..9a68c65b17ea03bd00642aab5fe3b2e5a5066765 100644 (file)
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
        I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
        I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+       I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
        I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
        /* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
        return *data;
 }
 
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+       struct i40e_vf *vfs = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+                       return true;
+       return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
                           struct ethtool_test *eth_test, u64 *data)
 {
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
                netif_info(pf, drv, netdev, "offline testing starting\n");
 
                set_bit(__I40E_TESTING, &pf->state);
+
+               if (i40e_active_vfs(pf)) {
+                       dev_warn(&pf->pdev->dev,
+                                "Please take active VFs offline and restart the adapter before running NIC diagnostics\n");
+                       data[I40E_ETH_TEST_REG]         = 1;
+                       data[I40E_ETH_TEST_EEPROM]      = 1;
+                       data[I40E_ETH_TEST_INTR]        = 1;
+                       data[I40E_ETH_TEST_LOOPBACK]    = 1;
+                       data[I40E_ETH_TEST_LINK]        = 1;
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+                       clear_bit(__I40E_TESTING, &pf->state);
+                       goto skip_ol_tests;
+               }
+
                /* If the device is online then take it offline */
                if (if_running)
                        /* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
                data[I40E_ETH_TEST_LOOPBACK] = 0;
        }
 
+skip_ol_tests:
+
        netif_info(pf, drv, netdev, "testing finished\n");
 }
 
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        input->pctype = 0;
        input->dest_vsi = vsi->id;
        input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-       input->cnt_index  = pf->fd_sb_cnt_idx;
+       input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
        input->flow_type = fsp->flow_type;
        input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
index 1803afeef23ede81ed906b5400e9f3164234a4de..c8b621e0e7cda622c5a0fa9e795a898e53886cf5 100644 (file)
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first; that function already checks all supported valid EOF values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
        case FC_EOF_A:
                return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
        default:
-               /* FIXME: still returns 0 */
-               pr_err("Unrecognized EOF %x\n", eof);
-               return 0;
+               /* A supported, valid EOF must already have been checked by
+                * a prior call to i40e_fcoe_eof_is_supported(), so this
+                * default case should never be hit.
+                */
+               WARN_ON(1);
+               return -EINVAL;
        }
 }
 
index a54c14491e3b6a4dbc168980dd44d399b6766487..0a3e928a2b0014b62ecbbd4d27efd1b7607816dd 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 
        dcb_cfg = &hw->local_dcbx_config;
 
-       /* See if DCB enabled with PFC TC */
-       if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-           !(dcb_cfg->pfc.pfcenable)) {
+       /* Collect Link XOFF stats when PFC is disabled */
+       if (!dcb_cfg->pfc.pfcenable) {
                i40e_update_link_xoff_rx(pf);
                return;
        }
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           &osd->rx_jabber, &nsd->rx_jabber);
 
        /* FDIR stats */
-       i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_atr_match, &nsd->fd_atr_match);
-       i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_sb_match, &nsd->fd_sb_match);
+       i40e_stat_update32(hw,
+                     I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+                     pf->stat_offsets_loaded,
+                     &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                pf->fd_add_err = pf->fd_atr_cnt = 0;
                if (pf->fd_tcp_rule > 0) {
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-                       dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
        }
        /* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
                }
        }
 }
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 
                if (!(time_after(jiffies, min_flush_time)) &&
                    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-                       dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
                        disable_atr = true;
                }
 
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
                        if (!disable_atr)
                                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                        clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-                       dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
                }
        }
 }
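
Throughout this patch, routine flow-director messages move behind the I40E_DEBUG_FD bit in hw.debug_mask, so FD housekeeping no longer fills the log unless the operator opts in. The gating idiom is just a bitmask test before the print; a small C model (the mask value and helper name are made up for illustration):

	#include <stdio.h>

	#define DEBUG_FD	0x1000U		/* hypothetical debug category bit */

	static unsigned int debug_mask;		/* set from a module param / debug knob */

	#define dbg_info(fmt, ...)						\
		do {								\
			if (DEBUG_FD & debug_mask)				\
				fprintf(stderr, fmt, ##__VA_ARGS__);		\
		} while (0)

	int main(void)
	{
		dbg_info("suppressed: mask is clear\n");
		debug_mask |= DEBUG_FD;
		dbg_info("printed: mask is set\n");
		return 0;
	}
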
@@ -7676,12 +7686,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-               /* Setup a counter for fd_atr per PF */
-               pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
                if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-                       /* Setup a counter for fd_sb per PF */
-                       pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
                } else {
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7771,7 +7777,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
                pf->fdir_pf_active_filters = 0;
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-               dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                /* if ATR was auto disabled it can be re-enabled. */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
index 0b4a7be2c7d2b21f2286732218c0cbb2f2bb61b2..cc82a7ffacb06326f259ff3e8c1f0fc4ec6e9838 100644 (file)
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-       /* set the timestamp */
-       tx_buf->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.
         */
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        if (add) {
                pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                }
        } else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                  (pf->fd_tcp_rule - 1) : 0;
                if (pf->fd_tcp_rule == 0) {
                        pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
                }
        }
 
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                            !(pf->auto_disable_flags &
                                     I40E_FLAG_FD_SB_ENABLED)) {
-                               dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+                               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                       dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
                        }
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
-               dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                        "  time_stamp           <%lx>\n"
-                        "  jiffies              <%lx>\n",
-                        tx_ring->tx_bi[i].time_stamp, jiffies);
 
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
-                       /* TODO: shouldn't we increment a counter indicating the
-                        * drop?
-                        */
                        continue;
                }
 
@@ -1923,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 flags, __be16 protocol)
+                    u32 tx_flags, __be16 protocol)
 {
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1952,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!tx_ring->atr_sample_rate)
                return;
 
-       /* snag network header to get L4 type and address */
-       hdr.network = skb_network_header(skb);
+       if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+               return;
 
-       /* Currently only IPv4/IPv6 with TCP is supported */
-       if (protocol == htons(ETH_P_IP)) {
-               if (hdr.ipv4->protocol != IPPROTO_TCP)
-                       return;
+       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+               /* snag network header to get L4 type and address */
+               hdr.network = skb_network_header(skb);
 
-               /* access ihl as a u8 to avoid unaligned access on ia64 */
-               hlen = (hdr.network[0] & 0x0F) << 2;
-       } else if (protocol == htons(ETH_P_IPV6)) {
-               if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+               /* Currently only IPv4/IPv6 with TCP is supported;
+                * access ihl as a u8 to avoid an unaligned access on ia64.
+                */
+               if (tx_flags & I40E_TX_FLAGS_IPV4)
+                       hlen = (hdr.network[0] & 0x0F) << 2;
+               else if (protocol == htons(ETH_P_IPV6))
+                       hlen = sizeof(struct ipv6hdr);
+               else
                        return;
-
-               hlen = sizeof(struct ipv6hdr);
        } else {
-               return;
+               hdr.network = skb_inner_network_header(skb);
+               hlen = skb_inner_network_header_len(skb);
        }
 
+       /* Currently only IPv4/IPv6 with TCP is supported.
+        * Note: tx_flags gets modified to reflect the inner protocols by
+        * the i40e_tx_enable_csum() function when encapsulation is enabled.
+        */
+       if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+           (hdr.ipv4->protocol != IPPROTO_TCP))
+               return;
+       else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+                (hdr.ipv6->nexthdr != IPPROTO_TCP))
+               return;
+
        th = (struct tcphdr *)(hdr.network + hlen);
 
        /* Due to lack of space, no more new filters can be programmed */
@@ -2020,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
        dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-       dtype_cmd |=
-               ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-               I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       else
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
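
Either way, the chosen counter index is packed into the CNTINDEX field of the filter descriptor's quad-word 1 by shifting it to the field offset and masking off any overflow bits. A self-contained C sketch of that field packing (shift and mask values are placeholders, not the hardware ones):

	#include <stdint.h>
	#include <stdio.h>

	#define QW1_CNTINDEX_SHIFT	20			/* placeholder offset */
	#define QW1_CNTINDEX_MASK	(0x1FFULL << QW1_CNTINDEX_SHIFT)

	static uint64_t pack_cnt_index(uint64_t qw1, uint32_t idx)
	{
		/* shift the index into position, then clip it to the field width */
		return qw1 | (((uint64_t)idx << QW1_CNTINDEX_SHIFT) & QW1_CNTINDEX_MASK);
	}

	int main(void)
	{
		printf("0x%llx\n", (unsigned long long)pack_cnt_index(0, 8));
		return 0;
	}
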
@@ -2043,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * otherwise returns 0 to indicate the flags have been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring,
-                              u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring,
                                      u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring,
+                                            u32 *flags)
 #endif
 {
        __be16 protocol = skb->protocol;
@@ -2117,16 +2130,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                   u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                   u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                   u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct ipv6hdr *ipv6h;
@@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
@@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
@@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-               if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
-               } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                       if (tx_flags & I40E_TX_FLAGS_TSO)
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
 
@@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
-                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                       tx_flags |= I40E_TX_FLAGS_IPV6;
+                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
        } else {
                network_hdr_len = skb_network_header_len(skb);
@@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        }
 
        /* Enable IP checksum offloads */
-       if (tx_flags & I40E_TX_FLAGS_IPV4) {
+       if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
-               if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
@@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-       } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+       } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@ -2394,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * Returns 0 if stop is not needed
  **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2476,13 +2487,13 @@ linearize_chk_done:
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                struct i40e_tx_buffer *first, u32 tx_flags,
-                const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                              struct i40e_tx_buffer *first, u32 tx_flags,
+                              const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
        unsigned int data_len = skb->data_len;
@@ -2588,9 +2599,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                 tx_ring->queue_index),
                             first->bytecount);
 
-       /* set the timestamp */
-       first->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -2643,11 +2651,11 @@ dma_error:
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring)
 #endif
 {
        unsigned int f;
@@ -2709,7 +2717,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+       tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
 
        if (tso < 0)
@@ -2735,7 +2743,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-               i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
 
index 4b0b8102cdc39c2529f49c18d6b1cbc61c48c341..0dc48dc9ca61922a4b11bd0b7624f07c153c603a 100644 (file)
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
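
I40E_TX_FLAGS_VXLAN_TUNNEL claims the next free bit (1 << 10), safely below the VLAN field in the top 16 bits. Because i40e_tx_enable_csum() now takes tx_flags by pointer, the tunnel flag it sets while parsing the headers becomes visible to the later i40e_atr() call on the same transmit path. A compact C model of that out-parameter flag flow (names and bit positions are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define TX_FLAGS_IPV4		(1u << 1)
	#define TX_FLAGS_IPV6		(1u << 2)
	#define TX_FLAGS_VXLAN_TUNNEL	(1u << 10)

	/* csum setup discovers encapsulation and records it for the caller */
	static void tx_enable_csum(int encap_udp, uint32_t *tx_flags)
	{
		if (encap_udp)
			*tx_flags |= TX_FLAGS_VXLAN_TUNNEL;
	}

	int main(void)
	{
		uint32_t tx_flags = TX_FLAGS_IPV4;

		tx_enable_csum(1, &tx_flags);
		/* the ATR path can now pick the tunnel counter index */
		printf("tunnel? %d\n", !!(tx_flags & TX_FLAGS_VXLAN_TUNNEL));
		return 0;
	}
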
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
-       unsigned long time_stamp;
        union {
                struct sk_buff *skb;
                void *raw_buf;
index 568e855da0f3e16b29a81702766070974dd3da91..9a5a75b1e2bc053b50bec13adde2fd3aa4848595 100644 (file)
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
        /* flow director stats */
        u64 fd_atr_match;
        u64 fd_sb_match;
+       u64 fd_atr_tunnel_match;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 78d1c4ff565e8853473b70c3827e6a727ff3ce1c..4653b6e653c9470da76e9b35be2bd3767da7bf5b 100644 (file)
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
        int pre_existing_vfs = pci_num_vf(pdev);
        int err = 0;
 
+       if (test_bit(__I40E_TESTING, &pf->state)) {
+               dev_warn(&pdev->dev,
+                        "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+               err = -EPERM;
+               goto err_out;
+       }
+
        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                i40e_free_vfs(pf);
index 3ef23091439f72d90879acd3f820f15c71206127..ec7e220757db1e283cb385f58f0b5955f50e2b25 100644 (file)
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
-               dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                        "  time_stamp           <%lx>\n"
-                        "  jiffies              <%lx>\n",
-                        tx_ring->tx_bi[i].time_stamp, jiffies);
 
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
-                       /* TODO: shouldn't we increment a counter indicating the
-                        * drop?
-                        */
                        continue;
                }
 
@@ -1350,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  * @flags:   the tx flags to be set
@@ -1361,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
  * Returns an error code to indicate the frame should be dropped upon error;
  * otherwise returns 0 to indicate the flags have been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring,
-                                     u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                              struct i40e_ring *tx_ring,
+                                              u32 *flags)
 {
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
@@ -1406,16 +1399,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                   u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                   u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                   u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct ipv6hdr *ipv6h;
@@ -1466,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
@@ -1487,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
@@ -1496,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-               if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
-               } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                       if (tx_flags & I40E_TX_FLAGS_TSO)
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
 
@@ -1519,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
-                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                       tx_flags |= I40E_TX_FLAGS_IPV6;
+                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
 
 
@@ -1532,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        }
 
        /* Enable IP checksum offloads */
-       if (tx_flags & I40E_TX_FLAGS_IPV4) {
+       if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
-               if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
@@ -1546,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-       } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+       } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@ -1675,7 +1666,44 @@ linearize_chk_done:
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       /* Memory barrier before checking head and tail */
+       smp_mb();
+
+       /* Check again in case another CPU has just made room available. */
+       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! - use start_queue because it doesn't call schedule */
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       ++tx_ring->tx_stats.restart_queue;
+       return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __i40evf_maybe_stop_tx(tx_ring, size);
+}
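
The two-level check keeps the transmit fast path cheap: the inline first level only reads the free-descriptor count, and only when space looks short does the second level stop the queue, issue a full barrier, and re-check, restarting the queue if a concurrent completion freed room in the window. A single-threaded C sketch of the control flow (the barrier only matters with a real concurrent cleaner; the counts are illustrative):

	#include <stdio.h>

	static int desc_unused = 3;	/* stand-in for I40E_DESC_UNUSED() */
	static int queue_stopped;

	static int second_level_stop(int size)
	{
		queue_stopped = 1;
		__sync_synchronize();		/* models smp_mb() before the re-check */
		if (desc_unused < size)
			return -1;		/* really out of room: stay stopped */
		queue_stopped = 0;		/* a completion raced us: restart */
		return 0;
	}

	static int maybe_stop(int size)
	{
		if (desc_unused >= size)	/* fast path: one comparison */
			return 0;
		return second_level_stop(size);
	}

	int main(void)
	{
		printf("%d\n", maybe_stop(8));	/* stops: only 3 descriptors free */
		return 0;
	}
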
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
  * @skb:      send buffer
  * @first:    first buffer info buffer to use
@@ -1684,9 +1712,9 @@ linearize_chk_done:
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                       struct i40e_tx_buffer *first, u32 tx_flags,
-                       const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                                struct i40e_tx_buffer *first, u32 tx_flags,
+                                const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
@@ -1792,9 +1820,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                 tx_ring->queue_index),
                             first->bytecount);
 
-       /* set the timestamp */
-       first->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -1811,8 +1836,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        tx_ring->next_to_use = i;
 
+       i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
        /* notify HW of packet */
-       writel(i, tx_ring->tail);
+       if (!skb->xmit_more ||
+           netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                  tx_ring->queue_index)))
+               writel(i, tx_ring->tail);
 
        return;
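
The tail write (the doorbell) is now skipped when the stack signals that more frames are queued behind this one (skb->xmit_more) and the queue is still running, so a burst of packets costs one MMIO write instead of one per packet; the write is forced whenever the queue has stopped, since no later frame would flush it. A userspace C model of the batching decision (the doorbell function stands in for the writel):

	#include <stdbool.h>
	#include <stdio.h>

	static void ring_doorbell(unsigned int tail)
	{
		printf("doorbell -> %u\n", tail);	/* models writel(i, tail reg) */
	}

	static void xmit_one(unsigned int tail, bool more, bool queue_stopped)
	{
		/* defer the MMIO write while more packets are on the way */
		if (!more || queue_stopped)
			ring_doorbell(tail);
	}

	int main(void)
	{
		xmit_one(1, true,  false);	/* deferred */
		xmit_one(2, true,  false);	/* deferred */
		xmit_one(3, false, false);	/* last of the burst: one doorbell */
		return 0;
	}
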
 
@@ -1834,44 +1863,7 @@ dma_error:
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       /* Memory barrier before checking head and tail */
-       smp_mb();
-
-       /* Check again in a case another CPU has just made room available. */
-       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-               return -EBUSY;
-
-       /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       ++tx_ring->tx_stats.restart_queue;
-       return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-               return 0;
-       return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1879,8 +1871,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * there are not enough descriptors available in this ring, since we need at least
  * one descriptor.
  **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+                                              struct i40e_ring *tx_ring)
 {
        unsigned int f;
        int count = 0;
@@ -1895,7 +1887,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
        count += TXD_USE_COUNT(skb_headlen(skb));
-       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+       if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
@@ -1921,11 +1913,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tso;
-       if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+       if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
                return NETDEV_TX_BUSY;
 
        /* prepare the xmit flags */
-       if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+       if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
 
        /* obtain protocol of skb */
@@ -1940,7 +1932,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+       tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
 
        if (tso < 0)
@@ -1961,17 +1953,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-               i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
 
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
 
-       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                   td_cmd, td_offset);
-
-       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+       i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+                     td_cmd, td_offset);
 
        return NETDEV_TX_OK;
 
index 1e49bb1fbac1f0de59444626cc9645b72aeac0da..e7a34f899f2cbb8150495a31e0690a95e90efc1a 100644 (file)
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
-       unsigned long time_stamp;
        union {
                struct sk_buff *skb;
                void *raw_buf;
index ec9d83a9337944f20f934cf6217467ee61ec7303..c463ec41579c708ffbe606ea0b31ca0485ea4c58 100644 (file)
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
        /* flow director stats */
        u64 fd_atr_match;
        u64 fd_sb_match;
+       u64 fd_atr_tunnel_match;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 9f6fb19062a00c43005d39e03c8037bf58f93488..9a1d0f142b091c3cecb06ce65ffac1d3c8e6bf1a 100644 (file)
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_fdir_filter *input;
        union ixgbe_atr_input mask;
+       u8 queue;
        int err;
 
        if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                return -EOPNOTSUPP;
 
-       /*
-        * Don't allow programming if the action is a queue greater than
-        * the number of online Rx queues.
+       /* ring_cookie is masked into a set of queues and ixgbe pools, or
+        * we use the drop index.
         */
-       if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-           (fsp->ring_cookie >= adapter->num_rx_queues))
-               return -EINVAL;
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+               queue = IXGBE_FDIR_DROP_QUEUE;
+       } else {
+               u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+               u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+               if (!vf && (ring >= adapter->num_rx_queues))
+                       return -EINVAL;
+               else if (vf &&
+                        ((vf > adapter->num_vfs) ||
+                          ring >= adapter->num_rx_queues_per_pool))
+                       return -EINVAL;
+
+               /* Map the ring onto the absolute queue index */
+               if (!vf)
+                       queue = adapter->rx_ring[ring]->reg_idx;
+               else
+                       queue = ((vf - 1) *
+                               adapter->num_rx_queues_per_pool) + ring;
+       }
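
The cookie's low 32 bits carry the ring and the next byte carries the VF (assuming the standard ethtool_get_flow_spec_ring()/_vf() encoding); VF 0 means the PF itself, and VF n > 0 selects a ring within that VF's pool. Working the mapping for a hypothetical adapter with 4 queues per pool:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed cookie layout: ring in bits 0-31, VF in bits 32-39 */
	static uint32_t cookie_ring(uint64_t c) { return (uint32_t)c; }
	static uint8_t  cookie_vf(uint64_t c)   { return (uint8_t)(c >> 32); }

	int main(void)
	{
		uint64_t cookie = ((uint64_t)2 << 32) | 1;	/* VF 2, ring 1 */
		unsigned int queues_per_pool = 4;
		uint8_t vf = cookie_vf(cookie);
		uint32_t ring = cookie_ring(cookie);

		/* (vf - 1) * pool size + ring -> absolute queue 5 */
		printf("queue %u\n", (vf - 1) * queues_per_pool + ring);
		return 0;
	}
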
 
        /* Don't allow indexes to exist outside of available space */
        if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
        /* program filters to filter memory */
        err = ixgbe_fdir_write_perfect_filter_82599(hw,
-                               &input->filter, input->sw_idx,
-                               (input->action == IXGBE_FDIR_DROP_QUEUE) ?
-                               IXGBE_FDIR_DROP_QUEUE :
-                               adapter->rx_ring[input->action]->reg_idx);
+                               &input->filter, input->sw_idx, queue);
        if (err)
                goto err_out_w_lock;
 
index e71f31387ac6c73b843fef3f023cf6150327fe09..3348e646db705f41ff1cb3923d4d1533aea80e2d 100644 (file)
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        u64 mtt_addr;
        int err;
 
-       if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+       if (vector >= dev->caps.num_comp_vectors)
                return -EINVAL;
 
        cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
                cq_context->flags  |= cpu_to_be32(1 << 19);
 
        cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-       cq_context->comp_eqn        = priv->eq_table.eq[vector].eqn;
+       cq_context->comp_eqn        = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
        cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 
        mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        init_completion(&cq->free);
        cq->comp = mlx4_add_cq_to_tasklet;
        cq->tasklet_ctx.priv =
-               &priv->eq_table.eq[cq->vector].tasklet_ctx;
+               &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
        INIT_LIST_HEAD(&cq->tasklet_ctx.list);
 
 
-       cq->irq = priv->eq_table.eq[cq->vector].irq;
+       cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
        return 0;
 
 err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
        if (err)
                mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
-       synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+       synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+       if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+           priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+               synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, cq->cqn);
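
Completion vectors are now indices into a shared EQ table rather than raw EQ slots, so every lookup goes through MLX4_CQ_TO_EQ_VECTOR(); the extra synchronize_irq() covers CQs that were folded onto the async EQ's interrupt. A rough model of such a mapping, assuming the async EQ occupies slot 0 and completion vectors follow it (the actual macro may differ):

	#include <stdio.h>

	#define MLX4_EQ_ASYNC			0
	/* hypothetical mapping: completion vector n sits one slot past the async EQ */
	#define MLX4_CQ_TO_EQ_VECTOR(vector)	((vector) + 1)

	int main(void)
	{
		printf("comp vector 0 -> eq slot %d\n", MLX4_CQ_TO_EQ_VECTOR(0));
		return 0;
	}
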
index 22da4d0d0f05511dfc89a360e6df6871e96b6e7a..63769df872a42be81784eff223ef41a9e5a639e6 100644 (file)
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
        cq->ring = ring;
        cq->is_tx = mode;
+       cq->vector = mdev->dev->caps.num_comp_vectors;
 
        /* Allocate HW buffers on provided NUMA node.
         * dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
        int err = 0;
        char name[25];
        int timestamp_en = 0;
-       struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
-               priv->dev->rx_cpu_rmap;
-#else
-               NULL;
-#endif
+       bool assigned_eq = false;
 
        cq->dev = mdev->pndev[priv->port];
        cq->mcq.set_ci_db  = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
        memset(cq->buf, 0, cq->buf_size);
 
        if (cq->is_tx == RX) {
-               if (mdev->dev->caps.comp_pool) {
-                       if (!cq->vector) {
-                               sprintf(name, "%s-%d", priv->dev->name,
-                                       cq->ring);
-                               /* Set IRQ for specific name (per ring) */
-                               if (mlx4_assign_eq(mdev->dev, name, rmap,
-                                                  &cq->vector)) {
-                                       cq->vector = (cq->ring + 1 + priv->port)
-                                           % mdev->dev->caps.num_comp_vectors;
-                                       mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
-                                                 name);
-                               }
-
+               if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+                                            cq->vector)) {
+                       cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+                       err = mlx4_assign_eq(mdev->dev, priv->port,
+                                            &cq->vector);
+                       if (err) {
+                               snprintf(name, sizeof(name), "%s-%d",
+                                        priv->dev->name, cq->ring);
+                               mlx4_err(mdev, "Failed assigning an EQ to %s\n",
+                                        name);
+                               goto free_eq;
                        }
-               } else {
-                       cq->vector = (cq->ring + 1 + priv->port) %
-                               mdev->dev->caps.num_comp_vectors;
+
+                       assigned_eq = true;
                }
 
                cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                            &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
                            cq->vector, 0, timestamp_en);
        if (err)
-               return err;
+               goto free_eq;
 
        cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
        cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
                               NAPI_POLL_WEIGHT);
        } else {
-               struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
-               err = irq_set_affinity_hint(cq->mcq.irq,
-                                           ring->affinity_mask);
-               if (err)
-                       mlx4_warn(mdev, "Failed setting affinity hint\n");
-
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
                napi_hash_add(&cq->napi);
        }
@@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
        napi_enable(&cq->napi);
 
        return 0;
+
+free_eq:
+       if (assigned_eq)
+               mlx4_release_eq(mdev->dev, cq->vector);
+       cq->vector = mdev->dev->caps.num_comp_vectors;
+       return err;
 }
 
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 
        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-       if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+       if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+           cq->is_tx == RX)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
-       }
        cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
@@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        if (!cq->is_tx) {
                napi_hash_del(&cq->napi);
                synchronize_rcu();
-               irq_set_affinity_hint(cq->mcq.irq, NULL);
        }
        netif_napi_del(&cq->napi);
 
index 32f5ec7374723d1315f4234f77b12ffbe5adcfe0..455cecae5aa48f7c2e8ebdd8532350ff46779341 100644 (file)
@@ -1958,7 +1958,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
        int i;
 
 #ifdef CONFIG_RFS_ACCEL
-       free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
        priv->dev->rx_cpu_rmap = NULL;
 #endif
 
@@ -2016,11 +2015,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
        }
 
 #ifdef CONFIG_RFS_ACCEL
-       if (priv->mdev->dev->caps.comp_pool) {
-               priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
-               if (!priv->dev->rx_cpu_rmap)
-                       goto err;
-       }
+       priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
 #endif
 
        return 0;
index 2a77a6b191216b19059c89fa8ad386252684806c..35f726c17e48c80bdadfc07ba6a43974619c6938 100644 (file)
@@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
        struct mlx4_dev *dev = mdev->dev;
 
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-               if (!dev->caps.comp_pool)
-                       num_of_eqs = max_t(int, MIN_RX_RINGS,
-                                          min_t(int,
-                                                dev->caps.num_comp_vectors,
-                                                DEF_RX_RINGS));
-               else
-                       num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
-                                          dev->caps.comp_pool/
-                                          dev->caps.num_ports) - 1;
+               num_of_eqs = max_t(int, MIN_RX_RINGS,
+                                  min_t(int,
+                                        mlx4_get_eqs_per_port(mdev->dev, i),
+                                        DEF_RX_RINGS));
 
                num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
                        min_t(int, num_of_eqs,
index 80bcd648c5e0e3541929a1c8fede71115a5c7097..11168825a9fab457c380c1a663b15dab2500b43e 100644 (file)
@@ -221,6 +221,20 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
        slave_event(dev, slave, eqe);
 }
 
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+       int hint_err;
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+       if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+               return;
+
+       hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+       if (hint_err)
+               mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+
 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
 {
        struct mlx4_eqe eqe;
@@ -895,8 +909,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
-       return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
-                dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+       return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+               dev->caps.reserved_eqs / 4 + 1;
 }
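
The division by four above reflects that a single UAR page covers the doorbells of four EQs. A hedged worked example of the arithmetic, using assumed capability values rather than real hardware numbers:

/* Assume num_comp_vectors = 8 and reserved_eqs = 4.  The driver then
 * uses EQ indices 4..12 (eight completion EQs plus one async EQ), and
 *
 *   (8 + 1 + 4) / 4 - 4 / 4 + 1  =  3 - 1 + 1  =  3
 *
 * matches the three UAR pages (covering EQs 4-7, 8-11 and 12-15) that
 * span that index range.
 */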
 
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1085,32 +1099,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 static void mlx4_free_irqs(struct mlx4_dev *dev)
 {
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int     i, vec;
+       int     i;
 
        if (eq_table->have_irq)
                free_irq(dev->persist->pdev->irq, dev);
 
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
+                       free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+                       irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }
 
-       for (i = 0; i < dev->caps.comp_pool; i++) {
-               /*
-                * Freeing the assigned irq's
-                * all bits should be 0, but we need to validate
-                */
-               if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       /* NO need protecting*/
-                       vec = dev->caps.num_comp_vectors + 1 + i;
-                       free_irq(priv->eq_table.eq[vec].irq,
-                                &priv->eq_table.eq[vec]);
-               }
-       }
-
-
        kfree(eq_table->irq_names);
 }
 
@@ -1191,76 +1194,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        }
 
        priv->eq_table.irq_names =
-               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
-                                            dev->caps.comp_pool),
+               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
-               goto err_out_bitmap;
+               goto err_out_clr_int;
        }
 
-       for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
-               err = mlx4_create_eq(dev, dev->caps.num_cqs -
-                                         dev->caps.reserved_cqs +
-                                         MLX4_NUM_SPARE_EQE,
-                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-                                    &priv->eq_table.eq[i]);
-               if (err) {
-                       --i;
-                       goto err_out_unmap;
-               }
-       }
-
-       err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-                            (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
-                            &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-       if (err)
-               goto err_out_comp;
-
-       /*if additional completion vectors poolsize is 0 this loop will not run*/
-       for (i = dev->caps.num_comp_vectors + 1;
-             i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+               if (i == MLX4_EQ_ASYNC) {
+                       err = mlx4_create_eq(dev,
+                                            MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+                                            0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+               } else {
+#ifdef CONFIG_RFS_ACCEL
+                       struct mlx4_eq  *eq = &priv->eq_table.eq[i];
+                       int port = find_first_bit(eq->actv_ports.ports,
+                                                 dev->caps.num_ports) + 1;
+
+                       if (port <= dev->caps.num_ports) {
+                               struct mlx4_port_info *info =
+                                       &mlx4_priv(dev)->port[port];
+
+                               if (!info->rmap) {
+                                       info->rmap = alloc_irq_cpu_rmap(
+                                               mlx4_get_eqs_per_port(dev, port));
+                                       if (!info->rmap) {
+                                               mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+                                               err = -ENOMEM;
+                                               goto err_out_unmap;
+                                       }
+                               }
 
-               err = mlx4_create_eq(dev, dev->caps.num_cqs -
-                                         dev->caps.reserved_cqs +
-                                         MLX4_NUM_SPARE_EQE,
-                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-                                    &priv->eq_table.eq[i]);
-               if (err) {
-                       --i;
-                       goto err_out_unmap;
+                               err = irq_cpu_rmap_add(
+                                       info->rmap, eq->irq);
+                               if (err)
+                                       mlx4_warn(dev, "Failed adding irq rmap\n");
+                       }
+#endif
+                       err = mlx4_create_eq(dev, dev->caps.num_cqs -
+                                                 dev->caps.reserved_cqs +
+                                                 MLX4_NUM_SPARE_EQE,
+                                            (dev->flags & MLX4_FLAG_MSI_X) ?
+                                            i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+                                            eq);
                }
+               if (err)
+                       goto err_out_unmap;
        }
 
-
        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;
 
-               for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
-                       if (i < dev->caps.num_comp_vectors) {
-                               snprintf(priv->eq_table.irq_names +
-                                        i * MLX4_IRQNAME_SIZE,
-                                        MLX4_IRQNAME_SIZE,
-                                        "mlx4-comp-%d@pci:%s", i,
-                                        pci_name(dev->persist->pdev));
-                       } else {
-                               snprintf(priv->eq_table.irq_names +
-                                        i * MLX4_IRQNAME_SIZE,
-                                        MLX4_IRQNAME_SIZE,
-                                        "mlx4-async@pci:%s",
-                                        pci_name(dev->persist->pdev));
-                       }
+               snprintf(priv->eq_table.irq_names +
+                        MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+                        MLX4_IRQNAME_SIZE,
+                        "mlx4-async@pci:%s",
+                        pci_name(dev->persist->pdev));
+               eq_name = priv->eq_table.irq_names +
+                       MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
 
-                       eq_name = priv->eq_table.irq_names +
-                                 i * MLX4_IRQNAME_SIZE;
-                       err = request_irq(priv->eq_table.eq[i].irq,
-                                         mlx4_msi_x_interrupt, 0, eq_name,
-                                         priv->eq_table.eq + i);
-                       if (err)
-                               goto err_out_async;
+               err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+                                 mlx4_msi_x_interrupt, 0, eq_name,
+                                 priv->eq_table.eq + MLX4_EQ_ASYNC);
+               if (err)
+                       goto err_out_unmap;
 
-                       priv->eq_table.eq[i].have_irq = 1;
-               }
+               priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
@@ -1269,36 +1269,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
-                       goto err_out_async;
+                       goto err_out_unmap;
 
                priv->eq_table.have_irq = 1;
        }
 
        err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-                         priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+                         priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
 
-       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-               eq_set_ci(&priv->eq_table.eq[i], 1);
+       /* arm ASYNC eq */
+       eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
 
        return 0;
 
-err_out_async:
-       mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
-       i = dev->caps.num_comp_vectors - 1;
-
 err_out_unmap:
-       while (i >= 0) {
-               mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-               --i;
+       while (i >= 0)
+               mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               if (mlx4_priv(dev)->port[i].rmap) {
+                       free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+                       mlx4_priv(dev)->port[i].rmap = NULL;
+               }
        }
+#endif
+       mlx4_free_irqs(dev);
+
+err_out_clr_int:
        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);
-       mlx4_free_irqs(dev);
 
 err_out_bitmap:
        mlx4_unmap_uar(dev);
@@ -1316,11 +1318,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
        int i;
 
        mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
-                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+                   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 
+#ifdef CONFIG_RFS_ACCEL
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               if (mlx4_priv(dev)->port[i].rmap) {
+                       free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+                       mlx4_priv(dev)->port[i].rmap = NULL;
+               }
+       }
+#endif
        mlx4_free_irqs(dev);
 
-       for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
        if (!mlx4_is_slave(dev))
@@ -1371,87 +1381,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 
        /* Return to default */
        mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+                   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        return err;
 }
 EXPORT_SYMBOL(mlx4_test_interrupts);
 
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-                  int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+       if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+           (vector == MLX4_EQ_ASYNC))
+               return false;
+
+       return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       unsigned int i;
+       unsigned int sum = 0;
+
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+               sum += !!test_bit(port - 1,
+                                 priv->eq_table.eq[i].actv_ports.ports);
+
+       return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+       if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+               return -EINVAL;
+
+       return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+                               dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
 {
+       return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
 
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
        struct mlx4_priv *priv = mlx4_priv(dev);
-       int vec = 0, err = 0, i;
+       int err = 0, i = 0;
+       u32 min_ref_count_val = (u32)-1;
+       int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+       int *prequested_vector = NULL;
 
        mutex_lock(&priv->msix_ctl.pool_lock);
-       for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
-               if (~priv->msix_ctl.pool_bm & 1ULL << i) {
-                       priv->msix_ctl.pool_bm |= 1ULL << i;
-                       vec = dev->caps.num_comp_vectors + 1 + i;
-                       snprintf(priv->eq_table.irq_names +
-                                       vec * MLX4_IRQNAME_SIZE,
-                                       MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
-                       if (rmap) {
-                               err = irq_cpu_rmap_add(rmap,
-                                                      priv->eq_table.eq[vec].irq);
-                               if (err)
-                                       mlx4_warn(dev, "Failed adding irq rmap\n");
+       if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+           (requested_vector >= 0) &&
+           (requested_vector != MLX4_EQ_ASYNC)) {
+               if (test_bit(port - 1,
+                            priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+                       prequested_vector = &requested_vector;
+               } else {
+                       struct mlx4_eq *eq;
+
+                       for (i = 1; i < port;
+                            requested_vector += mlx4_get_eqs_per_port(dev, i++))
+                               ;
+
+                       eq = &priv->eq_table.eq[requested_vector];
+                       if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+                           test_bit(port - 1, eq->actv_ports.ports)) {
+                               prequested_vector = &requested_vector;
                        }
-#endif
-                       err = request_irq(priv->eq_table.eq[vec].irq,
-                                         mlx4_msi_x_interrupt, 0,
-                                         &priv->eq_table.irq_names[vec<<5],
-                                         priv->eq_table.eq + vec);
-                       if (err) {
-                               /*zero out bit by fliping it*/
-                               priv->msix_ctl.pool_bm ^= 1 << i;
-                               vec = 0;
-                               continue;
-                               /*we dont want to break here*/
+               }
+       }
+
+       if (!prequested_vector) {
+               requested_vector = -1;
+               for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+                    i++) {
+                       struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+                       if (min_ref_count_val > eq->ref_count &&
+                           test_bit(port - 1, eq->actv_ports.ports)) {
+                               min_ref_count_val = eq->ref_count;
+                               requested_vector = i;
                        }
+               }
+
+               if (requested_vector < 0) {
+                       err = -ENOSPC;
+                       goto err_unlock;
+               }
+
+               prequested_vector = &requested_vector;
+       }
+
+       if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+           dev->flags & MLX4_FLAG_MSI_X) {
+               set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+               snprintf(priv->eq_table.irq_names +
+                        *prequested_vector * MLX4_IRQNAME_SIZE,
+                        MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+                        *prequested_vector, dev_name(&dev->persist->pdev->dev));
+
+               err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+                                 mlx4_msi_x_interrupt, 0,
+                                 &priv->eq_table.irq_names[*prequested_vector << 5],
+                                 priv->eq_table.eq + *prequested_vector);
 
-                       eq_set_ci(&priv->eq_table.eq[vec], 1);
+               if (err) {
+                       clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+                       *prequested_vector = -1;
+               } else {
+#if defined(CONFIG_SMP)
+                       mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+                       eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+                       priv->eq_table.eq[*prequested_vector].have_irq = 1;
                }
        }
+
+       if (!err && *prequested_vector >= 0)
+               priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
        mutex_unlock(&priv->msix_ctl.pool_lock);
 
-       if (vec) {
-               *vector = vec;
-       } else {
+       if (!err && *prequested_vector >= 0)
+               *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+       else
                *vector = 0;
-               err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
-       }
+
        return err;
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
 
-       return priv->eq_table.eq[vec].irq;
+       return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
 }
 EXPORT_SYMBOL(mlx4_eq_get_irq);
 
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       /*bm index*/
-       int i = vec - dev->caps.num_comp_vectors - 1;
-
-       if (likely(i >= 0)) {
-               /*sanity check , making sure were not trying to free irq's
-                 Belonging to a legacy EQ*/
-               mutex_lock(&priv->msix_ctl.pool_lock);
-               if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       free_irq(priv->eq_table.eq[vec].irq,
-                                &priv->eq_table.eq[vec]);
-                       priv->msix_ctl.pool_bm &= ~(1ULL << i);
-               }
-               mutex_unlock(&priv->msix_ctl.pool_lock);
-       }
+       int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
 
+       mutex_lock(&priv->msix_ctl.pool_lock);
+       priv->eq_table.eq[eq_vec].ref_count--;
+
+       /* Once an EQ has been allocated, we don't release it, because it
+        * might be bound to a cpu_rmap.
+        */
+       mutex_unlock(&priv->msix_ctl.pool_lock);
 }
 EXPORT_SYMBOL(mlx4_release_eq);
 
index 70d33f6e2a41a39bbf513eb0b4155c2409074d10..0dbd70427221c5ebda3d248028699b97b000a36e 100644 (file)
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
        if (err) {
                if (dev->flags & MLX4_FLAG_MSI_X) {
                        mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
-                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                                 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
                        mlx4_warn(dev, "Trying again without MSI-X\n");
                } else {
                        mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
-                                priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                                priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
                        mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                }
 
@@ -2481,14 +2481,45 @@ err_uar_table_free:
        return err;
 }
 
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+       int requested_cpu = 0;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_eq *eq;
+       int off = 0;
+       int i;
+
+       if (eqn > dev->caps.num_comp_vectors)
+               return -EINVAL;
+
+       for (i = 1; i < port; i++)
+               off += mlx4_get_eqs_per_port(dev, i);
+
+       requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+       /* A negative value means the EQs are shared and this call comes
+        * from the second port.
+        */
+       if (requested_cpu < 0)
+               return 0;
+
+       eq = &priv->eq_table.eq[eqn];
+
+       if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+       return 0;
+}
+
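
To illustrate the requested_cpu computation above, here is a hedged walk-through with assumed values (two ports, eight completion vectors split evenly, MLX4_EQ_ASYNC == 0); the numbers are illustrative, not taken from the patch:

/* Assumed split: EQs 1-4 serve port 1, EQs 5-8 serve port 2.
 *
 *   port 1, eqn 1: off = 0, requested_cpu = 1 - 0 - 1 = 0
 *   port 1, eqn 4: off = 0, requested_cpu = 4 - 0 - 1 = 3
 *   port 2, eqn 5: off = 4, requested_cpu = 5 - 4 - 1 = 0
 *
 * Each port's first EQ is therefore hinted at CPU 0, its second at
 * CPU 1, and so on, independently of the other port.
 */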
 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct msix_entry *entries;
        int i;
+       int port = 0;
 
        if (msi_x) {
-               int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+               int nreq = dev->caps.num_ports * num_online_cpus() + 1;
 
                nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                             nreq);
@@ -2503,20 +2534,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
                                             nreq);
 
-               if (nreq < 0) {
+               if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
                        kfree(entries);
                        goto no_msi;
-               } else if (nreq < MSIX_LEGACY_SZ +
-                          dev->caps.num_ports * MIN_MSIX_P_PORT) {
-                       /*Working in legacy mode , all EQ's shared*/
-                       dev->caps.comp_pool           = 0;
-                       dev->caps.num_comp_vectors = nreq - 1;
-               } else {
-                       dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
-                       dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
                }
-               for (i = 0; i < nreq; ++i)
-                       priv->eq_table.eq[i].irq = entries[i].vector;
+               /* One vector is reserved for events (the asynchronous EQ) */
+               dev->caps.num_comp_vectors = nreq - 1;
+
+               priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+               bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+                           dev->caps.num_ports);
+
+               for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+                       if (i == MLX4_EQ_ASYNC)
+                               continue;
+
+                       priv->eq_table.eq[i].irq =
+                               entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+                       if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+                               bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+                                           dev->caps.num_ports);
+                               /* We don't set affinity hint when there
+                                * aren't enough EQs
+                                */
+                       } else {
+                               set_bit(port,
+                                       priv->eq_table.eq[i].actv_ports.ports);
+                               if (mlx4_init_affinity_hint(dev, port + 1, i))
+                                       mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+                                                 i);
+                       }
+                       /* We divide the EQs evenly between the two ports.
+                        * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+                        * is the number of EQs per port (i.e. eqs_per_port).
+                        * Ideally we would simply test
+                        * (i + 1) % eqs_per_port == 0, but since there is an
+                        * asynchronous EQ we have to skip over it by comparing
+                        * this condition to !!((i + 1) > MLX4_EQ_ASYNC).
+                        */
+                       if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+                           ((i + 1) %
+                            (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+                           !!((i + 1) > MLX4_EQ_ASYNC))
+                               /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+                                * everything is shared anyway.
+                                */
+                               port++;
+               }
 
                dev->flags |= MLX4_FLAG_MSI_X;
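
A hedged walk-through of the port++ condition above, again with assumed values (num_comp_vectors = 8, num_ports = 2, so eqs_per_port = 4 and MLX4_EQ_ASYNC = 0):

/* The loop skips i == 0 (the async EQ), and the right-hand side of the
 * comparison is !!((i + 1) > 0) == 1, so port++ fires when
 * (i + 1) % 4 == 1:
 *
 *   i = 1..4: bit for port 0 is set; at i = 4, (4 + 1) % 4 == 1 -> port++
 *   i = 5..8: bit for port 1 is set; at i = 8, (8 + 1) % 4 == 1 -> port++
 *
 * i.e. the eight completion EQs end up split 4/4 between the two ports.
 */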
 
@@ -2526,10 +2592,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 
 no_msi:
        dev->caps.num_comp_vectors = 1;
-       dev->caps.comp_pool        = 0;
 
-       for (i = 0; i < 2; ++i)
+       BUG_ON(MLX4_EQ_ASYNC >= 2);
+       for (i = 0; i < 2; ++i) {
                priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+               if (i != MLX4_EQ_ASYNC) {
+                       bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+                                   dev->caps.num_ports);
+               }
+       }
 }
 
 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2665,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
        device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
        device_remove_file(&info->dev->persist->pdev->dev,
                           &info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(info->rmap);
+       info->rmap = NULL;
+#endif
 }
 
 static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -3024,7 +3099,7 @@ slave_start:
        if (err)
                goto err_master_mfunc;
 
-       priv->msix_ctl.pool_bm = 0;
+       bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
        mutex_init(&priv->msix_ctl.pool_lock);
 
        mlx4_enable_msi_x(dev);
@@ -3046,7 +3121,6 @@ slave_start:
            !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                dev->caps.num_comp_vectors = 1;
-               dev->caps.comp_pool        = 0;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }
index 502d3dd2c888528e71af1cbf1ed276b10d058c81..f424900d23a65d8ad549927ae52bc6d262f9d37c 100644 (file)
@@ -287,6 +287,12 @@ struct mlx4_icm_table {
 #define MLX4_CQE_SIZE_MASK_STRIDE      0x3
 #define MLX4_EQE_SIZE_MASK_STRIDE      0x30
 
+#define MLX4_EQ_ASYNC                  0
+#define MLX4_EQ_TO_CQ_VECTOR(vector)   ((vector) - \
+                                        !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector)   ((vector) + \
+                                        !!((int)(vector) >= MLX4_EQ_ASYNC))
+
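+
With MLX4_EQ_ASYNC fixed at 0, these macros simply shift the completion-vector numbering past the async EQ's slot in the EQ table; a small sanity check of the mapping:

/* CQ completion vector:  0  1  2  3 ...
 * EQ table index:        1  2  3  4 ...
 *
 * MLX4_CQ_TO_EQ_VECTOR(0) == 1 and MLX4_EQ_TO_CQ_VECTOR(1) == 0, while
 * the async EQ at table index 0 is never handed out as a completion
 * vector (MLX4_EQ_TO_CQ_VECTOR(0) == -1).
 */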
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
@@ -391,6 +397,9 @@ struct mlx4_eq {
        struct mlx4_buf_list   *page_list;
        struct mlx4_mtt         mtt;
        struct mlx4_eq_tasklet  tasklet_ctx;
+       struct mlx4_active_ports actv_ports;
+       u32                     ref_count;
+       cpumask_var_t           affinity_mask;
 };
 
 struct mlx4_slave_eqe {
@@ -808,6 +817,7 @@ struct mlx4_port_info {
        struct mlx4_vlan_table  vlan_table;
        struct mlx4_roce_gid_table gid_table;
        int                     base_qpn;
+       struct cpu_rmap         *rmap;
 };
 
 struct mlx4_sense {
@@ -818,7 +828,7 @@ struct mlx4_sense {
 };
 
 struct mlx4_msix_ctl {
-       u64             pool_bm;
+       DECLARE_BITMAP(pool_bm, MAX_MSIX);
        struct mutex    pool_lock;
 };
 
index d021f079f181b06bb6ec73250ea8493ad87d1cee..edd8fd69ec9a8d2133fcb27c44cca13c73551fa1 100644 (file)
@@ -338,7 +338,7 @@ struct mlx4_en_cq {
        struct napi_struct      napi;
        int size;
        int buf_size;
-       unsigned vector;
+       int vector;
        enum cq_type is_tx;
        u16 moder_time;
        u16 moder_cnt;
index 15ec0818165882cfa36c7a76dd15ec5c9a37b6ac..ab48386bfefcd4ea18c3173cc151af0100a016f1 100644 (file)
@@ -3973,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
        return 0;
 }
 
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+                                        struct _rule_hw *eth_header)
+{
+       if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+           is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+               struct mlx4_net_trans_rule_hw_eth *eth =
+                       (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+               struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+               bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+                       next_rule->rsvd == 0;
+
+               if (last_rule)
+                       ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+       }
+}
+
 /*
  * In case of missing eth header, append eth header with a MAC address
  * assigned to the VF.
@@ -4125,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        rule_header = (struct _rule_hw *)(ctrl + 1);
        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
+       if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+               handle_eth_header_mcast_prio(ctrl, rule_header);
+
+       if (slave == dev->caps.function)
+               goto execute;
+
        switch (header_id) {
        case MLX4_NET_TRANS_RULE_ID_ETH:
                if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4151,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                goto err_put;
        }
 
+execute:
        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
                           vhcr->in_modifier, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
index 8ff57e8e3e91601bc503e5f501ac2ef1da956296..0d7aef040fb0878a6717643cd0ccec42280dfd7b 100644 (file)
@@ -3,6 +3,18 @@
 #
 
 config MLX5_CORE
-       tristate
+       tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
        depends on PCI
        default n
+       ---help---
+         Core driver for low level functionality of the ConnectX-4 and
+         Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+       bool "Mellanox Technologies ConnectX-4 Ethernet support"
+       depends on MLX5_INFINIBAND=n && NETDEVICES && ETHERNET && PCI && MLX5_CORE
+       default n
+       ---help---
+         Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+         Ethernet and InfiniBand support in ConnectX-4 are currently mutually
+         exclusive.
index 105780bb980b051e9dafaaa060ee51d41aa7e3ba..87e9e606596a6fb1e56b34529f347d497184cfea 100644 (file)
@@ -3,3 +3,6 @@ obj-$(CONFIG_MLX5_CORE)         += mlx5_core.o
 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
                health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
                mad.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o transobj.o \
+               en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+               en_txrx.o
index ac0f7bf4be958bef168c0281f05108f6287304f4..0715b497511f6c861f5ac027341960fdc0acfab5 100644 (file)
 #include "mlx5_core.h"
 
 /* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
-                  struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
 {
        dma_addr_t t;
 
        buf->size = size;
-       if (size <= max_direct) {
-               buf->nbufs        = 1;
-               buf->npages       = 1;
-               buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-               buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-                                                       size, &t, GFP_KERNEL);
-               if (!buf->direct.buf)
-                       return -ENOMEM;
-
-               buf->direct.map = t;
-
-               while (t & ((1 << buf->page_shift) - 1)) {
-                       --buf->page_shift;
-                       buf->npages *= 2;
-               }
-       } else {
-               int i;
-
-               buf->direct.buf  = NULL;
-               buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-               buf->npages      = buf->nbufs;
-               buf->page_shift  = PAGE_SHIFT;
-               buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-                                          GFP_KERNEL);
-               if (!buf->page_list)
-                       return -ENOMEM;
-
-               for (i = 0; i < buf->nbufs; i++) {
-                       buf->page_list[i].buf =
-                               dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                   &t, GFP_KERNEL);
-                       if (!buf->page_list[i].buf)
-                               goto err_free;
-
-                       buf->page_list[i].map = t;
-               }
-
-               if (BITS_PER_LONG == 64) {
-                       struct page **pages;
-                       pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
-                       if (!pages)
-                               goto err_free;
-                       for (i = 0; i < buf->nbufs; i++)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               }
-       }
+       buf->npages       = 1;
+       buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
+       buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
+                                               size, &t, GFP_KERNEL);
+       if (!buf->direct.buf)
+               return -ENOMEM;
 
-       return 0;
+       buf->direct.map = t;
 
-err_free:
-       mlx5_buf_free(dev, buf);
+       while (t & ((1 << buf->page_shift) - 1)) {
+               --buf->page_shift;
+               buf->npages *= 2;
+       }
 
-       return -ENOMEM;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
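
The alignment loop at the end of mlx5_buf_alloc() shrinks page_shift until the reported page size matches the buffer's actual DMA alignment, doubling npages at each step. A hedged example with an assumed DMA address and 4 KB PAGE_SIZE:

/* For size = 16 KB, get_order(16K) = 2, so page_shift starts at
 * 12 + 2 = 14.  If dma_zalloc_coherent() happened to return
 * t = ...0x2000 (only 8 KB aligned):
 *
 *   t & ((1 << 14) - 1) != 0  ->  page_shift = 13, npages = 2
 *   t & ((1 << 13) - 1) == 0  ->  done
 *
 * so the buffer is described to the HCA as two 8 KB pages rather than
 * one 16 KB page.
 */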
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 {
-       int i;
-
-       if (buf->nbufs == 1)
-               dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
-                                 buf->direct.map);
-       else {
-               if (BITS_PER_LONG == 64)
-                       vunmap(buf->direct.buf);
-
-               for (i = 0; i < buf->nbufs; i++)
-                       if (buf->page_list[i].buf)
-                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                 buf->page_list[i].buf,
-                                                 buf->page_list[i].map);
-               kfree(buf->page_list);
-       }
+       dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+                         buf->direct.map);
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
        int i;
 
        for (i = 0; i < buf->npages; i++) {
-               if (buf->nbufs == 1)
-                       addr = buf->direct.map + (i << buf->page_shift);
-               else
-                       addr = buf->page_list[i].map;
+               addr = buf->direct.map + (i << buf->page_shift);
 
                pas[i] = cpu_to_be64(addr);
        }
index e3273faf4568945cb494e6598dbc013e61b11919..75ff58dc1ff5f9d9af725f7e5e3285e338b1be8c 100644 (file)
@@ -75,25 +75,6 @@ enum {
        MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
 };
 
-enum {
-       MLX5_CMD_STAT_OK                        = 0x0,
-       MLX5_CMD_STAT_INT_ERR                   = 0x1,
-       MLX5_CMD_STAT_BAD_OP_ERR                = 0x2,
-       MLX5_CMD_STAT_BAD_PARAM_ERR             = 0x3,
-       MLX5_CMD_STAT_BAD_SYS_STATE_ERR         = 0x4,
-       MLX5_CMD_STAT_BAD_RES_ERR               = 0x5,
-       MLX5_CMD_STAT_RES_BUSY                  = 0x6,
-       MLX5_CMD_STAT_LIM_ERR                   = 0x8,
-       MLX5_CMD_STAT_BAD_RES_STATE_ERR         = 0x9,
-       MLX5_CMD_STAT_IX_ERR                    = 0xa,
-       MLX5_CMD_STAT_NO_RES_ERR                = 0xf,
-       MLX5_CMD_STAT_BAD_INP_LEN_ERR           = 0x50,
-       MLX5_CMD_STAT_BAD_OUTP_LEN_ERR          = 0x51,
-       MLX5_CMD_STAT_BAD_QP_STATE_ERR          = 0x10,
-       MLX5_CMD_STAT_BAD_PKT_ERR               = 0x30,
-       MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR    = 0x40,
-};
-
 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
                                           struct mlx5_cmd_msg *in,
                                           struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
        case MLX5_CMD_OP_ARM_RQ:
                return "ARM_RQ";
 
-       case MLX5_CMD_OP_RESIZE_SRQ:
-               return "RESIZE_SRQ";
+       case MLX5_CMD_OP_CREATE_XRC_SRQ:
+               return "CREATE_XRC_SRQ";
+
+       case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+               return "DESTROY_XRC_SRQ";
+
+       case MLX5_CMD_OP_QUERY_XRC_SRQ:
+               return "QUERY_XRC_SRQ";
+
+       case MLX5_CMD_OP_ARM_XRC_SRQ:
+               return "ARM_XRC_SRQ";
 
        case MLX5_CMD_OP_ALLOC_PD:
                return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
        case MLX5_CMD_OP_ATTACH_TO_MCG:
                return "ATTACH_TO_MCG";
 
-       case MLX5_CMD_OP_DETACH_FROM_MCG:
-               return "DETACH_FROM_MCG";
+       case MLX5_CMD_OP_DETTACH_FROM_MCG:
+               return "DETTACH_FROM_MCG";
 
        case MLX5_CMD_OP_ALLOC_XRCD:
                return "ALLOC_XRCD";
index eb0cf81f5f4518a06579a6c52e191b72ad1d0e50..04ab7e445eae080b0888d74022af1222479ea36b 100644 (file)
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq);
 
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+                                  struct mlx5_core_cq *cq,
+                                  u16 cq_period,
+                                  u16 cq_max_count)
+{
+       struct mlx5_modify_cq_mbox_in in;
+
+       memset(&in, 0, sizeof(in));
+
+       in.cqn              = cpu_to_be32(cq->cqn);
+       in.ctx.cq_period    = cpu_to_be16(cq_period);
+       in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+       in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+                                         MLX5_CQ_MODIFY_COUNT);
+
+       return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
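+
A hedged usage sketch for the new helper; the moderation values match the MLX5E_PARAMS_DEFAULT_*_CQ_MODERATION defaults added in en.h below, and 'dev'/'cq' stand for an already initialized core device and CQ:

	int err;

	/* Illustrative only: 16 usec / 32 packets, matching the mlx5e
	 * default moderation parameters.
	 */
	err = mlx5_core_modify_cq_moderation(dev, cq, 16, 32);
	if (err)
		mlx5_core_warn(dev, "failed to set CQ moderation\n");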
 int mlx5_init_cq_table(struct mlx5_core_dev *dev)
 {
        struct mlx5_cq_table *table = &dev->priv.cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644 (file)
index 0000000..cbb3c7c
--- /dev/null
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include "vport.h"
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC       8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
+#define MLX5E_PARAMS_MIN_MTU                            46
+
+#define MLX5E_TX_CQ_POLL_BUDGET        128
+#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+       /* vport statistics */
+       "rx_packets",
+       "rx_bytes",
+       "tx_packets",
+       "tx_bytes",
+       "rx_error_packets",
+       "rx_error_bytes",
+       "tx_error_packets",
+       "tx_error_bytes",
+       "rx_unicast_packets",
+       "rx_unicast_bytes",
+       "tx_unicast_packets",
+       "tx_unicast_bytes",
+       "rx_multicast_packets",
+       "rx_multicast_bytes",
+       "tx_multicast_packets",
+       "tx_multicast_bytes",
+       "rx_broadcast_packets",
+       "rx_broadcast_bytes",
+       "tx_broadcast_packets",
+       "tx_broadcast_bytes",
+
+       /* SW counters */
+       "tso_packets",
+       "tso_bytes",
+       "lro_packets",
+       "lro_bytes",
+       "rx_csum_good",
+       "rx_csum_none",
+       "tx_csum_offload",
+       "tx_queue_stopped",
+       "tx_queue_wake",
+       "tx_queue_dropped",
+       "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+       /* HW counters */
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 rx_error_packets;
+       u64 rx_error_bytes;
+       u64 tx_error_packets;
+       u64 tx_error_bytes;
+       u64 rx_unicast_packets;
+       u64 rx_unicast_bytes;
+       u64 tx_unicast_packets;
+       u64 tx_unicast_bytes;
+       u64 rx_multicast_packets;
+       u64 rx_multicast_bytes;
+       u64 tx_multicast_packets;
+       u64 tx_multicast_bytes;
+       u64 rx_broadcast_packets;
+       u64 rx_broadcast_bytes;
+       u64 tx_broadcast_packets;
+       u64 tx_broadcast_bytes;
+
+       /* SW counters */
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 lro_packets;
+       u64 lro_bytes;
+       u64 rx_csum_good;
+       u64 rx_csum_none;
+       u64 tx_csum_offload;
+       u64 tx_queue_stopped;
+       u64 tx_queue_wake;
+       u64 tx_queue_dropped;
+       u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS     31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+       "packets",
+       "csum_none",
+       "lro_packets",
+       "lro_bytes",
+       "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+       u64 packets;
+       u64 csum_none;
+       u64 lro_packets;
+       u64 lro_bytes;
+       u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+       "packets",
+       "tso_packets",
+       "tso_bytes",
+       "csum_offload_none",
+       "stopped",
+       "wake",
+       "dropped",
+       "nop"
+};
+
+struct mlx5e_sq_stats {
+       u64 packets;
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 csum_offload_none;
+       u64 stopped;
+       u64 wake;
+       u64 dropped;
+       u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+       struct mlx5e_vport_stats   vport;
+};
+
+struct mlx5e_params {
+       u8  log_sq_size;
+       u8  log_rq_size;
+       u16 num_channels;
+       u8  default_vlan_prio;
+       u8  num_tc;
+       u16 rx_cq_moderation_usec;
+       u16 rx_cq_moderation_pkts;
+       u16 tx_cq_moderation_usec;
+       u16 tx_cq_moderation_pkts;
+       u16 min_rx_wqes;
+       u16 rx_hash_log_tbl_sz;
+       bool lro_en;
+       u32 lro_wqe_sz;
+};
+
+enum {
+       MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+       MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+       /* data path - accessed per cqe */
+       struct mlx5_cqwq           wq;
+       void                      *sqrq;
+       unsigned long              flags;
+
+       /* data path - accessed per napi poll */
+       struct napi_struct        *napi;
+       struct mlx5_core_cq        mcq;
+       struct mlx5e_channel      *channel;
+
+       /* control */
+       struct mlx5_wq_ctrl        wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+       /* data path */
+       struct mlx5_wq_ll      wq;
+       u32                    wqe_sz;
+       struct sk_buff       **skb;
+
+       struct device         *pdev;
+       struct net_device     *netdev;
+       struct mlx5e_rq_stats  stats;
+       struct mlx5e_cq        cq;
+
+       unsigned long          state;
+       int                    ix;
+
+       /* control */
+       struct mlx5_wq_ctrl    wq_ctrl;
+       u32                    rqn;
+       struct mlx5e_channel  *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+       u32 num_bytes;
+       u8  num_wqebbs;
+       u8  num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+       dma_addr_t addr;
+       u32        size;
+};
+
+enum {
+       MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+       /* data path */
+
+       /* dirtied @completion */
+       u16                        cc;
+       u32                        dma_fifo_cc;
+
+       /* dirtied @xmit */
+       u16                        pc ____cacheline_aligned_in_smp;
+       u32                        dma_fifo_pc;
+       u32                        bf_offset;
+       struct mlx5e_sq_stats      stats;
+
+       struct mlx5e_cq            cq;
+
+       /* pointers to per packet info: write@xmit, read@completion */
+       struct sk_buff           **skb;
+       struct mlx5e_sq_dma       *dma_fifo;
+
+       /* read only */
+       struct mlx5_wq_cyc         wq;
+       u32                        dma_fifo_mask;
+       void __iomem              *uar_map;
+       struct netdev_queue       *txq;
+       u32                        sqn;
+       u32                        bf_buf_size;
+       struct device             *pdev;
+       __be32                     mkey_be;
+       unsigned long              state;
+
+       /* control path */
+       struct mlx5_wq_ctrl        wq_ctrl;
+       struct mlx5_uar            uar;
+       struct mlx5e_channel      *channel;
+       int                        tc;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+       return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+               (sq->cc  == sq->pc));
+}
+
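+
In the helper above, cc and pc are free-running counters, so the mask with sz_m1 yields the number of free WQEBBs even across wrap-around. A hedged walk-through with an assumed ring of 8 entries (sz_m1 == 7):

/* pc = 10, cc = 5: five WQEBBs are in flight, and
 *   7 & (5 - 10) = 7 & 0x...fffb = 3 free slots,
 * so mlx5e_sq_has_room_for(sq, 3) is true and ..._for(sq, 4) is false.
 * When pc == cc the ring is empty and there is always room.
 */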
+enum channel_flags {
+       MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+       /* data path */
+       struct mlx5e_rq            rq;
+       struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+       struct napi_struct         napi;
+       struct device             *pdev;
+       struct net_device         *netdev;
+       __be32                     mkey_be;
+       u8                         num_tc;
+       unsigned long              flags;
+
+       /* control */
+       struct mlx5e_priv         *priv;
+       int                        ix;
+       int                        cpu;
+};
+
+enum mlx5e_traffic_types {
+       MLX5E_TT_IPV4_TCP = 0,
+       MLX5E_TT_IPV6_TCP = 1,
+       MLX5E_TT_IPV4_UDP = 2,
+       MLX5E_TT_IPV6_UDP = 3,
+       MLX5E_TT_IPV4     = 4,
+       MLX5E_TT_IPV6     = 5,
+       MLX5E_TT_ANY      = 6,
+       MLX5E_NUM_TT      = 7,
+};
+
+enum {
+       MLX5E_RQT_SPREADING  = 0,
+       MLX5E_RQT_DEFAULT_RQ = 1,
+       MLX5E_NUM_RQT        = 2,
+};
+
+struct mlx5e_eth_addr_info {
+       u8  addr[ETH_ALEN + 2];
+       u32 tt_vec;
+       u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+       struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+       struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+       struct mlx5e_eth_addr_info broadcast;
+       struct mlx5e_eth_addr_info allmulti;
+       struct mlx5e_eth_addr_info promisc;
+       bool                       broadcast_enabled;
+       bool                       allmulti_enabled;
+       bool                       promisc_enabled;
+};
+
+enum {
+       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+       MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+       u32           active_vlans_ft_ix[VLAN_N_VID];
+       u32           untagged_rule_ft_ix;
+       u32           any_vlan_rule_ft_ix;
+       bool          filter_disabled;
+};
+
+struct mlx5e_flow_table {
+       void *vlan;
+       void *main;
+};
+
+struct mlx5e_priv {
+       /* priv data path fields - start */
+       int                        order_base_2_num_channels;
+       int                        queue_mapping_channel_mask;
+       int                        num_tc;
+       int                        default_vlan_prio;
+       /* priv data path fields - end */
+
+       unsigned long              state;
+       struct mutex               state_lock; /* protects interface state */
+       struct mlx5_uar            cq_uar;
+       u32                        pdn;
+       struct mlx5_core_mr        mr;
+
+       struct mlx5e_channel     **channel;
+       u32                        tisn[MLX5E_MAX_NUM_TC];
+       u32                        rqtn;
+       u32                        tirn[MLX5E_NUM_TT];
+
+       struct mlx5e_flow_table    ft;
+       struct mlx5e_eth_addr_db   eth_addr;
+       struct mlx5e_vlan_db       vlan;
+
+       struct mlx5e_params        params;
+       spinlock_t                 async_events_spinlock; /* sync hw events */
+       struct work_struct         update_carrier_work;
+       struct work_struct         set_rx_mode_work;
+       struct delayed_work        update_stats_work;
+
+       struct mlx5_core_dev      *mdev;
+       struct net_device         *netdev;
+       struct mlx5e_stats         stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_eth_seg  eth;
+};
+
+struct mlx5e_rx_wqe {
+       struct mlx5_wqe_srq_next_seg  next;
+       struct mlx5_wqe_data_seg      data;
+};
+
+enum mlx5e_link_mode {
+       MLX5E_1000BASE_CX_SGMII  = 0,
+       MLX5E_1000BASE_KX        = 1,
+       MLX5E_10GBASE_CX4        = 2,
+       MLX5E_10GBASE_KX4        = 3,
+       MLX5E_10GBASE_KR         = 4,
+       MLX5E_20GBASE_KR2        = 5,
+       MLX5E_40GBASE_CR4        = 6,
+       MLX5E_40GBASE_KR4        = 7,
+       MLX5E_56GBASE_R4         = 8,
+       MLX5E_10GBASE_CR         = 12,
+       MLX5E_10GBASE_SR         = 13,
+       MLX5E_10GBASE_ER         = 14,
+       MLX5E_40GBASE_SR4        = 15,
+       MLX5E_40GBASE_LR4        = 16,
+       MLX5E_100GBASE_CR4       = 20,
+       MLX5E_100GBASE_SR4       = 21,
+       MLX5E_100GBASE_KR4       = 22,
+       MLX5E_100GBASE_LR4       = 23,
+       MLX5E_100BASE_TX         = 24,
+       MLX5E_100BASE_T          = 25,
+       MLX5E_10GBASE_T          = 26,
+       MLX5E_25GBASE_CR         = 27,
+       MLX5E_25GBASE_KR         = 28,
+       MLX5E_25GBASE_SR         = 29,
+       MLX5E_50GBASE_CR2        = 30,
+       MLX5E_50GBASE_KR2        = 31,
+       MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+                         u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+                          u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+                            struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+                                     struct mlx5e_tx_wqe *wqe)
+{
+       /* ensure wqe is visible to device before updating doorbell record */
+       dma_wmb();
+
+       *sq->wq.db = cpu_to_be32(sq->pc);
+
+       /* ensure doorbell record is visible to device before ringing the
+        * doorbell
+        */
+       wmb();
+
+       mlx5_write64((__be32 *)&wqe->ctrl,
+                    sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+                    NULL);
+
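+       /* alternate between the two BlueFlame send buffers in the UAR page */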
+       sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+       struct mlx5_core_cq *mcq;
+
+       mcq = &cq->mcq;
+       mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644 (file)
index 0000000..de7aec8
--- /dev/null
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+                             struct ethtool_drvinfo *drvinfo)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%d.%d.%d",
+                fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+       strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+               sizeof(drvinfo->bus_info));
+}
+
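+/* PTYS link modes translated to the legacy ethtool bits; modes that have no
+ * matching legacy bit definition carry only a speed.
+ */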
+static const struct {
+       u32 supported;
+       u32 advertised;
+       u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+       [MLX5E_1000BASE_CX_SGMII] = {
+               .supported  = SUPPORTED_1000baseKX_Full,
+               .advertised = ADVERTISED_1000baseKX_Full,
+               .speed      = 1000,
+       },
+       [MLX5E_1000BASE_KX] = {
+               .supported  = SUPPORTED_1000baseKX_Full,
+               .advertised = ADVERTISED_1000baseKX_Full,
+               .speed      = 1000,
+       },
+       [MLX5E_10GBASE_CX4] = {
+               .supported  = SUPPORTED_10000baseKX4_Full,
+               .advertised = ADVERTISED_10000baseKX4_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_KX4] = {
+               .supported  = SUPPORTED_10000baseKX4_Full,
+               .advertised = ADVERTISED_10000baseKX4_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_KR] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_20GBASE_KR2] = {
+               .supported  = SUPPORTED_20000baseKR2_Full,
+               .advertised = ADVERTISED_20000baseKR2_Full,
+               .speed      = 20000,
+       },
+       [MLX5E_40GBASE_CR4] = {
+               .supported  = SUPPORTED_40000baseCR4_Full,
+               .advertised = ADVERTISED_40000baseCR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_40GBASE_KR4] = {
+               .supported  = SUPPORTED_40000baseKR4_Full,
+               .advertised = ADVERTISED_40000baseKR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_56GBASE_R4] = {
+               .supported  = SUPPORTED_56000baseKR4_Full,
+               .advertised = ADVERTISED_56000baseKR4_Full,
+               .speed      = 56000,
+       },
+       [MLX5E_10GBASE_CR] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_SR] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_ER] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_40GBASE_SR4] = {
+               .supported  = SUPPORTED_40000baseSR4_Full,
+               .advertised = ADVERTISED_40000baseSR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_40GBASE_LR4] = {
+               .supported  = SUPPORTED_40000baseLR4_Full,
+               .advertised = ADVERTISED_40000baseLR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_100GBASE_CR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100GBASE_SR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100GBASE_KR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100GBASE_LR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100BASE_TX]   = {
+               .speed      = 100,
+       },
+       [MLX5E_100BASE_T]    = {
+               .supported  = SUPPORTED_100baseT_Full,
+               .advertised = ADVERTISED_100baseT_Full,
+               .speed      = 100,
+       },
+       [MLX5E_10GBASE_T]    = {
+               .supported  = SUPPORTED_10000baseT_Full,
+               .advertised = ADVERTISED_10000baseT_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_25GBASE_CR]   = {
+               .speed      = 25000,
+       },
+       [MLX5E_25GBASE_KR]   = {
+               .speed      = 25000,
+       },
+       [MLX5E_25GBASE_SR]   = {
+               .speed      = 25000,
+       },
+       [MLX5E_50GBASE_CR2]  = {
+               .speed      = 50000,
+       },
+       [MLX5E_50GBASE_KR2]  = {
+               .speed      = 50000,
+       },
+};
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               return NUM_VPORT_COUNTERS +
+                      priv->params.num_channels * NUM_RQ_STATS +
+                      priv->params.num_channels * priv->num_tc *
+                                                  NUM_SQ_STATS;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+                             uint32_t stringset, uint8_t *data)
+{
+       int i, j, tc, idx = 0;
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       switch (stringset) {
+       case ETH_SS_PRIV_FLAGS:
+               break;
+
+       case ETH_SS_TEST:
+               break;
+
+       case ETH_SS_STATS:
+               /* VPORT counters */
+               for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+                       strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                              vport_strings[i]);
+
+               /* per channel counters */
+               for (i = 0; i < priv->params.num_channels; i++)
+                       for (j = 0; j < NUM_RQ_STATS; j++)
+                               sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                       "rx%d_%s", i, rq_stats_strings[j]);
+
+               for (i = 0; i < priv->params.num_channels; i++)
+                       for (tc = 0; tc < priv->num_tc; tc++)
+                               for (j = 0; j < NUM_SQ_STATS; j++)
+                                       sprintf(data +
+                                               (idx++) * ETH_GSTRING_LEN,
+                                               "tx%d_%d_%s", i, tc,
+                                               sq_stats_strings[j]);
+               break;
+       }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int i, j, tc, idx = 0;
+
+       if (!data)
+               return;
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_update_stats(priv);
+       mutex_unlock(&priv->state_lock);
+
+       for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+               data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+       /* per channel counters */
+       for (i = 0; i < priv->params.num_channels; i++)
+               for (j = 0; j < NUM_RQ_STATS; j++)
+                       data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+                                               &priv->state) ? 0 :
+                                      ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+       for (i = 0; i < priv->params.num_channels; i++)
+               for (tc = 0; tc < priv->num_tc; tc++)
+                       for (j = 0; j < NUM_SQ_STATS; j++)
+                               data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+                                                       &priv->state) ? 0 :
+                               ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+                               struct ethtool_ringparam *param)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+       param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+       param->rx_pending     = 1 << priv->params.log_rq_size;
+       param->tx_pending     = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *param)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_params new_params;
+       u16 min_rx_wqes;
+       u8 log_rq_size;
+       u8 log_sq_size;
+       int err = 0;
+
+       if (param->rx_jumbo_pending) {
+               netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (param->rx_mini_pending) {
+               netdev_info(dev, "%s: rx_mini_pending not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+               netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+                           __func__, param->rx_pending,
+                           1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+               return -EINVAL;
+       }
+       if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+               netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+                           __func__, param->rx_pending,
+                           1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+               return -EINVAL;
+       }
+       if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+               netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+                           __func__, param->tx_pending,
+                           1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+               return -EINVAL;
+       }
+       if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+               netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+                           __func__, param->tx_pending,
+                           1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+               return -EINVAL;
+       }
+
+       log_rq_size = order_base_2(param->rx_pending);
+       log_sq_size = order_base_2(param->tx_pending);
+       min_rx_wqes = min_t(u16, param->rx_pending - 1,
+                           MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+       if (log_rq_size == priv->params.log_rq_size &&
+           log_sq_size == priv->params.log_sq_size &&
+           min_rx_wqes == priv->params.min_rx_wqes)
+               return 0;
+
+       mutex_lock(&priv->state_lock);
+       new_params = priv->params;
+       new_params.log_rq_size = log_rq_size;
+       new_params.log_sq_size = log_sq_size;
+       new_params.min_rx_wqes = min_rx_wqes;
+       err = mlx5e_update_priv_params(priv, &new_params);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+                              struct ethtool_channels *ch)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+       ch->max_combined   = ncv;
+       ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+                             struct ethtool_channels *ch)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+       unsigned int count = ch->combined_count;
+       struct mlx5e_params new_params;
+       int err = 0;
+
+       if (!count) {
+               netdev_info(dev, "%s: combined_count=0 not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (ch->rx_count || ch->tx_count) {
+               netdev_info(dev, "%s: separate rx/tx count not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (count > ncv) {
+               netdev_info(dev, "%s: count (%d) > max (%d)\n",
+                           __func__, count, ncv);
+               return -EINVAL;
+       }
+
+       if (priv->params.num_channels == count)
+               return 0;
+
+       mutex_lock(&priv->state_lock);
+       new_params = priv->params;
+       new_params.num_channels = count;
+       err = mlx5e_update_priv_params(priv, &new_params);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
+       coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+       coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
+       coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+       return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_channel *c;
+       int tc;
+       int i;
+
+       mutex_lock(&priv->state_lock);
+
+       priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+       priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+       priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+       priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+       /* priv->channel[] is valid only while the netdev is opened */
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto out;
+
+       for (i = 0; i < priv->params.num_channels; ++i) {
+               c = priv->channel[i];
+
+               for (tc = 0; tc < c->num_tc; tc++) {
+                       mlx5_core_modify_cq_moderation(mdev,
+                                               &c->sq[tc].cq.mcq,
+                                               coal->tx_coalesce_usecs,
+                                               coal->tx_max_coalesced_frames);
+               }
+
+               mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+                                              coal->rx_coalesce_usecs,
+                                              coal->rx_max_coalesced_frames);
+       }
+
+out:
+       mutex_unlock(&priv->state_lock);
+
+       return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+       int i;
+       u32 supported_modes = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (eth_proto_cap & MLX5E_PROT_MASK(i))
+                       supported_modes |= ptys2ethtool_table[i].supported;
+       }
+       return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+       int i;
+       u32 advertising_modes = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (eth_proto_cap & MLX5E_PROT_MASK(i))
+                       advertising_modes |= ptys2ethtool_table[i].advertised;
+       }
+       return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+       if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+                          | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+                          | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+                          | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+                          | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+                          | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+               return SUPPORTED_FIBRE;
+       }
+
+       if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+                          | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+                          | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+                          | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+                          | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+               return SUPPORTED_Backplane;
+       }
+       return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+                            u32 eth_proto_oper,
+                            struct ethtool_cmd *cmd)
+{
+       int i;
+       u32 speed = SPEED_UNKNOWN;
+       u8 duplex = DUPLEX_UNKNOWN;
+
+       if (!netif_carrier_ok(netdev))
+               goto out;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+                       speed = ptys2ethtool_table[i].speed;
+                       duplex = DUPLEX_FULL;
+                       break;
+               }
+       }
+out:
+       ethtool_cmd_speed_set(cmd, speed);
+       cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+       *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+       *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+       *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+                           u8 rx_pause, u32 *advertising)
+{
+       *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+       *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+       *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+       if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+                        | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+                        | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+                        | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+                       return PORT_FIBRE;
+       }
+
+       if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+                        | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+                        | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+                       return PORT_DA;
+       }
+
+       if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+                        | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+                        | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+                        | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+                       return PORT_NONE;
+       }
+
+       return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+       *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+                             struct ethtool_cmd *cmd)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       u32 eth_proto_lp;
+       u32 eth_proto_oper;
+       int err;
+
+       err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
+
+       if (err) {
+               netdev_err(netdev, "%s: query port ptys failed: %d\n",
+                          __func__, err);
+               goto err_query_ptys;
+       }
+
+       eth_proto_cap   = MLX5_GET(ptys_reg, out, eth_proto_capability);
+       eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+       eth_proto_oper  = MLX5_GET(ptys_reg, out, eth_proto_oper);
+       eth_proto_lp    = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+       cmd->supported   = 0;
+       cmd->advertising = 0;
+
+       get_supported(eth_proto_cap, &cmd->supported);
+       get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+       get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+       eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+       cmd->port = get_connector_port(eth_proto_oper);
+       get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+       cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+       return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+       u32 i, ptys_modes = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (ptys2ethtool_table[i].advertised & link_modes)
+                       ptys_modes |= MLX5E_PROT_MASK(i);
+       }
+
+       return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+       u32 i, speed_links = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (ptys2ethtool_table[i].speed == speed)
+                       speed_links |= MLX5E_PROT_MASK(i);
+       }
+
+       return speed_links;
+}
+
+static int mlx5e_set_settings(struct net_device *netdev,
+                             struct ethtool_cmd *cmd)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 link_modes;
+       u32 speed;
+       u32 eth_proto_cap, eth_proto_admin;
+       u8 port_status;
+       int err;
+
+       speed = ethtool_cmd_speed(cmd);
+
+       link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+               mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+               mlx5e_ethtool2ptys_speed_link(speed);
+
+       err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+       if (err) {
+               netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+                          __func__, err);
+               goto out;
+       }
+
+       link_modes = link_modes & eth_proto_cap;
+       if (!link_modes) {
+               netdev_err(netdev, "%s: Not supported link mode(s) requested",
+                          __func__);
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+       if (err) {
+               netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+                          __func__, err);
+               goto out;
+       }
+
+       if (link_modes == eth_proto_admin)
+               goto out;
+
+       err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+       if (err) {
+               netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+                          __func__, err);
+               goto out;
+       }
+
+       err = mlx5_query_port_status(mdev, &port_status);
+       if (err)
+               goto out;
+
+       if (port_status == MLX5_PORT_DOWN)
+               return 0;
+
+       err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+       if (err)
+               goto out;
+       err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+       return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+       .get_drvinfo       = mlx5e_get_drvinfo,
+       .get_link          = ethtool_op_get_link,
+       .get_strings       = mlx5e_get_strings,
+       .get_sset_count    = mlx5e_get_sset_count,
+       .get_ethtool_stats = mlx5e_get_ethtool_stats,
+       .get_ringparam     = mlx5e_get_ringparam,
+       .set_ringparam     = mlx5e_set_ringparam,
+       .get_channels      = mlx5e_get_channels,
+       .set_channels      = mlx5e_set_channels,
+       .get_coalesce      = mlx5e_get_coalesce,
+       .set_coalesce      = mlx5e_set_coalesce,
+       .get_settings      = mlx5e_get_settings,
+       .set_settings      = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644 (file)
index 0000000..6feebda
--- /dev/null
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+       MLX5E_FULLMATCH = 0,
+       MLX5E_ALLMULTI  = 1,
+       MLX5E_PROMISC   = 2,
+};
+
+enum {
+       MLX5E_UC        = 0,
+       MLX5E_MC_IPV4   = 1,
+       MLX5E_MC_IPV6   = 2,
+       MLX5E_MC_OTHER  = 3,
+};
+
+enum {
+       MLX5E_ACTION_NONE = 0,
+       MLX5E_ACTION_ADD  = 1,
+       MLX5E_ACTION_DEL  = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+       struct hlist_node          hlist;
+       u8                         action;
+       struct mlx5e_eth_addr_info ai;
+};
+
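+/* hash on the last octet of the MAC: one bucket per possible byte value,
+ * matching MLX5E_ETH_ADDR_HASH_SIZE (256)
+ */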
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+       return addr[5];
+}
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       int ix = mlx5e_hash_eth_addr(addr);
+       int found = 0;
+
+       hlist_for_each_entry(hn, &hash[ix], hlist)
+               if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+                       found = 1;
+                       break;
+               }
+
+       if (found) {
+               hn->action = MLX5E_ACTION_NONE;
+               return;
+       }
+
+       hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+       if (!hn)
+               return;
+
+       ether_addr_copy(hn->ai.addr, addr);
+       hn->action = MLX5E_ACTION_ADD;
+
+       hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+       hlist_del(&hn->hlist);
+       kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+                                              struct mlx5e_eth_addr_info *ai)
+{
+       void *ft = priv->ft.main;
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+       if (is_unicast_ether_addr(addr))
+               return MLX5E_UC;
+
+       if ((addr[0] == 0x01) &&
+           (addr[1] == 0x00) &&
+           (addr[2] == 0x5e) &&
+          !(addr[3] &  0x80))
+               return MLX5E_MC_IPV4;
+
+       if ((addr[0] == 0x33) &&
+           (addr[1] == 0x33))
+               return MLX5E_MC_IPV6;
+
+       return MLX5E_MC_OTHER;
+}
+
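+/* Bitmap of traffic types whose TIR should receive traffic for this address,
+ * depending on whether it is unicast, IPv4/IPv6 multicast, allmulti or
+ * promisc.
+ */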
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+       int eth_addr_type;
+       u32 ret;
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+               switch (eth_addr_type) {
+               case MLX5E_UC:
+                       ret =
+                               (1 << MLX5E_TT_IPV4_TCP) |
+                               (1 << MLX5E_TT_IPV6_TCP) |
+                               (1 << MLX5E_TT_IPV4_UDP) |
+                               (1 << MLX5E_TT_IPV6_UDP) |
+                               (1 << MLX5E_TT_IPV4)     |
+                               (1 << MLX5E_TT_IPV6)     |
+                               (1 << MLX5E_TT_ANY)      |
+                               0;
+                       break;
+
+               case MLX5E_MC_IPV4:
+                       ret =
+                               (1 << MLX5E_TT_IPV4_UDP) |
+                               (1 << MLX5E_TT_IPV4)     |
+                               0;
+                       break;
+
+               case MLX5E_MC_IPV6:
+                       ret =
+                               (1 << MLX5E_TT_IPV6_UDP) |
+                               (1 << MLX5E_TT_IPV6)     |
+                               0;
+                       break;
+
+               case MLX5E_MC_OTHER:
+                       ret =
+                               (1 << MLX5E_TT_ANY)      |
+                               0;
+                       break;
+               }
+
+               break;
+
+       case MLX5E_ALLMULTI:
+               ret =
+                       (1 << MLX5E_TT_IPV4_UDP) |
+                       (1 << MLX5E_TT_IPV6_UDP) |
+                       (1 << MLX5E_TT_IPV4)     |
+                       (1 << MLX5E_TT_IPV6)     |
+                       (1 << MLX5E_TT_ANY)      |
+                       0;
+               break;
+
+       default: /* MLX5E_PROMISC */
+               ret =
+                       (1 << MLX5E_TT_IPV4_TCP) |
+                       (1 << MLX5E_TT_IPV6_TCP) |
+                       (1 << MLX5E_TT_IPV4_UDP) |
+                       (1 << MLX5E_TT_IPV6_UDP) |
+                       (1 << MLX5E_TT_IPV4)     |
+                       (1 << MLX5E_TT_IPV6)     |
+                       (1 << MLX5E_TT_ANY)      |
+                       0;
+               break;
+       }
+
+       return ret;
+}
+
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+                                    struct mlx5e_eth_addr_info *ai, int type,
+                                    void *flow_context, void *match_criteria)
+{
+       u8 match_criteria_enable = 0;
+       void *match_value;
+       void *dest;
+       u8   *dmac;
+       u8   *match_criteria_dmac;
+       void *ft   = priv->ft.main;
+       u32  *tirn = priv->tirn;
+       u32  tt_vec;
+       int  err;
+
+       match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+       dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+                           outer_headers.dmac_47_16);
+       match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                          outer_headers.dmac_47_16);
+       dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+       MLX5_SET(flow_context, flow_context, action,
+                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+       MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+       MLX5_SET(dest_format_struct, dest, destination_type,
+                MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               memset(match_criteria_dmac, 0xff, ETH_ALEN);
+               ether_addr_copy(dmac, ai->addr);
+               break;
+
+       case MLX5E_ALLMULTI:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               match_criteria_dmac[0] = 0x01;
+               dmac[0] = 0x01;
+               break;
+
+       case MLX5E_PROMISC:
+               break;
+       }
+
+       tt_vec = mlx5e_get_tt_vec(ai, type);
+
+       if (tt_vec & (1 << MLX5E_TT_ANY)) {
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_ANY]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_ANY]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_ANY);
+       }
+
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.ethertype);
+
+       if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV4]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+       }
+
+       if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV6]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+       }
+
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.ip_protocol);
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_UDP);
+
+       if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_UDP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+       }
+
+       if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_UDP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+       }
+
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_TCP);
+
+       if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_TCP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+       }
+
+       if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_TCP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+       }
+
+       return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+                                  struct mlx5e_eth_addr_info *ai, int type)
+{
+       u32 *flow_context;
+       u32 *match_criteria;
+       int err;
+
+       flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+                                     MLX5_ST_SZ_BYTES(dest_format_struct));
+       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!flow_context || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_eth_addr_rule_out;
+       }
+
+       err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+                                       match_criteria);
+       if (err)
+               netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+       kvfree(match_criteria);
+       kvfree(flow_context);
+       return err;
+}
+
+enum mlx5e_vlan_rule_type {
+       MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+       MLX5E_VLAN_RULE_TYPE_ANY_VID,
+       MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+                              enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+       u8 match_criteria_enable = 0;
+       u32 *flow_context;
+       void *match_value;
+       void *dest;
+       u32 *match_criteria;
+       u32 *ft_ix;
+       int err;
+
+       flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+                                     MLX5_ST_SZ_BYTES(dest_format_struct));
+       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!flow_context || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_vlan_rule_out;
+       }
+       match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+       dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+       MLX5_SET(flow_context, flow_context, action,
+                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+       MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+       MLX5_SET(dest_format_struct, dest, destination_type,
+                MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+       MLX5_SET(dest_format_struct, dest, destination_id,
+                mlx5_get_flow_table_id(priv->ft.main));
+
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.vlan_tag);
+
+       switch (rule_type) {
+       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+               ft_ix = &priv->vlan.untagged_rule_ft_ix;
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+               ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+               MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+                        1);
+               break;
+       default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+               ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+               MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+                        1);
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.first_vid);
+               MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+                        vid);
+               break;
+       }
+
+       err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+                                       match_criteria, flow_context, ft_ix);
+       if (err)
+               netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+       kvfree(match_criteria);
+       kvfree(flow_context);
+       return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+       switch (rule_type) {
+       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+               mlx5_del_flow_table_entry(priv->ft.vlan,
+                                         priv->vlan.untagged_rule_ft_ix);
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+               mlx5_del_flow_table_entry(priv->ft.vlan,
+                                         priv->vlan.any_vlan_rule_ft_ix);
+               break;
+       case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+               mlx5_del_flow_table_entry(priv->ft.vlan,
+                                         priv->vlan.active_vlans_ft_ix[vid]);
+               break;
+       }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+       WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+       if (priv->vlan.filter_disabled) {
+               priv->vlan.filter_disabled = false;
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+       WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+       if (!priv->vlan.filter_disabled) {
+               priv->vlan.filter_disabled = true;
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+                         u16 vid)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       mutex_lock(&priv->state_lock);
+
+       set_bit(vid, priv->vlan.active_vlans);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+                                         vid);
+
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+                          u16 vid)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       mutex_lock(&priv->state_lock);
+
+       clear_bit(vid, priv->vlan.active_vlans);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+       mutex_unlock(&priv->state_lock);
+
+       return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+       u16 vid;
+       int err;
+
+       for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+                                         vid);
+               if (err)
+                       return err;
+       }
+
+       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+       if (err)
+               return err;
+
+       if (priv->vlan.filter_disabled) {
+               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                         0);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+       u16 vid;
+
+       if (priv->vlan.filter_disabled)
+               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+       for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+       for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+               hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+                                struct mlx5e_eth_addr_hash_node *hn)
+{
+       switch (hn->action) {
+       case MLX5E_ACTION_ADD:
+               mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+               hn->action = MLX5E_ACTION_NONE;
+               break;
+
+       case MLX5E_ACTION_DEL:
+               mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+               mlx5e_del_eth_addr_from_hash(hn);
+               break;
+       }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct netdev_hw_addr *ha;
+
+       netif_addr_lock_bh(netdev);
+
+       mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+                                  priv->netdev->dev_addr);
+
+       netdev_for_each_uc_addr(ha, netdev)
+               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+       netdev_for_each_mc_addr(ha, netdev)
+               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+       netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_node *tmp;
+       int i;
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+               mlx5e_execute_action(priv, hn);
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+               mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_node *tmp;
+       int i;
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+               hn->action = MLX5E_ACTION_DEL;
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+               hn->action = MLX5E_ACTION_DEL;
+
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_sync_netdev_addr(priv);
+
+       mlx5e_apply_netdev_addr(priv);
+}
+
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+       struct net_device *ndev = priv->netdev;
+
+       bool rx_mode_enable    = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+       bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+       bool broadcast_enabled = rx_mode_enable;
+
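+       /* diff the desired state against what is currently programmed */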
+       bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
+       bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
+       bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
+       bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
+       bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
+       bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
+
+       if (enable_promisc)
+               mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+       if (enable_allmulti)
+               mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+       if (enable_broadcast)
+               mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+       mlx5e_handle_netdev_addr(priv);
+
+       if (disable_broadcast)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+       if (disable_allmulti)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+       if (disable_promisc)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+       ea->promisc_enabled   = promisc_enabled;
+       ea->allmulti_enabled  = allmulti_enabled;
+       ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              set_rx_mode_work);
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_set_rx_mode_core(priv);
+       mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+       ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
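+/* The main flow table is built from nine match groups, three per steering
+ * class: groups 0-2 match no DMAC (promiscuous rules), groups 3-5 match
+ * the full DMAC (unicast full-match rules) and groups 6-8 match only the
+ * multicast bit of the DMAC (allmulti rules).  Within each class the
+ * criteria narrow from ethertype plus ip_protocol, to ethertype only, to
+ * a pure L2 match.
+ */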
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_table_group *g;
+       u8 *dmac;
+
+       g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+       if (!g)
+               return -ENOMEM;
+
+       g[0].log_sz = 2;
+       g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.ip_protocol);
+
+       g[1].log_sz = 1;
+       g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+                        outer_headers.ethertype);
+
+       g[2].log_sz = 0;
+
+       g[3].log_sz = 14;
+       g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+                           outer_headers.dmac_47_16);
+       memset(dmac, 0xff, ETH_ALEN);
+       MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+                        outer_headers.ip_protocol);
+
+       g[4].log_sz = 13;
+       g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+                           outer_headers.dmac_47_16);
+       memset(dmac, 0xff, ETH_ALEN);
+       MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+                        outer_headers.ethertype);
+
+       g[5].log_sz = 11;
+       g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+                           outer_headers.dmac_47_16);
+       memset(dmac, 0xff, ETH_ALEN);
+
+       g[6].log_sz = 2;
+       g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+                           outer_headers.dmac_47_16);
+       dmac[0] = 0x01;
+       MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+                        outer_headers.ip_protocol);
+
+       g[7].log_sz = 1;
+       g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+                           outer_headers.dmac_47_16);
+       dmac[0] = 0x01;
+       MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+                        outer_headers.ethertype);
+
+       g[8].log_sz = 0;
+       g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+                           outer_headers.dmac_47_16);
+       dmac[0] = 0x01;
+       priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+                                              MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+                                              9, g);
+       kfree(g);
+
+       return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_table_group *g;
+
+       g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+       if (!g)
+               return -ENOMEM;
+
+       g[0].log_sz = 12;
+       g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.first_vid);
+
+       /* untagged + any vlan id */
+       g[1].log_sz = 1;
+       g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+                        outer_headers.vlan_tag);
+
+       priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+                                              MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+                                              2, g);
+
+       kfree(g);
+       return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+       int err;
+
+       err = mlx5e_create_main_flow_table(priv);
+       if (err)
+               return err;
+
+       err = mlx5e_create_vlan_flow_table(priv);
+       if (err)
+               goto err_destroy_main_flow_table;
+
+       return 0;
+
+err_destroy_main_flow_table:
+       mlx5e_destroy_main_flow_table(priv);
+
+       return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5e_destroy_vlan_flow_table(priv);
+       mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644 (file)
index 0000000..eee829d
--- /dev/null
@@ -0,0 +1,1899 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+       u32                        rqc[MLX5_ST_SZ_DW(rqc)];
+       struct mlx5_wq_param       wq;
+};
+
+struct mlx5e_sq_param {
+       u32                        sqc[MLX5_ST_SZ_DW(sqc)];
+       struct mlx5_wq_param       wq;
+};
+
+struct mlx5e_cq_param {
+       u32                        cqc[MLX5_ST_SZ_DW(cqc)];
+       struct mlx5_wq_param       wq;
+       u16                        eq_ix;
+};
+
+struct mlx5e_channel_param {
+       struct mlx5e_rq_param      rq;
+       struct mlx5e_sq_param      sq;
+       struct mlx5e_cq_param      rx_cq;
+       struct mlx5e_cq_param      tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u8 port_state;
+
+       port_state = mlx5_query_vport_state(mdev,
+               MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+       if (port_state == VPORT_STATE_UP)
+               netif_carrier_on(priv->netdev);
+       else
+               netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              update_carrier_work);
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_update_carrier(priv);
+       mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_vport_stats *s = &priv->stats.vport;
+       struct mlx5e_rq_stats *rq_stats;
+       struct mlx5e_sq_stats *sq_stats;
+       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+       u32 *out;
+       int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+       u64 tx_offload_none;
+       int i, j;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return;
+
+       /* Collect the SW counters first and then the HW counters for consistency */
+       s->tso_packets          = 0;
+       s->tso_bytes            = 0;
+       s->tx_queue_stopped     = 0;
+       s->tx_queue_wake        = 0;
+       s->tx_queue_dropped     = 0;
+       tx_offload_none         = 0;
+       s->lro_packets          = 0;
+       s->lro_bytes            = 0;
+       s->rx_csum_none         = 0;
+       s->rx_wqe_err           = 0;
+       for (i = 0; i < priv->params.num_channels; i++) {
+               rq_stats = &priv->channel[i]->rq.stats;
+
+               s->lro_packets  += rq_stats->lro_packets;
+               s->lro_bytes    += rq_stats->lro_bytes;
+               s->rx_csum_none += rq_stats->csum_none;
+               s->rx_wqe_err   += rq_stats->wqe_err;
+
+               for (j = 0; j < priv->num_tc; j++) {
+                       sq_stats = &priv->channel[i]->sq[j].stats;
+
+                       s->tso_packets          += sq_stats->tso_packets;
+                       s->tso_bytes            += sq_stats->tso_bytes;
+                       s->tx_queue_stopped     += sq_stats->stopped;
+                       s->tx_queue_wake        += sq_stats->wake;
+                       s->tx_queue_dropped     += sq_stats->dropped;
+                       tx_offload_none         += sq_stats->csum_offload_none;
+               }
+       }
+
+       /* HW counters */
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(query_vport_counter_in, in, opcode,
+                MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+       MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+       MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+       memset(out, 0, outlen);
+
+       if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+               goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+       MLX5_GET64(query_vport_counter_out, p, x)
+
+       s->rx_error_packets     =
+               MLX5_GET_CTR(out, received_errors.packets);
+       s->rx_error_bytes       =
+               MLX5_GET_CTR(out, received_errors.octets);
+       s->tx_error_packets     =
+               MLX5_GET_CTR(out, transmit_errors.packets);
+       s->tx_error_bytes       =
+               MLX5_GET_CTR(out, transmit_errors.octets);
+
+       s->rx_unicast_packets   =
+               MLX5_GET_CTR(out, received_eth_unicast.packets);
+       s->rx_unicast_bytes     =
+               MLX5_GET_CTR(out, received_eth_unicast.octets);
+       s->tx_unicast_packets   =
+               MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+       s->tx_unicast_bytes     =
+               MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+       s->rx_multicast_packets =
+               MLX5_GET_CTR(out, received_eth_multicast.packets);
+       s->rx_multicast_bytes   =
+               MLX5_GET_CTR(out, received_eth_multicast.octets);
+       s->tx_multicast_packets =
+               MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+       s->tx_multicast_bytes   =
+               MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+       s->rx_broadcast_packets =
+               MLX5_GET_CTR(out, received_eth_broadcast.packets);
+       s->rx_broadcast_bytes   =
+               MLX5_GET_CTR(out, received_eth_broadcast.octets);
+       s->tx_broadcast_packets =
+               MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+       s->tx_broadcast_bytes   =
+               MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+       s->rx_packets =
+               s->rx_unicast_packets +
+               s->rx_multicast_packets +
+               s->rx_broadcast_packets;
+       s->rx_bytes =
+               s->rx_unicast_bytes +
+               s->rx_multicast_bytes +
+               s->rx_broadcast_bytes;
+       s->tx_packets =
+               s->tx_unicast_packets +
+               s->tx_multicast_packets +
+               s->tx_broadcast_packets;
+       s->tx_bytes =
+               s->tx_unicast_bytes +
+               s->tx_multicast_bytes +
+               s->tx_broadcast_bytes;
+
+       /* Update calculated offload counters */
+       s->tx_csum_offload = s->tx_packets - tx_offload_none;
+       s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+
+free_out:
+       kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+                                              update_stats_work);
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               mlx5e_update_stats(priv);
+               schedule_delayed_work(dwork,
+                                     msecs_to_jiffies(
+                                             MLX5E_UPDATE_STATS_INTERVAL));
+       }
+       mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+                               enum mlx5_dev_event event)
+{
+       switch (event) {
+       case MLX5_DEV_EVENT_PORT_UP:
+       case MLX5_DEV_EVENT_PORT_DOWN:
+               schedule_work(&priv->update_carrier_work);
+               break;
+
+       default:
+               break;
+       }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+                             enum mlx5_dev_event event, unsigned long param)
+{
+       struct mlx5e_priv *priv = vpriv;
+
+       spin_lock(&priv->async_events_spinlock);
+       if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+               __mlx5e_async_event(priv, event);
+       spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
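+/* Serializing on the async events spinlock with IRQs disabled guarantees
+ * that no mlx5e_async_event() handler is still running once the bit is
+ * cleared.
+ */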
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+       spin_lock_irq(&priv->async_events_spinlock);
+       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       spin_unlock_irq(&priv->async_events_spinlock);
+}
+
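+/* Post a NOP WQE and ring the doorbell; its completion kicks the
+ * channel's NAPI poll, which is also used to trigger the initial
+ * mlx5e_post_rx_wqes() run when an RQ is opened.
+ */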
+static void mlx5e_send_nop(struct mlx5e_sq *sq)
+{
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       u16 pi = sq->pc & wq->sz_m1;
+       struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+       memset(cseg, 0, sizeof(*cseg));
+
+       cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+       cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
+       cseg->fm_ce_se         = MLX5_WQE_CTRL_CQ_UPDATE;
+
+       sq->skb[pi] = NULL;
+       sq->pc++;
+       mlx5e_tx_notify_hw(sq, wqe);
+}
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+                          struct mlx5e_rq_param *param,
+                          struct mlx5e_rq *rq)
+{
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       void *rqc = param->rqc;
+       void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+       int wq_sz;
+       int err;
+       int i;
+
+       err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+                               &rq->wq_ctrl);
+       if (err)
+               return err;
+
+       rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+       wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+       rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+                              cpu_to_node(c->cpu));
+       if (!rq->skb) {
+               err = -ENOMEM;
+               goto err_rq_wq_destroy;
+       }
+
+       rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+                               priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+
+       for (i = 0; i < wq_sz; i++) {
+               struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+
+               wqe->data.lkey       = c->mkey_be;
+               wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+       }
+
+       rq->pdev    = c->pdev;
+       rq->netdev  = c->netdev;
+       rq->channel = c;
+       rq->ix      = c->ix;
+
+       return 0;
+
+err_rq_wq_destroy:
+       mlx5_wq_destroy(&rq->wq_ctrl);
+
+       return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+       kfree(rq->skb);
+       mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *rqc;
+       void *wq;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+               sizeof(u64) * rq->wq_ctrl.buf.npages;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+       wq  = MLX5_ADDR_OF(rqc, rqc, wq);
+
+       memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+       MLX5_SET(rqc,  rqc, cqn,                c->rq.cq.mcq.cqn);
+       MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
+       MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
+       MLX5_SET(wq,   wq,  wq_type,            MLX5_WQ_TYPE_LINKED_LIST);
+       MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
+                                               PAGE_SHIFT);
+       MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
+
+       mlx5_fill_page_array(&rq->wq_ctrl.buf,
+                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+       err = mlx5_create_rq(mdev, in, inlen, &rq->rqn);
+
+       kvfree(in);
+
+       return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *rqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+       MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+       MLX5_SET(rqc, rqc, state, next_state);
+
+       err = mlx5_modify_rq(mdev, rq->rqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       mlx5_destroy_rq(mdev, rq->rqn);
+}
+
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_wq_ll *wq = &rq->wq;
+       int i;
+
+       for (i = 0; i < 1000; i++) {
+               if (wq->cur_sz >= priv->params.min_rx_wqes)
+                       return 0;
+
+               msleep(20);
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+                        struct mlx5e_rq_param *param,
+                        struct mlx5e_rq *rq)
+{
+       int err;
+
+       err = mlx5e_create_rq(c, param, rq);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_rq(rq, param);
+       if (err)
+               goto err_destroy_rq;
+
+       err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+       if (err)
+               goto err_disable_rq;
+
+       set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+
+       return 0;
+
+err_disable_rq:
+       mlx5e_disable_rq(rq);
+err_destroy_rq:
+       mlx5e_destroy_rq(rq);
+
+       return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+       clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+       mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       while (!mlx5_wq_ll_is_empty(&rq->wq))
+               msleep(20);
+
+       /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+       napi_synchronize(&rq->channel->napi);
+
+       mlx5e_disable_rq(rq);
+       mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+       kfree(sq->dma_fifo);
+       kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+       int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+       int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+       sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+       sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+                                   numa);
+
+       if (!sq->skb || !sq->dma_fifo) {
+               mlx5e_free_sq_db(sq);
+               return -ENOMEM;
+       }
+
+       sq->dma_fifo_mask = df_sz - 1;
+
+       return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+                          int tc,
+                          struct mlx5e_sq_param *param,
+                          struct mlx5e_sq *sq)
+{
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *sqc = param->sqc;
+       void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+       int err;
+
+       err = mlx5_alloc_map_uar(mdev, &sq->uar);
+       if (err)
+               return err;
+
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+                                &sq->wq_ctrl);
+       if (err)
+               goto err_unmap_free_uar;
+
+       sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
+       sq->uar_map     = sq->uar.map;
+       sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+       if (mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)))
+               goto err_sq_wq_destroy;
+
+       sq->txq = netdev_get_tx_queue(priv->netdev,
+                                     c->ix + tc * priv->params.num_channels);
+
+       sq->pdev    = c->pdev;
+       sq->mkey_be = c->mkey_be;
+       sq->channel = c;
+       sq->tc      = tc;
+
+       return 0;
+
+err_sq_wq_destroy:
+       mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+       mlx5_unmap_free_uar(mdev, &sq->uar);
+
+       return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+
+       mlx5e_free_sq_db(sq);
+       mlx5_wq_destroy(&sq->wq_ctrl);
+       mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *sqc;
+       void *wq;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+               sizeof(u64) * sq->wq_ctrl.buf.npages;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+       wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+       MLX5_SET(sqc,  sqc, user_index,         sq->tc);
+       MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
+       MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
+       MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
+       MLX5_SET(sqc,  sqc, tis_lst_sz,         1);
+       MLX5_SET(sqc,  sqc, flush_in_error_en,  1);
+
+       MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
+       MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
+       MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
+                                         PAGE_SHIFT);
+       MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
+
+       mlx5_fill_page_array(&sq->wq_ctrl.buf,
+                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+       err = mlx5_create_sq(mdev, in, inlen, &sq->sqn);
+
+       kvfree(in);
+
+       return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *sqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+       MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+       MLX5_SET(sqc, sqc, state, next_state);
+
+       err = mlx5_modify_sq(mdev, sq->sqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       mlx5_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+                        int tc,
+                        struct mlx5e_sq_param *param,
+                        struct mlx5e_sq *sq)
+{
+       int err;
+
+       err = mlx5e_create_sq(c, tc, param, sq);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_sq(sq, param);
+       if (err)
+               goto err_destroy_sq;
+
+       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+       if (err)
+               goto err_disable_sq;
+
+       set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+       netdev_tx_reset_queue(sq->txq);
+       netif_tx_start_queue(sq->txq);
+
+       return 0;
+
+err_disable_sq:
+       mlx5e_disable_sq(sq);
+err_destroy_sq:
+       mlx5e_destroy_sq(sq);
+
+       return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+       __netif_tx_lock_bh(txq);
+       netif_tx_stop_queue(txq);
+       __netif_tx_unlock_bh(txq);
+}
+
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+       clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+       napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+       netif_tx_disable_queue(sq->txq);
+
+       /* ensure hw is notified of all pending wqes */
+       if (mlx5e_sq_has_room_for(sq, 1))
+               mlx5e_send_nop(sq);
+
+       mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+       while (sq->cc != sq->pc) /* wait till sq is empty */
+               msleep(20);
+
+       /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+       napi_synchronize(&sq->channel->napi);
+
+       mlx5e_disable_sq(sq);
+       mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+                          struct mlx5e_cq_param *param,
+                          struct mlx5e_cq *cq)
+{
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_cq *mcq = &cq->mcq;
+       int eqn_not_used;
+       int irqn;
+       int err;
+       u32 i;
+
+       param->wq.numa = cpu_to_node(c->cpu);
+       param->eq_ix   = c->ix;
+
+       err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+                              &cq->wq_ctrl);
+       if (err)
+               return err;
+
+       mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+       cq->napi        = &c->napi;
+
+       mcq->cqe_sz     = 64;
+       mcq->set_ci_db  = cq->wq_ctrl.db.db;
+       mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+       *mcq->set_ci_db = 0;
+       *mcq->arm_db    = 0;
+       mcq->vector     = param->eq_ix;
+       mcq->comp       = mlx5e_completion_event;
+       mcq->event      = mlx5e_cq_error_event;
+       mcq->irqn       = irqn;
+       mcq->uar        = &priv->cq_uar;
+
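+       /* Initialize every CQE with an invalid opcode/ownership pattern so
+        * that a CQE not yet written by hardware is never treated as a
+        * valid completion.
+        */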
+       for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+               struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+               cqe->op_own = 0xf1;
+       }
+
+       cq->channel = c;
+
+       return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+       mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+       struct mlx5e_channel *c = cq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_cq *mcq = &cq->mcq;
+
+       void *in;
+       void *cqc;
+       int inlen;
+       int irqn_not_used;
+       int eqn;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+               sizeof(u64) * cq->wq_ctrl.buf.npages;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+       memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+       mlx5_fill_page_array(&cq->wq_ctrl.buf,
+                            (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+       mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+       MLX5_SET(cqc,   cqc, c_eqn,         eqn);
+       MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
+       MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+                                           PAGE_SHIFT);
+       MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
+
+       err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+       kvfree(in);
+
+       if (err)
+               return err;
+
+       mlx5e_cq_arm(cq);
+
+       return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5e_channel *c = cq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+                        struct mlx5e_cq_param *param,
+                        struct mlx5e_cq *cq,
+                        u16 moderation_usecs,
+                        u16 moderation_frames)
+{
+       int err;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       err = mlx5e_create_cq(c, param, cq);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_cq(cq, param);
+       if (err)
+               goto err_destroy_cq;
+
+       err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+                                            moderation_usecs,
+                                            moderation_frames);
+       if (err)
+               goto err_destroy_cq;
+
+       return 0;
+
+err_destroy_cq:
+       mlx5e_destroy_cq(cq);
+
+       return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+       mlx5e_disable_cq(cq);
+       mlx5e_destroy_cq(cq);
+}
+
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+       return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+                            struct mlx5e_channel_param *cparam)
+{
+       struct mlx5e_priv *priv = c->priv;
+       int err;
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++) {
+               err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+                                   priv->params.tx_cq_moderation_usec,
+                                   priv->params.tx_cq_moderation_pkts);
+               if (err)
+                       goto err_close_tx_cqs;
+
+               c->sq[tc].cq.sqrq = &c->sq[tc];
+       }
+
+       return 0;
+
+err_close_tx_cqs:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_close_cq(&c->sq[tc].cq);
+
+       return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++)
+               mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+                         struct mlx5e_channel_param *cparam)
+{
+       int err;
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++) {
+               err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+               if (err)
+                       goto err_close_sqs;
+       }
+
+       return 0;
+
+err_close_sqs:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_close_sq(&c->sq[tc]);
+
+       return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++)
+               mlx5e_close_sq(&c->sq[tc]);
+}
+
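+/* A channel bundles one RQ, one SQ per traffic class, their CQs and a
+ * NAPI context, all allocated on the NUMA node of the CPU that services
+ * the channel's IRQ vector.
+ */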
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+                             struct mlx5e_channel_param *cparam,
+                             struct mlx5e_channel **cp)
+{
+       struct net_device *netdev = priv->netdev;
+       int cpu = mlx5e_get_cpu(priv, ix);
+       struct mlx5e_channel *c;
+       int err;
+
+       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+       if (!c)
+               return -ENOMEM;
+
+       c->priv     = priv;
+       c->ix       = ix;
+       c->cpu      = cpu;
+       c->pdev     = &priv->mdev->pdev->dev;
+       c->netdev   = priv->netdev;
+       c->mkey_be  = cpu_to_be32(priv->mr.key);
+       c->num_tc   = priv->num_tc;
+
+       netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+       err = mlx5e_open_tx_cqs(c, cparam);
+       if (err)
+               goto err_napi_del;
+
+       err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+                           priv->params.rx_cq_moderation_usec,
+                           priv->params.rx_cq_moderation_pkts);
+       if (err)
+               goto err_close_tx_cqs;
+       c->rq.cq.sqrq = &c->rq;
+
+       napi_enable(&c->napi);
+
+       err = mlx5e_open_sqs(c, cparam);
+       if (err)
+               goto err_disable_napi;
+
+       err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+       if (err)
+               goto err_close_sqs;
+
+       netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+       *cp = c;
+
+       return 0;
+
+err_close_sqs:
+       mlx5e_close_sqs(c);
+
+err_disable_napi:
+       napi_disable(&c->napi);
+       mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+       mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+       netif_napi_del(&c->napi);
+       kfree(c);
+
+       return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+       mlx5e_close_rq(&c->rq);
+       mlx5e_close_sqs(c);
+       napi_disable(&c->napi);
+       mlx5e_close_cq(&c->rq.cq);
+       mlx5e_close_tx_cqs(c);
+       netif_napi_del(&c->napi);
+       kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_rq_param *param)
+{
+       void *rqc = param->rqc;
+       void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+       MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
+       MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+       MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+       MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
+       MLX5_SET(wq, wq, pd,               priv->pdn);
+
+       param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
+       MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+       MLX5_SET(wq, wq, pd,            priv->pdn);
+
+       param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+                                       struct mlx5e_cq_param *param)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_cq_param *param)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);
+
+       mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_cq_param *param)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);
+
+       mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+                                     struct mlx5e_channel_param *cparam)
+{
+       memset(cparam, 0, sizeof(*cparam));
+
+       mlx5e_build_rq_param(priv, &cparam->rq);
+       mlx5e_build_sq_param(priv, &cparam->sq);
+       mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+       mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channel_param cparam;
+       int err;
+       int i;
+       int j;
+
+       priv->channel = kcalloc(priv->params.num_channels,
+                               sizeof(struct mlx5e_channel *), GFP_KERNEL);
+       if (!priv->channel)
+               return -ENOMEM;
+
+       mlx5e_build_channel_param(priv, &cparam);
+       for (i = 0; i < priv->params.num_channels; i++) {
+               err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+               if (err)
+                       goto err_close_channels;
+       }
+
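+       /* RX WQEs are posted from NAPI context; wait until every RQ holds
+        * at least min_rx_wqes before reporting the channels open.
+        */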
+       for (j = 0; j < priv->params.num_channels; j++) {
+               err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+               if (err)
+                       goto err_close_channels;
+       }
+
+       return 0;
+
+err_close_channels:
+       for (i--; i >= 0; i--)
+               mlx5e_close_channel(priv->channel[i]);
+
+       kfree(priv->channel);
+
+       return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->params.num_channels; i++)
+               mlx5e_close_channel(priv->channel[i]);
+
+       kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+       void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(tisc, tisc, prio,  tc);
+
+       return mlx5_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+       mlx5_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+       int num_tc = priv->num_tc;
+       int err;
+       int tc;
+
+       for (tc = 0; tc < num_tc; tc++) {
+               err = mlx5e_open_tis(priv, tc);
+               if (err)
+                       goto err_close_tises;
+       }
+
+       return 0;
+
+err_close_tises:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_close_tis(priv, tc);
+
+       return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+       int num_tc = priv->num_tc;
+       int tc;
+
+       for (tc = 0; tc < num_tc; tc++)
+               mlx5e_close_tis(priv, tc);
+}
+
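+/* The RQT is the RSS indirection table: 2^rx_hash_log_tbl_sz entries,
+ * filled round-robin with the RQ numbers of the open channels.
+ */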
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 *in;
+       u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+       void *rqtc;
+       int inlen;
+       int err;
+       int sz;
+       int i;
+
+       sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+       inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+       MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+       MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+       for (i = 0; i < sz; i++) {
+               int ix = i % priv->params.num_channels;
+
+               MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+       }
+
+       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+       if (!err)
+               priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+       MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+       mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+                                  sizeof(out));
+}
+
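+/* One TIR is created per traffic type: MLX5E_TT_ANY dispatches directly
+ * to channel 0's RQ, while every other type hashes into the RQT using a
+ * symmetric Toeplitz hash over the fields selected below.
+ */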
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                         MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                         MLX5_HASH_FIELD_SEL_DST_IP   |\
+                         MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                         MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+       if (priv->params.lro_en) {
+               MLX5_SET(tirc, tirc, lro_enable_mask,
+                        MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+                        MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+               MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+                        (priv->params.lro_wqe_sz -
+                         ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+               MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+                        MLX5_CAP_ETH(priv->mdev,
+                                     lro_timer_supported_periods[3]));
+       }
+
+       switch (tt) {
+       case MLX5E_TT_ANY:
+               MLX5_SET(tirc, tirc, disp_type,
+                        MLX5_TIRC_DISP_TYPE_DIRECT);
+               MLX5_SET(tirc, tirc, inline_rqn,
+                        priv->channel[0]->rq.rqn);
+               break;
+       default:
+               MLX5_SET(tirc, tirc, disp_type,
+                        MLX5_TIRC_DISP_TYPE_INDIRECT);
+               MLX5_SET(tirc, tirc, indirect_table,
+                        priv->rqtn);
+               MLX5_SET(tirc, tirc, rx_hash_fn,
+                        MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+                                                rx_hash_toeplitz_key),
+                                   MLX5_FLD_SZ_BYTES(tirc,
+                                                     rx_hash_toeplitz_key));
+               break;
+       }
+
+       switch (tt) {
+       case MLX5E_TT_IPV4_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV6_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV4_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV6_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV4:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+
+       case MLX5E_TT_IPV6:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+       }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 *in;
+       void *tirc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+       mlx5e_build_tir_ctx(priv, tirc, tt);
+
+       err = mlx5_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+       mlx5_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < MLX5E_NUM_TT; i++) {
+               err = mlx5e_open_tir(priv, i);
+               if (err)
+                       goto err_close_tirs;
+       }
+
+       return 0;
+
+err_close_tirs:
+       for (i--; i >= 0; i--)
+               mlx5e_close_tir(priv, i);
+
+       return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < MLX5E_NUM_TT; i++)
+               mlx5e_close_tir(priv, i);
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int actual_mtu;
+       int num_txqs;
+       int err;
+
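+       /* A power-of-two block of TX queues is reserved per traffic class;
+        * queue_mapping_channel_mask relies on this stride when mapping a
+        * queue index back to a channel.
+        */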
+       num_txqs = roundup_pow_of_two(priv->params.num_channels) *
+                  priv->params.num_tc;
+       netif_set_real_num_tx_queues(netdev, num_txqs);
+       netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+       err = mlx5_set_port_mtu(mdev, netdev->mtu);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
+                          __func__, err);
+               return err;
+       }
+
+       err = mlx5_query_port_oper_mtu(mdev, &actual_mtu);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
+                          __func__, err);
+               return err;
+       }
+
+       if (actual_mtu != netdev->mtu)
+               netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
+                           __func__, netdev->mtu);
+
+       netdev->mtu = actual_mtu;
+
+       err = mlx5e_open_tises(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+                          __func__, err);
+               return err;
+       }
+
+       err = mlx5e_open_channels(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+                          __func__, err);
+               goto err_close_tises;
+       }
+
+       err = mlx5e_open_rqt(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+                          __func__, err);
+               goto err_close_channels;
+       }
+
+       err = mlx5e_open_tirs(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_tirs failed, %d\n",
+                          __func__, err);
+               goto err_close_rqt;
+       }
+
+       err = mlx5e_open_flow_table(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+                          __func__, err);
+               goto err_close_tirs;
+       }
+
+       err = mlx5e_add_all_vlan_rules(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+                          __func__, err);
+               goto err_close_flow_table;
+       }
+
+       mlx5e_init_eth_addr(priv);
+
+       set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+       mlx5e_update_carrier(priv);
+       mlx5e_set_rx_mode_core(priv);
+
+       schedule_delayed_work(&priv->update_stats_work, 0);
+       return 0;
+
+err_close_flow_table:
+       mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+       mlx5e_close_tirs(priv);
+
+err_close_rqt:
+       mlx5e_close_rqt(priv);
+
+err_close_channels:
+       mlx5e_close_channels(priv);
+
+err_close_tises:
+       mlx5e_close_tises(priv);
+
+       return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_open_locked(netdev);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+       mlx5e_set_rx_mode_core(priv);
+       mlx5e_del_all_vlan_rules(priv);
+       netif_carrier_off(priv->netdev);
+       mlx5e_close_flow_table(priv);
+       mlx5e_close_tirs(priv);
+       mlx5e_close_rqt(priv);
+       mlx5e_close_channels(priv);
+       mlx5e_close_tises(priv);
+
+       return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_close_locked(netdev);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+                            struct mlx5e_params *new_params)
+{
+       int err = 0;
+       int was_opened;
+
+       WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(priv->netdev);
+
+       priv->params = *new_params;
+
+       if (was_opened)
+               err = mlx5e_open_locked(priv->netdev);
+
+       return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+       stats->rx_packets = vstats->rx_packets;
+       stats->rx_bytes   = vstats->rx_bytes;
+       stats->tx_packets = vstats->tx_packets;
+       stats->tx_bytes   = vstats->tx_bytes;
+       stats->multicast  = vstats->rx_multicast_packets +
+                           vstats->tx_multicast_packets;
+       stats->tx_errors  = vstats->tx_error_packets;
+       stats->rx_errors  = vstats->rx_error_packets;
+       stats->tx_dropped = vstats->tx_queue_dropped;
+       stats->rx_crc_errors = 0;
+       stats->rx_length_errors = 0;
+
+       return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct sockaddr *saddr = addr;
+
+       if (!is_valid_ether_addr(saddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       netif_addr_lock_bh(netdev);
+       ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+       netif_addr_unlock_bh(netdev);
+
+       schedule_work(&priv->set_rx_mode_work);
+
+       return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+                             netdev_features_t features)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       netdev_features_t changes = features ^ netdev->features;
+       struct mlx5e_params new_params;
+       bool update_params = false;
+
+       mutex_lock(&priv->state_lock);
+       new_params = priv->params;
+
+       if (changes & NETIF_F_LRO) {
+               new_params.lro_en = !!(features & NETIF_F_LRO);
+               update_params = true;
+       }
+
+       if (update_params)
+               mlx5e_update_priv_params(priv, &new_params);
+
+       if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+               if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+                       mlx5e_enable_vlan_filter(priv);
+               else
+                       mlx5e_disable_vlan_filter(priv);
+       }
+
+       mutex_unlock(&priv->state_lock);
+
+       return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int max_mtu;
+       int err = 0;
+
+       err = mlx5_query_port_max_mtu(mdev, &max_mtu);
+       if (err)
+               return err;
+
+       if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
+               netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
+                          __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+               return -EINVAL;
+       }
+
+       mutex_lock(&priv->state_lock);
+       netdev->mtu = new_mtu;
+       err = mlx5e_update_priv_params(priv, &priv->params);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+       .ndo_open                = mlx5e_open,
+       .ndo_stop                = mlx5e_close,
+       .ndo_start_xmit          = mlx5e_xmit,
+       .ndo_get_stats64         = mlx5e_get_stats,
+       .ndo_set_rx_mode         = mlx5e_set_rx_mode,
+       .ndo_set_mac_address     = mlx5e_set_mac,
+       .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
+       .ndo_set_features        = mlx5e_set_features,
+       .ndo_change_mtu          = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+       if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return -ENOTSUPP;
+       if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+           !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+           !MLX5_CAP_ETH(mdev, csum_cap) ||
+           !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+           !MLX5_CAP_ETH(mdev, vlan_cap) ||
+           !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+               mlx5_core_warn(mdev,
+                              "Not creating net device, some required device capabilities are missing\n");
+               return -ENOTSUPP;
+       }
+       return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+                                   struct net_device *netdev,
+                                   int num_comp_vectors)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       priv->params.log_sq_size           =
+               MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+       priv->params.log_rq_size           =
+               MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+       priv->params.rx_cq_moderation_usec =
+               MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+       priv->params.rx_cq_moderation_pkts =
+               MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+       priv->params.tx_cq_moderation_usec =
+               MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+       priv->params.tx_cq_moderation_pkts =
+               MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+       priv->params.min_rx_wqes           =
+               MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+       priv->params.rx_hash_log_tbl_sz    =
+               (order_base_2(num_comp_vectors) >
+                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+               order_base_2(num_comp_vectors)           :
+               MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+       priv->params.num_tc                = 1;
+       priv->params.default_vlan_prio     = 0;
+
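+       /* "false &&" keeps LRO disabled by default while still recording
+        * the lro_cap dependency; drop the first operand to default-enable
+        * LRO on capable devices.
+        */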
+       priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+       priv->params.lro_wqe_sz            =
+               MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+       priv->mdev                         = mdev;
+       priv->netdev                       = netdev;
+       priv->params.num_channels          = num_comp_vectors;
+       priv->order_base_2_num_channels    = order_base_2(num_comp_vectors);
+       priv->queue_mapping_channel_mask   =
+               roundup_pow_of_two(num_comp_vectors) - 1;
+       priv->num_tc                       = priv->params.num_tc;
+       priv->default_vlan_prio            = priv->params.default_vlan_prio;
+
+       spin_lock_init(&priv->async_events_spinlock);
+       mutex_init(&priv->state_lock);
+
+       INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+       INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+       INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+       if (priv->num_tc > 1) {
+               mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+               mlx5e_netdev_ops.ndo_start_xmit   = mlx5e_xmit_multi_tc;
+       }
+
+       netdev->netdev_ops        = &mlx5e_netdev_ops;
+       netdev->watchdog_timeo    = 15 * HZ;
+
+       netdev->ethtool_ops       = &mlx5e_ethtool_ops;
+
+       netdev->vlan_features    |= NETIF_F_IP_CSUM;
+       netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
+       netdev->vlan_features    |= NETIF_F_GRO;
+       netdev->vlan_features    |= NETIF_F_TSO;
+       netdev->vlan_features    |= NETIF_F_TSO6;
+       netdev->vlan_features    |= NETIF_F_RXCSUM;
+       netdev->vlan_features    |= NETIF_F_RXHASH;
+
+       if (!!MLX5_CAP_ETH(mdev, lro_cap))
+               netdev->vlan_features    |= NETIF_F_LRO;
+
+       netdev->hw_features       = netdev->vlan_features;
+       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
+       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       netdev->features          = netdev->hw_features;
+       if (!priv->params.lro_en)
+               netdev->features  &= ~NETIF_F_LRO;
+
+       netdev->features         |= NETIF_F_HIGHDMA;
+
+       netdev->priv_flags       |= IFF_UNICAST_FLT;
+
+       mlx5e_set_netdev_dev_addr(netdev);
+}
+
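+/* Create a single physical-address (PA) mkey covering the whole address
+ * space (MLX5_MKEY_LEN64); the data path uses it as the lkey for all
+ * SQ/RQ buffers.
+ */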
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+                            struct mlx5_core_mr *mr)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_create_mkey_mbox_in *in;
+       int err;
+
+       in = mlx5_vzalloc(sizeof(*in));
+       if (!in)
+               return -ENOMEM;
+
+       in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+                       MLX5_PERM_LOCAL_READ  |
+                       MLX5_ACCESS_MODE_PA;
+       in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+       in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+       err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+                                   NULL);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+       struct net_device *netdev;
+       struct mlx5e_priv *priv;
+       int ncv = mdev->priv.eq_table.num_comp_vectors;
+       int err;
+
+       if (mlx5e_check_required_hca_cap(mdev))
+               return NULL;
+
+       netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+                                   roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
+                                   ncv);
+       if (!netdev) {
+               mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+               return NULL;
+       }
+
+       mlx5e_build_netdev_priv(mdev, netdev, ncv);
+       mlx5e_build_netdev(netdev);
+
+       netif_carrier_off(netdev);
+
+       priv = netdev_priv(netdev);
+
+       err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+                          __func__, err);
+               goto err_free_netdev;
+       }
+
+       err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+                          __func__, err);
+               goto err_unmap_free_uar;
+       }
+
+       err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+                          __func__, err);
+               goto err_dealloc_pd;
+       }
+
+       err = register_netdev(netdev);
+       if (err) {
+               netdev_err(netdev, "%s: register_netdev failed, %d\n",
+                          __func__, err);
+               goto err_destroy_mkey;
+       }
+
+       mlx5e_enable_async_events(priv);
+
+       return priv;
+
+err_destroy_mkey:
+       mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_pd:
+       mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+       mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+       free_netdev(netdev);
+
+       return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+       struct mlx5e_priv *priv = vpriv;
+       struct net_device *netdev = priv->netdev;
+
+       unregister_netdev(netdev);
+       mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+       mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+       mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       mlx5e_disable_async_events(priv);
+       flush_scheduled_work();
+       free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+       struct mlx5e_priv *priv = vpriv;
+
+       return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+       .add       = mlx5e_create_netdev,
+       .remove    = mlx5e_destroy_netdev,
+       .event     = mlx5e_async_event,
+       .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
+       .get_dev   = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+       mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+       mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644 (file)
index 0000000..ce1317c
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
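+/* One SKB per RQ entry: the DMA address is stashed in skb->cb so the
+ * completion path can unmap the buffer without a separate info array.
+ */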
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+                                    struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+
+       skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
+       dma_addr = dma_map_single(rq->pdev,
+                                 /* hw start padding */
+                                 skb->data - MLX5E_NET_IP_ALIGN,
+                                 /* hw   end padding */
+                                 rq->wqe_sz,
+                                 DMA_FROM_DEVICE);
+
+       if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+               goto err_free_skb;
+
+       *((dma_addr_t *)skb->cb) = dma_addr;
+       wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+       rq->skb[ix] = skb;
+
+       return 0;
+
+err_free_skb:
+       dev_kfree_skb(skb);
+
+       return -ENOMEM;
+}
+
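+/* Refill the linked-list RQ until it is full or an allocation fails, then
+ * publish the whole batch with one doorbell-record update.  Returning
+ * true (ring still not full) keeps NAPI polling.
+ */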
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+
+       if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+               return false;
+
+       while (!mlx5_wq_ll_is_full(wq)) {
+               struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+               if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+                       break;
+
+               mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+       }
+
+       /* ensure wqes are visible to device before updating doorbell record */
+       dma_wmb();
+
+       mlx5_wq_ll_update_db_record(wq);
+
+       return !mlx5_wq_ll_is_full(wq);
+}
+
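+/* An LRO session coalesces several TCP segments into one SKB, so the
+ * headers of the aggregate must be rewritten from the CQE: IP total
+ * length and TTL/hop-limit, TCP PSH/ACK flags, ack_seq and window, and
+ * the IPv4 header checksum.
+ */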
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+       struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
+       struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
+       struct ipv6hdr  *ipv6   = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+       struct tcphdr   *tcp;
+
+       u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+       int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
+                      (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+       u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+       if (eth->h_proto == htons(ETH_P_IP)) {
+               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+                                       sizeof(struct iphdr));
+               ipv6 = NULL;
+       } else {
+               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+                                       sizeof(struct ipv6hdr));
+               ipv4 = NULL;
+       }
+
+       if (get_cqe_lro_tcppsh(cqe))
+               tcp->psh                = 1;
+
+       if (tcp_ack) {
+               tcp->ack                = 1;
+               tcp->ack_seq            = cqe->lro_ack_seq_num;
+               tcp->window             = cqe->lro_tcp_win;
+       }
+
+       if (ipv4) {
+               ipv4->ttl               = cqe->lro_min_ttl;
+               ipv4->tot_len           = cpu_to_be16(tot_len);
+               ipv4->check             = 0;
+               ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
+                                                      ipv4->ihl);
+       } else {
+               ipv6->hop_limit         = cqe->lro_min_ttl;
+               ipv6->payload_len       = cpu_to_be16(tot_len -
+                                                     sizeof(struct ipv6hdr));
+       }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+                                     struct sk_buff *skb)
+{
+       u8 cht = cqe->rss_hash_type;
+       int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+                (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+                                           PKT_HASH_TYPE_NONE;
+       skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+                                     struct mlx5e_rq *rq,
+                                     struct sk_buff *skb)
+{
+       struct net_device *netdev = rq->netdev;
+       u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+       int lro_num_seg;
+
+       skb_put(skb, cqe_bcnt);
+
+       lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+       if (lro_num_seg > 1) {
+               mlx5e_lro_update_hdr(skb, cqe);
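+               /* gso_size is an approximation (the default LRO WQE size),
+                * not an exact per-session MSS.
+                */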
+               skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+               rq->stats.lro_packets++;
+               rq->stats.lro_bytes += cqe_bcnt;
+       }
+
+       if (likely(netdev->features & NETIF_F_RXCSUM) &&
+           (cqe->hds_ip_ext & CQE_L2_OK) &&
+           (cqe->hds_ip_ext & CQE_L3_OK) &&
+           (cqe->hds_ip_ext & CQE_L4_OK)) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else {
+               skb->ip_summed = CHECKSUM_NONE;
+               rq->stats.csum_none++;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       skb_record_rx_queue(skb, rq->ix);
+
+       if (likely(netdev->features & NETIF_F_RXHASH))
+               mlx5e_skb_set_hash(cqe, skb);
+
+       if (cqe_has_vlan(cqe))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+       struct mlx5e_rq *rq = cq->sqrq;
+       int i;
+
+       /* avoid accessing cq (dma coherent memory) if not needed */
+       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+               return false;
+
+       for (i = 0; i < budget; i++) {
+               struct mlx5e_rx_wqe *wqe;
+               struct mlx5_cqe64 *cqe;
+               struct sk_buff *skb;
+               __be16 wqe_counter_be;
+               u16 wqe_counter;
+
+               cqe = mlx5e_get_cqe(cq);
+               if (!cqe)
+                       break;
+
+               wqe_counter_be = cqe->wqe_counter;
+               wqe_counter    = be16_to_cpu(wqe_counter_be);
+               wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+               skb            = rq->skb[wqe_counter];
+               rq->skb[wqe_counter] = NULL;
+
+               dma_unmap_single(rq->pdev,
+                                *((dma_addr_t *)skb->cb),
+                                skb_end_offset(skb),
+                                DMA_FROM_DEVICE);
+
+               if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+                       rq->stats.wqe_err++;
+                       dev_kfree_skb(skb);
+                       goto wq_ll_pop;
+               }
+
+               mlx5e_build_rx_skb(cqe, rq, skb);
+               rq->stats.packets++;
+               napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+               mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+                              &wqe->next.next_wqe_index);
+       }
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       if (i == budget) {
+               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+               return true;
+       }
+
+       return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644 (file)
index 0000000..8020986
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
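+/* The SQ keeps a parallel "dma fifo" of (addr, size) pairs so that the
+ * completion path can unmap exactly what xmit mapped: mlx5e_dma_push()
+ * produces entries at xmit time, mlx5e_dma_get() consumes them on
+ * completion and mlx5e_dma_pop_last_pushed() unwinds after a mapping
+ * failure.
+ */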
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+                                     u32 *size)
+{
+       sq->dma_fifo_pc--;
+       *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+       *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+       dma_addr_t addr;
+       u32 size;
+       int i;
+
+       for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+               mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+               dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+       }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+                                 u32 size)
+{
+       sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+       sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+       sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+                                u32 *size)
+{
+       *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+       *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
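+/* txq index layout: the low bits select the channel (from the fallback
+ * hash), the high bits the traffic class derived from the skb's VLAN
+ * priority; mlx5e_xmit_multi_tc() decodes the same layout.
+ */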
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv, select_queue_fallback_t fallback)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int channel_ix = fallback(dev, skb);
+       int up = skb_vlan_tag_present(skb)        ?
+                skb->vlan_tci >> VLAN_PRIO_SHIFT :
+                priv->default_vlan_prio;
+       int tc = netdev_get_prio_tc_map(dev, up);
+
+       return (tc << priv->order_base_2_num_channels) | channel_ix;
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+                                           struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+       return MLX5E_MIN_INLINE;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+       struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+       int cpy1_sz = 2 * ETH_ALEN;
+       int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
+
+       skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+       skb_pull_inline(skb, cpy1_sz);
+       vhdr->h_vlan_proto = skb->vlan_proto;
+       vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+       skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+                                 cpy2_sz);
+       skb_pull_inline(skb, cpy2_sz);
+}
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+       struct mlx5_wq_cyc       *wq   = &sq->wq;
+
+       u16 pi = sq->pc & wq->sz_m1;
+       struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+       struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+       struct mlx5_wqe_data_seg *dseg;
+
+       u8  opcode = MLX5_OPCODE_SEND;
+       dma_addr_t dma_addr = 0;
+       u16 headlen;
+       u16 ds_cnt;
+       u16 ihs;
+       int i;
+
+       memset(wqe, 0, sizeof(*wqe));
+
+       if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+               eseg->cs_flags  = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+       else
+               sq->stats.csum_offload_none++;
+
+       if (skb_is_gso(skb)) {
+               u32 payload_len;
+               int num_pkts;
+
+               eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
+               opcode       = MLX5_OPCODE_LSO;
+               ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               payload_len  = skb->len - ihs;
+               num_pkts     =    (payload_len / skb_shinfo(skb)->gso_size) +
+                               !!(payload_len % skb_shinfo(skb)->gso_size);
+               MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+                                                 (num_pkts - 1) * ihs;
+               sq->stats.tso_packets++;
+               sq->stats.tso_bytes += payload_len;
+       } else {
+               ihs             = mlx5e_get_inline_hdr_size(sq, skb);
+               MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+                                                       ETH_ZLEN);
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+       } else {
+               skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+               skb_pull_inline(skb, ihs);
+       }
+
+       eseg->inline_hdr_sz     = cpu_to_be16(ihs);
+
+       ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+       ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+                              MLX5_SEND_WQE_DS);
+       dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+       MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+       headlen = skb_headlen(skb);
+       if (headlen) {
+               dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+                                         DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+                       goto dma_unmap_wqe_err;
+
+               dseg->addr       = cpu_to_be64(dma_addr);
+               dseg->lkey       = sq->mkey_be;
+               dseg->byte_count = cpu_to_be32(headlen);
+
+               mlx5e_dma_push(sq, dma_addr, headlen);
+               MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+               dseg++;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               int fsz = skb_frag_size(frag);
+
+               dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+                                           DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+                       goto dma_unmap_wqe_err;
+
+               dseg->addr       = cpu_to_be64(dma_addr);
+               dseg->lkey       = sq->mkey_be;
+               dseg->byte_count = cpu_to_be32(fsz);
+
+               mlx5e_dma_push(sq, dma_addr, fsz);
+               MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+               dseg++;
+       }
+
+       ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+       cseg->opmod_idx_opcode  = cpu_to_be32((sq->pc << 8) | opcode);
+       cseg->qpn_ds            = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+       cseg->fm_ce_se          = MLX5_WQE_CTRL_CQ_UPDATE;
+
+       sq->skb[pi] = skb;
+
+       MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+                                                       MLX5_SEND_WQEBB_NUM_DS);
+       sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+       netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+       if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+               netif_tx_stop_queue(sq->txq);
+               sq->stats.stopped++;
+       }
+
+       if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+               mlx5e_tx_notify_hw(sq, wqe);
+
+       sq->stats.packets++;
+       return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+       sq->stats.dropped++;
+       mlx5e_dma_unmap_wqe_err(sq, skb);
+
+       dev_kfree_skb_any(skb);
+
+       return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ix = skb->queue_mapping;
+       int tc = 0;
+       struct mlx5e_channel *c = priv->channel[ix];
+       struct mlx5e_sq *sq = &c->sq[tc];
+
+       return mlx5e_sq_xmit(sq, skb);
+}
+
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
+       int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
+       struct mlx5e_channel *c = priv->channel[ix];
+       struct mlx5e_sq *sq = &c->sq[tc];
+
+       return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5e_sq *sq;
+       u32 dma_fifo_cc;
+       u32 nbytes;
+       u16 npkts;
+       u16 sqcc;
+       int i;
+
+       /* avoid accessing cq (dma coherent memory) if not needed */
+       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+               return false;
+
+       sq = cq->sqrq;
+
+       npkts = 0;
+       nbytes = 0;
+
+       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+        * otherwise a cq overrun may occur
+        */
+       sqcc = sq->cc;
+
+       /* avoid dirtying sq cache line every cqe */
+       dma_fifo_cc = sq->dma_fifo_cc;
+
+       for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+               struct mlx5_cqe64 *cqe;
+               struct sk_buff *skb;
+               u16 ci;
+               int j;
+
+               cqe = mlx5e_get_cqe(cq);
+               if (!cqe)
+                       break;
+
+               ci = sqcc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+
+               if (unlikely(!skb)) { /* nop */
+                       sq->stats.nop++;
+                       sqcc++;
+                       goto free_skb;
+               }
+
+               for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+                       dma_addr_t addr;
+                       u32 size;
+
+                       mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+                       dma_fifo_cc++;
+                       dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+               }
+
+               npkts++;
+               nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+               sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+free_skb:
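+               /* the nop path above relies on dev_kfree_skb(NULL) being
+                * a no-op
+                */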
+               dev_kfree_skb(skb);
+       }
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       sq->dma_fifo_cc = dma_fifo_cc;
+       sq->cc = sqcc;
+
+       netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+       if (netif_tx_queue_stopped(sq->txq) &&
+           mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+           likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+               netif_tx_wake_queue(sq->txq);
+               sq->stats.wake++;
+       }
+       if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+               return true;
+       }
+
+       return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644 (file)
index 0000000..088bc42
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
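+/* The device toggles the CQE ownership bit on every pass around the CQ
+ * ring, so a CQE is valid only when its owner bit matches the parity of
+ * the software wrap counter; the rmb() keeps the CQE payload from being
+ * read ahead of that check.
+ */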
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+       struct mlx5_cqwq *wq = &cq->wq;
+       u32 ci = mlx5_cqwq_get_ci(wq);
+       struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+       int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+       int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+       if (cqe_ownership_bit != sw_ownership_val)
+               return NULL;
+
+       mlx5_cqwq_pop(wq);
+
+       /* ensure cqe content is read after cqe ownership bit */
+       rmb();
+
+       return cqe;
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+       struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+                                              napi);
+       bool busy = false;
+       int i;
+
+       clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+       for (i = 0; i < c->num_tc; i++)
+               busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+       busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+       busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);
+
+       if (busy)
+               return budget;
+
+       napi_complete(napi);
+
+       /* avoid losing completion event during/after polling cqs */
+       if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+               napi_schedule(napi);
+               return 0;
+       }
+
+       for (i = 0; i < c->num_tc; i++)
+               mlx5e_cq_arm(&c->sq[i].cq);
+       mlx5e_cq_arm(&c->rq.cq);
+
+       return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+       struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+       set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+       set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+       barrier();
+       napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+       struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+       struct mlx5e_channel *c = cq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct net_device *netdev = priv->netdev;
+
+       netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+                  __func__, mcq->cqn, event);
+}
index 58800e4f39585c2fd30d76e8de4a21bfb6f8bf66..a40b96d4c6621231987ece6fb5364384c4714e4c 100644 (file)
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_priv *priv = &dev->priv;
        struct mlx5_create_eq_mbox_in *in;
        struct mlx5_create_eq_mbox_out out;
        int err;
        int inlen;
 
        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
-       err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
-                            &eq->buf);
+       err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;
 
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                goto err_in;
        }
 
-       snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+       snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
+
        eq->eqn = out.eq_number;
        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-       err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-                         eq->name, eq);
+       err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+                         priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;
 
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
        return 0;
 
 err_irq:
-       free_irq(table->msix_arr[vecidx].vector, eq);
+       free_irq(priv->msix_arr[vecidx].vector, eq);
 
 err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
 
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
 
        mlx5_debug_eq_remove(dev, eq);
-       free_irq(table->msix_arr[eq->irqn].vector, eq);
+       free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
-       synchronize_irq(table->msix_arr[eq->irqn].vector);
+       synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
        mlx5_buf_free(dev, &eq->buf);
 
        return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
        u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;
 
-       if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+       if (MLX5_CAP_GEN(dev, pg))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
 
        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 
        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
-                                dev->caps.gen.max_vf + 1,
+                                /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644 (file)
index 0000000..ca90b9b
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+       struct mlx5_flow_table_group    g;
+       u32                             id;
+       u32                             start_ix;
+};
+
+struct mlx5_flow_table {
+       struct mlx5_core_dev    *dev;
+       u8                      level;
+       u8                      type;
+       u32                     id;
+       struct mutex            mutex; /* sync bitmap alloc */
+       u16                     num_groups;
+       struct mlx5_ftg         *group;
+       unsigned long           *bitmap;
+       u32                     size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+                                  u32 flow_index, void *flow_context)
+{
+       u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+       u32 *in;
+       void *in_flow_context;
+       int fcdls =
+               MLX5_GET(flow_context, flow_context, destination_list_size) *
+               MLX5_ST_SZ_BYTES(dest_format_struct);
+       int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+               return -ENOMEM;
+       }
+
+       MLX5_SET(set_fte_in, in, table_type, ft->type);
+       MLX5_SET(set_fte_in, in, table_id,   ft->id);
+       MLX5_SET(set_fte_in, in, flow_index, flow_index);
+       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+       memcpy(in_flow_context, flow_context,
+              MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+       MLX5_SET(flow_context, in_flow_context, group_id,
+                ft->group[group_ix].id);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+                                        sizeof(out));
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+       u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+       u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+       MLX5_SET_DFTEI(in, table_type, ft->type);
+       MLX5_SET_DFTEI(in, table_id,   ft->id);
+       MLX5_SET_DFTEI(in, flow_index, flow_index);
+       MLX5_SET_DFTEI(in, opcode,     MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+       MLX5_SET_DFGI(in, table_type, ft->type);
+       MLX5_SET_DFGI(in, table_id,   ft->id);
+       MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+       MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+       u32 *in;
+       void *in_match_criteria;
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_table_group *g = &ft->group[i].g;
+       u32 start_ix = ft->group[i].start_ix;
+       u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+               return -ENOMEM;
+       }
+       in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+                                        match_criteria);
+
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+       MLX5_SET_CFGI(in, table_type,            ft->type);
+       MLX5_SET_CFGI(in, table_id,              ft->id);
+       MLX5_SET_CFGI(in, opcode,                MLX5_CMD_OP_CREATE_FLOW_GROUP);
+       MLX5_SET_CFGI(in, start_flow_index,      start_ix);
+       MLX5_SET_CFGI(in, end_flow_index,        end_ix);
+       MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+       memcpy(in_match_criteria, g->match_criteria,
+              MLX5_ST_SZ_BYTES(fte_match_param));
+
+       err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+                                        sizeof(out));
+       if (!err)
+               ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+                                          group_id);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+       int i;
+
+       for (i = 0; i < ft->num_groups; i++)
+               mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < ft->num_groups; i++) {
+               err = mlx5_create_flow_group_cmd(ft, i);
+               if (err)
+                       goto err_destroy_flow_table_groups;
+       }
+
+       return 0;
+
+err_destroy_flow_table_groups:
+       for (i--; i >= 0; i--)
+               mlx5_destroy_flow_group_cmd(ft, i);
+
+       return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+       MLX5_SET(create_flow_table_in, in, level,      ft->level);
+       MLX5_SET(create_flow_table_in, in, log_size,   order_base_2(ft->size));
+
+       MLX5_SET(create_flow_table_in, in, opcode,
+                MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+                                        sizeof(out));
+       if (err)
+               return err;
+
+       ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+       return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+       MLX5_SET_DFTI(in, table_type, ft->type);
+       MLX5_SET_DFTI(in, table_id,   ft->id);
+       MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+                          u32 *match_criteria, int *group_ix)
+{
+       void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                     outer_headers);
+       void *mc_misc  = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                     misc_parameters);
+       void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                     inner_headers);
+       int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+       int mc_misc_sz  = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+       int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+       int i;
+
+       for (i = 0; i < ft->num_groups; i++) {
+               struct mlx5_flow_table_group *g = &ft->group[i].g;
+               void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+                                              g->match_criteria,
+                                              outer_headers);
+               void *gmc_misc  = MLX5_ADDR_OF(fte_match_param,
+                                              g->match_criteria,
+                                              misc_parameters);
+               void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+                                              g->match_criteria,
+                                              inner_headers);
+
+               if (g->match_criteria_enable != match_criteria_enable)
+                       continue;
+
+               if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+                       if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+                               continue;
+
+               if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+                       if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+                               continue;
+
+               if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+                       if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+                               continue;
+
+               *group_ix = i;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+       struct mlx5_ftg *g = &ft->group[group_ix];
+       int err = 0;
+
+       mutex_lock(&ft->mutex);
+
+       *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+       if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+               err = -ENOSPC;
+       else
+               __set_bit(*ix, ft->bitmap);
+
+       mutex_unlock(&ft->mutex);
+
+       return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+       __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+                             void *match_criteria, void *flow_context,
+                             u32 *flow_index)
+{
+       struct mlx5_flow_table *ft = flow_table;
+       int group_ix;
+       int err;
+
+       err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+                             &group_ix);
+       if (err) {
+               mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+               return err;
+       }
+
+       err = alloc_flow_index(ft, group_ix, flow_index);
+       if (err) {
+               mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+               return err;
+       }
+
+       return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+       struct mlx5_flow_table *ft = flow_table;
+
+       mlx5_del_flow_entry_cmd(ft, flow_index);
+       mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+                            u16 num_groups,
+                            struct mlx5_flow_table_group *group)
+{
+       struct mlx5_flow_table *ft;
+       u32 start_ix = 0;
+       u32 ft_size = 0;
+       void *gr;
+       void *bm;
+       int err;
+       int i;
+
+       for (i = 0; i < num_groups; i++)
+               ft_size += (1 << group[i].log_sz);
+
+       ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+       gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+       bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+       if (!ft || !gr || !bm)
+               goto err_free_ft;
+
+       ft->group       = gr;
+       ft->bitmap      = bm;
+       ft->num_groups  = num_groups;
+       ft->level       = level;
+       ft->type        = table_type;
+       ft->size        = ft_size;
+       ft->dev         = dev;
+       mutex_init(&ft->mutex);
+
+       for (i = 0; i < ft->num_groups; i++) {
+               memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+               ft->group[i].start_ix = start_ix;
+               start_ix += 1 << group[i].log_sz;
+       }
+
+       err = mlx5_create_flow_table_cmd(ft);
+       if (err)
+               goto err_free_ft;
+
+       err = mlx5_create_flow_table_groups(ft);
+       if (err)
+               goto err_destroy_flow_table_cmd;
+
+       return ft;
+
+err_destroy_flow_table_cmd:
+       mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+       mlx5_core_warn(dev, "failed to create flow table\n");
+       kfree(bm);
+       kfree(gr);
+       kfree(ft);
+
+       return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+       struct mlx5_flow_table *ft = flow_table;
+
+       mlx5_destroy_flow_table_groups(ft);
+       mlx5_destroy_flow_table_cmd(ft);
+       kfree(ft->bitmap);
+       kfree(ft->group);
+       kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+       struct mlx5_flow_table *ft = flow_table;
+
+       return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
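+
+/* Usage sketch (hypothetical caller; the in-tree user is en_flow_table.c).
+ * It assumes the MLX5_FLOW_TABLE_TYPE_NIC_RCV table type from
+ * <linux/mlx5/flow_table.h>; "match_criteria" and "flow_context" are
+ * MLX5_SET()-built blobs owned by the caller:
+ *
+ *     struct mlx5_flow_table_group g = {
+ *             .log_sz = 10,
+ *             .match_criteria_enable = MLX5_MATCH_OUTER_HEADERS,
+ *     };
+ *     void *ft;
+ *     u32 ix;
+ *
+ *     memcpy(g.match_criteria, match_criteria, sizeof(g.match_criteria));
+ *     ft = mlx5_create_flow_table(dev, 0, MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ *                                 1, &g);
+ *     if (!ft)
+ *             return;
+ *     if (!mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
+ *                                    match_criteria, flow_context, &ix))
+ *             mlx5_del_flow_table_entry(ft, ix);
+ *     mlx5_destroy_flow_table(ft);
+ */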
index 4b4cda3bcc5fa1eecf99b6ed5265d41cc40dfedc..801ccadd709abd2ae84a45ffabeb0f153b97d82a 100644 (file)
@@ -64,50 +64,74 @@ out_out:
        return err;
 }
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 {
-       return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
-}
-
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
-{
-       u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
-       int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       void *out;
        int err;
 
-       if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-               return -ENOTSUPP;
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+       if (err)
+               return err;
 
-       memset(in, 0, sizeof(in));
-       out = kzalloc(out_sz, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-       MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
-       err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
-               goto out;
+               return err;
 
-       err = mlx5_cmd_status_to_err_v2(out);
-       if (err) {
-               mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
-               goto out;
+       if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
        }
 
-       memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
-              sizeof(*caps));
+       if (MLX5_CAP_GEN(dev, pg)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
 
-       mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
-               be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
-               be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
-               be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+       if (MLX5_CAP_GEN(dev, atomic)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
 
-out:
-       kfree(out);
-       return err;
+       if (MLX5_CAP_GEN(dev, roce)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
+
+       if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
+       return 0;
 }
-EXPORT_SYMBOL(mlx5_query_odp_caps);
 
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
 {
index 28425e5ea91f871670e84721bb865c1725472e80..1c37f587426d7173eac3361219432b8761e33669 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/interrupt.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
 
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE  "January 2015"
-
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
 
 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
-       int num_eqs = 1 << dev->caps.gen.log_max_eq;
+       struct mlx5_priv *priv = &dev->priv;
+       struct mlx5_eq_table *table = &priv->eq_table;
+       int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
        int i;
 
-       nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+       nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+              MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;
 
-       table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
-       if (!table->msix_arr)
-               return -ENOMEM;
+       priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+       priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+       if (!priv->msix_arr || !priv->irq_info)
+               goto err_free_msix;
 
        for (i = 0; i < nvec; i++)
-               table->msix_arr[i].entry = i;
+               priv->msix_arr[i].entry = i;
 
-       nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+       nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
                                     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
-       if (nvec < 0)
-               return nvec;
+       if (nvec < 0) {
+               kfree(priv->irq_info);
+               kfree(priv->msix_arr);
+               return nvec;
+       }
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
        return 0;
+
+err_free_msix:
+       kfree(priv->irq_info);
+       kfree(priv->msix_arr);
+       return -ENOMEM;
 }
 
 static void mlx5_disable_msix(struct mlx5_core_dev *dev)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_priv *priv = &dev->priv;
 
        pci_disable_msix(dev->pdev);
-       kfree(table->msix_arr);
+       kfree(priv->irq_info);
+       kfree(priv->msix_arr);
 }
 
 struct mlx5_reg_host_endianess {
@@ -277,98 +284,28 @@ static u16 to_fw_pkey_sz(u32 size)
        }
 }
 
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
+static u16 to_sw_pkey_sz(int pkey_sz)
 {
-       __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
-       u64 v64;
-
-       MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
-       MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
-       MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
-       MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
-       v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
-       *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
-       if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
+       if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
                return 0;
 
-       return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
+       return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
 }
 
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
-       struct mlx5_general_caps *gen = &caps->gen;
-
-       gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
-       gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
-       gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
-       gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
-       gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
-       gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
-       gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
-       gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
-       gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
-       gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
-       gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
-       gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
-       gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
-       gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
-       gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
-       gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
-       gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
-       gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
-       gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
-       gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
-       gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
-       gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
-       gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
-       gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
-       gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
-       pr_debug("flags = 0x%llx\n", gen->flags);
-       gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
-       gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
-       gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
-       gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
-       gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
-       gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
-       gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
-       gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
-       gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
-       gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
-       gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
-       switch (opmod) {
-       case HCA_CAP_OPMOD_GET_MAX:
-               return "GET_MAX";
-       case HCA_CAP_OPMOD_GET_CUR:
-               return "GET_CUR";
-       default:
-               return "Invalid";
-       }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
-                      u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+                      enum mlx5_cap_mode cap_mode)
 {
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       void *out;
+       void *out, *hca_caps;
+       u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;
 
        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;
+
        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +314,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
 
        err = mlx5_cmd_status_to_err_v2(out);
        if (err) {
-               mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+               mlx5_core_warn(dev,
+                              "QUERY_HCA_CAP: type(%x) opmode(%x) failed(%d)\n",
+                              cap_type, cap_mode, err);
                goto query_ex;
        }
-       mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
-       fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
 
+       hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+       switch (cap_mode) {
+       case HCA_CAP_OPMOD_GET_MAX:
+               memcpy(dev->hca_caps_max[cap_type], hca_caps,
+                      MLX5_UN_SZ_BYTES(hca_cap_union));
+               break;
+       case HCA_CAP_OPMOD_GET_CUR:
+               memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+                      MLX5_UN_SZ_BYTES(hca_cap_union));
+               break;
+       default:
+               mlx5_core_warn(dev,
+                              "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+                              cap_type, cap_mode);
+               err = -EINVAL;
+               break;
+       }
 query_ex:
        kfree(out);
        return err;
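
The op_mod built above packs the capability type into bits [15:1] and the max/current selector into bit 0. A worked example, assuming the enum ordering in mlx5_device.h (MLX5_CAP_ROCE == 4, HCA_CAP_OPMOD_GET_CUR == 1; values illustrative only):

	u16 opmod_cur = (MLX5_CAP_ROCE << 1) | HCA_CAP_OPMOD_GET_CUR; /* 0x9 */
	u16 opmod_max = (MLX5_CAP_ROCE << 1) | HCA_CAP_OPMOD_GET_MAX; /* 0x8 */
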
@@ -409,49 +364,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 {
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
-       struct mlx5_caps *cur_caps = NULL;
-       struct mlx5_caps *max_caps = NULL;
        int err = -ENOMEM;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+       void *set_hca_cap;
 
        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                goto query_ex;
 
-       max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
-       if (!max_caps)
-               goto query_ex;
-
-       cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
-       if (!cur_caps)
-               goto query_ex;
-
-       err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
                goto query_ex;
 
-       err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
        if (err)
                goto query_ex;
 
+       set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+                                  capability);
+       memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+              MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+       mlx5_core_dbg(dev, "Current Pkey table size %d, setting new size %d\n",
+                     to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+                     128);
        /* we limit the size of the pkey table to 128 entries for now */
-       cur_caps->gen.pkey_table_size = 128;
+       MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+                to_fw_pkey_sz(128));
 
        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
-               cur_caps->gen.log_max_qp = prof->log_max_qp;
+               MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+                        prof->log_max_qp);
 
-       /* disable checksum */
-       cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+       /* disable cmdif checksum */
+       MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
-       copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
-                      cur_caps);
        err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
-       kfree(cur_caps);
-       kfree(max_caps);
        kfree(set_ctx);
-
        return err;
 }
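
handle_hca_cap() leans on the auto-generated MLX5_SET()/MLX5_GET() accessors, which map a named field to its bit offset inside the firmware mailbox so no offsets are hard-coded. A minimal sketch (the value 17 is hypothetical):

	void *cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	MLX5_SET(cmd_hca_cap, cap, log_max_qp, 17);
	WARN_ON(MLX5_GET(cmd_hca_cap, cap, log_max_qp) != 17);
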
 
@@ -507,6 +458,77 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
        return 0;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       struct msix_entry *msix = priv->msix_arr;
+       int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+       int numa_node           = dev_to_node(&mdev->pdev->dev);
+       int err;
+
+       if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+               mlx5_core_warn(mdev, "zalloc_cpumask_var failed\n");
+               return -ENOMEM;
+       }
+
+       err = cpumask_set_cpu_local_first(i, numa_node, priv->irq_info[i].mask);
+       if (err) {
+               mlx5_core_warn(mdev, "cpumask_set_cpu_local_first failed\n");
+               goto err_clear_mask;
+       }
+
+       err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+       if (err) {
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x\n",
+                              irq);
+               goto err_clear_mask;
+       }
+
+       return 0;
+
+err_clear_mask:
+       free_cpumask_var(priv->irq_info[i].mask);
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       struct msix_entry *msix = priv->msix_arr;
+       int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+       irq_set_affinity_hint(irq, NULL);
+       free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+               err = mlx5_irq_set_affinity_hint(mdev, i);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       for (i--; i >= 0; i--)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+}
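
The pair of helpers above follows the usual acquire-with-rollback shape: hints are set in index order, and on failure the ones already set are undone in reverse before the error propagates. The bare pattern, with hypothetical init_one()/cleanup_one() stand-ins:

	for (i = 0; i < n; i++) {
		err = init_one(i);
		if (err)
			goto err_out;
	}
	return 0;

err_out:
	for (i--; i >= 0; i--)
		cleanup_one(i);
	return err;
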
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +571,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
 static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
-       char name[MLX5_MAX_EQ_NAME];
+       char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq *eq;
        int ncomp_vec;
        int nent;
@@ -566,7 +588,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
                        goto clean;
                }
 
-               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                         name, &dev->priv.uuari.uars[0]);
@@ -588,6 +610,61 @@ clean:
        return err;
 }
 
+#ifdef CONFIG_MLX5_CORE_EN
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+       u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+       u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+       u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+       u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+       int err;
+       u32 sup_issi;
+
+       memset(query_in, 0, sizeof(query_in));
+       memset(query_out, 0, sizeof(query_out));
+
+       MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+       err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+                                        query_out, sizeof(query_out));
+       if (err) {
+               if (((struct mlx5_outbox_hdr *)query_out)->status ==
+                   MLX5_CMD_STAT_BAD_OP_ERR) {
+                       pr_debug("Only ISSI 0 is supported\n");
+                       return 0;
+               }
+
+               pr_err("failed to query ISSI\n");
+               return err;
+       }
+
+       sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+       if (sup_issi & (1 << 1)) {
+               memset(set_in, 0, sizeof(set_in));
+               memset(set_out, 0, sizeof(set_out));
+
+               MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+               MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+               err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+                                                set_out, sizeof(set_out));
+               if (err) {
+                       pr_err("failed to set ISSI=1\n");
+                       return err;
+               }
+
+               dev->issi = 1;
+
+               return 0;
+       } else if (sup_issi & (1 << 0)) {
+               return 0;
+       }
+
+       return -ENOTSUPP;
+}
+#endif
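
supported_issi_dw0 reads back as a bitmask: bit n set means the device can speak ISSI n. Walking the branches above with example values:

	/* sup_issi == 0x3: ISSI 0 and 1 supported -> SET_ISSI to 1.
	 * sup_issi == 0x1: only ISSI 0 -> nothing to set, return 0.
	 * sup_issi == 0x0: no common version -> -ENOTSUPP.
	 */
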
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +727,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_pagealloc_cleanup;
        }
 
+#ifdef CONFIG_MLX5_CORE_EN
+       err = mlx5_core_set_issi(dev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to set issi\n");
+               goto err_disable_hca;
+       }
+#endif
+
        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,7 +773,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        mlx5_start_health_poll(dev);
 
-       err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+       err = mlx5_query_hca_caps(dev);
        if (err) {
                dev_err(&pdev->dev, "query hca failed\n");
                goto err_stop_poll;
@@ -730,6 +815,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_stop_eqs;
        }
 
+       err = mlx5_irq_set_affinity_hints(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+               goto err_free_comp_eqs;
+       }
+
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
        mlx5_init_cq_table(dev);
@@ -739,6 +830,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        return 0;
 
+err_free_comp_eqs:
+       free_comp_eqs(dev);
+
 err_stop_eqs:
        mlx5_stop_eqs(dev);
 
@@ -793,6 +887,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
+       mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1143,10 @@ static int __init init(void)
        if (err)
                goto err_health;
 
+#ifdef CONFIG_MLX5_CORE_EN
+       mlx5e_init();
+#endif
+
        return 0;
 
 err_health:
@@ -1060,6 +1159,9 @@ err_debug:
 
 static void __exit cleanup(void)
 {
+#ifdef CONFIG_MLX5_CORE_EN
+       mlx5e_cleanup();
+#endif
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
index d79fd85d1dd50c6e991eb9659d426839e4e013a8..d5a0c2d61a18f8949869753cf3580129f11b4a5e 100644 (file)
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
 
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
        memcpy(in.gid, mgid, sizeof(*mgid));
        in.qpn = cpu_to_be32(qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
index a051b906afdf1a3fb8059c9567fe8378b4818cf9..6983c10472553c35d98c8f5007c35df44c175bff 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE  "January 2015"
+
 extern int mlx5_core_debug_mask;
 
 #define mlx5_core_dbg(dev, format, ...)                                        \
@@ -65,11 +69,20 @@ enum {
        MLX5_CMD_TIME, /* print command execution time */
 };
 
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+                                            int in_size, u32 *out,
+                                            int out_size)
+{
+       int err;
+
+       /* Fold the transport error and the firmware status into one errno. */
+       err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+       if (err)
+               return err;
+
+       return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
-                          struct mlx5_caps *caps);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
 int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
 #endif /* __MLX5_CORE_H__ */
index 49e90f2612d8c0b8803e635acfb4c92b51044c8a..7d3d0f9f328dd23855ae170f36c778f0c19abf33 100644 (file)
@@ -102,3 +102,165 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+                        int ptys_size, int proto_mask)
+{
+       u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(ptys_reg, in, local_port, 1);
+       MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+                                  ptys_size, MLX5_REG_PTYS, 0, 0);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+                             u32 *proto_cap, int proto_mask)
+{
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+       if (err)
+               return err;
+
+       if (proto_mask == MLX5_PTYS_EN)
+               *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+       else
+               *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+                               u32 *proto_admin, int proto_mask)
+{
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+       if (err)
+               return err;
+
+       if (proto_mask == MLX5_PTYS_EN)
+               *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+       else
+               *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+                       int proto_mask)
+{
+       u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(ptys_reg, in, local_port, 1);
+       MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+       if (proto_mask == MLX5_PTYS_EN)
+               MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+       else
+               MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PTYS, 0, 1);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+                        enum mlx5_port_status status)
+{
+       u32 in[MLX5_ST_SZ_DW(paos_reg)];
+       u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(paos_reg, in, admin_status, status);
+       MLX5_SET(paos_reg, in, ase, 1);
+
+       return mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                   sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+       u32 in[MLX5_ST_SZ_DW(paos_reg)];
+       u32 out[MLX5_ST_SZ_DW(paos_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PAOS, 0, 0);
+       if (err)
+               return err;
+
+       *status = MLX5_GET(paos_reg, out, oper_status);
+       return err;
+}
+
+static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
+                              int *admin_mtu, int *max_mtu, int *oper_mtu)
+{
+       u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+       u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(pmtu_reg, in, local_port, 1);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PMTU, 0, 0);
+       if (err)
+               return err;
+
+       if (max_mtu)
+               *max_mtu  = MLX5_GET(pmtu_reg, out, max_mtu);
+       if (oper_mtu)
+               *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+       if (admin_mtu)
+               *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+
+       return 0;
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+{
+       u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+       u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+       MLX5_SET(pmtu_reg, in, local_port, 1);
+
+       return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+                                   MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu)
+{
+       return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
+{
+       return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
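
A usage sketch for the MTU helpers exported above, clamping a requested value to the device maximum before programming it (the wrapper name is hypothetical):

	static int mlx5_set_port_mtu_clamped(struct mlx5_core_dev *mdev, int mtu)
	{
		int max_mtu;
		int err;

		err = mlx5_query_port_max_mtu(mdev, &max_mtu);
		if (err)
			return err;

		return mlx5_set_port_mtu(mdev, min(mtu, max_mtu));
	}
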
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644 (file)
index 0000000..3c555d7
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+       int err;
+
+       MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+       return err;
+}
+
+int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+       MLX5_SET(modify_rq_in, in, rqn, rqn);
+       MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+       MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+       int err;
+
+       MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+       return err;
+}
+
+int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+       MLX5_SET(modify_sq_in, in, sqn, sqn);
+       MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+       MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+       int err;
+
+       MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+       return err;
+}
+
+void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+       MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+       int err;
+
+       MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+       return err;
+}
+
+void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+       MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644 (file)
index 0000000..1bc898c
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn);
+int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn);
+int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn);
+void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn);
+void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+
+#endif /* __TRANSOBJ_H__ */
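
Callers build the "in" mailbox with MLX5_SET() and get the object number back on success. A lifecycle sketch for an RQ (the WQ context fields that real callers fill in are elided):

	u32 rqn;
	int err;

	err = mlx5_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;
	/* ... post receive WQEs, run traffic ... */
	mlx5_destroy_rq(dev, rqn);
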
index 5a89bb1d678a8e5ae6002a6ec9122bbd97d19085..9ef85873ceea8203655c8e63cdc4601d15088157 100644 (file)
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
        for (i = 0; i < tot_uuars; i++) {
                bf = &uuari->bfs[i];
 
-               bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+               bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
                bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
                bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
                bf->reg = NULL; /* Add WC support */
-               bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
-                       MLX5_BF_OFFSET;
+               bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+                            (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+                            MLX5_BF_OFFSET;
                bf->need_lock = need_uuar_lock(i);
                spin_lock_init(&bf->lock);
                spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
 
        return 0;
 }
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+       phys_addr_t pfn;
+       phys_addr_t uar_bar_start;
+       int err;
+
+       err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+               return err;
+       }
+
+       uar_bar_start = pci_resource_start(mdev->pdev, 0);
+       pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+       uar->map      = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+       if (!uar->map) {
+               err = -ENOMEM;
+               mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+               goto err_free_uar;
+       }
+
+       return 0;
+
+err_free_uar:
+       mlx5_cmd_free_uar(mdev, uar->index);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+       iounmap(uar->map);
+       mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644 (file)
index 0000000..ba374b9
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include "vport.h"
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+       u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+       u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(query_vport_state_in, in, opcode,
+                MLX5_CMD_OP_QUERY_VPORT_STATE);
+       MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+       err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+                                        sizeof(out));
+       if (err)
+               mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+       return MLX5_GET(query_vport_state_out, out, state);
+}
+
+void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+       u32  in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+       u32 *out;
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u8 *out_addr;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return;
+
+       out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+                               nic_vport_context.permanent_address);
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(query_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+       mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+       ether_addr_copy(addr, &out_addr[2]);
+
+       kvfree(out);
+}
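
The permanent_address mailbox field is wider than a MAC address (8 bytes, with the address in the low 6), which is why the copy above starts at out_addr[2]. Typical use, assuming a valid mdev:

	u8 mac[ETH_ALEN];

	mlx5_query_vport_mac_address(mdev, mac);
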
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.h b/drivers/net/ethernet/mellanox/mlx5/core/vport.h
new file mode 100644 (file)
index 0000000..c05ca2c
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+
+#endif /* __MLX5_VPORT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644 (file)
index 0000000..8388411
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+       return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+       return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+       return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+       return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+       return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+       return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                      void *wqc, struct mlx5_wq_cyc *wq,
+                      struct mlx5_wq_ctrl *wq_ctrl)
+{
+       int err;
+
+       wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+       wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               return err;
+       }
+
+       err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               goto err_db_free;
+       }
+
+       wq->buf = wq_ctrl->buf.direct.buf;
+       wq->db  = wq_ctrl->db.db;
+
+       wq_ctrl->mdev = mdev;
+
+       return 0;
+
+err_db_free:
+       mlx5_db_free(mdev, &wq_ctrl->db);
+
+       return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                    void *cqc, struct mlx5_cqwq *wq,
+                    struct mlx5_wq_ctrl *wq_ctrl)
+{
+       int err;
+
+       wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+       wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+       wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               return err;
+       }
+
+       err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               goto err_db_free;
+       }
+
+       wq->buf = wq_ctrl->buf.direct.buf;
+       wq->db  = wq_ctrl->db.db;
+
+       wq_ctrl->mdev = mdev;
+
+       return 0;
+
+err_db_free:
+       mlx5_db_free(mdev, &wq_ctrl->db);
+
+       return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                     void *wqc, struct mlx5_wq_ll *wq,
+                     struct mlx5_wq_ctrl *wq_ctrl)
+{
+       struct mlx5_wqe_srq_next_seg *next_seg;
+       int err;
+       int i;
+
+       wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+       wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               return err;
+       }
+
+       err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               goto err_db_free;
+       }
+
+       wq->buf = wq_ctrl->buf.direct.buf;
+       wq->db  = wq_ctrl->db.db;
+
+       for (i = 0; i < wq->sz_m1; i++) {
+               next_seg = mlx5_wq_ll_get_wqe(wq, i);
+               next_seg->next_wqe_index = cpu_to_be16(i + 1);
+       }
+       next_seg = mlx5_wq_ll_get_wqe(wq, i);
+       wq->tail_next = &next_seg->next_wqe_index;
+
+       wq_ctrl->mdev = mdev;
+
+       return 0;
+
+err_db_free:
+       mlx5_db_free(mdev, &wq_ctrl->db);
+
+       return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+       mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+       mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
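
A worked example of the linked-list setup in mlx5_wq_ll_create(), for log_wq_sz == 2 (four entries, sz_m1 == 3):

	/* After the init loop: wqe[0].next_wqe_index = 1, wqe[1] -> 2,
	 * wqe[2] -> 3, and wq->tail_next points at wqe[3].next_wqe_index,
	 * so mlx5_wq_ll_pop() can splice freed entries back in O(1).
	 */
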
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644 (file)
index 0000000..e0ddd69
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+       int             linear;
+       int             numa;
+};
+
+struct mlx5_wq_ctrl {
+       struct mlx5_core_dev    *mdev;
+       struct mlx5_buf         buf;
+       struct mlx5_db          db;
+};
+
+struct mlx5_wq_cyc {
+       void                    *buf;
+       __be32                  *db;
+       u16                     sz_m1;
+       u8                      log_stride;
+};
+
+struct mlx5_cqwq {
+       void                    *buf;
+       __be32                  *db;
+       u32                     sz_m1;
+       u32                     cc; /* consumer counter */
+       u8                      log_sz;
+       u8                      log_stride;
+};
+
+struct mlx5_wq_ll {
+       void                    *buf;
+       __be32                  *db;
+       __be16                  *tail_next;
+       u16                     sz_m1;
+       u16                     head;
+       u16                     wqe_ctr;
+       u16                     cur_sz;
+       u8                      log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                      void *wqc, struct mlx5_wq_cyc *wq,
+                      struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                    void *cqc, struct mlx5_cqwq *wq,
+                    struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                     void *wqc, struct mlx5_wq_ll *wq,
+                     struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+       return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+       return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+       int equal   = (cc1 == cc2);
+       int smaller = 0x8000 & (cc1 - cc2);
+
+       return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+       return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+       return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+       return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+       wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+       *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+       return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+       return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+       return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+       wq->head = head_next;
+       wq->wqe_ctr++;
+       wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+                                 __be16 *next_tail_next)
+{
+       *wq->tail_next = ix;
+       wq->tail_next = next_tail_next;
+       wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+       *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
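
mlx5_wq_cyc_cc_bigger() compares 16-bit counters modulo 2^16, so it stays correct across wraparound. A worked check with illustrative values:

	u16 cc1 = 0x0001;	/* has wrapped past zero */
	u16 cc2 = 0xfffe;

	/* cc1 - cc2 == 0x0003: not equal, bit 0x8000 clear, so cc1 is
	 * treated as bigger -- logically 3 ahead despite the wrap.
	 */
	WARN_ON(!mlx5_wq_cyc_cc_bigger(cc1, cc2));
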
index 731e0453a7d41dc5c90766d9f2fa41023e12cb31..cec147d1d34f2bb0cd9013f2c58c4fc7642b1b23 100644 (file)
@@ -16,6 +16,7 @@ if STMMAC_ETH
 config STMMAC_PLATFORM
        tristate "STMMAC Platform bus support"
        depends on STMMAC_ETH
+       select MFD_SYSCON
        default y
        ---help---
          This selects the platform specific bus support for the stmmac driver.
@@ -36,6 +37,19 @@ config DWMAC_GENERIC
          platform specific code to function or is using platform
          data for setup.
 
+config DWMAC_IPQ806X
+       tristate "QCA IPQ806x DWMAC support"
+       default ARCH_QCOM
+       depends on OF
+       select MFD_SYSCON
+       help
+         Support for QCA IPQ806x DWMAC Ethernet.
+
+         This selects the IPQ806x SoC glue layer support for the stmmac
+         device driver. This driver does not use any of the hardware
+         acceleration features available on this SoC. Network devices
+         will behave like standard non-accelerated Ethernet interfaces.
+
 config DWMAC_LPC18XX
        tristate "NXP LPC18xx/43xx DWMAC support"
        default ARCH_LPC18XX
index 92e714a48367160518145453d0b2f3751ec0a115..b3901616f4f653073ba0911d1fa6c0814de81bc5 100644 (file)
@@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o  \
 
 # Ordering matters. Generic driver must be last.
 obj-$(CONFIG_STMMAC_PLATFORM)  += stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X)    += dwmac-ipq806x.o
 obj-$(CONFIG_DWMAC_LPC18XX)    += dwmac-lpc18xx.o
 obj-$(CONFIG_DWMAC_MESON)      += dwmac-meson.o
 obj-$(CONFIG_DWMAC_ROCKCHIP)   += dwmac-rk.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644 (file)
index 0000000..7e3129e
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE                    0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x)          BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x)     BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x)     BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x)      BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x)      BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0                    0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x)           (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK                        0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL                        0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (1 << x)
+/* Mode is coded on 1 bit but is different depending on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x)       1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x)       ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL                  0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x)                 (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ           BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL      BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET   8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET         0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK           0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000          1
+#define NSS_COMMON_CLK_DIV_RGMII_100           9
+#define NSS_COMMON_CLK_DIV_RGMII_10            99
+#define NSS_COMMON_CLK_DIV_SGMII_1000          0
+#define NSS_COMMON_CLK_DIV_SGMII_100           4
+#define NSS_COMMON_CLK_DIV_SGMII_10            49
+
+#define QSGMII_PCS_MODE_CTL                    0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x)      BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL               0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST           BIT(19)
+
+/* Only GMAC1/2/3 support SGMII and their CTL register are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x)                        ((x == 1) ? 0x134 : \
+                                                (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN                      BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN                 BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN         BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN                        BIT(3)
+#define QSGMII_PHY_QSGMII_EN                   BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET      12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK                0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET           18
+#define QSGMII_PHY_RX_DC_BIAS_MASK             0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET         20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK           0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET          22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK            0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET           28
+#define QSGMII_PHY_TX_DRV_AMP_MASK             0xf
+
+struct ipq806x_gmac {
+       struct platform_device *pdev;
+       struct regmap *nss_common;
+       struct regmap *qsgmii_csr;
+       uint32_t id;
+       struct clk *core_clk;
+       phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+       struct device *dev = &gmac->pdev->dev;
+       int div;
+
+       switch (speed) {
+       case SPEED_1000:
+               div = NSS_COMMON_CLK_DIV_SGMII_1000;
+               break;
+
+       case SPEED_100:
+               div = NSS_COMMON_CLK_DIV_SGMII_100;
+               break;
+
+       case SPEED_10:
+               div = NSS_COMMON_CLK_DIV_SGMII_10;
+               break;
+
+       default:
+               dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+               return -EINVAL;
+       }
+
+       return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+       struct device *dev = &gmac->pdev->dev;
+       int div;
+
+       switch (speed) {
+       case SPEED_1000:
+               div = NSS_COMMON_CLK_DIV_RGMII_1000;
+               break;
+
+       case SPEED_100:
+               div = NSS_COMMON_CLK_DIV_RGMII_100;
+               break;
+
+       case SPEED_10:
+               div = NSS_COMMON_CLK_DIV_RGMII_10;
+               break;
+
+       default:
+               dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+               return -EINVAL;
+       }
+
+       return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+       uint32_t clk_bits, val;
+       int div;
+
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               div = get_clk_div_rgmii(gmac, speed);
+               clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+                          NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+               break;
+
+       case PHY_INTERFACE_MODE_SGMII:
+               div = get_clk_div_sgmii(gmac, speed);
+               clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+                          NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+               break;
+
+       default:
+               dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+                       phy_modes(gmac->phy_mode));
+               return -EINVAL;
+       }
+
+       /* Disable the clocks */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+       val &= ~clk_bits;
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+       /* Set the divider */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+       val &= ~(NSS_COMMON_CLK_DIV_MASK
+                << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+       val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+       /* Re-enable the clocks */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+       val |= clk_bits;
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+       return 0;
+}
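The speed change above gates the relevant RGMII/GMII clocks, reprograms the divider, and only then ungates them, presumably so the MAC never runs from a half-programmed divider. A minimal userspace sketch of that read-modify-write sequence against a fake register file; regmap_read() and regmap_write() here are simplified stand-ins, not the kernel regmap API:

#include <stdint.h>
#include <stdio.h>

#define CLK_GATE	0x8	/* NSS_COMMON_CLK_GATE */
#define CLK_DIV0	0xC	/* NSS_COMMON_CLK_DIV0 */

static uint32_t regs[0x40];	/* fake nss-common register file */

static void regmap_read(unsigned int off, uint32_t *val) { *val = regs[off / 4]; }
static void regmap_write(unsigned int off, uint32_t val) { regs[off / 4] = val; }

static void set_speed(unsigned int id, uint32_t clk_bits, uint32_t div)
{
	uint32_t val;

	regmap_read(CLK_GATE, &val);		/* 1. gate the clocks */
	val &= ~clk_bits;
	regmap_write(CLK_GATE, val);

	regmap_read(CLK_DIV0, &val);		/* 2. swap the divider */
	val &= ~(0x7f << (id * 8));
	val |= div << (id * 8);
	regmap_write(CLK_DIV0, val);

	regmap_read(CLK_GATE, &val);		/* 3. ungate again */
	val |= clk_bits;
	regmap_write(CLK_GATE, val);
}

int main(void)
{
	/* GMAC0: RGMII RX/TX gate bits 9 and 8, divider 9 (100 Mbps) */
	set_speed(0, (1 << 9) | (1 << 8), 9);
	printf("gate=0x%08x div0=0x%08x\n",
	       (unsigned int)regs[CLK_GATE / 4], (unsigned int)regs[CLK_DIV0 / 4]);
	return 0;
}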
+
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+       struct device *dev = &gmac->pdev->dev;
+
+       gmac->phy_mode = of_get_phy_mode(dev->of_node);
+       if (gmac->phy_mode < 0) {
+               dev_err(dev, "missing phy mode property\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+               dev_err(dev, "missing qcom id property\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+        * code and keep it consistent with the Linux convention, we'll number
+        * them from 0 to 3 here.
+        */
+       if (gmac->id > 3) {
+               dev_err(dev, "invalid gmac id\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+       if (IS_ERR(gmac->core_clk)) {
+               dev_err(dev, "missing stmmaceth clk property\n");
+               return gmac->core_clk;
+       }
+       clk_set_rate(gmac->core_clk, 266000000);
+
+       /* Setup the register map for the nss common registers */
+       gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                                          "qcom,nss-common");
+       if (IS_ERR(gmac->nss_common)) {
+               dev_err(dev, "missing nss-common node\n");
+               return gmac->nss_common;
+       }
+
+       /* Setup the register map for the qsgmii csr registers */
+       gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                                          "qcom,qsgmii-csr");
+       if (IS_ERR(gmac->qsgmii_csr)) {
+               dev_err(dev, "missing qsgmii-csr node\n");
+               return gmac->qsgmii_csr;
+       }
+
+       return NULL;
+}
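Note the return convention: ipq806x_gmac_of_parse() returns NULL on success and an error-encoding pointer on failure, which is why the caller below can simply test if (err). A minimal userspace sketch of the kernel's ERR_PTR idiom, using simplified reimplementations of the helpers for illustration only:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *parse(int ok)
{
	if (!ok)
		return ERR_PTR(-EINVAL);	/* failure, encoded as a pointer */
	return NULL;				/* success */
}

int main(void)
{
	void *err = parse(0);

	if (err)	/* any non-NULL return is an error here */
		printf("parse failed: %ld\n", IS_ERR(err) ? PTR_ERR(err) : 0L);
	return 0;
}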
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ipq806x_gmac *gmac;
+       int val;
+       void *err;
+
+       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+       if (!gmac)
+               return ERR_PTR(-ENOMEM);
+
+       gmac->pdev = pdev;
+
+       err = ipq806x_gmac_of_parse(gmac);
+       if (err) {
+               dev_err(dev, "device tree parsing error\n");
+               return err;
+       }
+
+       regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+                    QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+       /* The inter-frame gap is set to 12 */
+       val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+             12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+       /* We also initiate an AXI low power exit request */
+       val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+               break;
+       default:
+               dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+                       phy_modes(gmac->phy_mode));
+               return NULL;
+       }
+       regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+       /* Configure the clock src according to the mode */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+       val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+                       NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+                       NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+               break;
+       default:
+               dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+                       phy_modes(gmac->phy_mode));
+               return NULL;
+       }
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+       /* Enable PTP clock */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+       val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+       if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+               regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+                            QSGMII_PHY_CDR_EN |
+                            QSGMII_PHY_RX_FRONT_EN |
+                            QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+                            QSGMII_PHY_TX_DRIVER_EN |
+                            QSGMII_PHY_QSGMII_EN |
+                            0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+                            0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+                            0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+                            0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+                            0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+       }
+
+       return gmac;
+}
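The value written to QSGMII_PHY_SGMII_CTL above can be cross-checked against the OFFSET defines at the top of the file. A minimal userspace sketch that rebuilds it field by field (the field names follow the defines; the printed constant is just the arithmetic):

#include <stdio.h>

int main(void)
{
	unsigned int val = 0;

	val |= 1 << 0;		/* QSGMII_PHY_CDR_EN */
	val |= 1 << 1;		/* QSGMII_PHY_RX_FRONT_EN */
	val |= 1 << 2;		/* QSGMII_PHY_RX_SIGNAL_DETECT_EN */
	val |= 1 << 3;		/* QSGMII_PHY_TX_DRIVER_EN */
	val |= 1 << 7;		/* QSGMII_PHY_QSGMII_EN */
	val |= 0x4 << 12;	/* phase loop gain */
	val |= 0x3 << 18;	/* RX DC bias */
	val |= 0x1 << 20;	/* RX input equalization */
	val |= 0x2 << 22;	/* CDR PI slew */
	val |= 0xCU << 28;	/* TX driver amplitude */

	printf("QSGMII_PHY_SGMII_CTL = 0x%08x\n", val);	/* 0xc09c408f */
	return 0;
}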
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+       struct ipq806x_gmac *gmac = priv;
+
+       ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+       .has_gmac       = 1,
+       .setup          = ipq806x_gmac_setup,
+       .fix_mac_speed  = ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+       { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+       .probe = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "ipq806x-gmac-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = ipq806x_gmac_dwmac_match,
+       },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
index e4f2739760713835610bff224da6614bd0aac3f1..c46178cf4d5024050c2dd72b4fd5aeb822a23f0b 100644 (file)
@@ -52,6 +52,7 @@
 #include "stmmac_ptp.h"
 #include "stmmac.h"
 #include <linux/reset.h>
+#include <linux/of_mdio.h>
 
 #define STMMAC_ALIGN(x)        L1_CACHE_ALIGN(x)
 
@@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev)
        priv->speed = 0;
        priv->oldduplex = -1;
 
-       if (priv->plat->phy_bus_name)
-               snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
-                        priv->plat->phy_bus_name, priv->plat->bus_id);
-       else
-               snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-                        priv->plat->bus_id);
+       if (priv->plat->phy_node) {
+               phydev = of_phy_connect(dev, priv->plat->phy_node,
+                                       &stmmac_adjust_link, 0, interface);
+       } else {
+               if (priv->plat->phy_bus_name)
+                       snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+                                priv->plat->phy_bus_name, priv->plat->bus_id);
+               else
+                       snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+                                priv->plat->bus_id);
 
-       snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-                priv->plat->phy_addr);
-       pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
+               snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+                        priv->plat->phy_addr);
+               pr_debug("stmmac_init_phy:  trying to attach to %s\n",
+                        phy_id_fmt);
 
-       phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+               phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+                                    interface);
+       }
 
        if (IS_ERR(phydev)) {
                pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev)
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
-       if (phydev->phy_id == 0) {
+       if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }
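When no phy-handle is given, the retained legacy path addresses the PHY by a formatted bus/address string. A minimal userspace sketch of that formatting, assuming PHY_ID_FMT is "%s:%02x" and MII_BUS_ID_SIZE is 17 as in include/linux/phy.h of this era:

#include <stdio.h>

#define MII_BUS_ID_SIZE	17
#define PHY_ID_FMT	"%s:%02x"

int main(void)
{
	char bus_id[MII_BUS_ID_SIZE];
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	int bus = 0, phy_addr = 1;

	snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", bus);
	snprintf(phy_id_fmt, sizeof(phy_id_fmt), PHY_ID_FMT, bus_id, phy_addr);
	printf("trying to attach to %s\n", phy_id_fmt);	/* stmmac-0:01 */
	return 0;
}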
index 1664c0186f5b335f06f4af32e2f80df5a2720026..f3918c7e7eeb373a6736bb5145c33320acbabc53 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
+#include <linux/of_mdio.h>
 
 #include "stmmac.h"
 #include "stmmac_platform.h"
@@ -144,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        /* Default to phy auto-detection */
        plat->phy_addr = -1;
 
+       /* If we find a phy-handle property, use it as the PHY */
+       plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+       /* If phy-handle is not specified, check if we have a fixed-phy */
+       if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+               if (of_phy_register_fixed_link(np) < 0)
+                       return -ENODEV;
+
+               plat->phy_node = of_node_get(np);
+       }
+
        /* "snps,phy-addr" is not a standard property. Mark it as deprecated
         * and warn of its use. Remove this when phy node support is added.
         */
        if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
                dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
-       if (plat->phy_bus_name)
+       if (plat->phy_node || plat->phy_bus_name)
                plat->mdio_bus_data = NULL;
        else
                plat->mdio_bus_data =
@@ -208,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        if (of_find_property(np, "snps,pbl", NULL)) {
                dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
                                       GFP_KERNEL);
-               if (!dma_cfg)
+               if (!dma_cfg) {
+                       of_node_put(np);
                        return -ENOMEM;
+               }
                plat->dma_cfg = dma_cfg;
                of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
                dma_cfg->fixed_burst =
index ddcc7f8d22b4d916412afc0a13e4ffd58a7ba307..dd4544085db321d2f9020d97ebbbb9a8887ed4b9 100644 (file)
@@ -161,6 +161,7 @@ struct netvsc_device_info {
        unsigned char mac_adr[ETH_ALEN];
        bool link_state;        /* 0 - link up, 1 - link down */
        int  ring_size;
+       u32  max_num_vrss_chns;
 };
 
 enum rndis_device_state {
index b0249685139cc7b56840e50cdf8f5c569c59c54f..06de98a056228261a9df982421c4137827dfda01 100644 (file)
@@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device)
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
+       int node;
 
        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
        ndev = net_device->ndev;
 
-       net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+       node = cpu_to_node(device->channel->target_cpu);
+       net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+       if (!net_device->recv_buf)
+               net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
        if (!net_device->recv_buf) {
                netdev_err(ndev, "unable to allocate receive "
                        "buffer of size %d\n", net_device->recv_buf_size);
@@ -321,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device)
 
        /* Now setup the send buffer.
         */
-       net_device->send_buf = vzalloc(net_device->send_buf_size);
+       net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+       if (!net_device->send_buf)
+               net_device->send_buf = vzalloc(net_device->send_buf_size);
        if (!net_device->send_buf) {
                netdev_err(ndev, "unable to allocate send "
                           "buffer of size %d\n", net_device->send_buf_size);
index d9c88bc09f45bd2c6557ccd225490c4d2c0b494d..358475ed9b5964c53f038c61f7fb8a3996c2a5ab 100644 (file)
@@ -46,6 +46,8 @@ static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
+static int max_num_vrss_chns = 8;
+
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -755,6 +757,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        ndevctx->device_ctx = hdev;
        hv_set_drvdata(hdev, ndev);
        device_info.ring_size = ring_size;
+       device_info.max_num_vrss_chns = max_num_vrss_chns;
        rndis_filter_device_add(hdev, &device_info);
        netif_tx_wake_all_queues(ndev);
 
@@ -975,6 +978,7 @@ static int netvsc_probe(struct hv_device *dev,
 
        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
+       device_info.max_num_vrss_chns = max_num_vrss_chns;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
index 9118cea918821cb6bbe83a2f97a71134a58fd5dd..006c1b8c23857a17757366ffab86fd4eb9bad60b 100644 (file)
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
        struct ndis_recv_scale_cap rsscap;
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        u32 mtu, size;
+       u32 num_rss_qs;
+       const struct cpumask *node_cpu_mask;
+       u32 num_possible_rss_qs;
 
        rndis_device = get_rndis_device();
        if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
        if (ret || rsscap.num_recv_que < 2)
                goto out;
 
+       num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
        net_device->max_chn = rsscap.num_recv_que;
-       net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
-                              num_online_cpus() : rsscap.num_recv_que;
+
+       /*
+        * We will limit the VRSS channels to the number of CPUs in the
+        * NUMA node the primary channel is currently bound to.
+        */
+       node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+       num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+       net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
        if (net_device->num_chn == 1)
                goto out;
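The resulting channel count is the minimum of three quantities: the module limit, what the host offers, and the number of CPUs in the primary channel's NUMA node. A minimal userspace sketch with invented sample numbers:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_num_vrss_chns = 8;	/* module default */
	unsigned int num_recv_que = 16;		/* offered by the host */
	unsigned int cpus_in_node = 6;		/* NUMA-local CPUs */
	unsigned int num_chn;

	num_chn = min_u(min_u(max_num_vrss_chns, num_recv_que), cpus_in_node);
	printf("num_chn = %u\n", num_chn);	/* 6 */
	return 0;
}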
 
index 1a3c3e57aa0b67750eccb0d2fc62eefb4eef0050..1dd5ab8e5054a4a192e75841c9220750f7fa9dec 100644 (file)
@@ -53,3 +53,13 @@ config IEEE802154_CC2520
 
          This driver can also be built as a module. To do so, say M here.
           The module will be called 'cc2520'.
+
+config IEEE802154_ATUSB
+       tristate "ATUSB transceiver driver"
+       depends on IEEE802154_DRIVERS && MAC802154 && USB
+       ---help---
+         Say Y here to enable the ATUSB IEEE 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         The module will be called 'atusb'.
index d77fa4d77e27d8e19e43f29f2161633c6754dd84..cf1d2a6db023924bb12c9e05ae034665357cfe80 100644 (file)
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
 obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
 obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
 obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
+obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
index 67d00fbc2e0e29e7bd426ed8f7d21b22bf6772fc..2f25a5ed82473b38190c9d873c0651fbdf755760 100644 (file)
@@ -35,6 +35,8 @@
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
 
+#include "at86rf230.h"
+
 struct at86rf230_local;
 /* at86rf2xx chip-dependent data.
  * All timings are in us.
@@ -50,7 +52,7 @@ struct at86rf2xx_chip_data {
        int rssi_base_val;
 
        int (*set_channel)(struct at86rf230_local *, u8, u8);
-       int (*get_desense_steps)(struct at86rf230_local *, s32);
+       int (*set_txpower)(struct at86rf230_local *, s32);
 };
 
 #define AT86RF2XX_MAX_BUF              (127 + 3)
@@ -102,200 +104,6 @@ struct at86rf230_local {
        struct at86rf230_state_change tx;
 };
 
-#define RG_TRX_STATUS  (0x01)
-#define SR_TRX_STATUS          0x01, 0x1f, 0
-#define SR_RESERVED_01_3       0x01, 0x20, 5
-#define SR_CCA_STATUS          0x01, 0x40, 6
-#define SR_CCA_DONE            0x01, 0x80, 7
-#define RG_TRX_STATE   (0x02)
-#define SR_TRX_CMD             0x02, 0x1f, 0
-#define SR_TRAC_STATUS         0x02, 0xe0, 5
-#define RG_TRX_CTRL_0  (0x03)
-#define SR_CLKM_CTRL           0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL                0x03, 0x08, 3
-#define SR_PAD_IO_CLKM         0x03, 0x30, 4
-#define SR_PAD_IO              0x03, 0xc0, 6
-#define RG_TRX_CTRL_1  (0x04)
-#define SR_IRQ_POLARITY                0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE       0x04, 0x02, 1
-#define SR_SPI_CMD_MODE                0x04, 0x0c, 2
-#define SR_RX_BL_CTRL          0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON      0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN                0x04, 0x40, 6
-#define SR_PA_EXT_EN           0x04, 0x80, 7
-#define RG_PHY_TX_PWR  (0x05)
-#define SR_TX_PWR              0x05, 0x0f, 0
-#define SR_PA_LT               0x05, 0x30, 4
-#define SR_PA_BUF_LT           0x05, 0xc0, 6
-#define RG_PHY_RSSI    (0x06)
-#define SR_RSSI                        0x06, 0x1f, 0
-#define SR_RND_VALUE           0x06, 0x60, 5
-#define SR_RX_CRC_VALID                0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL        (0x07)
-#define SR_ED_LEVEL            0x07, 0xff, 0
-#define RG_PHY_CC_CCA  (0x08)
-#define SR_CHANNEL             0x08, 0x1f, 0
-#define SR_CCA_MODE            0x08, 0x60, 5
-#define SR_CCA_REQUEST         0x08, 0x80, 7
-#define RG_CCA_THRES   (0x09)
-#define SR_CCA_ED_THRES                0x09, 0x0f, 0
-#define SR_RESERVED_09_1       0x09, 0xf0, 4
-#define RG_RX_CTRL     (0x0a)
-#define SR_PDT_THRES           0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1       0x0a, 0xf0, 4
-#define RG_SFD_VALUE   (0x0b)
-#define SR_SFD_VALUE           0x0b, 0xff, 0
-#define RG_TRX_CTRL_2  (0x0c)
-#define SR_OQPSK_DATA_RATE     0x0c, 0x03, 0
-#define SR_SUB_MODE            0x0c, 0x04, 2
-#define SR_BPSK_QPSK           0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN    0x0c, 0x10, 4
-#define SR_RESERVED_0c_5       0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE                0x0c, 0x80, 7
-#define RG_ANT_DIV     (0x0d)
-#define SR_ANT_CTRL            0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN       0x0d, 0x04, 2
-#define SR_ANT_DIV_EN          0x0d, 0x08, 3
-#define SR_RESERVED_0d_2       0x0d, 0x70, 4
-#define SR_ANT_SEL             0x0d, 0x80, 7
-#define RG_IRQ_MASK    (0x0e)
-#define SR_IRQ_MASK            0x0e, 0xff, 0
-#define RG_IRQ_STATUS  (0x0f)
-#define SR_IRQ_0_PLL_LOCK      0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK    0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START      0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END       0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE   0x0f, 0x10, 4
-#define SR_IRQ_5_AMI           0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR                0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW       0x0f, 0x80, 7
-#define RG_VREG_CTRL   (0x10)
-#define SR_RESERVED_10_6       0x10, 0x03, 0
-#define SR_DVDD_OK             0x10, 0x04, 2
-#define SR_DVREG_EXT           0x10, 0x08, 3
-#define SR_RESERVED_10_3       0x10, 0x30, 4
-#define SR_AVDD_OK             0x10, 0x40, 6
-#define SR_AVREG_EXT           0x10, 0x80, 7
-#define RG_BATMON      (0x11)
-#define SR_BATMON_VTH          0x11, 0x0f, 0
-#define SR_BATMON_HR           0x11, 0x10, 4
-#define SR_BATMON_OK           0x11, 0x20, 5
-#define SR_RESERVED_11_1       0x11, 0xc0, 6
-#define RG_XOSC_CTRL   (0x12)
-#define SR_XTAL_TRIM           0x12, 0x0f, 0
-#define SR_XTAL_MODE           0x12, 0xf0, 4
-#define RG_RX_SYN      (0x15)
-#define SR_RX_PDT_LEVEL                0x15, 0x0f, 0
-#define SR_RESERVED_15_2       0x15, 0x70, 4
-#define SR_RX_PDT_DIS          0x15, 0x80, 7
-#define RG_XAH_CTRL_1  (0x17)
-#define SR_RESERVED_17_8       0x17, 0x01, 0
-#define SR_AACK_PROM_MODE      0x17, 0x02, 1
-#define SR_AACK_ACK_TIME       0x17, 0x04, 2
-#define SR_RESERVED_17_5       0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT    0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT    0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE       0x17, 0x40, 6
-#define SR_RESERVED_17_1       0x17, 0x80, 7
-#define RG_FTN_CTRL    (0x18)
-#define SR_RESERVED_18_2       0x18, 0x7f, 0
-#define SR_FTN_START           0x18, 0x80, 7
-#define RG_PLL_CF      (0x1a)
-#define SR_RESERVED_1a_2       0x1a, 0x7f, 0
-#define SR_PLL_CF_START                0x1a, 0x80, 7
-#define RG_PLL_DCU     (0x1b)
-#define SR_RESERVED_1b_3       0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2       0x1b, 0x40, 6
-#define SR_PLL_DCU_START       0x1b, 0x80, 7
-#define RG_PART_NUM    (0x1c)
-#define SR_PART_NUM            0x1c, 0xff, 0
-#define RG_VERSION_NUM (0x1d)
-#define SR_VERSION_NUM         0x1d, 0xff, 0
-#define RG_MAN_ID_0    (0x1e)
-#define SR_MAN_ID_0            0x1e, 0xff, 0
-#define RG_MAN_ID_1    (0x1f)
-#define SR_MAN_ID_1            0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0        (0x20)
-#define SR_SHORT_ADDR_0                0x20, 0xff, 0
-#define RG_SHORT_ADDR_1        (0x21)
-#define SR_SHORT_ADDR_1                0x21, 0xff, 0
-#define RG_PAN_ID_0    (0x22)
-#define SR_PAN_ID_0            0x22, 0xff, 0
-#define RG_PAN_ID_1    (0x23)
-#define SR_PAN_ID_1            0x23, 0xff, 0
-#define RG_IEEE_ADDR_0 (0x24)
-#define SR_IEEE_ADDR_0         0x24, 0xff, 0
-#define RG_IEEE_ADDR_1 (0x25)
-#define SR_IEEE_ADDR_1         0x25, 0xff, 0
-#define RG_IEEE_ADDR_2 (0x26)
-#define SR_IEEE_ADDR_2         0x26, 0xff, 0
-#define RG_IEEE_ADDR_3 (0x27)
-#define SR_IEEE_ADDR_3         0x27, 0xff, 0
-#define RG_IEEE_ADDR_4 (0x28)
-#define SR_IEEE_ADDR_4         0x28, 0xff, 0
-#define RG_IEEE_ADDR_5 (0x29)
-#define SR_IEEE_ADDR_5         0x29, 0xff, 0
-#define RG_IEEE_ADDR_6 (0x2a)
-#define SR_IEEE_ADDR_6         0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7 (0x2b)
-#define SR_IEEE_ADDR_7         0x2b, 0xff, 0
-#define RG_XAH_CTRL_0  (0x2c)
-#define SR_SLOTTED_OPERATION   0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES    0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES   0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0 (0x2d)
-#define SR_CSMA_SEED_0         0x2d, 0xff, 0
-#define RG_CSMA_SEED_1 (0x2e)
-#define SR_CSMA_SEED_1         0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD     0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK                0x2e, 0x10, 4
-#define SR_AACK_SET_PD         0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE       0x2e, 0xc0, 6
-#define RG_CSMA_BE     (0x2f)
-#define SR_MIN_BE              0x2f, 0x0f, 0
-#define SR_MAX_BE              0x2f, 0xf0, 4
-
-#define CMD_REG                0x80
-#define CMD_REG_MASK   0x3f
-#define CMD_WRITE      0x40
-#define CMD_FB         0x20
-
-#define IRQ_BAT_LOW    (1 << 7)
-#define IRQ_TRX_UR     (1 << 6)
-#define IRQ_AMI                (1 << 5)
-#define IRQ_CCA_ED     (1 << 4)
-#define IRQ_TRX_END    (1 << 3)
-#define IRQ_RX_START   (1 << 2)
-#define IRQ_PLL_UNL    (1 << 1)
-#define IRQ_PLL_LOCK   (1 << 0)
-
-#define IRQ_ACTIVE_HIGH        0
-#define IRQ_ACTIVE_LOW 1
-
-#define STATE_P_ON             0x00    /* BUSY */
-#define STATE_BUSY_RX          0x01
-#define STATE_BUSY_TX          0x02
-#define STATE_FORCE_TRX_OFF    0x03
-#define STATE_FORCE_TX_ON      0x04    /* IDLE */
-/* 0x05 */                             /* INVALID_PARAMETER */
-#define STATE_RX_ON            0x06
-/* 0x07 */                             /* SUCCESS */
-#define STATE_TRX_OFF          0x08
-#define STATE_TX_ON            0x09
-/* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP            0x0F
-#define STATE_PREP_DEEP_SLEEP  0x10
-#define STATE_BUSY_RX_AACK     0x11
-#define STATE_BUSY_TX_ARET     0x12
-#define STATE_RX_AACK_ON       0x16
-#define STATE_TX_ARET_ON       0x19
-#define STATE_RX_ON_NOCLK      0x1C
-#define STATE_RX_AACK_ON_NOCLK 0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-#define TRX_STATE_MASK         (0x1F)
-
 #define AT86RF2XX_NUMREGS 0x3F
 
 static void
@@ -1010,7 +818,7 @@ at86rf230_xmit_start(void *context)
                if (lp->is_tx_from_off) {
                        lp->is_tx_from_off = false;
                        at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-                                                    at86rf230_xmit_tx_on,
+                                                    at86rf230_write_frame,
                                                     false);
                } else {
                        at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
@@ -1076,6 +884,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
        return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
 }
 
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+       -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+       -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+       -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+       -8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+       -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+       -7800, -7600, -7400, -7200, -7000, -6800,
+};
+
+static inline int
+at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val)
+{
+       unsigned int cca_ed_thres;
+       int rc;
+
+       rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres);
+       if (rc < 0)
+               return rc;
+
+       switch (rssi_base_val) {
+       case -98:
+               lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+               lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+               lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+               break;
+       case -100:
+               lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+               lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+               lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       return 0;
+}
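Each CCA ED table above is an arithmetic series: level[i] = rssi_base_val * 100 + i * 200 mbm, that is, 2 dB steps starting at the radio's RSSI base value. A minimal userspace sketch that regenerates the -100 dBm table for cross-checking:

#include <stdio.h>

int main(void)
{
	int rssi_base_val = -100;	/* at86rf212, BPSK rate mode */
	int i;

	for (i = 0; i <= 0xF; i++)	/* AT86RF2XX_MAX_ED_LEVELS */
		printf("%d%s", rssi_base_val * 100 + i * 200,
		       i < 0xF ? ", " : "\n");
	return 0;
}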
+
 static int
 at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
 {
@@ -1098,6 +950,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
        if (rc < 0)
                return rc;
 
+       rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val);
+       if (rc < 0)
+               return rc;
+
        /* This sets the symbol_duration according to the frequency on the 212.
         * TODO: move this handling into the channel and page setting in
         * cfg802154. We can do that; these timings follow the 802.15.4 standard.
@@ -1193,23 +1049,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        return 0;
 }
 
+#define AT86RF23X_MAX_TX_POWERS 0xF
+static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+       400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
+       -800, -1200, -1700,
+};
+
+static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+       300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+       -900, -1200, -1700,
+};
+
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+       500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+       -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+       -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
+
+static int
+at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+       u32 i;
+
+       for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+               if (lp->hw->phy->supported.tx_powers[i] == mbm)
+                       return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i);
+       }
+
+       return -EINVAL;
+}
+
 static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
+at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm)
 {
-       struct at86rf230_local *lp = hw->priv;
+       u32 i;
 
-       /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
-        * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
-        * 0dB.
-        * thus, supported values for db range from -26 to 5, for 31dB of
-        * reduction to 0dB of reduction.
-        */
-       if (db > 5 || db < -26)
-               return -EINVAL;
+       for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+               if (lp->hw->phy->supported.tx_powers[i] == mbm)
+                       return at86rf230_write_subreg(lp, SR_TX_PWR_212, i);
+       }
 
-       db = -(db - 5);
+       return -EINVAL;
+}
+
+static int
+at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+       struct at86rf230_local *lp = hw->priv;
 
-       return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+       return lp->data->set_txpower(lp, mbm);
 }
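The reworked set_txpower() callbacks take power in mbm (1/100 dBm, so 400 means 4 dBm) and translate it to the index of the matching table entry, which doubles as the TX_PWR register value. A minimal userspace sketch with the at86rf233 table copied from above:

#include <stdio.h>
#include <errno.h>

static const int powers[] = {
	400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
	-800, -1200, -1700,
};

static int set_txpower(int mbm)
{
	unsigned int i;

	for (i = 0; i < sizeof(powers) / sizeof(powers[0]); i++)
		if (powers[i] == mbm)
			return i;	/* value for SR_TX_PWR_23X */
	return -EINVAL;			/* only exact matches are accepted */
}

int main(void)
{
	printf("4 dBm   -> %d\n", set_txpower(400));	/* index 0 */
	printf("-17 dBm -> %d\n", set_txpower(-1700));	/* index 15 */
	printf("5 dBm   -> %d\n", set_txpower(500));	/* -EINVAL */
	return 0;
}

The same exact-match scan appears in at86rf212_set_txpower() and in the reworked at86rf230_set_cca_ed_level() above.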
 
 static int
@@ -1254,28 +1143,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
        return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-static int
-at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
-       return (level - lp->data->rssi_base_val) * 100 / 207;
-}
-
-static int
-at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
-       return (level - lp->data->rssi_base_val) / 2;
-}
 
 static int
-at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
        struct at86rf230_local *lp = hw->priv;
+       u32 i;
 
-       if (level < lp->data->rssi_base_val || level > 30)
-               return -EINVAL;
+       for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+               if (hw->phy->supported.cca_ed_levels[i] == mbm)
+                       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i);
+       }
 
-       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
-                                     lp->data->get_desense_steps(lp, level));
+       return -EINVAL;
 }
 
 static int
@@ -1365,7 +1245,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
        .t_p_ack = 545,
        .rssi_base_val = -91,
        .set_channel = at86rf23x_set_channel,
-       .get_desense_steps = at86rf23x_get_desens_steps
+       .set_txpower = at86rf23x_set_txpower,
 };
 
 static struct at86rf2xx_chip_data at86rf231_data = {
@@ -1378,7 +1258,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
        .t_p_ack = 545,
        .rssi_base_val = -91,
        .set_channel = at86rf23x_set_channel,
-       .get_desense_steps = at86rf23x_get_desens_steps
+       .set_txpower = at86rf23x_set_txpower,
 };
 
 static struct at86rf2xx_chip_data at86rf212_data = {
@@ -1391,7 +1271,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
        .t_p_ack = 545,
        .rssi_base_val = -100,
        .set_channel = at86rf212_set_channel,
-       .get_desense_steps = at86rf212_get_desens_steps
+       .set_txpower = at86rf212_set_txpower,
 };
 
 static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
@@ -1564,8 +1444,21 @@ at86rf230_detect_device(struct at86rf230_local *lp)
        }
 
        lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
-                       IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
-                       IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+                       IEEE802154_HW_CSMA_PARAMS |
+                       IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
+                       IEEE802154_HW_PROMISCUOUS;
+
+       lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
+                            WPAN_PHY_FLAG_CCA_ED_LEVEL |
+                            WPAN_PHY_FLAG_CCA_MODE;
+
+       lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+               BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+       lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+               BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+       lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
+       lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
 
        lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
 
@@ -1573,36 +1466,49 @@ at86rf230_detect_device(struct at86rf230_local *lp)
        case 2:
                chip = "at86rf230";
                rc = -ENOTSUPP;
-               break;
+               goto not_supp;
        case 3:
                chip = "at86rf231";
                lp->data = &at86rf231_data;
-               lp->hw->phy->channels_supported[0] = 0x7FFF800;
+               lp->hw->phy->supported.channels[0] = 0x7FFF800;
                lp->hw->phy->current_channel = 11;
                lp->hw->phy->symbol_duration = 16;
+               lp->hw->phy->supported.tx_powers = at86rf231_powers;
+               lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
                break;
        case 7:
                chip = "at86rf212";
                lp->data = &at86rf212_data;
                lp->hw->flags |= IEEE802154_HW_LBT;
-               lp->hw->phy->channels_supported[0] = 0x00007FF;
-               lp->hw->phy->channels_supported[2] = 0x00007FF;
+               lp->hw->phy->supported.channels[0] = 0x00007FF;
+               lp->hw->phy->supported.channels[2] = 0x00007FF;
                lp->hw->phy->current_channel = 5;
                lp->hw->phy->symbol_duration = 25;
+               lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+               lp->hw->phy->supported.tx_powers = at86rf212_powers;
+               lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+               lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+               lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
                break;
        case 11:
                chip = "at86rf233";
                lp->data = &at86rf233_data;
-               lp->hw->phy->channels_supported[0] = 0x7FFF800;
+               lp->hw->phy->supported.channels[0] = 0x7FFF800;
                lp->hw->phy->current_channel = 13;
                lp->hw->phy->symbol_duration = 16;
+               lp->hw->phy->supported.tx_powers = at86rf233_powers;
+               lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
                break;
        default:
                chip = "unknown";
                rc = -ENOTSUPP;
-               break;
+               goto not_supp;
        }
 
+       lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7];
+       lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0];
+
+not_supp:
        dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
 
        return rc;
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
new file mode 100644 (file)
index 0000000..1e6d1cc
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef _AT86RF230_H
+#define _AT86RF230_H
+
+#define RG_TRX_STATUS  (0x01)
+#define SR_TRX_STATUS          0x01, 0x1f, 0
+#define SR_RESERVED_01_3       0x01, 0x20, 5
+#define SR_CCA_STATUS          0x01, 0x40, 6
+#define SR_CCA_DONE            0x01, 0x80, 7
+#define RG_TRX_STATE   (0x02)
+#define SR_TRX_CMD             0x02, 0x1f, 0
+#define SR_TRAC_STATUS         0x02, 0xe0, 5
+#define RG_TRX_CTRL_0  (0x03)
+#define SR_CLKM_CTRL           0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL                0x03, 0x08, 3
+#define SR_PAD_IO_CLKM         0x03, 0x30, 4
+#define SR_PAD_IO              0x03, 0xc0, 6
+#define RG_TRX_CTRL_1  (0x04)
+#define SR_IRQ_POLARITY                0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE       0x04, 0x02, 1
+#define SR_SPI_CMD_MODE                0x04, 0x0c, 2
+#define SR_RX_BL_CTRL          0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON      0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN                0x04, 0x40, 6
+#define SR_PA_EXT_EN           0x04, 0x80, 7
+#define RG_PHY_TX_PWR  (0x05)
+#define SR_TX_PWR_23X          0x05, 0x0f, 0
+#define SR_PA_LT_230           0x05, 0x30, 4
+#define SR_PA_BUF_LT_230       0x05, 0xc0, 6
+#define SR_TX_PWR_212          0x05, 0x1f, 0
+#define SR_GC_PA_212           0x05, 0x60, 5
+#define SR_PA_BOOST_LT_212     0x05, 0x80, 7
+#define RG_PHY_RSSI    (0x06)
+#define SR_RSSI                        0x06, 0x1f, 0
+#define SR_RND_VALUE           0x06, 0x60, 5
+#define SR_RX_CRC_VALID                0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL        (0x07)
+#define SR_ED_LEVEL            0x07, 0xff, 0
+#define RG_PHY_CC_CCA  (0x08)
+#define SR_CHANNEL             0x08, 0x1f, 0
+#define SR_CCA_MODE            0x08, 0x60, 5
+#define SR_CCA_REQUEST         0x08, 0x80, 7
+#define RG_CCA_THRES   (0x09)
+#define SR_CCA_ED_THRES                0x09, 0x0f, 0
+#define SR_RESERVED_09_1       0x09, 0xf0, 4
+#define RG_RX_CTRL     (0x0a)
+#define SR_PDT_THRES           0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1       0x0a, 0xf0, 4
+#define RG_SFD_VALUE   (0x0b)
+#define SR_SFD_VALUE           0x0b, 0xff, 0
+#define RG_TRX_CTRL_2  (0x0c)
+#define SR_OQPSK_DATA_RATE     0x0c, 0x03, 0
+#define SR_SUB_MODE            0x0c, 0x04, 2
+#define SR_BPSK_QPSK           0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN    0x0c, 0x10, 4
+#define SR_RESERVED_0c_5       0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE                0x0c, 0x80, 7
+#define RG_ANT_DIV     (0x0d)
+#define SR_ANT_CTRL            0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN       0x0d, 0x04, 2
+#define SR_ANT_DIV_EN          0x0d, 0x08, 3
+#define SR_RESERVED_0d_2       0x0d, 0x70, 4
+#define SR_ANT_SEL             0x0d, 0x80, 7
+#define RG_IRQ_MASK    (0x0e)
+#define SR_IRQ_MASK            0x0e, 0xff, 0
+#define RG_IRQ_STATUS  (0x0f)
+#define SR_IRQ_0_PLL_LOCK      0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK    0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START      0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END       0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE   0x0f, 0x10, 4
+#define SR_IRQ_5_AMI           0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR                0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW       0x0f, 0x80, 7
+#define RG_VREG_CTRL   (0x10)
+#define SR_RESERVED_10_6       0x10, 0x03, 0
+#define SR_DVDD_OK             0x10, 0x04, 2
+#define SR_DVREG_EXT           0x10, 0x08, 3
+#define SR_RESERVED_10_3       0x10, 0x30, 4
+#define SR_AVDD_OK             0x10, 0x40, 6
+#define SR_AVREG_EXT           0x10, 0x80, 7
+#define RG_BATMON      (0x11)
+#define SR_BATMON_VTH          0x11, 0x0f, 0
+#define SR_BATMON_HR           0x11, 0x10, 4
+#define SR_BATMON_OK           0x11, 0x20, 5
+#define SR_RESERVED_11_1       0x11, 0xc0, 6
+#define RG_XOSC_CTRL   (0x12)
+#define SR_XTAL_TRIM           0x12, 0x0f, 0
+#define SR_XTAL_MODE           0x12, 0xf0, 4
+#define RG_RX_SYN      (0x15)
+#define SR_RX_PDT_LEVEL                0x15, 0x0f, 0
+#define SR_RESERVED_15_2       0x15, 0x70, 4
+#define SR_RX_PDT_DIS          0x15, 0x80, 7
+#define RG_XAH_CTRL_1  (0x17)
+#define SR_RESERVED_17_8       0x17, 0x01, 0
+#define SR_AACK_PROM_MODE      0x17, 0x02, 1
+#define SR_AACK_ACK_TIME       0x17, 0x04, 2
+#define SR_RESERVED_17_5       0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT    0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT    0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE       0x17, 0x40, 6
+#define SR_RESERVED_17_1       0x17, 0x80, 7
+#define RG_FTN_CTRL    (0x18)
+#define SR_RESERVED_18_2       0x18, 0x7f, 0
+#define SR_FTN_START           0x18, 0x80, 7
+#define RG_PLL_CF      (0x1a)
+#define SR_RESERVED_1a_2       0x1a, 0x7f, 0
+#define SR_PLL_CF_START                0x1a, 0x80, 7
+#define RG_PLL_DCU     (0x1b)
+#define SR_RESERVED_1b_3       0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2       0x1b, 0x40, 6
+#define SR_PLL_DCU_START       0x1b, 0x80, 7
+#define RG_PART_NUM    (0x1c)
+#define SR_PART_NUM            0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM         0x1d, 0xff, 0
+#define RG_MAN_ID_0    (0x1e)
+#define SR_MAN_ID_0            0x1e, 0xff, 0
+#define RG_MAN_ID_1    (0x1f)
+#define SR_MAN_ID_1            0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0        (0x20)
+#define SR_SHORT_ADDR_0                0x20, 0xff, 0
+#define RG_SHORT_ADDR_1        (0x21)
+#define SR_SHORT_ADDR_1                0x21, 0xff, 0
+#define RG_PAN_ID_0    (0x22)
+#define SR_PAN_ID_0            0x22, 0xff, 0
+#define RG_PAN_ID_1    (0x23)
+#define SR_PAN_ID_1            0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0         0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1         0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2         0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3         0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4         0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5         0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6         0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7         0x2b, 0xff, 0
+#define RG_XAH_CTRL_0  (0x2c)
+#define SR_SLOTTED_OPERATION   0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES    0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES   0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0         0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1         0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD     0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK                0x2e, 0x10, 4
+#define SR_AACK_SET_PD         0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE       0x2e, 0xc0, 6
+#define RG_CSMA_BE     (0x2f)
+#define SR_MIN_BE              0x2f, 0x0f, 0
+#define SR_MAX_BE              0x2f, 0xf0, 4
+
+#define CMD_REG                0x80
+#define CMD_REG_MASK   0x3f
+#define CMD_WRITE      0x40
+#define CMD_FB         0x20
+
+#define IRQ_BAT_LOW    BIT(7)
+#define IRQ_TRX_UR     BIT(6)
+#define IRQ_AMI                BIT(5)
+#define IRQ_CCA_ED     BIT(4)
+#define IRQ_TRX_END    BIT(3)
+#define IRQ_RX_START   BIT(2)
+#define IRQ_PLL_UNL    BIT(1)
+#define IRQ_PLL_LOCK   BIT(0)
+
+#define IRQ_ACTIVE_HIGH        0
+#define IRQ_ACTIVE_LOW 1
+
+#define STATE_P_ON             0x00    /* BUSY */
+#define STATE_BUSY_RX          0x01
+#define STATE_BUSY_TX          0x02
+#define STATE_FORCE_TRX_OFF    0x03
+#define STATE_FORCE_TX_ON      0x04    /* IDLE */
+/* 0x05 */                             /* INVALID_PARAMETER */
+#define STATE_RX_ON            0x06
+/* 0x07 */                             /* SUCCESS */
+#define STATE_TRX_OFF          0x08
+#define STATE_TX_ON            0x09
+/* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP            0x0F
+#define STATE_PREP_DEEP_SLEEP  0x10
+#define STATE_BUSY_RX_AACK     0x11
+#define STATE_BUSY_TX_ARET     0x12
+#define STATE_RX_AACK_ON       0x16
+#define STATE_TX_ARET_ON       0x19
+#define STATE_RX_ON_NOCLK      0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+#define TRX_STATE_MASK         (0x1F)
+
+#endif /* !_AT86RF230_H */
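As the layout suggests, every SR_* name expands to a <reg, mask, shift> triple, so a call such as at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i) really passes three extra arguments. A minimal userspace sketch of a triplet-consuming read-modify-write against a fake register array:

#include <stdint.h>
#include <stdio.h>

#define SR_CCA_ED_THRES		0x09, 0x0f, 0	/* reg, mask, shift */

static uint8_t regs[0x40];	/* fake register file */

static void write_subreg(uint8_t addr, uint8_t mask, int shift, uint8_t val)
{
	regs[addr] = (regs[addr] & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	write_subreg(SR_CCA_ED_THRES, 7);	/* expands to four arguments */
	printf("RG_CCA_THRES = 0x%02x\n", regs[0x09]);	/* 0x07 */
	return 0;
}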
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
new file mode 100644 (file)
index 0000000..5b6bb9a
--- /dev/null
@@ -0,0 +1,699 @@
+/*
+ * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * Based on at86rf230.c and spi_atusb.c.
+ * at86rf230.c is
+ * Copyright (C) 2009 Siemens AG
+ * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ *
+ * spi_atusb.c is
+ * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
+ * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
+ * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
+ *
+ * USB initialization is
+ * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+#include <net/mac802154.h>
+
+#include "at86rf230.h"
+#include "atusb.h"
+
+#define ATUSB_JEDEC_ATMEL      0x1f    /* JEDEC manufacturer ID */
+
+#define ATUSB_NUM_RX_URBS      4       /* allow for a bit of local latency */
+#define ATUSB_ALLOC_DELAY_MS   100     /* delay after failed allocation */
+#define ATUSB_TX_TIMEOUT_MS    200     /* on the air timeout */
+
+struct atusb {
+       struct ieee802154_hw *hw;
+       struct usb_device *usb_dev;
+       int shutdown;                   /* non-zero if shutting down */
+       int err;                        /* set by first error */
+
+       /* RX variables */
+       struct delayed_work work;       /* memory allocations */
+       struct usb_anchor idle_urbs;    /* URBs waiting to be submitted */
+       struct usb_anchor rx_urbs;      /* URBs waiting for reception */
+
+       /* TX variables */
+       struct usb_ctrlrequest tx_dr;
+       struct urb *tx_urb;
+       struct sk_buff *tx_skb;
+       uint8_t tx_ack_seq;             /* current TX ACK sequence number */
+};
+
+/* at86rf230.h defines values as <reg, mask, shift> tuples. We use the more
+ * traditional style of having registers and or-able values. SR_REG extracts
+ * the register number. SR_VALUE uses the shift to prepare a value accordingly.
+ */
+
+#define __SR_REG(reg, mask, shift)     (reg)
+#define SR_REG(sr)                     __SR_REG(sr)
+
+#define __SR_VALUE(reg, mask, shift, val)      ((val) << (shift))
+#define SR_VALUE(sr, val)                      __SR_VALUE(sr, (val))
+
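The indirection through __SR_REG and __SR_VALUE is what makes this work: macro arguments are fully expanded before substitution, so the SR_* triple splits into separate reg/mask/shift arguments on the rescan. A minimal userspace demonstration with one triple copied from at86rf230.h:

#include <stdio.h>

#define SR_TRX_CMD	0x02, 0x1f, 0	/* reg, mask, shift */

#define __SR_REG(reg, mask, shift)		(reg)
#define SR_REG(sr)				__SR_REG(sr)

#define __SR_VALUE(reg, mask, shift, val)	((val) << (shift))
#define SR_VALUE(sr, val)			__SR_VALUE(sr, (val))

int main(void)
{
	/* prints "reg 0x02 value 0x09" */
	printf("reg 0x%02x value 0x%02x\n",
	       SR_REG(SR_TRX_CMD), SR_VALUE(SR_TRX_CMD, 0x09));
	return 0;
}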
+/* ----- USB commands without data ----------------------------------------- */
+
+/* To reduce the number of error checks in the code, we record the first error
+ * in atusb->err and reject all subsequent requests until the error is cleared.
+ */
+
+static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+                            __u8 request, __u8 requesttype,
+                            __u16 value, __u16 index,
+                            void *data, __u16 size, int timeout)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+
+       if (atusb->err)
+               return atusb->err;
+
+       ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+                             value, index, data, size, timeout);
+       if (ret < 0) {
+               atusb->err = ret;
+               dev_err(&usb_dev->dev,
+                       "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+                       request, value, index, ret);
+       }
+       return ret;
+}
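The latched first error in atusb->err poisons every later request until atusb_get_and_clear_error() resets it, which keeps per-call error handling out of the hot paths. A minimal userspace sketch of the idiom; do_io() is an invented stand-in for usb_control_msg():

#include <stdio.h>
#include <errno.h>

struct dev {
	int err;	/* first error, 0 if none */
};

static int do_io(int fail)
{
	return fail ? -EIO : 0;
}

static int dev_msg(struct dev *d, int fail)
{
	int ret;

	if (d->err)
		return d->err;	/* poisoned: refuse further requests */

	ret = do_io(fail);
	if (ret < 0)
		d->err = ret;	/* latch the first failure */
	return ret;
}

int main(void)
{
	struct dev d = { 0 };

	dev_msg(&d, 1);					/* fails with -EIO */
	printf("next call: %d\n", dev_msg(&d, 0));	/* still -EIO */
	d.err = 0;					/* get-and-clear */
	printf("after clear: %d\n", dev_msg(&d, 0));	/* 0 */
	return 0;
}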
+
+static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+
+       dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+       return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+                                cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
+}
+
+static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+
+       dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
+               reg, value);
+       return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+                                ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+                                value, reg, NULL, 0, 1000);
+}
+
+static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+       uint8_t value;
+
+       dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+                               ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+                               0, reg, &value, 1, 1000);
+       return ret >= 0 ? value : ret;
+}
+
+static int atusb_get_and_clear_error(struct atusb *atusb)
+{
+       int err = atusb->err;
+
+       atusb->err = 0;
+       return err;
+}
+
+/* ----- skb allocation ---------------------------------------------------- */
+
+#define MAX_PSDU       127
+#define MAX_RX_XFER    (1 + MAX_PSDU + 2 + 1)  /* PHR+PSDU+CRC+LQI */
+
+#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb)
+
+static void atusb_in(struct urb *urb);
+
+static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       struct sk_buff *skb = urb->context;
+       int ret;
+
+       if (!skb) {
+               skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
+               if (!skb) {
+                       dev_warn_ratelimited(&usb_dev->dev,
+                                            "atusb_in: can't allocate skb\n");
+                       return -ENOMEM;
+               }
+               skb_put(skb, MAX_RX_XFER);
+               SKB_ATUSB(skb) = atusb;
+       }
+
+       usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
+                         skb->data, MAX_RX_XFER, atusb_in, skb);
+       usb_anchor_urb(urb, &atusb->rx_urbs);
+
+       ret = usb_submit_urb(urb, GFP_KERNEL);
+       if (ret) {
+               usb_unanchor_urb(urb);
+               kfree_skb(skb);
+               urb->context = NULL;
+       }
+       return ret;
+}
+
+static void atusb_work_urbs(struct work_struct *work)
+{
+       struct atusb *atusb =
+           container_of(to_delayed_work(work), struct atusb, work);
+       struct usb_device *usb_dev = atusb->usb_dev;
+       struct urb *urb;
+       int ret;
+
+       if (atusb->shutdown)
+               return;
+
+       do {
+               urb = usb_get_from_anchor(&atusb->idle_urbs);
+               if (!urb)
+                       return;
+               ret = atusb_submit_rx_urb(atusb, urb);
+       } while (!ret);
+
+       usb_anchor_urb(urb, &atusb->idle_urbs);
+       dev_warn_ratelimited(&usb_dev->dev,
+                            "atusb_in: can't allocate/submit URB (%d)\n", ret);
+       schedule_delayed_work(&atusb->work,
+                             msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
+}
+
+/* ----- Asynchronous USB -------------------------------------------------- */
+
+static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       uint8_t expect = atusb->tx_ack_seq;
+
+       dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+       if (seq == expect) {
+               /* TODO check for ifs handling in firmware */
+               ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+       } else {
+               /* TODO: this case has been seen when atusb raises a tx
+                * complete irq before probing has finished; the firmware
+                * should be fixed. It is unlikely that seq == expect then,
+                * but it can happen and would fail with tx_skb == NULL.
+                */
+               ieee802154_wake_queue(atusb->hw);
+               if (atusb->tx_skb)
+                       dev_kfree_skb_irq(atusb->tx_skb);
+       }
+}
+
+static void atusb_in_good(struct urb *urb)
+{
+       struct usb_device *usb_dev = urb->dev;
+       struct sk_buff *skb = urb->context;
+       struct atusb *atusb = SKB_ATUSB(skb);
+       uint8_t len, lqi;
+
+       if (!urb->actual_length) {
+               dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
+               return;
+       }
+
+       len = *skb->data;
+
+       if (urb->actual_length == 1) {
+               atusb_tx_done(atusb, len);
+               return;
+       }
+
+       if (len + 1 > urb->actual_length - 1) {
+               dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
+                       len, urb->actual_length);
+               return;
+       }
+
+       if (!ieee802154_is_valid_psdu_len(len)) {
+               dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
+               return;
+       }
+
+       lqi = skb->data[len + 1];
+       dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
+       skb_pull(skb, 1);       /* remove PHR */
+       skb_trim(skb, len);     /* get payload only */
+       ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
+       urb->context = NULL;    /* skb is gone */
+}
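atusb_in_good() peels the one-byte PHR off the front and the trailing LQI off
the back before handing the bare PSDU to the stack; the CRC is checked and
consumed by the transceiver and never reaches the host. The same parsing on a
plain byte buffer, as a hedged standalone sketch with a hypothetical helper
name:

	#include <stdint.h>
	#include <stddef.h>

	/* Parse one bulk-in transfer laid out as [PHR | PSDU ... | LQI].
	 * Returns the PSDU length, or -1 if the buffer is inconsistent.
	 */
	static int parse_rx_transfer(const uint8_t *buf, size_t n,
				     const uint8_t **psdu, uint8_t *lqi)
	{
		uint8_t len;

		if (n < 2)			/* need at least PHR + LQI */
			return -1;
		len = buf[0];			/* PHR holds the frame length */
		if ((size_t)len + 2 > n)	/* PHR + PSDU + LQI must fit */
			return -1;
		*psdu = buf + 1;
		*lqi = buf[len + 1];
		return len;
	}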
+
+static void atusb_in(struct urb *urb)
+{
+       struct usb_device *usb_dev = urb->dev;
+       struct sk_buff *skb = urb->context;
+       struct atusb *atusb = SKB_ATUSB(skb);
+
+       dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+               urb->status, urb->actual_length);
+       if (urb->status) {
+               if (urb->status == -ENOENT) { /* being killed */
+                       kfree_skb(skb);
+                       urb->context = NULL;
+                       return;
+               }
+               dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+       } else {
+               atusb_in_good(urb);
+       }
+
+       usb_anchor_urb(urb, &atusb->idle_urbs);
+       if (!atusb->shutdown)
+               schedule_delayed_work(&atusb->work, 0);
+}
+
+/* ----- URB allocation/deallocation --------------------------------------- */
+
+static void atusb_free_urbs(struct atusb *atusb)
+{
+       struct urb *urb;
+
+       while (1) {
+               urb = usb_get_from_anchor(&atusb->idle_urbs);
+               if (!urb)
+                       break;
+               if (urb->context)
+                       kfree_skb(urb->context);
+               usb_free_urb(urb);
+       }
+}
+
+static int atusb_alloc_urbs(struct atusb *atusb, int n)
+{
+       struct urb *urb;
+
+       while (n) {
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       atusb_free_urbs(atusb);
+                       return -ENOMEM;
+               }
+               usb_anchor_urb(urb, &atusb->idle_urbs);
+               n--;
+       }
+       return 0;
+}
+
+/* ----- IEEE 802.15.4 interface operations -------------------------------- */
+
+static void atusb_xmit_complete(struct urb *urb)
+{
+       dev_dbg(&urb->dev->dev, "atusb_xmit urb completed\n");
+}
+
+static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+       struct atusb *atusb = hw->priv;
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+
+       dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+       atusb->tx_skb = skb;
+       atusb->tx_ack_seq++;
+       atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
+       atusb->tx_dr.wLength = cpu_to_le16(skb->len);
+
+       usb_fill_control_urb(atusb->tx_urb, usb_dev,
+                            usb_sndctrlpipe(usb_dev, 0),
+                            (unsigned char *)&atusb->tx_dr, skb->data,
+                            skb->len, atusb_xmit_complete, NULL);
+       ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
+       dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+       return ret;
+}
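atusb_xmit() has no bulk-out stage of its own: the PSDU travels as the data
stage of a vendor control transfer whose setup packet (atusb->tx_dr) was
prefilled in probe. The sequence number rides in wIndex and is echoed back on
the one-byte bulk-in completion handled by atusb_tx_done(). A hedged sketch of
how such a setup packet is shaped (ATUSB_TX and ATUSB_REQ_TO_DEV come from
atusb.h below; ack_seq stands in for atusb->tx_ack_seq):

	struct usb_ctrlrequest tx_dr = {
		.bRequestType = ATUSB_REQ_TO_DEV,      /* vendor, host->device */
		.bRequest     = ATUSB_TX,
		.wValue       = cpu_to_le16(0),        /* flags, unused here */
		.wIndex       = cpu_to_le16(ack_seq),  /* echoed on completion */
		.wLength      = cpu_to_le16(skb->len), /* PSDU byte count */
	};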
+
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+       struct atusb *atusb = hw->priv;
+       int ret;
+
+       /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
+        * "Mode 3a, Carrier sense OR energy above threshold".
+        * We should probably make this configurable. @@@
+        */
+       ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+       if (ret < 0)
+               return ret;
+       msleep(1);      /* @@@ ugly synchronization */
+       return 0;
+}
+
+static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
+{
+       BUG_ON(!level);
+       *level = 0xbe;
+       return 0;
+}
+
+static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
+                                 struct ieee802154_hw_addr_filt *filt,
+                                 unsigned long changed)
+{
+       struct atusb *atusb = hw->priv;
+       struct device *dev = &atusb->usb_dev->dev;
+       uint8_t reg;
+
+       if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+               u16 addr = le16_to_cpu(filt->short_addr);
+
+               dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+               atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
+               atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+       }
+
+       if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+               u16 pan = le16_to_cpu(filt->pan_id);
+
+               dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+               atusb_write_reg(atusb, RG_PAN_ID_0, pan);
+               atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+       }
+
+       if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+               u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
+
+               memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
+               dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+               for (i = 0; i < 8; i++)
+                       atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+       }
+
+       if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+               dev_vdbg(dev,
+                        "atusb_set_hw_addr_filt called for panc change\n");
+               reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD));
+               if (filt->pan_coord)
+                       reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1);
+               else
+                       reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1);
+               atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg);
+       }
+
+       return atusb_get_and_clear_error(atusb);
+}
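Note that the register writes above deliberately ignore their individual
return values: atusb_write_reg() latches the first failure in atusb->err and
turns later calls into no-ops, and atusb_get_and_clear_error() reports the
outcome once at the end. A minimal sketch of that accumulate-then-check idiom,
with simplified types and a hypothetical do_io() transport call:

	struct dev_state {
		int err;			/* first error seen, 0 if none */
	};

	static void write_reg(struct dev_state *s, int reg, int val)
	{
		int ret;

		if (s->err)
			return;			/* earlier failure: skip I/O */
		ret = do_io(reg, val);		/* hypothetical transport call */
		if (ret < 0)
			s->err = ret;
	}

	static int get_and_clear_error(struct dev_state *s)
	{
		int err = s->err;

		s->err = 0;
		return err;
	}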
+
+static int atusb_start(struct ieee802154_hw *hw)
+{
+       struct atusb *atusb = hw->priv;
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+
+       dev_dbg(&usb_dev->dev, "atusb_start\n");
+       schedule_delayed_work(&atusb->work, 0);
+       atusb_command(atusb, ATUSB_RX_MODE, 1);
+       ret = atusb_get_and_clear_error(atusb);
+       if (ret < 0)
+               usb_kill_anchored_urbs(&atusb->idle_urbs);
+       return ret;
+}
+
+static void atusb_stop(struct ieee802154_hw *hw)
+{
+       struct atusb *atusb = hw->priv;
+       struct usb_device *usb_dev = atusb->usb_dev;
+
+       dev_dbg(&usb_dev->dev, "atusb_stop\n");
+       usb_kill_anchored_urbs(&atusb->idle_urbs);
+       atusb_command(atusb, ATUSB_RX_MODE, 0);
+       atusb_get_and_clear_error(atusb);
+}
+
+static struct ieee802154_ops atusb_ops = {
+       .owner                  = THIS_MODULE,
+       .xmit_async             = atusb_xmit,
+       .ed                     = atusb_ed,
+       .set_channel            = atusb_channel,
+       .start                  = atusb_start,
+       .stop                   = atusb_stop,
+       .set_hw_addr_filt       = atusb_set_hw_addr_filt,
+};
+
+/* ----- Firmware and chip version information ----------------------------- */
+
+static int atusb_get_and_show_revision(struct atusb *atusb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       unsigned char buffer[3];
+       int ret;
+
+       /* Get a couple of the ATmega firmware values */
+       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+                               ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+                               buffer, 3, 1000);
+       if (ret >= 0)
+               dev_info(&usb_dev->dev,
+                        "Firmware: major: %u, minor: %u, hardware type: %u\n",
+                        buffer[0], buffer[1], buffer[2]);
+       if (ret >= 0 && buffer[0] == 0 && buffer[1] < 2) {
+               dev_info(&usb_dev->dev,
+                        "Firmware version (%u.%u) predates our first public release.\n",
+                        buffer[0], buffer[1]);
+               dev_info(&usb_dev->dev, "Please update to version 0.2 or newer.\n");
+       }
+
+       return ret;
+}
+
+static int atusb_get_and_show_build(struct atusb *atusb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       char build[ATUSB_BUILD_SIZE + 1];
+       int ret;
+
+       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+                               ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+                               build, ATUSB_BUILD_SIZE, 1000);
+       if (ret >= 0) {
+               build[ret] = 0;
+               dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+       }
+
+       return ret;
+}
+
+static int atusb_get_and_show_chip(struct atusb *atusb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       uint8_t man_id_0, man_id_1, part_num, version_num;
+
+       man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
+       man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
+       part_num = atusb_read_reg(atusb, RG_PART_NUM);
+       version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+
+       if (atusb->err)
+               return atusb->err;
+
+       if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
+               dev_err(&usb_dev->dev,
+                       "non-Atmel transceiver xxxx%02x%02x\n",
+                       man_id_1, man_id_0);
+               goto fail;
+       }
+       if (part_num != 3 && part_num != 2) {
+               dev_err(&usb_dev->dev,
+                       "unexpected transceiver, part 0x%02x version 0x%02x\n",
+                       part_num, version_num);
+               goto fail;
+       }
+
+       dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+
+       return 0;
+
+fail:
+       atusb->err = -ENODEV;
+       return -ENODEV;
+}
+
+/* ----- Setup ------------------------------------------------------------- */
+
+static int atusb_probe(struct usb_interface *interface,
+                      const struct usb_device_id *id)
+{
+       struct usb_device *usb_dev = interface_to_usbdev(interface);
+       struct ieee802154_hw *hw;
+       struct atusb *atusb = NULL;
+       int ret = -ENOMEM;
+
+       hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
+       if (!hw)
+               return -ENOMEM;
+
+       atusb = hw->priv;
+       atusb->hw = hw;
+       atusb->usb_dev = usb_get_dev(usb_dev);
+       usb_set_intfdata(interface, atusb);
+
+       atusb->shutdown = 0;
+       atusb->err = 0;
+       INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
+       init_usb_anchor(&atusb->idle_urbs);
+       init_usb_anchor(&atusb->rx_urbs);
+
+       if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
+               goto fail;
+
+       atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
+       atusb->tx_dr.bRequest = ATUSB_TX;
+       atusb->tx_dr.wValue = cpu_to_le16(0);
+
+       atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!atusb->tx_urb)
+               goto fail;
+
+       hw->parent = &usb_dev->dev;
+       hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+                   IEEE802154_HW_AACK;
+
+       hw->phy->current_page = 0;
+       hw->phy->current_channel = 11;  /* reset default */
+       hw->phy->supported.channels[0] = 0x7FFF800;
+       ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+       atusb_command(atusb, ATUSB_RF_RESET, 0);
+       atusb_get_and_show_chip(atusb);
+       atusb_get_and_show_revision(atusb);
+       atusb_get_and_show_build(atusb);
+       ret = atusb_get_and_clear_error(atusb);
+       if (ret) {
+               dev_err(&atusb->usb_dev->dev,
+                       "%s: initialization failed, error = %d\n",
+                       __func__, ret);
+               goto fail;
+       }
+
+       ret = ieee802154_register_hw(hw);
+       if (ret)
+               goto fail;
+
+       /* If we just powered on, we're now in P_ON and need to enter TRX_OFF
+        * explicitly. Any resets after that will send us straight to TRX_OFF,
+        * making the command below redundant.
+        */
+       atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+       msleep(1);      /* reset => TRX_OFF, tTR13 = 37 us */
+
+#if 0
+       /* Calculating the maximum time available to empty the frame buffer
+        * on reception:
+        *
+        * According to [1], the inter-frame gap is
+        * R * 20 * 16 us + 128 us
+        * where R is a random number from 0 to 7. Furthermore, we have 20 bit
+        * times (80 us at 250 kbps) of SHR of the next frame before the
+        * transceiver begins storing data in the frame buffer.
+        *
+        * This yields a minimum time of 208 us between the last data of a
+        * frame and the first data of the next frame. This time is further
+        * reduced by interrupt latency in the atusb firmware.
+        *
+        * atusb currently needs about 500 us to retrieve a maximum-sized
+        * frame. We therefore have to allow reception of a new frame to begin
+        * while we retrieve the previous frame.
+        *
+        * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
+        *      network", Jennic 2006.
+        *     http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
+        */
+
+       atusb_write_reg(atusb,
+                       SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1));
+#endif
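+       /* Worked numbers for the bound derived above (R = 0, 250 kbps):
+        *   minimum inter-frame gap:  0 * 20 * 16 us + 128 us = 128 us
+        *   SHR of the next frame:    20 bits * 4 us/bit      =  80 us
+        *   frame-buffer drain time:  128 us + 80 us          = 208 us
+        */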
+       atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+
+       ret = atusb_get_and_clear_error(atusb);
+       if (!ret)
+               return 0;
+
+       dev_err(&atusb->usb_dev->dev,
+               "%s: setup failed, error = %d\n",
+               __func__, ret);
+
+       ieee802154_unregister_hw(hw);
+fail:
+       atusb_free_urbs(atusb);
+       usb_kill_urb(atusb->tx_urb);
+       usb_free_urb(atusb->tx_urb);
+       usb_put_dev(usb_dev);
+       ieee802154_free_hw(hw);
+       return ret;
+}
+
+static void atusb_disconnect(struct usb_interface *interface)
+{
+       struct atusb *atusb = usb_get_intfdata(interface);
+
+       dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+
+       atusb->shutdown = 1;
+       cancel_delayed_work_sync(&atusb->work);
+
+       usb_kill_anchored_urbs(&atusb->rx_urbs);
+       atusb_free_urbs(atusb);
+       usb_kill_urb(atusb->tx_urb);
+       usb_free_urb(atusb->tx_urb);
+
+       ieee802154_unregister_hw(atusb->hw);
+
+       ieee802154_free_hw(atusb->hw);
+
+       usb_set_intfdata(interface, NULL);
+       usb_put_dev(atusb->usb_dev);
+
+       pr_debug("atusb_disconnect done\n");
+}
+
+/* The devices we work with */
+static const struct usb_device_id atusb_device_table[] = {
+       {
+               .match_flags            = USB_DEVICE_ID_MATCH_DEVICE |
+                                         USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor               = ATUSB_VENDOR_ID,
+               .idProduct              = ATUSB_PRODUCT_ID,
+               .bInterfaceClass        = USB_CLASS_VENDOR_SPEC
+       },
+       /* end with null element */
+       {}
+};
+MODULE_DEVICE_TABLE(usb, atusb_device_table);
+
+static struct usb_driver atusb_driver = {
+       .name           = "atusb",
+       .probe          = atusb_probe,
+       .disconnect     = atusb_disconnect,
+       .id_table       = atusb_device_table,
+};
+module_usb_driver(atusb_driver);
+
+MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
+MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
+MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
+MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
new file mode 100644 (file)
index 0000000..0690edc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * atusb.h - Definitions shared between kernel and ATUSB firmware
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2, or
+ * (at your option) any later version.
+ *
+ * This file should be identical for kernel and firmware.
+ * Kernel: drivers/net/ieee802154/atusb.h
+ * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
+ */
+
+#ifndef _ATUSB_H
+#define _ATUSB_H
+
+#define ATUSB_VENDOR_ID        0x20b7  /* Qi Hardware */
+#define ATUSB_PRODUCT_ID 0x1540        /* 802.15.4, device 0 */
+                               /*     -- -         - */
+
+#define ATUSB_BUILD_SIZE 256   /* maximum build version/date message length */
+
+/* Commands to our device. Make sure this is synced with the firmware */
+enum atusb_requests {
+       ATUSB_ID                        = 0x00, /* system status/control grp */
+       ATUSB_BUILD,
+       ATUSB_RESET,
+       ATUSB_RF_RESET                  = 0x10, /* debug/test group */
+       ATUSB_POLL_INT,
+       ATUSB_TEST,                     /* atusb-sil only */
+       ATUSB_TIMER,
+       ATUSB_GPIO,
+       ATUSB_SLP_TR,
+       ATUSB_GPIO_CLEANUP,
+       ATUSB_REG_WRITE                 = 0x20, /* transceiver group */
+       ATUSB_REG_READ,
+       ATUSB_BUF_WRITE,
+       ATUSB_BUF_READ,
+       ATUSB_SRAM_WRITE,
+       ATUSB_SRAM_READ,
+       ATUSB_SPI_WRITE                 = 0x30, /* SPI group */
+       ATUSB_SPI_READ1,
+       ATUSB_SPI_READ2,
+       ATUSB_SPI_WRITE2_SYNC,
+       ATUSB_RX_MODE                   = 0x40, /* HardMAC group */
+       ATUSB_TX,
+};
+
+/* Direction   bRequest                wValue          wIndex  wLength
+ *
+ * ->host      ATUSB_ID                -               -       3
+ * ->host      ATUSB_BUILD             -               -       #bytes
+ * host->      ATUSB_RESET             -               -       0
+ *
+ * host->      ATUSB_RF_RESET          -               -       0
+ * ->host      ATUSB_POLL_INT          -               -       1
+ * host->      ATUSB_TEST              -               -       0
+ * ->host      ATUSB_TIMER             -               -       #bytes (6)
+ * ->host      ATUSB_GPIO              dir+data        mask+p# 3
+ * host->      ATUSB_SLP_TR            -               -       0
+ * host->      ATUSB_GPIO_CLEANUP      -               -       0
+ *
+ * host->      ATUSB_REG_WRITE         value           addr    0
+ * ->host      ATUSB_REG_READ          -               addr    1
+ * host->      ATUSB_BUF_WRITE         -               -       #bytes
+ * ->host      ATUSB_BUF_READ          -               -       #bytes
+ * host->      ATUSB_SRAM_WRITE        -               addr    #bytes
+ * ->host      ATUSB_SRAM_READ         -               addr    #bytes
+ *
+ * host->      ATUSB_SPI_WRITE         byte0           byte1   #bytes
+ * ->host      ATUSB_SPI_READ1         byte0           -       #bytes
+ * ->host      ATUSB_SPI_READ2         byte0           byte1   #bytes
+ * ->host      ATUSB_SPI_WRITE2_SYNC   byte0           byte1   0/1
+ *
+ * host->      ATUSB_RX_MODE           on              -       0
+ * host->      ATUSB_TX                flags           ack_seq #bytes
+ */
+
+#define ATUSB_REQ_FROM_DEV     (USB_TYPE_VENDOR | USB_DIR_IN)
+#define ATUSB_REQ_TO_DEV       (USB_TYPE_VENDOR | USB_DIR_OUT)
+
+#endif /* !_ATUSB_H */
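Per the request table above, a register read (ATUSB_REG_READ) is a one-byte IN
transfer with the register address in wIndex. A hedged sketch of issuing one
with the stock usb_control_msg() primitive (the driver itself goes through its
atusb_control_msg() wrapper; a heap buffer is used because USB control
transfers must not target the stack):

	static int atusb_reg_read_example(struct usb_device *usb_dev, u16 addr)
	{
		u8 *value;
		int ret;

		value = kmalloc(1, GFP_KERNEL);
		if (!value)
			return -ENOMEM;
		ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
				      ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				      0, addr, value, 1, 1000);
		if (ret >= 0)
			ret = *value;	/* register contents on success */
		kfree(value);
		return ret;
	}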
index f833b8bb66634ed0e6057235785c67e7c072f4b7..84b28a05c5a14a289c489d0064c70a637d450b57 100644 (file)
@@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv)
        ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
 
        /* We only support 2.4 GHz */
-       priv->hw->phy->channels_supported[0] = 0x7FFF800;
+       priv->hw->phy->supported.channels[0] = 0x7FFF800;
        priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
                          IEEE802154_HW_AFILT;
 
index dc2bfb600b4bd491a8bc2a41e12e84a87e1bff77..9d0da4ec3e8c91a6aab44231c9e701691a814799 100644 (file)
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
 
-static int numlbs = 1;
+static int numlbs = 2;
 
-struct fakelb_dev_priv {
-       struct ieee802154_hw *hw;
+static LIST_HEAD(fakelb_phys);
+static DEFINE_SPINLOCK(fakelb_phys_lock);
 
-       struct list_head list;
-       struct fakelb_priv *fake;
+static LIST_HEAD(fakelb_ifup_phys);
+static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
 
-       spinlock_t lock;
-       bool working;
-};
+struct fakelb_phy {
+       struct ieee802154_hw *hw;
+
+       u8 page;
+       u8 channel;
 
-struct fakelb_priv {
        struct list_head list;
-       rwlock_t lock;
+       struct list_head list_ifup;
 };
 
-static int
-fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
        BUG_ON(!level);
        *level = 0xbe;
@@ -53,78 +53,63 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
        return 0;
 }
 
-static int
-fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
 {
-       pr_debug("set channel to %d\n", channel);
+       struct fakelb_phy *phy = hw->priv;
 
+       write_lock_bh(&fakelb_ifup_phys_lock);
+       phy->page = page;
+       phy->channel = channel;
+       write_unlock_bh(&fakelb_ifup_phys_lock);
        return 0;
 }
 
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
-       struct sk_buff *newskb;
+       struct fakelb_phy *current_phy = hw->priv, *phy;
 
-       spin_lock(&priv->lock);
-       if (priv->working) {
-               newskb = pskb_copy(skb, GFP_ATOMIC);
-               ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
-       }
-       spin_unlock(&priv->lock);
-}
+       read_lock_bh(&fakelb_ifup_phys_lock);
+       list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) {
+               if (current_phy == phy)
+                       continue;
 
-static int
-fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
-{
-       struct fakelb_dev_priv *priv = hw->priv;
-       struct fakelb_priv *fake = priv->fake;
-
-       read_lock_bh(&fake->lock);
-       if (priv->list.next == priv->list.prev) {
-               /* we are the only one device */
-               fakelb_hw_deliver(priv, skb);
-       } else {
-               struct fakelb_dev_priv *dp;
-               list_for_each_entry(dp, &priv->fake->list, list) {
-                       if (dp != priv &&
-                           (dp->hw->phy->current_channel ==
-                            priv->hw->phy->current_channel))
-                               fakelb_hw_deliver(dp, skb);
+               if (current_phy->page == phy->page &&
+                   current_phy->channel == phy->channel) {
+                       struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+                       if (newskb)
+                               ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc);
                }
        }
-       read_unlock_bh(&fake->lock);
+       read_unlock_bh(&fakelb_ifup_phys_lock);
 
+       ieee802154_xmit_complete(hw, skb, false);
        return 0;
 }
 
-static int
-fakelb_hw_start(struct ieee802154_hw *hw) {
-       struct fakelb_dev_priv *priv = hw->priv;
-       int ret = 0;
+static int fakelb_hw_start(struct ieee802154_hw *hw)
+{
+       struct fakelb_phy *phy = hw->priv;
 
-       spin_lock(&priv->lock);
-       if (priv->working)
-               ret = -EBUSY;
-       else
-               priv->working = 1;
-       spin_unlock(&priv->lock);
+       write_lock_bh(&fakelb_ifup_phys_lock);
+       list_add(&phy->list_ifup, &fakelb_ifup_phys);
+       write_unlock_bh(&fakelb_ifup_phys_lock);
 
-       return ret;
+       return 0;
 }
 
-static void
-fakelb_hw_stop(struct ieee802154_hw *hw) {
-       struct fakelb_dev_priv *priv = hw->priv;
+static void fakelb_hw_stop(struct ieee802154_hw *hw)
+{
+       struct fakelb_phy *phy = hw->priv;
 
-       spin_lock(&priv->lock);
-       priv->working = 0;
-       spin_unlock(&priv->lock);
+       write_lock_bh(&fakelb_ifup_phys_lock);
+       list_del(&phy->list_ifup);
+       write_unlock_bh(&fakelb_ifup_phys_lock);
 }
 
 static const struct ieee802154_ops fakelb_ops = {
        .owner = THIS_MODULE,
-       .xmit_sync = fakelb_hw_xmit,
+       .xmit_async = fakelb_hw_xmit,
        .ed = fakelb_hw_ed,
        .set_channel = fakelb_hw_channel,
        .start = fakelb_hw_start,
@@ -135,54 +120,54 @@ static const struct ieee802154_ops fakelb_ops = {
 module_param(numlbs, int, 0);
 MODULE_PARM_DESC(numlbs, "number of pseudo devices");
 
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+static int fakelb_add_one(struct device *dev)
 {
-       struct fakelb_dev_priv *priv;
-       int err;
        struct ieee802154_hw *hw;
+       struct fakelb_phy *phy;
+       int err;
 
-       hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+       hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops);
        if (!hw)
                return -ENOMEM;
 
-       priv = hw->priv;
-       priv->hw = hw;
+       phy = hw->priv;
+       phy->hw = hw;
 
        /* 868 MHz BPSK 802.15.4-2003 */
-       hw->phy->channels_supported[0] |= 1;
+       hw->phy->supported.channels[0] |= 1;
        /* 915 MHz BPSK 802.15.4-2003 */
-       hw->phy->channels_supported[0] |= 0x7fe;
+       hw->phy->supported.channels[0] |= 0x7fe;
        /* 2.4 GHz O-QPSK 802.15.4-2003 */
-       hw->phy->channels_supported[0] |= 0x7FFF800;
+       hw->phy->supported.channels[0] |= 0x7FFF800;
        /* 868 MHz ASK 802.15.4-2006 */
-       hw->phy->channels_supported[1] |= 1;
+       hw->phy->supported.channels[1] |= 1;
        /* 915 MHz ASK 802.15.4-2006 */
-       hw->phy->channels_supported[1] |= 0x7fe;
+       hw->phy->supported.channels[1] |= 0x7fe;
        /* 868 MHz O-QPSK 802.15.4-2006 */
-       hw->phy->channels_supported[2] |= 1;
+       hw->phy->supported.channels[2] |= 1;
        /* 915 MHz O-QPSK 802.15.4-2006 */
-       hw->phy->channels_supported[2] |= 0x7fe;
+       hw->phy->supported.channels[2] |= 0x7fe;
        /* 2.4 GHz CSS 802.15.4a-2007 */
-       hw->phy->channels_supported[3] |= 0x3fff;
+       hw->phy->supported.channels[3] |= 0x3fff;
        /* UWB Sub-gigahertz 802.15.4a-2007 */
-       hw->phy->channels_supported[4] |= 1;
+       hw->phy->supported.channels[4] |= 1;
        /* UWB Low band 802.15.4a-2007 */
-       hw->phy->channels_supported[4] |= 0x1e;
+       hw->phy->supported.channels[4] |= 0x1e;
        /* UWB High band 802.15.4a-2007 */
-       hw->phy->channels_supported[4] |= 0xffe0;
+       hw->phy->supported.channels[4] |= 0xffe0;
        /* 750 MHz O-QPSK 802.15.4c-2009 */
-       hw->phy->channels_supported[5] |= 0xf;
+       hw->phy->supported.channels[5] |= 0xf;
        /* 750 MHz MPSK 802.15.4c-2009 */
-       hw->phy->channels_supported[5] |= 0xf0;
+       hw->phy->supported.channels[5] |= 0xf0;
        /* 950 MHz BPSK 802.15.4d-2009 */
-       hw->phy->channels_supported[6] |= 0x3ff;
+       hw->phy->supported.channels[6] |= 0x3ff;
        /* 950 MHz GFSK 802.15.4d-2009 */
-       hw->phy->channels_supported[6] |= 0x3ffc00;
+       hw->phy->supported.channels[6] |= 0x3ffc00;
 
-       INIT_LIST_HEAD(&priv->list);
-       priv->fake = fake;
-
-       spin_lock_init(&priv->lock);
+       ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+       /* fake phy channel 13 as default */
+       hw->phy->current_channel = 13;
+       phy->channel = hw->phy->current_channel;
 
        hw->parent = dev;
 
@@ -190,67 +175,55 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
        if (err)
                goto err_reg;
 
-       write_lock_bh(&fake->lock);
-       list_add_tail(&priv->list, &fake->list);
-       write_unlock_bh(&fake->lock);
+       spin_lock(&fakelb_phys_lock);
+       list_add_tail(&phy->list, &fakelb_phys);
+       spin_unlock(&fakelb_phys_lock);
 
        return 0;
 
 err_reg:
-       ieee802154_free_hw(priv->hw);
+       ieee802154_free_hw(phy->hw);
        return err;
 }
 
-static void fakelb_del(struct fakelb_dev_priv *priv)
+static void fakelb_del(struct fakelb_phy *phy)
 {
-       write_lock_bh(&priv->fake->lock);
-       list_del(&priv->list);
-       write_unlock_bh(&priv->fake->lock);
+       list_del(&phy->list);
 
-       ieee802154_unregister_hw(priv->hw);
-       ieee802154_free_hw(priv->hw);
+       ieee802154_unregister_hw(phy->hw);
+       ieee802154_free_hw(phy->hw);
 }
 
 static int fakelb_probe(struct platform_device *pdev)
 {
-       struct fakelb_priv *priv;
-       struct fakelb_dev_priv *dp;
-       int err = -ENOMEM;
-       int i;
-
-       priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
-                           GFP_KERNEL);
-       if (!priv)
-               goto err_alloc;
-
-       INIT_LIST_HEAD(&priv->list);
-       rwlock_init(&priv->lock);
+       struct fakelb_phy *phy, *tmp;
+       int err, i;
 
        for (i = 0; i < numlbs; i++) {
-               err = fakelb_add_one(&pdev->dev, priv);
+               err = fakelb_add_one(&pdev->dev);
                if (err < 0)
                        goto err_slave;
        }
 
-       platform_set_drvdata(pdev, priv);
        dev_info(&pdev->dev, "added ieee802154 hardware\n");
        return 0;
 
 err_slave:
-       list_for_each_entry(dp, &priv->list, list)
-               fakelb_del(dp);
-err_alloc:
+       spin_lock(&fakelb_phys_lock);
+       list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+               fakelb_del(phy);
+       spin_unlock(&fakelb_phys_lock);
        return err;
 }
 
 static int fakelb_remove(struct platform_device *pdev)
 {
-       struct fakelb_priv *priv = platform_get_drvdata(pdev);
-       struct fakelb_dev_priv *dp, *temp;
-
-       list_for_each_entry_safe(dp, temp, &priv->list, list)
-               fakelb_del(dp);
+       struct fakelb_phy *phy, *tmp;
 
+       spin_lock(&fakelb_phys_lock);
+       list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+               fakelb_del(phy);
+       spin_unlock(&fakelb_phys_lock);
        return 0;
 }
 
index fba2dfd910f7372b2aedc355039cc71e2433c008..f2a1bd122a74b4032e7feb997a36652c9683905c 100644 (file)
@@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi)
 
        devrec->hw->priv = devrec;
        devrec->hw->parent = &devrec->spi->dev;
-       devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+       devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
        devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
                            IEEE802154_HW_AFILT;
 
index ebd63fd05649b544c7bbeb11ef507efd5e26077e..dc4254b8cbbc2075bf1bb2debadd4587d3721b1f 100644 (file)
 #define UBI32_CORE1_CLK                                279
 #define UBI32_CORE2_CLK                                280
 #define EBI2_AON_CLK                           281
+#define NSSTCM_CLK_SRC                         282
+#define NSSTCM_CLK                             283
 
 #endif
index 0ad5ef930b5d64d54db112bafdd80d39a49faa46..de9c8140931a2aab610178ce2ea7d264a5ba45e9 100644 (file)
 #define USB30_1_PHY_RESET                              112
 #define NSSFB0_RESET                                   113
 #define NSSFB1_RESET                                   114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET                 115
+#define UBI32_CORE1_CLAMP_RESET                                116
+#define UBI32_CORE1_AHB_RESET                          117
+#define UBI32_CORE1_AXI_RESET                          118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET                 119
+#define UBI32_CORE2_CLAMP_RESET                                120
+#define UBI32_CORE2_AHB_RESET                          121
+#define UBI32_CORE2_AXI_RESET                          122
+#define GMAC_CORE1_RESET                               123
+#define GMAC_CORE2_RESET                               124
+#define GMAC_CORE3_RESET                               125
+#define GMAC_CORE4_RESET                               126
+#define GMAC_AHB_RESET                                 127
+#define NSS_CH0_RST_RX_CLK_N_RESET                     128
+#define NSS_CH0_RST_TX_CLK_N_RESET                     129
+#define NSS_CH0_RST_RX_125M_N_RESET                    130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET                 131
+#define NSS_CH0_RST_TX_125M_N_RESET                    132
+#define NSS_CH1_RST_RX_CLK_N_RESET                     133
+#define NSS_CH1_RST_TX_CLK_N_RESET                     134
+#define NSS_CH1_RST_RX_125M_N_RESET                    135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET                 136
+#define NSS_CH1_RST_TX_125M_N_RESET                    137
+#define NSS_CH2_RST_RX_CLK_N_RESET                     138
+#define NSS_CH2_RST_TX_CLK_N_RESET                     139
+#define NSS_CH2_RST_RX_125M_N_RESET                    140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET                 141
+#define NSS_CH2_RST_TX_125M_N_RESET                    142
+#define NSS_CH3_RST_RX_CLK_N_RESET                     143
+#define NSS_CH3_RST_TX_CLK_N_RESET                     144
+#define NSS_CH3_RST_RX_125M_N_RESET                    145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET                 146
+#define NSS_CH3_RST_TX_125M_N_RESET                    147
+#define NSS_RST_RX_250M_125M_N_RESET                   148
+#define NSS_RST_TX_250M_125M_N_RESET                   149
+#define NSS_QSGMII_TXPI_RST_N_RESET                    150
+#define NSS_QSGMII_CDR_RST_N_RESET                     151
+#define NSS_SGMII2_CDR_RST_N_RESET                     152
+#define NSS_SGMII3_CDR_RST_N_RESET                     153
+#define NSS_CAL_PRBS_RST_N_RESET                       154
+#define NSS_LCKDT_RST_N_RESET                          155
+#define NSS_SRDS_N_RESET                               156
+
 #endif
index 8821b9a8689e67756df757f632353b05c7e519e8..5f520f5f087e7fabc4cff0716c8619ffd90e9141 100644 (file)
@@ -123,7 +123,10 @@ struct bpf_prog_aux {
        const struct bpf_verifier_ops *ops;
        struct bpf_map **used_maps;
        struct bpf_prog *prog;
-       struct work_struct work;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
 };
 
 struct bpf_array {
@@ -153,6 +156,7 @@ void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
 void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get(struct fd f);
 void bpf_map_put(struct bpf_map *map);
index b9ab677c0c0ad8cba177409e9ad87c1ed7cc43fc..a40d29846ac2b2ad325ee01e19c04141701c3e61 100644 (file)
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
 /**
  * __vlan_get_tag - get the VLAN ID that is part of the payload
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not of VLAN type
  */
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if @skb->vlan_tci is not set correctly
  */
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
 /**
  * vlan_get_tag - get the VLAN ID from the skb
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not VLAN tagged
  */
index 83e80ab9450048d121b739bd23b85ccb14a39b36..ad31e476873f8b8f9a5a8fd0f04fb70075a4df7c 100644 (file)
@@ -46,8 +46,9 @@
 
 #define MAX_MSIX_P_PORT                17
 #define MAX_MSIX               64
-#define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+                                        (dev_cap).num_ports * MIN_MSIX_P_PORT)
 
 #define MLX4_MAX_100M_UNITS_VAL                255     /*
                                                 * work around: can't set values
@@ -528,7 +529,6 @@ struct mlx4_caps {
        int                     num_eqs;
        int                     reserved_eqs;
        int                     num_comp_vectors;
-       int                     comp_pool;
        int                     num_mpts;
        int                     max_fmr_maps;
        int                     num_mtts;
@@ -1332,10 +1332,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-                  int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
 int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
 
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
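The new MLX4_IS_LEGACY_EQ_MODE() macro above flags a device that cannot give
every port its MIN_MSIX_P_PORT completion vectors, in which case the driver
falls back to the legacy shared-EQ scheme. A standalone illustration with
made-up numbers:

	#include <stdio.h>

	#define MIN_MSIX_P_PORT 5
	#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
						 (dev_cap).num_ports * MIN_MSIX_P_PORT)

	struct dev_cap { int num_comp_vectors; int num_ports; };

	int main(void)
	{
		/* 8 vectors < 2 ports * 5 = 10 -> legacy EQ mode */
		struct dev_cap cap = { .num_comp_vectors = 8, .num_ports = 2 };

		printf("legacy: %d\n", MLX4_IS_LEGACY_EQ_MODE(cap));
		return 0;
	}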
index 2695ced222df23b56df42252fb7319c7d7d8b157..abc4767695e4bcd6a53fa1eaf8bd8d7809349d26 100644 (file)
@@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                       struct mlx5_query_cq_mbox_out *out);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+                                  struct mlx5_core_cq *cq, u16 cq_period,
+                                  u16 cq_max_count);
 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 
index abf65c7904214b75f5326a7f576df3b8e2f0a9d8..b288c538347addc82e026369f69546ff5a5bd9dd 100644 (file)
@@ -35,6 +35,7 @@
 
 #include <linux/types.h>
 #include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
 
 #if defined(__LITTLE_ENDIAN)
 #define MLX5_SET_HOST_ENDIANNESS       0
@@ -58,6 +59,8 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
 
                     << __mlx5_dw_bit_off(typ, fld))); \
 } while (0)
 
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+       BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
+       *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+       cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+                    (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+                    << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
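MLX5_SET_TO_ONES() is the mask-building counterpart of MLX5_SET(): it writes
all ones into one field of a firmware-layout structure, which is how
match-criteria masks for the new flow tables are assembled. A hedged usage
fragment (field names as defined by the mlx5_ifc layouts):

	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)] = {};

	/* match on the full destination MAC: raise every mask bit */
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.dmac_15_0);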
+
 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
 __mlx5_mask(typ, fld))
@@ -264,6 +275,7 @@ enum {
        MLX5_OPCODE_RDMA_WRITE_IMM      = 0x09,
        MLX5_OPCODE_SEND                = 0x0a,
        MLX5_OPCODE_SEND_IMM            = 0x0b,
+       MLX5_OPCODE_LSO                 = 0x0e,
        MLX5_OPCODE_RDMA_READ           = 0x10,
        MLX5_OPCODE_ATOMIC_CS           = 0x11,
        MLX5_OPCODE_ATOMIC_FA           = 0x12,
@@ -312,13 +324,6 @@ enum {
        MLX5_CAP_OFF_CMDIF_CSUM         = 46,
 };
 
-enum {
-       HCA_CAP_OPMOD_GET_MAX   = 0,
-       HCA_CAP_OPMOD_GET_CUR   = 1,
-       HCA_CAP_OPMOD_GET_ODP_MAX = 4,
-       HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
 struct mlx5_inbox_hdr {
        __be16          opcode;
        u8              rsvd[4];
@@ -541,6 +546,10 @@ struct mlx5_cmd_prot_block {
        u8              sig;
 };
 
+enum {
+       MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
 struct mlx5_err_cqe {
        u8      rsvd0[32];
        __be32  srqn;
@@ -554,13 +563,22 @@ struct mlx5_err_cqe {
 };
 
 struct mlx5_cqe64 {
-       u8              rsvd0[17];
+       u8              rsvd0[4];
+       u8              lro_tcppsh_abort_dupack;
+       u8              lro_min_ttl;
+       __be16          lro_tcp_win;
+       __be32          lro_ack_seq_num;
+       __be32          rss_hash_result;
+       u8              rss_hash_type;
        u8              ml_path;
-       u8              rsvd20[4];
+       u8              rsvd20[2];
+       __be16          check_sum;
        __be16          slid;
        __be32          flags_rqpn;
-       u8              rsvd28[4];
-       __be32          srqn;
+       u8              hds_ip_ext;
+       u8              l4_hdr_type_etc;
+       __be16          vlan_info;
+       __be32          srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
        __be32          imm_inval_pkey;
        u8              rsvd40[4];
        __be32          byte_cnt;
@@ -571,6 +589,40 @@ struct mlx5_cqe64 {
        u8              op_own;
 };
 
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+       return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+       return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+       return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+       CQE_L4_HDR_TYPE_NONE                    = 0x0,
+       CQE_L4_HDR_TYPE_TCP_NO_ACK              = 0x1,
+       CQE_L4_HDR_TYPE_UDP                     = 0x2,
+       CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA         = 0x3,
+       CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA        = 0x4,
+};
+
+enum {
+       CQE_RSS_HTYPE_IP        = 0x3 << 6,
+       CQE_RSS_HTYPE_L4        = 0x3 << 2,
+};
+
+enum {
+       CQE_L2_OK       = 1 << 0,
+       CQE_L3_OK       = 1 << 1,
+       CQE_L4_OK       = 1 << 2,
+};
+
 struct mlx5_sig_err_cqe {
        u8              rsvd0[16];
        __be32          expected_trans_sig;
@@ -996,4 +1048,128 @@ struct mlx5_destroy_psv_out {
        u8                      rsvd[8];
 };
 
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+       VPORT_STATE_DOWN                = 0x0,
+       VPORT_STATE_UP                  = 0x1,
+};
+
+enum {
+       MLX5_L3_PROT_TYPE_IPV4          = 0,
+       MLX5_L3_PROT_TYPE_IPV6          = 1,
+};
+
+enum {
+       MLX5_L4_PROT_TYPE_TCP           = 0,
+       MLX5_L4_PROT_TYPE_UDP           = 1,
+};
+
+enum {
+       MLX5_HASH_FIELD_SEL_SRC_IP      = 1 << 0,
+       MLX5_HASH_FIELD_SEL_DST_IP      = 1 << 1,
+       MLX5_HASH_FIELD_SEL_L4_SPORT    = 1 << 2,
+       MLX5_HASH_FIELD_SEL_L4_DPORT    = 1 << 3,
+       MLX5_HASH_FIELD_SEL_IPSEC_SPI   = 1 << 4,
+};
+
+enum {
+       MLX5_MATCH_OUTER_HEADERS        = 1 << 0,
+       MLX5_MATCH_MISC_PARAMETERS      = 1 << 1,
+       MLX5_MATCH_INNER_HEADERS        = 1 << 2,
+};
+
+enum {
+       MLX5_FLOW_TABLE_TYPE_NIC_RCV    = 0,
+       MLX5_FLOW_TABLE_TYPE_ESWITCH    = 4,
+};
+
+enum {
+       MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT       = 0,
+       MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE  = 1,
+       MLX5_FLOW_CONTEXT_DEST_TYPE_TIR         = 2,
+};
+
+enum {
+       MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+       MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+       HCA_CAP_OPMOD_GET_MAX   = 0,
+       HCA_CAP_OPMOD_GET_CUR   = 1,
+};
+
+enum mlx5_cap_type {
+       MLX5_CAP_GENERAL = 0,
+       MLX5_CAP_ETHERNET_OFFLOADS,
+       MLX5_CAP_ODP,
+       MLX5_CAP_ATOMIC,
+       MLX5_CAP_ROCE,
+       MLX5_CAP_IPOIB_OFFLOADS,
+       MLX5_CAP_EOIB_OFFLOADS,
+       MLX5_CAP_FLOW_TABLE,
+       /* NUM OF CAP Types */
+       MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+       MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+       MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+       MLX5_GET(per_protocol_networking_offload_caps,\
+                mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+       MLX5_GET(per_protocol_networking_offload_caps,\
+                mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+       MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+       MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+       MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+       MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+       MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+       MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+       MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
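These accessors replace the fields of the removed struct mlx5_general_caps:
every capability read is now an MLX5_GET() into the per-type
hca_caps_cur/hca_caps_max arrays on the device. Typical reads look like this
(fragment, assuming a struct mlx5_core_dev *mdev in scope; field names from
the corresponding mlx5_ifc layouts):

	u8 log_max_qp = MLX5_CAP_GEN(mdev, log_max_qp);		/* current */
	u8 max_log_max_qp = MLX5_CAP_GEN_MAX(mdev, log_max_qp);	/* device max */

	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		return -ENOTSUPP;	/* feature not offered by this HCA */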
+enum {
+       MLX5_CMD_STAT_OK                        = 0x0,
+       MLX5_CMD_STAT_INT_ERR                   = 0x1,
+       MLX5_CMD_STAT_BAD_OP_ERR                = 0x2,
+       MLX5_CMD_STAT_BAD_PARAM_ERR             = 0x3,
+       MLX5_CMD_STAT_BAD_SYS_STATE_ERR         = 0x4,
+       MLX5_CMD_STAT_BAD_RES_ERR               = 0x5,
+       MLX5_CMD_STAT_RES_BUSY                  = 0x6,
+       MLX5_CMD_STAT_LIM_ERR                   = 0x8,
+       MLX5_CMD_STAT_BAD_RES_STATE_ERR         = 0x9,
+       MLX5_CMD_STAT_IX_ERR                    = 0xa,
+       MLX5_CMD_STAT_NO_RES_ERR                = 0xf,
+       MLX5_CMD_STAT_BAD_INP_LEN_ERR           = 0x50,
+       MLX5_CMD_STAT_BAD_OUTP_LEN_ERR          = 0x51,
+       MLX5_CMD_STAT_BAD_QP_STATE_ERR          = 0x10,
+       MLX5_CMD_STAT_BAD_PKT_ERR               = 0x30,
+       MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR    = 0x40,
+};
+
 #endif /* MLX5_DEVICE_H */
index 9a90e7523dc24d2f7f29467023c8845cbf50cff7..7fa26f03acc1845d21f656b08c6eda81996565f4 100644 (file)
@@ -44,7 +44,6 @@
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
 
 enum {
        MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
 };
 
 enum {
-       MLX5_MAX_EQ_NAME        = 32
+       MLX5_MAX_IRQ_NAME       = 32
 };
 
 enum {
@@ -150,6 +149,11 @@ enum mlx5_dev_event {
        MLX5_DEV_EVENT_CLIENT_REREG,
 };
 
+enum mlx5_port_status {
+       MLX5_PORT_UP        = 1 << 1,
+       MLX5_PORT_DOWN      = 1 << 2,
+};
+
 struct mlx5_uuar_info {
        struct mlx5_uar        *uars;
        int                     num_uars;
@@ -269,56 +273,7 @@ struct mlx5_cmd {
 struct mlx5_port_caps {
        int     gid_table_len;
        int     pkey_table_len;
-};
-
-struct mlx5_general_caps {
-       u8      log_max_eq;
-       u8      log_max_cq;
-       u8      log_max_qp;
-       u8      log_max_mkey;
-       u8      log_max_pd;
-       u8      log_max_srq;
-       u8      log_max_strq;
-       u8      log_max_mrw_sz;
-       u8      log_max_bsf_list_size;
-       u8      log_max_klm_list_size;
-       u32     max_cqes;
-       int     max_wqes;
-       u32     max_eqes;
-       u32     max_indirection;
-       int     max_sq_desc_sz;
-       int     max_rq_desc_sz;
-       int     max_dc_sq_desc_sz;
-       u64     flags;
-       u16     stat_rate_support;
-       int     log_max_msg;
-       int     num_ports;
-       u8      log_max_ra_res_qp;
-       u8      log_max_ra_req_qp;
-       int     max_srq_wqes;
-       int     bf_reg_size;
-       int     bf_regs_per_page;
-       struct mlx5_port_caps   port[MLX5_MAX_PORTS];
-       u8                      ext_port_cap[MLX5_MAX_PORTS];
-       int     max_vf;
-       u32     reserved_lkey;
-       u8      local_ca_ack_delay;
-       u8      log_max_mcg;
-       u32     max_qp_mcg;
-       int     min_page_sz;
-       int     pd_cap;
-       u32     max_qp_counters;
-       u32     pkey_table_size;
-       u8      log_max_ra_req_dc;
-       u8      log_max_ra_res_dc;
-       u32     uar_sz;
-       u8      min_log_pg_sz;
-       u8      log_max_xrcd;
-       u16     log_uar_page_sz;
-};
-
-struct mlx5_caps {
-       struct mlx5_general_caps gen;
+       u8      ext_port_cap;
 };
 
 struct mlx5_cmd_mailbox {
@@ -334,8 +289,6 @@ struct mlx5_buf_list {
 
 struct mlx5_buf {
        struct mlx5_buf_list    direct;
-       struct mlx5_buf_list   *page_list;
-       int                     nbufs;
        int                     npages;
        int                     size;
        u8                      page_shift;
@@ -351,7 +304,6 @@ struct mlx5_eq {
        u8                      eqn;
        int                     nent;
        u64                     mask;
-       char                    name[MLX5_MAX_EQ_NAME];
        struct list_head        list;
        int                     index;
        struct mlx5_rsc_debug   *dbg;
@@ -414,7 +366,6 @@ struct mlx5_eq_table {
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
-       struct msix_entry       *msix_arr;
        int                     num_comp_vectors;
        /* protect EQs list
         */
@@ -467,9 +418,16 @@ struct mlx5_mr_table {
        struct radix_tree_root  tree;
 };
 
+struct mlx5_irq_info {
+       cpumask_var_t mask;
+       char name[MLX5_MAX_IRQ_NAME];
+};
+
 struct mlx5_priv {
        char                    name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table    eq_table;
+       struct msix_entry       *msix_arr;
+       struct mlx5_irq_info    *irq_info;
        struct mlx5_uuar_info   uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
@@ -520,7 +478,9 @@ struct mlx5_core_dev {
        u8                      rev_id;
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_cmd         cmd;
-       struct mlx5_caps        caps;
+       struct mlx5_port_caps   port_caps[MLX5_MAX_PORTS];
+       u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+       u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        void                    (*event) (struct mlx5_core_dev *dev,
@@ -529,6 +489,7 @@ struct mlx5_core_dev {
        struct mlx5_priv        priv;
        struct mlx5_profile     *profile;
        atomic_t                num_qps;
+       u32                     issi;
 };
 
 struct mlx5_db {
@@ -549,6 +510,11 @@ enum {
        MLX5_COMP_EQ_SIZE = 1024,
 };
 
+enum {
+       MLX5_PTYS_IB = 1 << 0,
+       MLX5_PTYS_EN = 1 << 2,
+};
+
 struct mlx5_db_pgdir {
        struct list_head        list;
        DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -586,11 +552,7 @@ struct mlx5_pas {
 
 static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
 {
-       if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
                return buf->direct.buf + offset;
-       else
-               return buf->page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
 }
 
 extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +616,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
 int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
-                      u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+                      enum mlx5_cap_mode cap_mode);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,12 +627,13 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(void);
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
-                  struct mlx5_buf *buf);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                                      gfp_t flags, int npages);
@@ -734,7 +697,23 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);
+
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+                        int ptys_size, int proto_mask);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+                             u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+                               u32 *proto_admin, int proto_mask);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+                       int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+                        enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu);
 
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644 (file)
index 0000000..5f922c6
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+       u8      log_sz;
+       u8      match_criteria_enable;
+       u32     match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+                            u16 num_groups,
+                            struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+                             void *match_criteria, void *flow_context,
+                             u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
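A hedged usage sketch (not part of the patch): the opaque handle returned by mlx5_create_flow_table() is threaded through the other calls; a group with log_sz = 0 and no match criteria enabled holds a single catch-all entry. The level/table-type constants, the flow_context buffer, and example_flow_table are assumptions.

	/* Hypothetical sketch: one group, one wildcard entry. */
	static int example_flow_table(struct mlx5_core_dev *mdev, void *flow_context)
	{
		struct mlx5_flow_table_group g = {
			.log_sz = 0,			/* 2^0 = 1 entry in the group */
			.match_criteria_enable = 0,	/* no criteria: match all */
		};
		u32 flow_index;
		void *ft;
		int err;

		ft = mlx5_create_flow_table(mdev, 0 /* level */, 0 /* table type */,
					    1 /* num_groups */, &g);
		if (!ft)
			return -ENOMEM;

		err = mlx5_add_flow_table_entry(ft, g.match_criteria_enable,
						g.match_criteria, flow_context,
						&flow_index);
		if (err)
			mlx5_destroy_flow_table(ft);
		return err;
	}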
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cb3ad17edd1f5959b0499b82899ec95ada191025..b27e9f6e090a028af09483ec2d7b93a056d7fb6d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- */
-
+*/
 #ifndef MLX5_IFC_H
 #define MLX5_IFC_H
 
+enum {
+       MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS                   = 0x0,
+       MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED             = 0x1,
+       MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED           = 0x2,
+       MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED                  = 0x3,
+       MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED                    = 0x13,
+       MLX5_EVENT_TYPE_CODING_SRQ_LIMIT                           = 0x14,
+       MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED          = 0x1c,
+       MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION            = 0x1d,
+       MLX5_EVENT_TYPE_CODING_CQ_ERROR                            = 0x4,
+       MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR         = 0x5,
+       MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED               = 0x7,
+       MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT                    = 0xc,
+       MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR      = 0x10,
+       MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR     = 0x11,
+       MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR        = 0x12,
+       MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR                      = 0x8,
+       MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE                   = 0x9,
+       MLX5_EVENT_TYPE_CODING_GPIO_EVENT                          = 0x15,
+       MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+       MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+       MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT                      = 0x1b,
+       MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT         = 0x1f,
+       MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION        = 0xa,
+       MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb
+};
+
+enum {
+       MLX5_MODIFY_TIR_BITMASK_LRO                   = 0x0,
+       MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE        = 0x1,
+       MLX5_MODIFY_TIR_BITMASK_HASH                  = 0x2,
+       MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN   = 0x3
+};
+
 enum {
        MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
        MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
@@ -43,6 +76,8 @@ enum {
        MLX5_CMD_OP_QUERY_PAGES                   = 0x107,
        MLX5_CMD_OP_MANAGE_PAGES                  = 0x108,
        MLX5_CMD_OP_SET_HCA_CAP                   = 0x109,
+       MLX5_CMD_OP_QUERY_ISSI                    = 0x10a,
+       MLX5_CMD_OP_SET_ISSI                      = 0x10b,
        MLX5_CMD_OP_CREATE_MKEY                   = 0x200,
        MLX5_CMD_OP_QUERY_MKEY                    = 0x201,
        MLX5_CMD_OP_DESTROY_MKEY                  = 0x202,
@@ -66,6 +101,7 @@ enum {
        MLX5_CMD_OP_2ERR_QP                       = 0x507,
        MLX5_CMD_OP_2RST_QP                       = 0x50a,
        MLX5_CMD_OP_QUERY_QP                      = 0x50b,
+       MLX5_CMD_OP_SQD_RTS_QP                    = 0x50c,
        MLX5_CMD_OP_INIT2INIT_QP                  = 0x50e,
        MLX5_CMD_OP_CREATE_PSV                    = 0x600,
        MLX5_CMD_OP_DESTROY_PSV                   = 0x601,
@@ -73,7 +109,10 @@ enum {
        MLX5_CMD_OP_DESTROY_SRQ                   = 0x701,
        MLX5_CMD_OP_QUERY_SRQ                     = 0x702,
        MLX5_CMD_OP_ARM_RQ                        = 0x703,
-       MLX5_CMD_OP_RESIZE_SRQ                    = 0x704,
+       MLX5_CMD_OP_CREATE_XRC_SRQ                = 0x705,
+       MLX5_CMD_OP_DESTROY_XRC_SRQ               = 0x706,
+       MLX5_CMD_OP_QUERY_XRC_SRQ                 = 0x707,
+       MLX5_CMD_OP_ARM_XRC_SRQ                   = 0x708,
        MLX5_CMD_OP_CREATE_DCT                    = 0x710,
        MLX5_CMD_OP_DESTROY_DCT                   = 0x711,
        MLX5_CMD_OP_DRAIN_DCT                     = 0x712,
@@ -85,8 +124,12 @@ enum {
        MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT      = 0x753,
        MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT       = 0x754,
        MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT      = 0x755,
-       MLX5_CMD_OP_QUERY_RCOE_ADDRESS            = 0x760,
+       MLX5_CMD_OP_QUERY_ROCE_ADDRESS            = 0x760,
        MLX5_CMD_OP_SET_ROCE_ADDRESS              = 0x761,
+       MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT       = 0x762,
+       MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT      = 0x763,
+       MLX5_CMD_OP_QUERY_HCA_VPORT_GID           = 0x764,
+       MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY          = 0x765,
        MLX5_CMD_OP_QUERY_VPORT_COUNTER           = 0x770,
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
@@ -98,7 +141,7 @@ enum {
        MLX5_CMD_OP_CONFIG_INT_MODERATION         = 0x804,
        MLX5_CMD_OP_ACCESS_REG                    = 0x805,
        MLX5_CMD_OP_ATTACH_TO_MCG                 = 0x806,
-       MLX5_CMD_OP_DETACH_FROM_MCG               = 0x807,
+       MLX5_CMD_OP_DETTACH_FROM_MCG              = 0x807,
        MLX5_CMD_OP_GET_DROPPED_PACKET_LOG        = 0x80a,
        MLX5_CMD_OP_MAD_IFC                       = 0x50d,
        MLX5_CMD_OP_QUERY_MAD_DEMUX               = 0x80b,
@@ -106,23 +149,22 @@ enum {
        MLX5_CMD_OP_NOP                           = 0x80d,
        MLX5_CMD_OP_ALLOC_XRCD                    = 0x80e,
        MLX5_CMD_OP_DEALLOC_XRCD                  = 0x80f,
-       MLX5_CMD_OP_SET_BURST_SIZE                = 0x812,
-       MLX5_CMD_OP_QUERY_BURST_SZIE              = 0x813,
-       MLX5_CMD_OP_ACTIVATE_TRACER               = 0x814,
-       MLX5_CMD_OP_DEACTIVATE_TRACER             = 0x815,
-       MLX5_CMD_OP_CREATE_SNIFFER_RULE           = 0x820,
-       MLX5_CMD_OP_DESTROY_SNIFFER_RULE          = 0x821,
-       MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x822,
-       MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x823,
-       MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x824,
+       MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN        = 0x816,
+       MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN      = 0x817,
+       MLX5_CMD_OP_QUERY_CONG_STATUS             = 0x822,
+       MLX5_CMD_OP_MODIFY_CONG_STATUS            = 0x823,
+       MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x824,
+       MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x825,
+       MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x826,
+       MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT           = 0x827,
+       MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT        = 0x828,
+       MLX5_CMD_OP_SET_L2_TABLE_ENTRY            = 0x829,
+       MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY          = 0x82a,
+       MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
        MLX5_CMD_OP_CREATE_TIR                    = 0x900,
        MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
        MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
        MLX5_CMD_OP_QUERY_TIR                     = 0x903,
-       MLX5_CMD_OP_CREATE_TIS                    = 0x912,
-       MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
-       MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
-       MLX5_CMD_OP_QUERY_TIS                     = 0x915,
        MLX5_CMD_OP_CREATE_SQ                     = 0x904,
        MLX5_CMD_OP_MODIFY_SQ                     = 0x905,
        MLX5_CMD_OP_DESTROY_SQ                    = 0x906,
@@ -135,9 +177,430 @@ enum {
        MLX5_CMD_OP_MODIFY_RMP                    = 0x90d,
        MLX5_CMD_OP_DESTROY_RMP                   = 0x90e,
        MLX5_CMD_OP_QUERY_RMP                     = 0x90f,
-       MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x910,
-       MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x911,
-       MLX5_CMD_OP_MAX                           = 0x911
+       MLX5_CMD_OP_CREATE_TIS                    = 0x912,
+       MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
+       MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
+       MLX5_CMD_OP_QUERY_TIS                     = 0x915,
+       MLX5_CMD_OP_CREATE_RQT                    = 0x916,
+       MLX5_CMD_OP_MODIFY_RQT                    = 0x917,
+       MLX5_CMD_OP_DESTROY_RQT                   = 0x918,
+       MLX5_CMD_OP_QUERY_RQT                     = 0x919,
+       MLX5_CMD_OP_CREATE_FLOW_TABLE             = 0x930,
+       MLX5_CMD_OP_DESTROY_FLOW_TABLE            = 0x931,
+       MLX5_CMD_OP_QUERY_FLOW_TABLE              = 0x932,
+       MLX5_CMD_OP_CREATE_FLOW_GROUP             = 0x933,
+       MLX5_CMD_OP_DESTROY_FLOW_GROUP            = 0x934,
+       MLX5_CMD_OP_QUERY_FLOW_GROUP              = 0x935,
+       MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
+       MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
+       MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+       u8         outer_dmac[0x1];
+       u8         outer_smac[0x1];
+       u8         outer_ether_type[0x1];
+       u8         reserved_0[0x1];
+       u8         outer_first_prio[0x1];
+       u8         outer_first_cfi[0x1];
+       u8         outer_first_vid[0x1];
+       u8         reserved_1[0x1];
+       u8         outer_second_prio[0x1];
+       u8         outer_second_cfi[0x1];
+       u8         outer_second_vid[0x1];
+       u8         reserved_2[0x1];
+       u8         outer_sip[0x1];
+       u8         outer_dip[0x1];
+       u8         outer_frag[0x1];
+       u8         outer_ip_protocol[0x1];
+       u8         outer_ip_ecn[0x1];
+       u8         outer_ip_dscp[0x1];
+       u8         outer_udp_sport[0x1];
+       u8         outer_udp_dport[0x1];
+       u8         outer_tcp_sport[0x1];
+       u8         outer_tcp_dport[0x1];
+       u8         outer_tcp_flags[0x1];
+       u8         outer_gre_protocol[0x1];
+       u8         outer_gre_key[0x1];
+       u8         outer_vxlan_vni[0x1];
+       u8         reserved_3[0x5];
+       u8         source_eswitch_port[0x1];
+
+       u8         inner_dmac[0x1];
+       u8         inner_smac[0x1];
+       u8         inner_ether_type[0x1];
+       u8         reserved_4[0x1];
+       u8         inner_first_prio[0x1];
+       u8         inner_first_cfi[0x1];
+       u8         inner_first_vid[0x1];
+       u8         reserved_5[0x1];
+       u8         inner_second_prio[0x1];
+       u8         inner_second_cfi[0x1];
+       u8         inner_second_vid[0x1];
+       u8         reserved_6[0x1];
+       u8         inner_sip[0x1];
+       u8         inner_dip[0x1];
+       u8         inner_frag[0x1];
+       u8         inner_ip_protocol[0x1];
+       u8         inner_ip_ecn[0x1];
+       u8         inner_ip_dscp[0x1];
+       u8         inner_udp_sport[0x1];
+       u8         inner_udp_dport[0x1];
+       u8         inner_tcp_sport[0x1];
+       u8         inner_tcp_dport[0x1];
+       u8         inner_tcp_flags[0x1];
+       u8         reserved_7[0x9];
+
+       u8         reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+       u8         ft_support[0x1];
+       u8         reserved_0[0x1f];
+
+       u8         reserved_1[0x2];
+       u8         log_max_ft_size[0x6];
+       u8         reserved_2[0x10];
+       u8         max_ft_level[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         reserved_4[0x18];
+       u8         log_max_ft_num[0x8];
+
+       u8         reserved_5[0x18];
+       u8         log_max_destination[0x8];
+
+       u8         reserved_6[0x18];
+       u8         log_max_flow[0x8];
+
+       u8         reserved_7[0x40];
+
+       struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+       struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+       u8         send[0x1];
+       u8         receive[0x1];
+       u8         write[0x1];
+       u8         read[0x1];
+       u8         reserved_0[0x1];
+       u8         srq_receive[0x1];
+       u8         reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+       u8         smac_47_16[0x20];
+
+       u8         smac_15_0[0x10];
+       u8         ethertype[0x10];
+
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         first_prio[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vid[0xc];
+
+       u8         ip_protocol[0x8];
+       u8         ip_dscp[0x6];
+       u8         ip_ecn[0x2];
+       u8         vlan_tag[0x1];
+       u8         reserved_0[0x1];
+       u8         frag[0x1];
+       u8         reserved_1[0x4];
+       u8         tcp_flags[0x9];
+
+       u8         tcp_sport[0x10];
+       u8         tcp_dport[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         udp_sport[0x10];
+       u8         udp_dport[0x10];
+
+       u8         src_ip[4][0x20];
+
+       u8         dst_ip[4][0x20];
+};
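A note on reading these definitions: each u8 array dimension is a field width in bits, not bytes, and the layouts are accessed through the existing MLX5_SET()/MLX5_GET()/MLX5_ADDR_OF() macros rather than dereferenced directly. A small illustrative fragment, not from this patch (ETH_P_IP and IPPROTO_TCP are the standard uapi constants):

	u32 match[MLX5_ST_SZ_DW(fte_match_param)] = {0};
	void *outer = MLX5_ADDR_OF(fte_match_param, match, outer_headers);

	/* Match IPv4/TCP in the outer headers of the packet. */
	MLX5_SET(fte_match_set_lyr_2_4, outer, ethertype, ETH_P_IP);
	MLX5_SET(fte_match_set_lyr_2_4, outer, ip_protocol, IPPROTO_TCP);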
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+       u8         reserved_0[0x20];
+
+       u8         reserved_1[0x10];
+       u8         source_port[0x10];
+
+       u8         outer_second_prio[0x3];
+       u8         outer_second_cfi[0x1];
+       u8         outer_second_vid[0xc];
+       u8         inner_second_prio[0x3];
+       u8         inner_second_cfi[0x1];
+       u8         inner_second_vid[0xc];
+
+       u8         outer_second_vlan_tag[0x1];
+       u8         inner_second_vlan_tag[0x1];
+       u8         reserved_2[0xe];
+       u8         gre_protocol[0x10];
+
+       u8         gre_key_h[0x18];
+       u8         gre_key_l[0x8];
+
+       u8         vxlan_vni[0x18];
+       u8         reserved_3[0x8];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0xc];
+       u8         outer_ipv6_flow_label[0x14];
+
+       u8         reserved_6[0xc];
+       u8         inner_ipv6_flow_label[0x14];
+
+       u8         reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+       u8         pa_h[0x20];
+
+       u8         pa_l[0x14];
+       u8         reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+       u8         hi[0x20];
+
+       u8         lo[0x20];
+};
+
+enum {
+       MLX5_ADS_STAT_RATE_NO_LIMIT  = 0x0,
+       MLX5_ADS_STAT_RATE_2_5GBPS   = 0x7,
+       MLX5_ADS_STAT_RATE_10GBPS    = 0x8,
+       MLX5_ADS_STAT_RATE_30GBPS    = 0x9,
+       MLX5_ADS_STAT_RATE_5GBPS     = 0xa,
+       MLX5_ADS_STAT_RATE_20GBPS    = 0xb,
+       MLX5_ADS_STAT_RATE_40GBPS    = 0xc,
+       MLX5_ADS_STAT_RATE_60GBPS    = 0xd,
+       MLX5_ADS_STAT_RATE_80GBPS    = 0xe,
+       MLX5_ADS_STAT_RATE_120GBPS   = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+       u8         fl[0x1];
+       u8         free_ar[0x1];
+       u8         reserved_0[0xe];
+       u8         pkey_index[0x10];
+
+       u8         reserved_1[0x8];
+       u8         grh[0x1];
+       u8         mlid[0x7];
+       u8         rlid[0x10];
+
+       u8         ack_timeout[0x5];
+       u8         reserved_2[0x3];
+       u8         src_addr_index[0x8];
+       u8         reserved_3[0x4];
+       u8         stat_rate[0x4];
+       u8         hop_limit[0x8];
+
+       u8         reserved_4[0x4];
+       u8         tclass[0x8];
+       u8         flow_label[0x14];
+
+       u8         rgid_rip[16][0x8];
+
+       u8         reserved_5[0x4];
+       u8         f_dscp[0x1];
+       u8         f_ecn[0x1];
+       u8         reserved_6[0x1];
+       u8         f_eth_prio[0x1];
+       u8         ecn[0x2];
+       u8         dscp[0x6];
+       u8         udp_sport[0x10];
+
+       u8         dei_cfi[0x1];
+       u8         eth_prio[0x3];
+       u8         sl[0x4];
+       u8         port[0x8];
+       u8         rmac_47_32[0x10];
+
+       u8         rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+       u8         reserved_0[0x200];
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+       u8         reserved_1[0x200];
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+       u8         reserved_2[0x200];
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+       u8         reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+       u8         csum_cap[0x1];
+       u8         vlan_cap[0x1];
+       u8         lro_cap[0x1];
+       u8         lro_psh_flag[0x1];
+       u8         lro_time_stamp[0x1];
+       u8         reserved_0[0x6];
+       u8         max_lso_cap[0x5];
+       u8         reserved_1[0x4];
+       u8         rss_ind_tbl_cap[0x4];
+       u8         reserved_2[0x3];
+       u8         tunnel_lso_const_out_ip_id[0x1];
+       u8         reserved_3[0x2];
+       u8         tunnel_statless_gre[0x1];
+       u8         tunnel_stateless_vxlan[0x1];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x10];
+       u8         lro_min_mss_size[0x10];
+
+       u8         reserved_6[0x120];
+
+       u8         lro_timer_supported_periods[4][0x20];
+
+       u8         reserved_7[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+       u8         roce_apm[0x1];
+       u8         reserved_0[0x1f];
+
+       u8         reserved_1[0x60];
+
+       u8         reserved_2[0xc];
+       u8         l3_type[0x4];
+       u8         reserved_3[0x8];
+       u8         roce_version[0x8];
+
+       u8         reserved_4[0x10];
+       u8         r_roce_dest_udp_port[0x10];
+
+       u8         r_roce_max_src_udp_port[0x10];
+       u8         r_roce_min_src_udp_port[0x10];
+
+       u8         reserved_5[0x10];
+       u8         roce_address_table_size[0x10];
+
+       u8         reserved_6[0x700];
+};
+
+enum {
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE     = 0x0,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES    = 0x2,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES    = 0x4,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES    = 0x8,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES   = 0x10,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES   = 0x20,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES   = 0x40,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES  = 0x80,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES  = 0x100,
+};
+
+enum {
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE     = 0x1,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES    = 0x2,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES    = 0x4,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES    = 0x8,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES   = 0x10,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES   = 0x20,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES   = 0x40,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES  = 0x80,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES  = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+       u8         reserved_0[0x40];
+
+       u8         atomic_req_endianness[0x1];
+       u8         reserved_1[0x1f];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x10];
+       u8         atomic_operations[0x10];
+
+       u8         reserved_4[0x10];
+       u8         atomic_size_qp[0x10];
+
+       u8         reserved_5[0x10];
+       u8         atomic_size_dc[0x10];
+
+       u8         reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+       u8         reserved_0[0x40];
+
+       u8         sig[0x1];
+       u8         reserved_1[0x1f];
+
+       u8         reserved_2[0x20];
+
+       struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+       struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+       struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+       u8         reserved_3[0x720];
+};
+
+enum {
+       MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
+       MLX5_WQ_TYPE_CYCLIC       = 0x1,
+       MLX5_WQ_TYPE_STRQ         = 0x2,
+};
+
+enum {
+       MLX5_WQ_END_PAD_MODE_NONE   = 0x0,
+       MLX5_WQ_END_PAD_MODE_ALIGN  = 0x1,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES    = 0x0,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES   = 0x1,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES   = 0x2,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES   = 0x3,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES  = 0x4,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES  = 0x0,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES  = 0x1,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES  = 0x2,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES   = 0x3,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES   = 0x4,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES   = 0x5,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_PORT_TYPE_IB        = 0x0,
+       MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET  = 0x1,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED       = 0x0,
+       MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE  = 0x1,
+       MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED        = 0x3,
+};
+
+enum {
+       MLX5_CAP_PORT_TYPE_IB  = 0x0,
+       MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_1[0xb];
        u8         log_max_qp[0x5];
 
-       u8         log_max_strq_sz[0x8];
-       u8         reserved_2[0x3];
-       u8         log_max_srqs[0x5];
+       u8         reserved_2[0xb];
+       u8         log_max_srq[0x5];
        u8         reserved_3[0x10];
 
        u8         reserved_4[0x8];
@@ -185,165 +647,6123 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         pad_cap[0x1];
        u8         cc_query_allowed[0x1];
        u8         cc_modify_allowed[0x1];
-       u8         reserved_15[0x1d];
+       u8         reserved_15[0xd];
+       u8         gid_table_size[0x10];
 
-       u8         reserved_16[0x6];
+       u8         out_of_seq_cnt[0x1];
+       u8         vport_counters[0x1];
+       u8         reserved_16[0x4];
        u8         max_qp_cnt[0xa];
        u8         pkey_table_size[0x10];
 
-       u8         eswitch_owner[0x1];
-       u8         reserved_17[0xa];
+       u8         vport_group_manager[0x1];
+       u8         vhca_group_manager[0x1];
+       u8         ib_virt[0x1];
+       u8         eth_virt[0x1];
+       u8         reserved_17[0x1];
+       u8         ets[0x1];
+       u8         nic_flow_table[0x1];
+       u8         reserved_18[0x4];
        u8         local_ca_ack_delay[0x5];
-       u8         reserved_18[0x8];
+       u8         reserved_19[0x6];
+       u8         port_type[0x2];
        u8         num_ports[0x8];
 
-       u8         reserved_19[0x3];
+       u8         reserved_20[0x3];
        u8         log_max_msg[0x5];
-       u8         reserved_20[0x18];
+       u8         reserved_21[0x18];
 
        u8         stat_rate_support[0x10];
-       u8         reserved_21[0x10];
+       u8         reserved_22[0xc];
+       u8         cqe_version[0x4];
 
-       u8         reserved_22[0x10];
+       u8         compact_address_vector[0x1];
+       u8         reserved_23[0xe];
+       u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
-       u8         reserved_23[0x1];
+       u8         reserved_24[0x1];
        u8         wq_signature[0x1];
        u8         sctr_data_cqe[0x1];
-       u8         reserved_24[0x1];
+       u8         reserved_25[0x1];
        u8         sho[0x1];
        u8         tph[0x1];
        u8         rf[0x1];
-       u8         dc[0x1];
-       u8         reserved_25[0x2];
+       u8         dct[0x1];
+       u8         reserved_26[0x1];
+       u8         eth_net_offloads[0x1];
        u8         roce[0x1];
        u8         atomic[0x1];
-       u8         rsz_srq[0x1];
+       u8         reserved_27[0x1];
 
        u8         cq_oi[0x1];
        u8         cq_resize[0x1];
        u8         cq_moderation[0x1];
-       u8         sniffer_rule_flow[0x1];
-       u8         sniffer_rule_vport[0x1];
-       u8         sniffer_rule_phy[0x1];
-       u8         reserved_26[0x1];
+       u8         reserved_28[0x3];
+       u8         cq_eq_remap[0x1];
        u8         pg[0x1];
        u8         block_lb_mc[0x1];
-       u8         reserved_27[0x3];
+       u8         reserved_29[0x1];
+       u8         scqe_break_moderation[0x1];
+       u8         reserved_30[0x1];
        u8         cd[0x1];
-       u8         reserved_28[0x1];
+       u8         reserved_31[0x1];
        u8         apm[0x1];
-       u8         reserved_29[0x7];
+       u8         reserved_32[0x7];
        u8         qkv[0x1];
        u8         pkv[0x1];
-       u8         reserved_30[0x4];
+       u8         reserved_33[0x4];
        u8         xrc[0x1];
        u8         ud[0x1];
        u8         uc[0x1];
        u8         rc[0x1];
 
-       u8         reserved_31[0xa];
+       u8         reserved_34[0xa];
        u8         uar_sz[0x6];
-       u8         reserved_32[0x8];
+       u8         reserved_35[0x8];
        u8         log_pg_sz[0x8];
 
        u8         bf[0x1];
-       u8         reserved_33[0xa];
+       u8         reserved_36[0x1];
+       u8         pad_tx_eth_packet[0x1];
+       u8         reserved_37[0x8];
        u8         log_bf_reg_size[0x5];
-       u8         reserved_34[0x10];
+       u8         reserved_38[0x10];
 
-       u8         reserved_35[0x10];
+       u8         reserved_39[0x10];
        u8         max_wqe_sz_sq[0x10];
 
-       u8         reserved_36[0x10];
+       u8         reserved_40[0x10];
        u8         max_wqe_sz_rq[0x10];
 
-       u8         reserved_37[0x10];
+       u8         reserved_41[0x10];
        u8         max_wqe_sz_sq_dc[0x10];
 
-       u8         reserved_38[0x7];
+       u8         reserved_42[0x7];
        u8         max_qp_mcg[0x19];
 
-       u8         reserved_39[0x18];
+       u8         reserved_43[0x18];
        u8         log_max_mcg[0x8];
 
-       u8         reserved_40[0xb];
+       u8         reserved_44[0x3];
+       u8         log_max_transport_domain[0x5];
+       u8         reserved_45[0x3];
        u8         log_max_pd[0x5];
-       u8         reserved_41[0xb];
+       u8         reserved_46[0xb];
        u8         log_max_xrcd[0x5];
 
-       u8         reserved_42[0x20];
+       u8         reserved_47[0x20];
 
-       u8         reserved_43[0x3];
+       u8         reserved_48[0x3];
        u8         log_max_rq[0x5];
-       u8         reserved_44[0x3];
+       u8         reserved_49[0x3];
        u8         log_max_sq[0x5];
-       u8         reserved_45[0x3];
+       u8         reserved_50[0x3];
        u8         log_max_tir[0x5];
-       u8         reserved_46[0x3];
+       u8         reserved_51[0x3];
        u8         log_max_tis[0x5];
 
-       u8         reserved_47[0x13];
-       u8         log_max_rq_per_tir[0x5];
-       u8         reserved_48[0x3];
+       u8         basic_cyclic_rcv_wqe[0x1];
+       u8         reserved_52[0x2];
+       u8         log_max_rmp[0x5];
+       u8         reserved_53[0x3];
+       u8         log_max_rqt[0x5];
+       u8         reserved_54[0x3];
+       u8         log_max_rqt_size[0x5];
+       u8         reserved_55[0x3];
        u8         log_max_tis_per_sq[0x5];
 
-       u8         reserved_49[0xe0];
+       u8         reserved_56[0x3];
+       u8         log_max_stride_sz_rq[0x5];
+       u8         reserved_57[0x3];
+       u8         log_min_stride_sz_rq[0x5];
+       u8         reserved_58[0x3];
+       u8         log_max_stride_sz_sq[0x5];
+       u8         reserved_59[0x3];
+       u8         log_min_stride_sz_sq[0x5];
+
+       u8         reserved_60[0x1b];
+       u8         log_max_wq_sz[0x5];
+
+       u8         reserved_61[0xa0];
 
-       u8         reserved_50[0x10];
+       u8         reserved_62[0x3];
+       u8         log_max_l2_table[0x5];
+       u8         reserved_63[0x8];
        u8         log_uar_page_sz[0x10];
 
-       u8         reserved_51[0x100];
+       u8         reserved_64[0x100];
 
-       u8         reserved_52[0x1f];
+       u8         reserved_65[0x1f];
        u8         cqe_zip[0x1];
 
        u8         cqe_zip_timeout[0x10];
        u8         cqe_zip_max_num[0x10];
 
-       u8         reserved_53[0x220];
+       u8         reserved_66[0x220];
 };
 
-struct mlx5_ifc_set_hca_cap_in_bits {
-       u8         opcode[0x10];
-       u8         reserved_0[0x10];
+enum {
+       MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_  = 0x1,
+       MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR          = 0x2,
+};
 
-       u8         reserved_1[0x10];
-       u8         op_mod[0x10];
+struct mlx5_ifc_dest_format_struct_bits {
+       u8         destination_type[0x8];
+       u8         destination_id[0x18];
 
-       u8         reserved_2[0x40];
+       u8         reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+       struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+       struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+       struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
 
-       struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+       u8         reserved_0[0xa00];
 };
 
-struct mlx5_ifc_query_hca_cap_in_bits {
-       u8         opcode[0x10];
-       u8         reserved_0[0x10];
+enum {
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
+};
 
-       u8         reserved_1[0x10];
-       u8         op_mod[0x10];
+struct mlx5_ifc_rx_hash_field_select_bits {
+       u8         l3_prot_type[0x1];
+       u8         l4_prot_type[0x1];
+       u8         selected_fields[0x1e];
+};
 
-       u8         reserved_2[0x40];
+enum {
+       MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST  = 0x0,
+       MLX5_WQ_WQ_TYPE_WQ_CYCLIC       = 0x1,
 };
 
-struct mlx5_ifc_query_hca_cap_out_bits {
-       u8         status[0x8];
+enum {
+       MLX5_WQ_END_PADDING_MODE_END_PAD_NONE   = 0x0,
+       MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN  = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+       u8         wq_type[0x4];
+       u8         wq_signature[0x1];
+       u8         end_padding_mode[0x2];
+       u8         cd_slave[0x1];
        u8         reserved_0[0x18];
 
-       u8         syndrome[0x20];
+       u8         hds_skip_first_sge[0x1];
+       u8         log2_hds_buf_size[0x3];
+       u8         reserved_1[0x7];
+       u8         page_offset[0x5];
+       u8         lwm[0x10];
 
-       u8         reserved_1[0x40];
+       u8         reserved_2[0x8];
+       u8         pd[0x18];
+
+       u8         reserved_3[0x8];
+       u8         uar_page[0x18];
+
+       u8         dbr_addr[0x40];
+
+       u8         hw_counter[0x20];
+
+       u8         sw_counter[0x20];
+
+       u8         reserved_4[0xc];
+       u8         log_wq_stride[0x4];
+       u8         reserved_5[0x3];
+       u8         log_wq_pg_sz[0x5];
+       u8         reserved_6[0x3];
+       u8         log_wq_sz[0x5];
+
+       u8         reserved_7[0x4e0];
 
-       u8         capability_struct[256][0x8];
+       struct mlx5_ifc_cmd_pas_bits pas[0];
 };
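The trailing pas[0] member is a flexible array of physical-address entries, so a buffer carrying a wq layout must be sized with the entry count folded in. An illustrative sizing, not from this patch (npas is an assumed variable):

	/* npas physical-address entries appended after the fixed wq layout */
	int inlen = MLX5_ST_SZ_BYTES(wq) + npas * MLX5_ST_SZ_BYTES(cmd_pas);
	void *wqc = kzalloc(inlen, GFP_KERNEL);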
 
-struct mlx5_ifc_set_hca_cap_out_bits {
-       u8         status[0x8];
-       u8         reserved_0[0x18];
+struct mlx5_ifc_rq_num_bits {
+       u8         reserved_0[0x8];
+       u8         rq_num[0x18];
+};
 
-       u8         syndrome[0x20];
+struct mlx5_ifc_mac_address_layout_bits {
+       u8         reserved_0[0x10];
+       u8         mac_addr_47_32[0x10];
 
-       u8         reserved_1[0x40];
+       u8         mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+       u8         reserved_0[0xa0];
+
+       u8         min_time_between_cnps[0x20];
+
+       u8         reserved_1[0x12];
+       u8         cnp_dscp[0x6];
+       u8         reserved_2[0x5];
+       u8         cnp_802p_prio[0x3];
+
+       u8         reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+       u8         reserved_0[0x60];
+
+       u8         reserved_1[0x4];
+       u8         clamp_tgt_rate[0x1];
+       u8         reserved_2[0x3];
+       u8         clamp_tgt_rate_after_time_inc[0x1];
+       u8         reserved_3[0x17];
+
+       u8         reserved_4[0x20];
+
+       u8         rpg_time_reset[0x20];
+
+       u8         rpg_byte_reset[0x20];
+
+       u8         rpg_threshold[0x20];
+
+       u8         rpg_max_rate[0x20];
+
+       u8         rpg_ai_rate[0x20];
+
+       u8         rpg_hai_rate[0x20];
+
+       u8         rpg_gd[0x20];
+
+       u8         rpg_min_dec_fac[0x20];
+
+       u8         rpg_min_rate[0x20];
+
+       u8         reserved_5[0xe0];
+
+       u8         rate_to_set_on_first_cnp[0x20];
+
+       u8         dce_tcp_g[0x20];
+
+       u8         dce_tcp_rtt[0x20];
+
+       u8         rate_reduce_monitor_period[0x20];
+
+       u8         reserved_6[0x20];
+
+       u8         initial_alpha_value[0x20];
+
+       u8         reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+       u8         reserved_0[0x80];
+
+       u8         rppp_max_rps[0x20];
+
+       u8         rpg_time_reset[0x20];
+
+       u8         rpg_byte_reset[0x20];
+
+       u8         rpg_threshold[0x20];
+
+       u8         rpg_max_rate[0x20];
+
+       u8         rpg_ai_rate[0x20];
+
+       u8         rpg_hai_rate[0x20];
+
+       u8         rpg_gd[0x20];
+
+       u8         rpg_min_dec_fac[0x20];
+
+       u8         rpg_min_rate[0x20];
+
+       u8         reserved_1[0x640];
+};
+
+enum {
+       MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE    = 0x1,
+       MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET    = 0x2,
+       MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE  = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+       u8         resize_field_select[0x20];
+};
+
+enum {
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD     = 0x1,
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT  = 0x2,
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI            = 0x4,
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN         = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+       u8         modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+       u8         field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+       u8         field_select_r_roce_rp[0x20];
+};
+
+enum {
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS     = 0x4,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET   = 0x8,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET   = 0x10,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD    = 0x20,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE     = 0x40,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE      = 0x80,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE     = 0x100,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD           = 0x200,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC  = 0x400,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE     = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+       u8         field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+       u8         time_since_last_clear_high[0x20];
+
+       u8         time_since_last_clear_low[0x20];
+
+       u8         symbol_errors_high[0x20];
+
+       u8         symbol_errors_low[0x20];
+
+       u8         sync_headers_errors_high[0x20];
+
+       u8         sync_headers_errors_low[0x20];
+
+       u8         edpl_bip_errors_lane0_high[0x20];
+
+       u8         edpl_bip_errors_lane0_low[0x20];
+
+       u8         edpl_bip_errors_lane1_high[0x20];
+
+       u8         edpl_bip_errors_lane1_low[0x20];
+
+       u8         edpl_bip_errors_lane2_high[0x20];
+
+       u8         edpl_bip_errors_lane2_low[0x20];
+
+       u8         edpl_bip_errors_lane3_high[0x20];
+
+       u8         edpl_bip_errors_lane3_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane0_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane0_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane1_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane1_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane2_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane2_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane3_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane3_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+       u8         rs_fec_corrected_blocks_high[0x20];
+
+       u8         rs_fec_corrected_blocks_low[0x20];
+
+       u8         rs_fec_uncorrectable_blocks_high[0x20];
+
+       u8         rs_fec_uncorrectable_blocks_low[0x20];
+
+       u8         rs_fec_no_errors_blocks_high[0x20];
+
+       u8         rs_fec_no_errors_blocks_low[0x20];
+
+       u8         rs_fec_single_error_blocks_high[0x20];
+
+       u8         rs_fec_single_error_blocks_low[0x20];
+
+       u8         rs_fec_corrected_symbols_total_high[0x20];
+
+       u8         rs_fec_corrected_symbols_total_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane0_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane0_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane1_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane1_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane2_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane2_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane3_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane3_low[0x20];
+
+       u8         link_down_events[0x20];
+
+       u8         successful_recovery_events[0x20];
+
+       u8         reserved_0[0x180];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+       u8         transmit_queue_high[0x20];
+
+       u8         transmit_queue_low[0x20];
+
+       u8         reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+       u8         rx_octets_high[0x20];
+
+       u8         rx_octets_low[0x20];
+
+       u8         reserved_0[0xc0];
+
+       u8         rx_frames_high[0x20];
+
+       u8         rx_frames_low[0x20];
+
+       u8         tx_octets_high[0x20];
+
+       u8         tx_octets_low[0x20];
+
+       u8         reserved_1[0xc0];
+
+       u8         tx_frames_high[0x20];
+
+       u8         tx_frames_low[0x20];
+
+       u8         rx_pause_high[0x20];
+
+       u8         rx_pause_low[0x20];
+
+       u8         rx_pause_duration_high[0x20];
+
+       u8         rx_pause_duration_low[0x20];
+
+       u8         tx_pause_high[0x20];
+
+       u8         tx_pause_low[0x20];
+
+       u8         tx_pause_duration_high[0x20];
+
+       u8         tx_pause_duration_low[0x20];
+
+       u8         rx_pause_transition_high[0x20];
+
+       u8         rx_pause_transition_low[0x20];
+
+       u8         reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+       u8         port_transmit_wait_high[0x20];
+
+       u8         port_transmit_wait_low[0x20];
+
+       u8         reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+       u8         dot3stats_alignment_errors_high[0x20];
+
+       u8         dot3stats_alignment_errors_low[0x20];
+
+       u8         dot3stats_fcs_errors_high[0x20];
+
+       u8         dot3stats_fcs_errors_low[0x20];
+
+       u8         dot3stats_single_collision_frames_high[0x20];
+
+       u8         dot3stats_single_collision_frames_low[0x20];
+
+       u8         dot3stats_multiple_collision_frames_high[0x20];
+
+       u8         dot3stats_multiple_collision_frames_low[0x20];
+
+       u8         dot3stats_sqe_test_errors_high[0x20];
+
+       u8         dot3stats_sqe_test_errors_low[0x20];
+
+       u8         dot3stats_deferred_transmissions_high[0x20];
+
+       u8         dot3stats_deferred_transmissions_low[0x20];
+
+       u8         dot3stats_late_collisions_high[0x20];
+
+       u8         dot3stats_late_collisions_low[0x20];
+
+       u8         dot3stats_excessive_collisions_high[0x20];
+
+       u8         dot3stats_excessive_collisions_low[0x20];
+
+       u8         dot3stats_internal_mac_transmit_errors_high[0x20];
+
+       u8         dot3stats_internal_mac_transmit_errors_low[0x20];
+
+       u8         dot3stats_carrier_sense_errors_high[0x20];
+
+       u8         dot3stats_carrier_sense_errors_low[0x20];
+
+       u8         dot3stats_frame_too_longs_high[0x20];
+
+       u8         dot3stats_frame_too_longs_low[0x20];
+
+       u8         dot3stats_internal_mac_receive_errors_high[0x20];
+
+       u8         dot3stats_internal_mac_receive_errors_low[0x20];
+
+       u8         dot3stats_symbol_errors_high[0x20];
+
+       u8         dot3stats_symbol_errors_low[0x20];
+
+       u8         dot3control_in_unknown_opcodes_high[0x20];
+
+       u8         dot3control_in_unknown_opcodes_low[0x20];
+
+       u8         dot3in_pause_frames_high[0x20];
+
+       u8         dot3in_pause_frames_low[0x20];
+
+       u8         dot3out_pause_frames_high[0x20];
+
+       u8         dot3out_pause_frames_low[0x20];
+
+       u8         reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+       u8         ether_stats_drop_events_high[0x20];
+
+       u8         ether_stats_drop_events_low[0x20];
+
+       u8         ether_stats_octets_high[0x20];
+
+       u8         ether_stats_octets_low[0x20];
+
+       u8         ether_stats_pkts_high[0x20];
+
+       u8         ether_stats_pkts_low[0x20];
+
+       u8         ether_stats_broadcast_pkts_high[0x20];
+
+       u8         ether_stats_broadcast_pkts_low[0x20];
+
+       u8         ether_stats_multicast_pkts_high[0x20];
+
+       u8         ether_stats_multicast_pkts_low[0x20];
+
+       u8         ether_stats_crc_align_errors_high[0x20];
+
+       u8         ether_stats_crc_align_errors_low[0x20];
+
+       u8         ether_stats_undersize_pkts_high[0x20];
+
+       u8         ether_stats_undersize_pkts_low[0x20];
+
+       u8         ether_stats_oversize_pkts_high[0x20];
+
+       u8         ether_stats_oversize_pkts_low[0x20];
+
+       u8         ether_stats_fragments_high[0x20];
+
+       u8         ether_stats_fragments_low[0x20];
+
+       u8         ether_stats_jabbers_high[0x20];
+
+       u8         ether_stats_jabbers_low[0x20];
+
+       u8         ether_stats_collisions_high[0x20];
+
+       u8         ether_stats_collisions_low[0x20];
+
+       u8         ether_stats_pkts64octets_high[0x20];
+
+       u8         ether_stats_pkts64octets_low[0x20];
+
+       u8         ether_stats_pkts65to127octets_high[0x20];
+
+       u8         ether_stats_pkts65to127octets_low[0x20];
+
+       u8         ether_stats_pkts128to255octets_high[0x20];
+
+       u8         ether_stats_pkts128to255octets_low[0x20];
+
+       u8         ether_stats_pkts256to511octets_high[0x20];
+
+       u8         ether_stats_pkts256to511octets_low[0x20];
+
+       u8         ether_stats_pkts512to1023octets_high[0x20];
+
+       u8         ether_stats_pkts512to1023octets_low[0x20];
+
+       u8         ether_stats_pkts1024to1518octets_high[0x20];
+
+       u8         ether_stats_pkts1024to1518octets_low[0x20];
+
+       u8         ether_stats_pkts1519to2047octets_high[0x20];
+
+       u8         ether_stats_pkts1519to2047octets_low[0x20];
+
+       u8         ether_stats_pkts2048to4095octets_high[0x20];
+
+       u8         ether_stats_pkts2048to4095octets_low[0x20];
+
+       u8         ether_stats_pkts4096to8191octets_high[0x20];
+
+       u8         ether_stats_pkts4096to8191octets_low[0x20];
+
+       u8         ether_stats_pkts8192to10239octets_high[0x20];
+
+       u8         ether_stats_pkts8192to10239octets_low[0x20];
+
+       u8         reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+       u8         if_in_octets_high[0x20];
+
+       u8         if_in_octets_low[0x20];
+
+       u8         if_in_ucast_pkts_high[0x20];
+
+       u8         if_in_ucast_pkts_low[0x20];
+
+       u8         if_in_discards_high[0x20];
+
+       u8         if_in_discards_low[0x20];
+
+       u8         if_in_errors_high[0x20];
+
+       u8         if_in_errors_low[0x20];
+
+       u8         if_in_unknown_protos_high[0x20];
+
+       u8         if_in_unknown_protos_low[0x20];
+
+       u8         if_out_octets_high[0x20];
+
+       u8         if_out_octets_low[0x20];
+
+       u8         if_out_ucast_pkts_high[0x20];
+
+       u8         if_out_ucast_pkts_low[0x20];
+
+       u8         if_out_discards_high[0x20];
+
+       u8         if_out_discards_low[0x20];
+
+       u8         if_out_errors_high[0x20];
+
+       u8         if_out_errors_low[0x20];
+
+       u8         if_in_multicast_pkts_high[0x20];
+
+       u8         if_in_multicast_pkts_low[0x20];
+
+       u8         if_in_broadcast_pkts_high[0x20];
+
+       u8         if_in_broadcast_pkts_low[0x20];
+
+       u8         if_out_multicast_pkts_high[0x20];
+
+       u8         if_out_multicast_pkts_low[0x20];
+
+       u8         if_out_broadcast_pkts_high[0x20];
+
+       u8         if_out_broadcast_pkts_low[0x20];
+
+       u8         reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+       u8         a_frames_transmitted_ok_high[0x20];
+
+       u8         a_frames_transmitted_ok_low[0x20];
+
+       u8         a_frames_received_ok_high[0x20];
+
+       u8         a_frames_received_ok_low[0x20];
+
+       u8         a_frame_check_sequence_errors_high[0x20];
+
+       u8         a_frame_check_sequence_errors_low[0x20];
+
+       u8         a_alignment_errors_high[0x20];
+
+       u8         a_alignment_errors_low[0x20];
+
+       u8         a_octets_transmitted_ok_high[0x20];
+
+       u8         a_octets_transmitted_ok_low[0x20];
+
+       u8         a_octets_received_ok_high[0x20];
+
+       u8         a_octets_received_ok_low[0x20];
+
+       u8         a_multicast_frames_xmitted_ok_high[0x20];
+
+       u8         a_multicast_frames_xmitted_ok_low[0x20];
+
+       u8         a_broadcast_frames_xmitted_ok_high[0x20];
+
+       u8         a_broadcast_frames_xmitted_ok_low[0x20];
+
+       u8         a_multicast_frames_received_ok_high[0x20];
+
+       u8         a_multicast_frames_received_ok_low[0x20];
+
+       u8         a_broadcast_frames_received_ok_high[0x20];
+
+       u8         a_broadcast_frames_received_ok_low[0x20];
+
+       u8         a_in_range_length_errors_high[0x20];
+
+       u8         a_in_range_length_errors_low[0x20];
+
+       u8         a_out_of_range_length_field_high[0x20];
+
+       u8         a_out_of_range_length_field_low[0x20];
+
+       u8         a_frame_too_long_errors_high[0x20];
+
+       u8         a_frame_too_long_errors_low[0x20];
+
+       u8         a_symbol_error_during_carrier_high[0x20];
+
+       u8         a_symbol_error_during_carrier_low[0x20];
+
+       u8         a_mac_control_frames_transmitted_high[0x20];
+
+       u8         a_mac_control_frames_transmitted_low[0x20];
+
+       u8         a_mac_control_frames_received_high[0x20];
+
+       u8         a_mac_control_frames_received_low[0x20];
+
+       u8         a_unsupported_opcodes_received_high[0x20];
+
+       u8         a_unsupported_opcodes_received_low[0x20];
+
+       u8         a_pause_mac_ctrl_frames_received_high[0x20];
+
+       u8         a_pause_mac_ctrl_frames_received_low[0x20];
+
+       u8         a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+       u8         a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+       u8         reserved_0[0x300];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+       u8         command_completion_vector[0x20];
+
+       u8         reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+       u8         reserved_0[0x18];
+       u8         port_num[0x1];
+       u8         reserved_1[0x3];
+       u8         vl[0x4];
+
+       u8         reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+       u8         event_subtype[0x8];
+       u8         reserved_0[0x8];
+       u8         congestion_level[0x8];
+       u8         reserved_1[0x8];
+
+       u8         reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+       u8         reserved_0[0x60];
+
+       u8         gpio_event_hi[0x20];
+
+       u8         gpio_event_lo[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+       u8         reserved_0[0x40];
+
+       u8         port_num[0x4];
+       u8         reserved_1[0x1c];
+
+       u8         reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+       u8         reserved_0[0xe0];
+};
+
+enum {
+       MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN                 = 0x1,
+       MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR  = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+       u8         reserved_0[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_1[0x20];
+
+       u8         reserved_2[0x18];
+       u8         syndrome[0x8];
+
+       u8         reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+       u8         bytes_committed[0x20];
+
+       u8         r_key[0x20];
+
+       u8         reserved_0[0x10];
+       u8         packet_len[0x10];
+
+       u8         rdma_op_len[0x20];
+
+       u8         rdma_va[0x40];
+
+       u8         reserved_1[0x5];
+       u8         rdma[0x1];
+       u8         write[0x1];
+       u8         requestor[0x1];
+       u8         qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+       u8         bytes_committed[0x20];
+
+       u8         reserved_0[0x10];
+       u8         wqe_index[0x10];
+
+       u8         reserved_1[0x10];
+       u8         len[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x5];
+       u8         rdma[0x1];
+       u8         write_read[0x1];
+       u8         requestor[0x1];
+       u8         qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+       u8         reserved_0[0xa0];
+
+       u8         type[0x8];
+       u8         reserved_1[0x18];
+
+       u8         reserved_2[0x8];
+       u8         qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+       u8         reserved_0[0xc0];
+
+       u8         reserved_1[0x8];
+       u8         dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+       u8         reserved_0[0xc0];
+
+       u8         reserved_1[0x8];
+       u8         cq_number[0x18];
+};
+
+enum {
+       MLX5_QPC_STATE_RST        = 0x0,
+       MLX5_QPC_STATE_INIT       = 0x1,
+       MLX5_QPC_STATE_RTR        = 0x2,
+       MLX5_QPC_STATE_RTS        = 0x3,
+       MLX5_QPC_STATE_SQER       = 0x4,
+       MLX5_QPC_STATE_ERR        = 0x6,
+       MLX5_QPC_STATE_SQD        = 0x7,
+       MLX5_QPC_STATE_SUSPENDED  = 0x9,
+};
+
+enum {
+       MLX5_QPC_ST_RC            = 0x0,
+       MLX5_QPC_ST_UC            = 0x1,
+       MLX5_QPC_ST_UD            = 0x2,
+       MLX5_QPC_ST_XRC           = 0x3,
+       MLX5_QPC_ST_DCI           = 0x5,
+       MLX5_QPC_ST_QP0           = 0x7,
+       MLX5_QPC_ST_QP1           = 0x8,
+       MLX5_QPC_ST_RAW_DATAGRAM  = 0x9,
+       MLX5_QPC_ST_REG_UMR       = 0xc,
+};
+
+enum {
+       MLX5_QPC_PM_STATE_ARMED     = 0x0,
+       MLX5_QPC_PM_STATE_REARM     = 0x1,
+       MLX5_QPC_PM_STATE_RESERVED  = 0x2,
+       MLX5_QPC_PM_STATE_MIGRATED  = 0x3,
+};
+
+enum {
+       MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS                = 0x0,
+       MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT  = 0x1,
+};
+
+enum {
+       MLX5_QPC_MTU_256_BYTES        = 0x1,
+       MLX5_QPC_MTU_512_BYTES        = 0x2,
+       MLX5_QPC_MTU_1K_BYTES         = 0x3,
+       MLX5_QPC_MTU_2K_BYTES         = 0x4,
+       MLX5_QPC_MTU_4K_BYTES         = 0x5,
+       MLX5_QPC_MTU_RAW_ETHERNET_QP  = 0x7,
+};
+
+enum {
+       MLX5_QPC_ATOMIC_MODE_IB_SPEC     = 0x1,
+       MLX5_QPC_ATOMIC_MODE_ONLY_8B     = 0x2,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_8B    = 0x3,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_16B   = 0x4,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_32B   = 0x5,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_64B   = 0x6,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_128B  = 0x7,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_256B  = 0x8,
+};
+
+enum {
+       MLX5_QPC_CS_REQ_DISABLE    = 0x0,
+       MLX5_QPC_CS_REQ_UP_TO_32B  = 0x11,
+       MLX5_QPC_CS_REQ_UP_TO_64B  = 0x22,
+};
+
+enum {
+       MLX5_QPC_CS_RES_DISABLE    = 0x0,
+       MLX5_QPC_CS_RES_UP_TO_32B  = 0x1,
+       MLX5_QPC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+       u8         state[0x4];
+       u8         reserved_0[0x4];
+       u8         st[0x8];
+       u8         reserved_1[0x3];
+       u8         pm_state[0x2];
+       u8         reserved_2[0x7];
+       u8         end_padding_mode[0x2];
+       u8         reserved_3[0x2];
+
+       u8         wq_signature[0x1];
+       u8         block_lb_mc[0x1];
+       u8         atomic_like_write_en[0x1];
+       u8         latency_sensitive[0x1];
+       u8         reserved_4[0x1];
+       u8         drain_sigerr[0x1];
+       u8         reserved_5[0x2];
+       u8         pd[0x18];
+
+       u8         mtu[0x3];
+       u8         log_msg_max[0x5];
+       u8         reserved_6[0x1];
+       u8         log_rq_size[0x4];
+       u8         log_rq_stride[0x3];
+       u8         no_sq[0x1];
+       u8         log_sq_size[0x4];
+       u8         reserved_7[0x6];
+       u8         rlky[0x1];
+       u8         reserved_8[0x4];
+
+       u8         counter_set_id[0x8];
+       u8         uar_page[0x18];
+
+       u8         reserved_9[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_10[0x3];
+       u8         log_page_size[0x5];
+       u8         remote_qpn[0x18];
+
+       struct mlx5_ifc_ads_bits primary_address_path;
+
+       struct mlx5_ifc_ads_bits secondary_address_path;
+
+       u8         log_ack_req_freq[0x4];
+       u8         reserved_11[0x4];
+       u8         log_sra_max[0x3];
+       u8         reserved_12[0x2];
+       u8         retry_count[0x3];
+       u8         rnr_retry[0x3];
+       u8         reserved_13[0x1];
+       u8         fre[0x1];
+       u8         cur_rnr_retry[0x3];
+       u8         cur_retry_count[0x3];
+       u8         reserved_14[0x5];
+
+       u8         reserved_15[0x20];
+
+       u8         reserved_16[0x8];
+       u8         next_send_psn[0x18];
+
+       u8         reserved_17[0x8];
+       u8         cqn_snd[0x18];
+
+       u8         reserved_18[0x40];
+
+       u8         reserved_19[0x8];
+       u8         last_acked_psn[0x18];
+
+       u8         reserved_20[0x8];
+       u8         ssn[0x18];
+
+       u8         reserved_21[0x8];
+       u8         log_rra_max[0x3];
+       u8         reserved_22[0x1];
+       u8         atomic_mode[0x4];
+       u8         rre[0x1];
+       u8         rwe[0x1];
+       u8         rae[0x1];
+       u8         reserved_23[0x1];
+       u8         page_offset[0x6];
+       u8         reserved_24[0x3];
+       u8         cd_slave_receive[0x1];
+       u8         cd_slave_send[0x1];
+       u8         cd_master[0x1];
+
+       u8         reserved_25[0x3];
+       u8         min_rnr_nak[0x5];
+       u8         next_rcv_psn[0x18];
+
+       u8         reserved_26[0x8];
+       u8         xrcd[0x18];
+
+       u8         reserved_27[0x8];
+       u8         cqn_rcv[0x18];
+
+       u8         dbr_addr[0x40];
+
+       u8         q_key[0x20];
+
+       u8         reserved_28[0x5];
+       u8         rq_type[0x3];
+       u8         srqn_rmpn[0x18];
+
+       u8         reserved_29[0x8];
+       u8         rmsn[0x18];
+
+       u8         hw_sq_wqebb_counter[0x10];
+       u8         sw_sq_wqebb_counter[0x10];
+
+       u8         hw_rq_counter[0x20];
+
+       u8         sw_rq_counter[0x20];
+
+       u8         reserved_30[0x20];
+
+       u8         reserved_31[0xf];
+       u8         cgs[0x1];
+       u8         cs_req[0x8];
+       u8         cs_res[0x8];
+
+       u8         dc_access_key[0x40];
+
+       u8         reserved_32[0xc0];
+};
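+
+/*
+ * Sketch (illustrative): the QPC above is embedded in the QP state commands
+ * (rst2init_qp, rtr2rts_qp, query_qp, ...) rather than sent on its own.
+ * Assuming MLX5_ADDR_OF() and MLX5_GET(), a queried context is read as:
+ *
+ *        void *qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
+ *        u8 mtu = MLX5_GET(qpc, qpc, mtu);  (one of the MLX5_QPC_MTU_ values)
+ */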
+
+struct mlx5_ifc_roce_addr_layout_bits {
+       u8         source_l3_address[16][0x8];
+
+       u8         reserved_0[0x3];
+       u8         vlan_valid[0x1];
+       u8         vlan_id[0xc];
+       u8         source_mac_47_32[0x10];
+
+       u8         source_mac_31_0[0x20];
+
+       u8         reserved_1[0x14];
+       u8         roce_l3_type[0x4];
+       u8         roce_version[0x8];
+
+       u8         reserved_2[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+       struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+       struct mlx5_ifc_odp_cap_bits odp_cap;
+       struct mlx5_ifc_atomic_caps_bits atomic_caps;
+       struct mlx5_ifc_roce_cap_bits roce_cap;
+       struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+       struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+       u8         reserved_0[0x8000];
+};
+
+enum {
+       MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
+       MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
+       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+       u8         reserved_0[0x20];
+
+       u8         group_id[0x20];
+
+       u8         reserved_1[0x8];
+       u8         flow_tag[0x18];
+
+       u8         reserved_2[0x10];
+       u8         action[0x10];
+
+       u8         reserved_3[0x8];
+       u8         destination_list_size[0x18];
+
+       u8         reserved_4[0x160];
+
+       struct mlx5_ifc_fte_match_param_bits match_value;
+
+       u8         reserved_5[0x600];
+
+       struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
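+
+/*
+ * Sketch (illustrative): destination[0] is a variable-length tail holding
+ * destination_list_size entries.  A typical forwarding entry, assuming the
+ * generic accessors, looks like:
+ *
+ *        MLX5_SET(flow_context, fc, action,
+ *                 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ *        MLX5_SET(flow_context, fc, destination_list_size, 1);
+ */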
+
+enum {
+       MLX5_XRC_SRQC_STATE_GOOD   = 0x0,
+       MLX5_XRC_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+       u8         state[0x4];
+       u8         log_xrc_srq_size[0x4];
+       u8         reserved_0[0x18];
+
+       u8         wq_signature[0x1];
+       u8         cont_srq[0x1];
+       u8         reserved_1[0x1];
+       u8         rlky[0x1];
+       u8         basic_cyclic_rcv_wqe[0x1];
+       u8         log_rq_stride[0x3];
+       u8         xrcd[0x18];
+
+       u8         page_offset[0x6];
+       u8         reserved_2[0x2];
+       u8         cqn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         user_index_equal_xrc_srqn[0x1];
+       u8         reserved_4[0x1];
+       u8         log_page_size[0x6];
+       u8         user_index[0x18];
+
+       u8         reserved_5[0x20];
+
+       u8         reserved_6[0x8];
+       u8         pd[0x18];
+
+       u8         lwm[0x10];
+       u8         wqe_cnt[0x10];
+
+       u8         reserved_7[0x40];
+
+       u8         db_record_addr_h[0x20];
+
+       u8         db_record_addr_l[0x1e];
+       u8         reserved_8[0x2];
+
+       u8         reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+       u8         packets[0x40];
+
+       u8         octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+       u8         reserved_0[0xc];
+       u8         prio[0x4];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x100];
+
+       u8         reserved_3[0x8];
+       u8         transport_domain[0x18];
+
+       u8         reserved_4[0x3c0];
+};
+
+enum {
+       MLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,
+       MLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,
+};
+
+enum {
+       MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
+       MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
+};
+
+enum {
+       MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
+       MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
+       MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+};
+
+enum {
+       MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_    = 0x1,
+       MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_  = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+       u8         reserved_0[0x20];
+
+       u8         disp_type[0x4];
+       u8         reserved_1[0x1c];
+
+       u8         reserved_2[0x40];
+
+       u8         reserved_3[0x4];
+       u8         lro_timeout_period_usecs[0x10];
+       u8         lro_enable_mask[0x4];
+       u8         lro_max_ip_payload_size[0x8];
+
+       u8         reserved_4[0x40];
+
+       u8         reserved_5[0x8];
+       u8         inline_rqn[0x18];
+
+       u8         rx_hash_symmetric[0x1];
+       u8         reserved_6[0x1];
+       u8         tunneled_offload_en[0x1];
+       u8         reserved_7[0x5];
+       u8         indirect_table[0x18];
+
+       u8         rx_hash_fn[0x4];
+       u8         reserved_8[0x2];
+       u8         self_lb_block[0x2];
+       u8         transport_domain[0x18];
+
+       u8         rx_hash_toeplitz_key[10][0x20];
+
+       struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+       struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+       u8         reserved_9[0x4c0];
+};
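+
+/*
+ * Sketch (illustrative): an RSS TIR would select the Toeplitz hash and,
+ * optionally, LRO for both IP versions; assuming MLX5_SET():
+ *
+ *        MLX5_SET(tirc, tirc, rx_hash_fn,
+ *                 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ *        MLX5_SET(tirc, tirc, lro_enable_mask,
+ *                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ *                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ */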
+
+enum {
+       MLX5_SRQC_STATE_GOOD   = 0x0,
+       MLX5_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+       u8         state[0x4];
+       u8         log_srq_size[0x4];
+       u8         reserved_0[0x18];
+
+       u8         wq_signature[0x1];
+       u8         cont_srq[0x1];
+       u8         reserved_1[0x1];
+       u8         rlky[0x1];
+       u8         reserved_2[0x1];
+       u8         log_rq_stride[0x3];
+       u8         xrcd[0x18];
+
+       u8         page_offset[0x6];
+       u8         reserved_3[0x2];
+       u8         cqn[0x18];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x2];
+       u8         log_page_size[0x6];
+       u8         reserved_6[0x18];
+
+       u8         reserved_7[0x20];
+
+       u8         reserved_8[0x8];
+       u8         pd[0x18];
+
+       u8         lwm[0x10];
+       u8         wqe_cnt[0x10];
+
+       u8         reserved_9[0x40];
+
+       u8         db_record_addr_h[0x20];
+
+       u8         db_record_addr_l[0x1e];
+       u8         reserved_10[0x2];
+
+       u8         reserved_11[0x80];
+};
+
+enum {
+       MLX5_SQC_STATE_RST  = 0x0,
+       MLX5_SQC_STATE_RDY  = 0x1,
+       MLX5_SQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+       u8         rlky[0x1];
+       u8         cd_master[0x1];
+       u8         fre[0x1];
+       u8         flush_in_error_en[0x1];
+       u8         reserved_0[0x4];
+       u8         state[0x4];
+       u8         reserved_1[0x14];
+
+       u8         reserved_2[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_3[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_4[0xa0];
+
+       u8         tis_lst_sz[0x10];
+       u8         reserved_5[0x10];
+
+       u8         reserved_6[0x40];
+
+       u8         reserved_7[0x8];
+       u8         tis_num_0[0x18];
+
+       struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+       u8         reserved_0[0xa0];
+
+       u8         reserved_1[0x10];
+       u8         rqt_max_size[0x10];
+
+       u8         reserved_2[0x10];
+       u8         rqt_actual_size[0x10];
+
+       u8         reserved_3[0x6a0];
+
+       struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+       MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
+       MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
+};
+
+enum {
+       MLX5_RQC_STATE_RST  = 0x0,
+       MLX5_RQC_STATE_RDY  = 0x1,
+       MLX5_RQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+       u8         rlky[0x1];
+       u8         reserved_0[0x2];
+       u8         vsd[0x1];
+       u8         mem_rq_type[0x4];
+       u8         state[0x4];
+       u8         reserved_1[0x1];
+       u8         flush_in_error_en[0x1];
+       u8         reserved_2[0x12];
+
+       u8         reserved_3[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_4[0x8];
+       u8         cqn[0x18];
+
+       u8         counter_set_id[0x8];
+       u8         reserved_5[0x18];
+
+       u8         reserved_6[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_7[0xe0];
+
+       struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+       MLX5_RMPC_STATE_RDY  = 0x1,
+       MLX5_RMPC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+       u8         reserved_0[0x8];
+       u8         state[0x4];
+       u8         reserved_1[0x14];
+
+       u8         basic_cyclic_rcv_wqe[0x1];
+       u8         reserved_2[0x1f];
+
+       u8         reserved_3[0x140];
+
+       struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+       MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS  = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+       u8         reserved_0[0x1f];
+       u8         roce_en[0x1];
+
+       u8         reserved_1[0x760];
+
+       u8         reserved_2[0x5];
+       u8         allowed_list_type[0x3];
+       u8         reserved_3[0xc];
+       u8         allowed_list_size[0xc];
+
+       struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+       u8         reserved_4[0x20];
+
+       u8         current_uc_mac_address[0][0x40];
+};
+
+enum {
+       MLX5_MKC_ACCESS_MODE_PA    = 0x0,
+       MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
+       MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+       u8         reserved_0[0x1];
+       u8         free[0x1];
+       u8         reserved_1[0xd];
+       u8         small_fence_on_rdma_read_response[0x1];
+       u8         umr_en[0x1];
+       u8         a[0x1];
+       u8         rw[0x1];
+       u8         rr[0x1];
+       u8         lw[0x1];
+       u8         lr[0x1];
+       u8         access_mode[0x2];
+       u8         reserved_2[0x8];
+
+       u8         qpn[0x18];
+       u8         mkey_7_0[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         length64[0x1];
+       u8         bsf_en[0x1];
+       u8         sync_umr[0x1];
+       u8         reserved_4[0x2];
+       u8         expected_sigerr_count[0x1];
+       u8         reserved_5[0x1];
+       u8         en_rinval[0x1];
+       u8         pd[0x18];
+
+       u8         start_addr[0x40];
+
+       u8         len[0x40];
+
+       u8         bsf_octword_size[0x20];
+
+       u8         reserved_6[0x80];
+
+       u8         translations_octword_size[0x20];
+
+       u8         reserved_7[0x1b];
+       u8         log_page_size[0x5];
+
+       u8         reserved_8[0x20];
+};
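+
+/*
+ * Sketch (illustrative): 0x40-bit members such as start_addr and len are
+ * accessed through the 64-bit helpers; assuming MLX5_SET()/MLX5_SET64(),
+ * with "iova" an illustrative local:
+ *
+ *        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+ *        MLX5_SET64(mkc, mkc, start_addr, iova);
+ */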
+
+struct mlx5_ifc_pkey_bits {
+       u8         reserved_0[0x10];
+       u8         pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+       u8         array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+       u8         field_select[0x20];
+
+       u8         reserved_0[0xe0];
+
+       u8         sm_virt_aware[0x1];
+       u8         has_smi[0x1];
+       u8         has_raw[0x1];
+       u8         grh_required[0x1];
+       u8         reserved_1[0x10];
+       u8         port_state_policy[0x4];
+       u8         phy_port_state[0x4];
+       u8         vport_state[0x4];
+
+       u8         reserved_2[0x60];
+
+       u8         port_guid[0x40];
+
+       u8         node_guid[0x40];
+
+       u8         cap_mask1[0x20];
+
+       u8         cap_mask1_field_select[0x20];
+
+       u8         cap_mask2[0x20];
+
+       u8         cap_mask2_field_select[0x20];
+
+       u8         reserved_3[0x80];
+
+       u8         lid[0x10];
+       u8         reserved_4[0x4];
+       u8         init_type_reply[0x4];
+       u8         lmc[0x3];
+       u8         subnet_timeout[0x5];
+
+       u8         sm_lid[0x10];
+       u8         sm_sl[0x4];
+       u8         reserved_5[0xc];
+
+       u8         qkey_violation_counter[0x10];
+       u8         pkey_violation_counter[0x10];
+
+       u8         reserved_6[0xca0];
+};
+
+enum {
+       MLX5_EQC_STATUS_OK                = 0x0,
+       MLX5_EQC_STATUS_EQ_WRITE_FAILURE  = 0xa,
+};
+
+enum {
+       MLX5_EQC_ST_ARMED  = 0x9,
+       MLX5_EQC_ST_FIRED  = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+       u8         status[0x4];
+       u8         reserved_0[0x9];
+       u8         ec[0x1];
+       u8         oi[0x1];
+       u8         reserved_1[0x5];
+       u8         st[0x4];
+       u8         reserved_2[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         reserved_4[0x14];
+       u8         page_offset[0x6];
+       u8         reserved_5[0x6];
+
+       u8         reserved_6[0x3];
+       u8         log_eq_size[0x5];
+       u8         uar_page[0x18];
+
+       u8         reserved_7[0x20];
+
+       u8         reserved_8[0x18];
+       u8         intr[0x8];
+
+       u8         reserved_9[0x3];
+       u8         log_page_size[0x5];
+       u8         reserved_10[0x18];
+
+       u8         reserved_11[0x60];
+
+       u8         reserved_12[0x8];
+       u8         consumer_counter[0x18];
+
+       u8         reserved_13[0x8];
+       u8         producer_counter[0x18];
+
+       u8         reserved_14[0x80];
+};
+
+enum {
+       MLX5_DCTC_STATE_ACTIVE    = 0x0,
+       MLX5_DCTC_STATE_DRAINING  = 0x1,
+       MLX5_DCTC_STATE_DRAINED   = 0x2,
+};
+
+enum {
+       MLX5_DCTC_CS_RES_DISABLE    = 0x0,
+       MLX5_DCTC_CS_RES_NA         = 0x1,
+       MLX5_DCTC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+enum {
+       MLX5_DCTC_MTU_256_BYTES  = 0x1,
+       MLX5_DCTC_MTU_512_BYTES  = 0x2,
+       MLX5_DCTC_MTU_1K_BYTES   = 0x3,
+       MLX5_DCTC_MTU_2K_BYTES   = 0x4,
+       MLX5_DCTC_MTU_4K_BYTES   = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+       u8         reserved_0[0x4];
+       u8         state[0x4];
+       u8         reserved_1[0x18];
+
+       u8         reserved_2[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_3[0x8];
+       u8         cqn[0x18];
+
+       u8         counter_set_id[0x8];
+       u8         atomic_mode[0x4];
+       u8         rre[0x1];
+       u8         rwe[0x1];
+       u8         rae[0x1];
+       u8         atomic_like_write_en[0x1];
+       u8         latency_sensitive[0x1];
+       u8         rlky[0x1];
+       u8         free_ar[0x1];
+       u8         reserved_4[0xd];
+
+       u8         reserved_5[0x8];
+       u8         cs_res[0x8];
+       u8         reserved_6[0x3];
+       u8         min_rnr_nak[0x5];
+       u8         reserved_7[0x8];
+
+       u8         reserved_8[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_9[0x8];
+       u8         pd[0x18];
+
+       u8         tclass[0x8];
+       u8         reserved_10[0x4];
+       u8         flow_label[0x14];
+
+       u8         dc_access_key[0x40];
+
+       u8         reserved_11[0x5];
+       u8         mtu[0x3];
+       u8         port[0x8];
+       u8         pkey_index[0x10];
+
+       u8         reserved_12[0x8];
+       u8         my_addr_index[0x8];
+       u8         reserved_13[0x8];
+       u8         hop_limit[0x8];
+
+       u8         dc_access_key_violation_count[0x20];
+
+       u8         reserved_14[0x14];
+       u8         dei_cfi[0x1];
+       u8         eth_prio[0x3];
+       u8         ecn[0x2];
+       u8         dscp[0x6];
+
+       u8         reserved_15[0x40];
+};
+
+enum {
+       MLX5_CQC_STATUS_OK             = 0x0,
+       MLX5_CQC_STATUS_CQ_OVERFLOW    = 0x9,
+       MLX5_CQC_STATUS_CQ_WRITE_FAIL  = 0xa,
+};
+
+enum {
+       MLX5_CQC_CQE_SZ_64_BYTES   = 0x0,
+       MLX5_CQC_CQE_SZ_128_BYTES  = 0x1,
+};
+
+enum {
+       MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED  = 0x6,
+       MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED            = 0x9,
+       MLX5_CQC_ST_FIRED                                 = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+       u8         status[0x4];
+       u8         reserved_0[0x4];
+       u8         cqe_sz[0x3];
+       u8         cc[0x1];
+       u8         reserved_1[0x1];
+       u8         scqe_break_moderation_en[0x1];
+       u8         oi[0x1];
+       u8         reserved_2[0x2];
+       u8         cqe_zip_en[0x1];
+       u8         mini_cqe_res_format[0x2];
+       u8         st[0x4];
+       u8         reserved_3[0x8];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x14];
+       u8         page_offset[0x6];
+       u8         reserved_6[0x6];
+
+       u8         reserved_7[0x3];
+       u8         log_cq_size[0x5];
+       u8         uar_page[0x18];
+
+       u8         reserved_8[0x4];
+       u8         cq_period[0xc];
+       u8         cq_max_count[0x10];
+
+       u8         reserved_9[0x18];
+       u8         c_eqn[0x8];
+
+       u8         reserved_10[0x3];
+       u8         log_page_size[0x5];
+       u8         reserved_11[0x18];
+
+       u8         reserved_12[0x20];
+
+       u8         reserved_13[0x8];
+       u8         last_notified_index[0x18];
+
+       u8         reserved_14[0x8];
+       u8         last_solicit_index[0x18];
+
+       u8         reserved_15[0x8];
+       u8         consumer_counter[0x18];
+
+       u8         reserved_16[0x8];
+       u8         producer_counter[0x18];
+
+       u8         reserved_17[0x40];
+
+       u8         dbr_addr[0x40];
+};
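+
+/*
+ * Sketch (illustrative): completion interrupt moderation is described by
+ * cq_period and cq_max_count; the values below are arbitrary examples,
+ * assuming MLX5_SET():
+ *
+ *        MLX5_SET(cqc, cqc, cq_period, 10);
+ *        MLX5_SET(cqc, cqc, cq_max_count, 16);
+ */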
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+       struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+       struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+       struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+       u8         reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+       u8         reserved_0[0xe0];
+
+       u8         reserved_1[0x10];
+       u8         vsd_vendor_id[0x10];
+
+       u8         vsd[208][0x8];
+
+       u8         vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+       struct mlx5_ifc_modify_field_select_bits modify_field_select;
+       struct mlx5_ifc_resize_field_select_bits resize_field_select;
+       u8         reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+       struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+       struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+       struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+       u8         reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+       struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+       struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+       struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+       u8         reserved_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+       struct mlx5_ifc_comp_event_bits comp_event;
+       struct mlx5_ifc_dct_events_bits dct_events;
+       struct mlx5_ifc_qp_events_bits qp_events;
+       struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+       struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+       struct mlx5_ifc_cq_error_bits cq_error;
+       struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+       struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+       struct mlx5_ifc_gpio_event_bits gpio_event;
+       struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+       struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+       struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+       u8         reserved_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+       u8         reserved_0[0x100];
+
+       u8         assert_existptr[0x20];
+
+       u8         assert_callra[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         fw_version[0x20];
+
+       u8         hw_id[0x20];
+
+       u8         reserved_2[0x20];
+
+       u8         irisc_index[0x8];
+       u8         synd[0x8];
+       u8         ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+       u8         no_lb[0x1];
+       u8         reserved_0[0x7];
+       u8         port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE  = 0x0,
+       MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE     = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         profile[0x10];
+
+       u8         reserved_3[0x20];
+};
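+
+/*
+ * Command round-trip sketch (illustrative; assumes mlx5_cmd_exec() and the
+ * MLX5_ST_SZ_DW()/MLX5_SET() helpers; the *_in and *_out pairs in this file
+ * all follow the same pattern):
+ *
+ *        static int teardown_hca(struct mlx5_core_dev *dev)
+ *        {
+ *                u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ *                u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ *
+ *                MLX5_SET(teardown_hca_in, in, opcode,
+ *                         MLX5_CMD_OP_TEARDOWN_HCA);
+ *                MLX5_SET(teardown_hca_in, in, profile,
+ *                         MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE);
+ *                return mlx5_cmd_exec(dev, in, sizeof(in),
+ *                                     out, sizeof(out));
+ *        }
+ */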
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         roce_address_index[0x10];
+       u8         reserved_2[0x10];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL   = 0x0,
+       MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE  = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x6];
+       u8         demux_mode[0x2];
+       u8         reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x8];
+       u8         table_index[0x18];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x13];
+       u8         vlan_valid[0x1];
+       u8         vlan[0xc];
+
+       struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+       u8         reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         current_issi[0x10];
+
+       u8         reserved_3[0x20];
+};
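+
+/*
+ * Sketch (illustrative): ISSI (interface step sequence ID) is negotiated at
+ * init time; query_issi reports which levels the device supports and
+ * set_issi selects one of them, e.g.:
+ *
+ *        MLX5_SET(set_issi_in, in, opcode, MLX5_CMD_OP_SET_ISSI);
+ *        MLX5_SET(set_issi_in, in, current_issi, 1);
+ */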
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_set_fte_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x40];
+
+       u8         flow_index[0x20];
+
+       u8         reserved_6[0xe0];
+
+       struct mlx5_ifc_flow_context_bits flow_context;
+};
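+
+/*
+ * Sketch (illustrative): a flow table entry write addresses the entry by
+ * (table_type, table_id, flow_index) and carries the flow_context inline;
+ * "id" and "index" below are illustrative locals:
+ *
+ *        MLX5_SET(set_fte_in, in, opcode,
+ *                 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ *        MLX5_SET(set_fte_in, in, table_id, id);
+ *        MLX5_SET(set_fte_in, in, flow_index, index);
+ */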
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+       u8         reserved_2[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+enum {
+       MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN  = 0x0,
+       MLX5_QUERY_VPORT_STATE_OUT_STATE_UP    = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         reserved_2[0x18];
+       u8         admin_state[0x4];
+       u8         state[0x4];
+};
+
+enum {
+       MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x20];
+};
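+
+/*
+ * Sketch (illustrative): with other_vport cleared the command acts on the
+ * issuing function's own vport; setting other_vport selects vport_number
+ * instead.  Assuming the generic accessors:
+ *
+ *        MLX5_SET(query_vport_state_in, in, opcode,
+ *                 MLX5_CMD_OP_QUERY_VPORT_STATE);
+ *        state = MLX5_GET(query_vport_state_out, out, state);
+ */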
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_traffic_counter_bits received_errors;
+
+       struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+       struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+       struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+       struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+       u8         reserved_2[0xa00];
+};
+
+enum {
+       MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x60];
+
+       u8         clear[0x1];
+       u8         reserved_4[0x1f];
+
+       u8         reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_srqc_bits srq_context_entry;
+
+       u8         reserved_2[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         sqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         roce_address_index[0x10];
+       u8         reserved_2[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_2[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_3[0x80];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         rx_write_requests[0x20];
+
+       u8         reserved_2[0x20];
+
+       u8         rx_read_requests[0x20];
+
+       u8         reserved_3[0x20];
+
+       u8         rx_atomic_requests[0x20];
+
+       u8         reserved_4[0x20];
+
+       u8         rx_dct_connect[0x20];
+
+       u8         reserved_5[0x20];
+
+       u8         out_of_buffer[0x20];
+
+       u8         reserved_6[0x20];
+
+       u8         out_of_sequence[0x20];
+
+       u8         reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x80];
+
+       u8         clear[0x1];
+       u8         reserved_3[0x1f];
+
+       u8         reserved_4[0x18];
+       u8         counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x10];
+       u8         function_id[0x10];
+
+       u8         num_pages[0x20];
+};
+
+enum {
+       MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES     = 0x1,
+       MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES     = 0x2,
+       MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES  = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         reserved_3[0x20];
+};
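+
+/*
+ * Note (illustrative): the driver treats page counts as signed; a positive
+ * num_pages asks the driver to supply pages through manage_pages, while a
+ * negative count (most visible in the page-request event) asks it to
+ * reclaim them, e.g.:
+ *
+ *        s32 npages = MLX5_GET(query_pages_out, out, num_pages);
+ */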
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x5];
+       u8         allowed_list_type[0x3];
+       u8         reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+       u8         reserved_2[0x600];
+
+       u8         bsf0_klm0_pas_mtt0_1[16][0x8];
+
+       u8         bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         mkey_index[0x18];
+
+       u8         pg_access[0x1];
+       u8         reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         mad_demux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xa0];
+
+       u8         reserved_2[0x13];
+       u8         vlan_valid[0x1];
+       u8         vlan[0xc];
+
+       struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+       u8         reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x8];
+       u8         table_index[0x18];
+
+       u8         reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x10];
+       u8         current_issi[0x10];
+
+       u8         reserved_2[0xa0];
+
+       u8         supported_issi_reserved[76][0x8];
+       u8         supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x10];
+       u8         pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         gids_num[0x10];
+       u8         reserved_2[0x10];
+
+       struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x10];
+       u8         gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
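+
+/*
+ * Sketch (illustrative): op_mod multiplexes the capability group returned
+ * in the output union together with the max/current selector; "cap_type"
+ * below is an illustrative enum value, and the encoding shown is the one
+ * assumed by the driver:
+ *
+ *        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ *        MLX5_SET(query_hca_cap_in, in, op_mod,
+ *                 (cap_type << 1) | HCA_CAP_OPMOD_GET_CUR);
+ */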
+
+struct mlx5_ifc_query_flow_table_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x80];
+
+       u8         reserved_2[0x8];
+       u8         level[0x8];
+       u8         reserved_3[0x8];
+       u8         log_size[0x8];
+
+       u8         reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x1c0];
+
+       struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x40];
+
+       u8         flow_index[0x20];
+
+       u8         reserved_6[0xe0];
+};
+
+enum {
+       MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+       MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+       MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xa0];
+
+       u8         start_flow_index[0x20];
+
+       u8         reserved_2[0x20];
+
+       u8         end_flow_index[0x20];
+
+       u8         reserved_3[0xa0];
+
+       u8         reserved_4[0x18];
+       u8         match_criteria_enable[0x8];
+
+       struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+       u8         reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         group_id[0x20];
+
+       u8         reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_eqc_bits eq_context_entry;
+
+       u8         reserved_2[0x40];
+
+       u8         event_bitmask[0x40];
+
+       u8         reserved_3[0x580];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_dctc_bits dct_context_entry;
+
+       u8         reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_cqc_bits cq_context;
+
+       u8         reserved_2[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         enable[0x1];
+       u8         tag_enable[0x1];
+       u8         reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         priority[0x4];
+       u8         cong_protocol[0x4];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         cur_flows[0x20];
+
+       u8         sum_flows[0x20];
+
+       u8         cnp_ignored_high[0x20];
+
+       u8         cnp_ignored_low[0x20];
+
+       u8         cnp_handled_high[0x20];
+
+       u8         cnp_handled_low[0x20];
+
+       u8         reserved_2[0x100];
+
+       u8         time_stamp_high[0x20];
+
+       u8         time_stamp_low[0x20];
+
+       u8         accumulators_period[0x20];
+
+       u8         ecn_marked_roce_packets_high[0x20];
+
+       u8         ecn_marked_roce_packets_low[0x20];
+
+       u8         cnps_sent_high[0x20];
+
+       u8         cnps_sent_low[0x20];
+
+       u8         reserved_3[0x560];
+};
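+
+/*
+ * Sketch (illustrative): the *_high and *_low members above are the two
+ * halves of 64-bit counters and are recombined as:
+ *
+ *        u64 cnps_sent =
+ *                (u64)MLX5_GET(query_cong_statistics_out, out,
+ *                              cnps_sent_high) << 32 |
+ *                MLX5_GET(query_cong_statistics_out, out, cnps_sent_low);
+ */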
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         clear[0x1];
+       u8         reserved_2[0x1f];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x1c];
+       u8         cong_protocol[0x4];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         error[0x1];
+       u8         reserved_2[0x4];
+       u8         rdma[0x1];
+       u8         read_write[0x1];
+       u8         req_res[0x1];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x18];
+       u8         admin_state[0x4];
+       u8         reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         sq_state[0x4];
+       u8         reserved_2[0x4];
+       u8         sqn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         rq_state[0x4];
+       u8         reserved_2[0x4];
+       u8         rqn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         rmp_state[0x4];
+       u8         reserved_2[0x4];
+       u8         rmpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+       u8         reserved_0[0x1c];
+       u8         permanent_address[0x1];
+       u8         addresses_list[0x1];
+       u8         roce_en[0x1];
+       u8         reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+       u8         reserved_3[0x780];
+
+       struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ  = 0x0,
+       MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ  = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         cqn[0x18];
+
+       union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+       struct mlx5_ifc_cqc_bits cq_context;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         priority[0x4];
+       u8         cong_protocol[0x4];
+
+       u8         enable[0x1];
+       u8         tag_enable[0x1];
+       u8         reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x1c];
+       u8         cong_protocol[0x4];
+
+       union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+       u8         reserved_3[0x80];
+
+       union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         output_num_entries[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         pas[0][0x40];
+};
+
+enum {
+       MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL     = 0x0,
+       MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS  = 0x1,
+       MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES    = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         input_num_entries[0x20];
+
+       u8         pas[0][0x40];
+};
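+
+/*
+ * op_mod selects the transfer direction per the enum above; for page
+ * grants, input_num_entries gives the number of 64-bit physical
+ * addresses carried in the trailing pas[] array.
+ */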
+
+struct mlx5_ifc_mad_ifc_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         remote_lid[0x10];
+       u8         reserved_2[0x8];
+       u8         port[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         packet_headers_log[128][0x8];
+
+       u8         packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         reserved_3[0x20];
+};
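+
+/*
+ * Illustrative use only (not part of this interface file), assuming
+ * the core driver's mlx5_cmd_exec() helper and the MLX5_ST_SZ_DW()
+ * size macro: a minimal ENABLE_HCA command could be built as
+ *
+ *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
+ *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
+ *	int err;
+ *
+ *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ *	MLX5_SET(enable_hca_in, in, function_id, 0);
+ *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ *	if (!err && MLX5_GET(enable_hca_out, out, status))
+ *		err = -EIO;
+ */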
+
+struct mlx5_ifc_drain_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         sqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         psvn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         mkey_index[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         group_id[0x20];
+
+       u8         reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x10];
+       u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x8];
+       u8         table_index[0x18];
+
+       u8         reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x40];
+
+       u8         flow_index[0x20];
+
+       u8         reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrcd[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         uar[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         transport_domain[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         counter_set_id[0x8];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         pd[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_srqc_bits srq_context_entry;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         sqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         rqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_4[0x80];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         reserved_2[0x8];
+       u8         psv0_index[0x18];
+
+       u8         reserved_3[0x8];
+       u8         psv1_index[0x18];
+
+       u8         reserved_4[0x8];
+       u8         psv2_index[0x18];
+
+       u8         reserved_5[0x8];
+       u8         psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         num_psv[0x4];
+       u8         reserved_2[0x4];
+       u8         pd[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         mkey_index[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         pg_access[0x1];
+       u8         reserved_3[0x1f];
+
+       struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+       u8         reserved_4[0x80];
+
+       u8         translations_octword_actual_size[0x20];
+
+       u8         reserved_5[0x560];
+
+       u8         klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x8];
+       u8         level[0x8];
+       u8         reserved_6[0x8];
+       u8         log_size[0x8];
+
+       u8         reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         group_id[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+enum {
+       MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+       MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+       MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
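+
+/*
+ * These values are bit positions within match_criteria_enable; the
+ * resulting mask selects which sections of the fte_match_param below
+ * are valid match criteria for the group.
+ */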
+
+struct mlx5_ifc_create_flow_group_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x20];
+
+       u8         start_flow_index[0x20];
+
+       u8         reserved_6[0x20];
+
+       u8         end_flow_index[0x20];
+
+       u8         reserved_7[0xa0];
+
+       u8         reserved_8[0x18];
+       u8         match_criteria_enable[0x8];
+
+       struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+       u8         reserved_9[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_eqc_bits eq_context_entry;
+
+       u8         reserved_3[0x40];
+
+       u8         event_bitmask[0x40];
+
+       u8         reserved_4[0x580];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_dctc_bits dct_context_entry;
+
+       u8         reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_cqc_bits cq_context;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x4];
+       u8         min_delay[0xc];
+       u8         int_vector[0x10];
+
+       u8         reserved_2[0x20];
+};
+
+enum {
+       MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE  = 0x0,
+       MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x4];
+       u8         min_delay[0xc];
+       u8         int_vector[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ  = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_3[0x10];
+       u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_ARM_RQ_IN_OP_MOD_SRQ_  = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         srq_number[0x18];
+
+       u8         reserved_3[0x10];
+       u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dct_number[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         xrcd[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         uar[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         transport_domain[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x18];
+       u8         counter_set_id[0x8];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         pd[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x10];
+       u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         register_data[0][0x20];
+};
+
+enum {
+       MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE  = 0x0,
+       MLX5_ACCESS_REGISTER_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         register_id[0x10];
+
+       u8         argument[0x20];
+
+       u8         register_data[0][0x20];
+};
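+
+/*
+ * register_data[] is a variable-length array of 32-bit dwords; the
+ * port register layouts that follow (SLTP, PTYS, PAOS, PMTU, ...) are
+ * carried in this payload, selected by register_id.
+ */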
+
+struct mlx5_ifc_sltp_reg_bits {
+       u8         status[0x4];
+       u8         version[0x4];
+       u8         local_port[0x8];
+       u8         pnat[0x2];
+       u8         reserved_0[0x2];
+       u8         lane[0x4];
+       u8         reserved_1[0x8];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x7];
+       u8         polarity[0x1];
+       u8         ob_tap0[0x8];
+       u8         ob_tap1[0x8];
+       u8         ob_tap2[0x8];
+
+       u8         reserved_4[0xc];
+       u8         ob_preemp_mode[0x4];
+       u8         ob_reg[0x8];
+       u8         ob_bias[0x8];
+
+       u8         reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+       u8         status[0x4];
+       u8         version[0x4];
+       u8         local_port[0x8];
+       u8         pnat[0x2];
+       u8         reserved_0[0x2];
+       u8         lane[0x4];
+       u8         reserved_1[0x8];
+
+       u8         time_to_link_up[0x10];
+       u8         reserved_2[0xc];
+       u8         grade_lane_speed[0x4];
+
+       u8         grade_version[0x8];
+       u8         grade[0x18];
+
+       u8         reserved_3[0x4];
+       u8         height_grade_type[0x4];
+       u8         height_grade[0x18];
+
+       u8         height_dz[0x10];
+       u8         height_dv[0x10];
+
+       u8         reserved_4[0x10];
+       u8         height_sigma[0x10];
+
+       u8         reserved_5[0x20];
+
+       u8         reserved_6[0x4];
+       u8         phase_grade_type[0x4];
+       u8         phase_grade[0x18];
+
+       u8         reserved_7[0x8];
+       u8         phase_eo_pos[0x8];
+       u8         reserved_8[0x8];
+       u8         phase_eo_neg[0x8];
+
+       u8         ffe_set_tested[0x10];
+       u8         test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x1c];
+       u8         vl_hw_cap[0x4];
+
+       u8         reserved_3[0x1c];
+       u8         vl_admin[0x4];
+
+       u8         reserved_4[0x1c];
+       u8         vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         reserved_0[0x4];
+       u8         admin_status[0x4];
+       u8         reserved_1[0x4];
+       u8         oper_status[0x4];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0xd];
+       u8         proto_mask[0x3];
+
+       u8         reserved_2[0x40];
+
+       u8         eth_proto_capability[0x20];
+
+       u8         ib_link_width_capability[0x10];
+       u8         ib_proto_capability[0x10];
+
+       u8         reserved_3[0x20];
+
+       u8         eth_proto_admin[0x20];
+
+       u8         ib_link_width_admin[0x10];
+       u8         ib_proto_admin[0x10];
+
+       u8         reserved_4[0x20];
+
+       u8         eth_proto_oper[0x20];
+
+       u8         ib_link_width_oper[0x10];
+       u8         ib_proto_oper[0x10];
+
+       u8         reserved_5[0x20];
+
+       u8         eth_proto_lp_advertise[0x20];
+
+       u8         reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+       u8         reserved_0[0x20];
+
+       u8         algorithm_options[0x10];
+       u8         reserved_1[0x4];
+       u8         repetitions_mode[0x4];
+       u8         num_of_repetitions[0x8];
+
+       u8         grade_version[0x8];
+       u8         height_grade_type[0x4];
+       u8         phase_grade_type[0x4];
+       u8         height_grade_weight[0x8];
+       u8         phase_grade_weight[0x8];
+
+       u8         gisim_measure_bits[0x10];
+       u8         adaptive_tap_measure_bits[0x10];
+
+       u8         ber_bath_high_error_threshold[0x10];
+       u8         ber_bath_mid_error_threshold[0x10];
+
+       u8         ber_bath_low_error_threshold[0x10];
+       u8         one_ratio_high_threshold[0x10];
+
+       u8         one_ratio_high_mid_threshold[0x10];
+       u8         one_ratio_low_mid_threshold[0x10];
+
+       u8         one_ratio_low_threshold[0x10];
+       u8         ndeo_error_threshold[0x10];
+
+       u8         mixer_offset_step_size[0x10];
+       u8         reserved_2[0x8];
+       u8         mix90_phase_for_voltage_bath[0x8];
+
+       u8         mixer_offset_start[0x10];
+       u8         mixer_offset_end[0x10];
+
+       u8         reserved_3[0x15];
+       u8         ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         sub_port[0x8];
+       u8         reserved_0[0x8];
+
+       u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x5];
+       u8         prio[0x3];
+       u8         reserved_2[0x6];
+       u8         mode[0x2];
+
+       u8         reserved_3[0x20];
+
+       u8         reserved_4[0x10];
+       u8         min_threshold[0x10];
+
+       u8         reserved_5[0x10];
+       u8         max_threshold[0x10];
+
+       u8         reserved_6[0x10];
+       u8         mark_probability_denominator[0x10];
+
+       u8         reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x1c];
+       u8         wrps_admin[0x4];
+
+       u8         reserved_4[0x1c];
+       u8         wrps_status[0x4];
+
+       u8         reserved_5[0x8];
+       u8         up_threshold[0x8];
+       u8         reserved_6[0x8];
+       u8         down_threshold[0x8];
+
+       u8         reserved_7[0x20];
+
+       u8         reserved_8[0x1c];
+       u8         srps_admin[0x4];
+
+       u8         reserved_9[0x1c];
+       u8         srps_status[0x4];
+
+       u8         reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x8];
+       u8         lb_cap[0x8];
+       u8         reserved_3[0x8];
+       u8         lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         port_profile_mode[0x8];
+       u8         static_port_profile[0x8];
+       u8         active_port_profile[0x8];
+       u8         reserved_3[0x8];
+
+       u8         retransmission_active[0x8];
+       u8         fec_mode_active[0x18];
+
+       u8         reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         pnat[0x2];
+       u8         reserved_0[0x8];
+       u8         grp[0x6];
+
+       u8         clr[0x1];
+       u8         reserved_1[0x1c];
+       u8         prio_tc[0x3];
+
+       union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+       u8         reserved_0[0x3];
+       u8         single_mac[0x1];
+       u8         reserved_1[0x4];
+       u8         local_port[0x8];
+       u8         mac_47_32[0x10];
+
+       u8         mac_31_0[0x20];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         max_mtu[0x10];
+       u8         reserved_2[0x10];
+
+       u8         admin_mtu[0x10];
+       u8         reserved_3[0x10];
+
+       u8         oper_mtu[0x10];
+       u8         reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         module[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x18];
+       u8         attenuation_5g[0x8];
+
+       u8         reserved_3[0x18];
+       u8         attenuation_7g[0x8];
+
+       u8         reserved_4[0x18];
+       u8         attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+       u8         reserved_0[0x8];
+       u8         module[0x8];
+       u8         reserved_1[0xc];
+       u8         module_status[0x4];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+       u8         module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+       u8         reserved_0[0x4];
+       u8         mlpn_status[0x4];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         e[0x1];
+       u8         reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+       u8         rxtx[0x1];
+       u8         reserved_0[0x7];
+       u8         local_port[0x8];
+       u8         reserved_1[0x8];
+       u8         width[0x8];
+
+       u8         lane0_module_mapping[0x20];
+
+       u8         lane1_module_mapping[0x20];
+
+       u8         lane2_module_mapping[0x20];
+
+       u8         lane3_module_mapping[0x20];
+
+       u8         reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+       u8         reserved_0[0x8];
+       u8         module[0x8];
+       u8         reserved_1[0x4];
+       u8         admin_status[0x4];
+       u8         reserved_2[0x4];
+       u8         oper_status[0x4];
+
+       u8         ase[0x1];
+       u8         ee[0x1];
+       u8         reserved_3[0x1c];
+       u8         e[0x2];
+
+       u8         reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+       u8         reserved_0[0x4];
+       u8         profile_id[0xc];
+       u8         reserved_1[0x4];
+       u8         proto_mask[0x4];
+       u8         reserved_2[0x8];
+
+       u8         reserved_3[0x10];
+       u8         lane_speed[0x10];
+
+       u8         reserved_4[0x17];
+       u8         lpbf[0x1];
+       u8         fec_mode_policy[0x8];
+
+       u8         retransmission_capability[0x8];
+       u8         fec_mode_capability[0x18];
+
+       u8         retransmission_support_admin[0x8];
+       u8         fec_mode_support_admin[0x18];
+
+       u8         retransmission_request_admin[0x8];
+       u8         fec_mode_request_admin[0x18];
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x8];
+       u8         ib_port[0x8];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0xd];
+       u8         lbf_mode[0x3];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         dic[0x1];
+       u8         reserved_2[0x19];
+       u8         ipg[0x4];
+       u8         reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0xe0];
+
+       u8         port_filter[8][0x20];
+
+       u8         port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         ppan[0x4];
+       u8         reserved_2[0x4];
+       u8         prio_mask_tx[0x8];
+       u8         reserved_3[0x8];
+       u8         prio_mask_rx[0x8];
+
+       u8         pptx[0x1];
+       u8         aptx[0x1];
+       u8         reserved_4[0x6];
+       u8         pfctx[0x8];
+       u8         reserved_5[0x10];
+
+       u8         pprx[0x1];
+       u8         aprx[0x1];
+       u8         reserved_6[0x6];
+       u8         pfcrx[0x8];
+       u8         reserved_7[0x10];
+
+       u8         reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+       u8         op[0x4];
+       u8         reserved_0[0x4];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         op_admin[0x8];
+       u8         op_capability[0x8];
+       u8         op_request[0x8];
+       u8         op_active[0x8];
+
+       u8         admin[0x40];
+
+       u8         capability[0x40];
+
+       u8         request[0x40];
+
+       u8         active[0x40];
+
+       u8         reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0xc];
+       u8         error_count[0x4];
+       u8         reserved_3[0x10];
+
+       u8         reserved_4[0xc];
+       u8         lane[0x4];
+       u8         reserved_5[0x8];
+       u8         error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         reserved_0[0x4];
+       u8         admin_status[0x4];
+       u8         reserved_1[0x4];
+       u8         oper_status[0x4];
+
+       u8         ase[0x1];
+       u8         ee[0x1];
+       u8         reserved_2[0x1c];
+       u8         e[0x2];
+
+       u8         reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+       u8         reserved_0[0x8];
+       u8         opamp_group[0x8];
+       u8         reserved_1[0xc];
+       u8         opamp_group_type[0x4];
+
+       u8         start_index[0x10];
+       u8         reserved_2[0x4];
+       u8         num_of_indices[0xc];
+
+       u8         index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+       u8         reserved_0[0x6];
+       u8         rx_lane[0x2];
+       u8         reserved_1[0x6];
+       u8         tx_lane[0x2];
+       u8         reserved_2[0x8];
+       u8         module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+       u8         reserved_0[0x6];
+       u8         lossy[0x1];
+       u8         epsb[0x1];
+       u8         reserved_1[0xc];
+       u8         size[0xc];
+
+       u8         xoff_threshold[0x10];
+       u8         xon_threshold[0x10];
+};
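+
+/*
+ * Per-buffer receive flow control: pause is asserted once occupancy
+ * crosses xoff_threshold and released again at xon_threshold.
+ */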
+
+struct mlx5_ifc_set_node_in_bits {
+       u8         node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+       u8         reserved_0[0x18];
+       u8         power_settings_level[0x8];
+
+       u8         reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+       u8         he[0x1];
+       u8         reserved_0[0x1f];
+
+       u8         reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+       u8         reserved_0[0x20];
+
+       u8         mkey[0x20];
+
+       u8         addressh_63_32[0x20];
+
+       u8         addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+       u8         dc_key[0x40];
+
+       u8         ext[0x1];
+       u8         reserved_0[0x7];
+       u8         destination_qp_dct[0x18];
+
+       u8         static_rate[0x4];
+       u8         sl_eth_prio[0x4];
+       u8         fl[0x1];
+       u8         mlid[0x7];
+       u8         rlid_udp_sport[0x10];
+
+       u8         reserved_1[0x20];
+
+       u8         rmac_47_16[0x20];
+
+       u8         rmac_15_0[0x10];
+       u8         tclass[0x8];
+       u8         hop_limit[0x8];
+
+       u8         reserved_2[0x1];
+       u8         grh[0x1];
+       u8         reserved_3[0x2];
+       u8         src_addr_index[0x8];
+       u8         flow_label[0x14];
+
+       u8         rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+       u8         reserved_0[0x10];
+       u8         function_id[0x10];
+
+       u8         num_pages[0x20];
+
+       u8         reserved_1[0xa0];
+};
+
+struct mlx5_ifc_eqe_bits {
+       u8         reserved_0[0x8];
+       u8         event_type[0x8];
+       u8         reserved_1[0x8];
+       u8         event_sub_type[0x8];
+
+       u8         reserved_2[0xe0];
+
+       union mlx5_ifc_event_auto_bits event_data;
+
+       u8         reserved_3[0x10];
+       u8         signature[0x8];
+       u8         reserved_4[0x7];
+       u8         owner[0x1];
+};
+
+enum {
+       MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT  = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+       u8         type[0x8];
+       u8         reserved_0[0x18];
+
+       u8         input_length[0x20];
+
+       u8         input_mailbox_pointer_63_32[0x20];
+
+       u8         input_mailbox_pointer_31_9[0x17];
+       u8         reserved_1[0x9];
+
+       u8         command_input_inline_data[16][0x8];
+
+       u8         command_output_inline_data[16][0x8];
+
+       u8         output_mailbox_pointer_63_32[0x20];
+
+       u8         output_mailbox_pointer_31_9[0x17];
+       u8         reserved_2[0x9];
+
+       u8         output_length[0x20];
+
+       u8         token[0x8];
+       u8         signature[0x8];
+       u8         reserved_3[0x8];
+       u8         status[0x7];
+       u8         ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+       u8         mailbox_data[512][0x8];
+
+       u8         reserved_0[0x180];
+
+       u8         next_pointer_63_32[0x20];
+
+       u8         next_pointer_31_10[0x16];
+       u8         reserved_1[0xa];
+
+       u8         block_number[0x20];
+
+       u8         reserved_2[0x8];
+       u8         token[0x8];
+       u8         ctrl_signature[0x8];
+       u8         signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+       u8         ptag_63_32[0x20];
+
+       u8         ptag_31_8[0x18];
+       u8         reserved_0[0x6];
+       u8         wr_en[0x1];
+       u8         rd_en[0x1];
+};
+
+enum {
+       MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER  = 0x0,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED     = 0x1,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER  = 0x0,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED     = 0x1,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR              = 0x1,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC                   = 0x7,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR                 = 0x8,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR                   = 0x9,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR            = 0xa,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR                 = 0xb,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN  = 0xc,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR                    = 0xd,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV                       = 0xe,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR                    = 0xf,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR                = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+       u8         fw_rev_minor[0x10];
+       u8         fw_rev_major[0x10];
+
+       u8         cmd_interface_rev[0x10];
+       u8         fw_rev_subminor[0x10];
+
+       u8         reserved_0[0x40];
+
+       u8         cmdq_phy_addr_63_32[0x20];
+
+       u8         cmdq_phy_addr_31_12[0x14];
+       u8         reserved_1[0x2];
+       u8         nic_interface[0x2];
+       u8         log_cmdq_size[0x4];
+       u8         log_cmdq_stride[0x4];
+
+       u8         command_doorbell_vector[0x20];
+
+       u8         reserved_2[0xf00];
+
+       u8         initializing[0x1];
+       u8         reserved_3[0x4];
+       u8         nic_interface_supported[0x3];
+       u8         reserved_4[0x18];
+
+       struct mlx5_ifc_health_buffer_bits health_buffer;
+
+       u8         no_dram_nic_offset[0x20];
+
+       u8         reserved_5[0x6e40];
+
+       u8         reserved_6[0x1f];
+       u8         clear_int[0x1];
+
+       u8         health_syndrome[0x8];
+       u8         health_counter[0x18];
+
+       u8         reserved_7[0x17fc0];
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+       struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+       struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+       struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+       struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+       struct mlx5_ifc_pamp_reg_bits pamp_reg;
+       struct mlx5_ifc_paos_reg_bits paos_reg;
+       struct mlx5_ifc_pcap_reg_bits pcap_reg;
+       struct mlx5_ifc_peir_reg_bits peir_reg;
+       struct mlx5_ifc_pelc_reg_bits pelc_reg;
+       struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+       struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+       struct mlx5_ifc_pifr_reg_bits pifr_reg;
+       struct mlx5_ifc_pipg_reg_bits pipg_reg;
+       struct mlx5_ifc_plbf_reg_bits plbf_reg;
+       struct mlx5_ifc_plib_reg_bits plib_reg;
+       struct mlx5_ifc_plpc_reg_bits plpc_reg;
+       struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+       struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+       struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+       struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+       struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+       struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+       struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+       struct mlx5_ifc_ppad_reg_bits ppad_reg;
+       struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+       struct mlx5_ifc_pplm_reg_bits pplm_reg;
+       struct mlx5_ifc_pplr_reg_bits pplr_reg;
+       struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+       struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+       struct mlx5_ifc_pspa_reg_bits pspa_reg;
+       struct mlx5_ifc_ptas_reg_bits ptas_reg;
+       struct mlx5_ifc_ptys_reg_bits ptys_reg;
+       struct mlx5_ifc_pude_reg_bits pude_reg;
+       struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+       struct mlx5_ifc_slrg_reg_bits slrg_reg;
+       struct mlx5_ifc_sltp_reg_bits sltp_reg;
+       u8         reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+       struct mlx5_ifc_health_buffer_bits health_buffer;
+       u8         reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+       struct mlx5_ifc_initial_seg_bits initial_seg;
+       u8         reserved_0[0x20060];
 };
 
 #endif /* MLX5_IFC_H */
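A note on the mlx5_ifc layouts above: each "u8 name[0xN]" declares a field that is N bits wide, not N bytes, and the driver never dereferences these structs directly. Access goes through the MLX5_SET()/MLX5_GET() helpers from include/linux/mlx5/device.h, which derive bit offsets and widths from these declarations. A minimal sketch, assuming the usual access-register command flow (the command execution itself is elided):

    u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
    u32 out[MLX5_ST_SZ_DW(paos_reg)] = {0};
    u8 oper;

    MLX5_SET(paos_reg, in, local_port, 1);
    /* ... issue the PAOS access-register command, filling "out" ... */
    oper = MLX5_GET(paos_reg, out, oper_status);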
index 310b5f7fd6ae52101665c9f3dd6e042b6a4ceb9e..f079fb1a31f7f7953f0bb28f6f3a145279598dc7 100644 (file)
@@ -134,13 +134,21 @@ enum {
 
 enum {
        MLX5_WQE_CTRL_CQ_UPDATE         = 2 << 2,
+       MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
        MLX5_WQE_CTRL_SOLICITED         = 1 << 1,
 };
 
 enum {
+       MLX5_SEND_WQE_DS        = 16,
        MLX5_SEND_WQE_BB        = 64,
 };
 
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+       MLX5_SEND_WQE_MAX_WQEBBS        = 16,
+};
+
 enum {
        MLX5_WQE_FMR_PERM_LOCAL_READ    = 1 << 27,
        MLX5_WQE_FMR_PERM_LOCAL_WRITE   = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
 
+enum {
+       MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
+       MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
+       MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
+       MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+       u8              rsvd0[4];
+       u8              cs_flags;
+       u8              rsvd1;
+       __be16          mss;
+       __be32          rsvd2;
+       __be16          inline_hdr_sz;
+       u8              inline_hdr_start[2];
+};
+
 struct mlx5_wqe_xrc_seg {
        __be32                  xrc_srqn;
        u8                      rsvd[12];
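The new WQE constants encode simple sizing arithmetic: a send WQE basic block (BB) is 64 bytes and a data segment (DS) is 16 bytes, so MLX5_SEND_WQEBB_NUM_DS is 64 / 16 = 4, and with MLX5_SEND_WQE_MAX_WQEBBS = 16 a single WQE is capped at 16 * 64 = 1024 bytes. A minimal sketch of how the new Ethernet segment's cs_flags would be used; sketch_request_csum_offload() is a made-up name, not driver code:

    static void sketch_request_csum_offload(struct mlx5_wqe_eth_seg *eseg)
    {
            /* ask hardware to fill in the IP and TCP/UDP checksums */
            eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
    }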
index 7f484a239f53bd496548ff87290821259e47914d..c735f5c91eead34520726a503538ec48046dde86 100644 (file)
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
        int phy_addr;
        int interface;
        struct stmmac_mdio_bus_data *mdio_bus_data;
+       struct device_node *phy_node;
        struct stmmac_dma_cfg *dma_cfg;
        int clk_csr;
        int has_gmac;
index 6ea16c84293b0cdcb981df77481a4fa5e509fdde..290a9a69af0788794619b0ededc4a6ccfbab5e07 100644 (file)
@@ -44,6 +44,8 @@ struct cfg802154_ops {
        int     (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
        int     (*set_cca_mode)(struct wpan_phy *wpan_phy,
                                const struct wpan_phy_cca *cca);
+       int     (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+       int     (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
        int     (*set_pan_id)(struct wpan_phy *wpan_phy,
                              struct wpan_dev *wpan_dev, __le16 pan_id);
        int     (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -61,14 +63,66 @@ struct cfg802154_ops {
                                struct wpan_dev *wpan_dev, bool mode);
 };
 
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+       switch (st) {
+       case NL802154_SUPPORTED_BOOL_TRUE:
+               return b;
+       case NL802154_SUPPORTED_BOOL_FALSE:
+               return !b;
+       case NL802154_SUPPORTED_BOOL_BOTH:
+               return true;
+       default:
+               WARN_ON(1);
+       }
+
+       return false;
+}
+
+struct wpan_phy_supported {
+       u32 channels[IEEE802154_MAX_PAGE + 1],
+           cca_modes, cca_opts, iftypes;
+       enum nl802154_supported_bool_states lbt;
+       u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+          min_csma_backoffs, max_csma_backoffs;
+       s8 min_frame_retries, max_frame_retries;
+       size_t tx_powers_size, cca_ed_levels_size;
+       const s32 *tx_powers, *cca_ed_levels;
+};
+
 struct wpan_phy_cca {
        enum nl802154_cca_modes mode;
        enum nl802154_cca_opts opt;
 };
 
-struct wpan_phy {
-       struct mutex pib_lock;
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+       if (a->mode != b->mode)
+               return false;
+
+       if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+               return a->opt == b->opt;
 
+       return true;
+}
+
+/**
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support
+ *     transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed
+ *     level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode
+ *     setting.
+ */
+enum wpan_phy_flags {
+       WPAN_PHY_FLAG_TXPOWER           = BIT(1),
+       WPAN_PHY_FLAG_CCA_ED_LEVEL      = BIT(2),
+       WPAN_PHY_FLAG_CCA_MODE          = BIT(3),
+};
+
+struct wpan_phy {
        /* If multiple wpan_phys are registered and you're handed e.g.
         * a regular netdev with assigned ieee802154_ptr, you won't
         * know whether it points to a wpan_phy your driver has registered
@@ -77,6 +131,8 @@ struct wpan_phy {
         */
        const void *privid;
 
+       u32 flags;
+
        /*
         * This is a PIB according to 802.15.4-2011.
         * We do not provide timing-related variables, as they
@@ -84,12 +140,14 @@ struct wpan_phy {
         */
        u8 current_channel;
        u8 current_page;
-       u32 channels_supported[IEEE802154_MAX_PAGE + 1];
-       s8 transmit_power;
+       struct wpan_phy_supported supported;
+       /* current transmit_power in mBm */
+       s32 transmit_power;
        struct wpan_phy_cca cca;
 
        __le64 perm_extended_addr;
 
+       /* current cca ed threshold in mBm */
        s32 cca_ed_level;
 
        /* PHY depended MAC PIB values */
@@ -121,9 +179,9 @@ struct wpan_dev {
        __le64 extended_addr;
 
        /* MAC BSN field */
-       u8 bsn;
+       atomic_t bsn;
        /* MAC DSN field */
-       u8 dsn;
+       atomic_t dsn;
 
        u8 min_be;
        u8 max_be;
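With struct wpan_phy_supported a driver now advertises its hardware limits once at registration, and cfg802154 validates userspace requests against them; the tri-state lbt member is checked through wpan_phy_supported_bool(). A minimal sketch with made-up hardware limits (the numbers are illustrative, not taken from any real transceiver):

    static void sketch_fill_caps(struct wpan_phy *phy)
    {
            phy->supported.channels[0] = 0x07fff800; /* page 0, channels 11-26 */
            phy->supported.min_minbe = 0;
            phy->supported.max_minbe = 8;
            phy->supported.min_maxbe = 3;
            phy->supported.max_maxbe = 8;
            phy->supported.min_csma_backoffs = 0;
            phy->supported.max_csma_backoffs = 5;
            phy->supported.min_frame_retries = -1;
            phy->supported.max_frame_retries = 7;
            phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
            phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE) |
                                     BIT(NL802154_IFTYPE_MONITOR);
    }

With lbt left at NL802154_SUPPORTED_BOOL_FALSE, wpan_phy_supported_bool(true, phy->supported.lbt) returns false, so a request to enable LBT is rejected.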
index 94a297052442600acff26e86404fec2afbb27d5a..0a87975128ec4d180e1184e138ef601c533b704f 100644 (file)
@@ -422,16 +422,6 @@ struct ieee802154_mlme_ops {
                               struct ieee802154_mac_params *params);
 
        struct ieee802154_llsec_ops *llsec;
-
-       /* The fields below are required. */
-
-       /*
-        * FIXME: these should become the part of PIB/MIB interface.
-        * However we still don't have IB interface of any kind
-        */
-       __le16 (*get_pan_id)(const struct net_device *dev);
-       __le16 (*get_short_addr)(const struct net_device *dev);
-       u8 (*get_dsn)(const struct net_device *dev);
 };
 
 static inline struct ieee802154_mlme_ops *
@@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev)
        return dev->ml_priv;
 }
 
-static inline struct ieee802154_reduced_mlme_ops *
-ieee802154_reduced_mlme_ops(const struct net_device *dev)
-{
-       return dev->ml_priv;
-}
-
 #endif
index 7df28a4c23f98793626371d1e2334ad91f7ebf87..9605c7f7453fafd76806c4718ab4827e9fb9881a 100644 (file)
@@ -89,41 +89,26 @@ struct ieee802154_hw {
 #define IEEE802154_HW_TX_OMIT_CKSUM    0x00000001
 /* Indicates that receiver will autorespond with ACK frames. */
 #define IEEE802154_HW_AACK             0x00000002
-/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER          0x00000004
 /* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT              0x00000008
-/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE         0x00000010
-/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL     0x00000020
+#define IEEE802154_HW_LBT              0x00000004
 /* Indicates that transceiver will support csma (max_be, min_be, csma retries)
  * settings. */
-#define IEEE802154_HW_CSMA_PARAMS      0x00000040
+#define IEEE802154_HW_CSMA_PARAMS      0x00000008
 /* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES    0x00000080
+#define IEEE802154_HW_FRAME_RETRIES    0x00000010
 /* Indicates that transceiver will support hardware address filter setting. */
-#define IEEE802154_HW_AFILT            0x00000100
+#define IEEE802154_HW_AFILT            0x00000020
 /* Indicates that transceiver will support promiscuous mode setting. */
-#define IEEE802154_HW_PROMISCUOUS      0x00000200
+#define IEEE802154_HW_PROMISCUOUS      0x00000040
 /* Indicates that receiver omits FCS. */
-#define IEEE802154_HW_RX_OMIT_CKSUM    0x00000400
+#define IEEE802154_HW_RX_OMIT_CKSUM    0x00000080
 /* Indicates that receiver will not filter frames with bad checksum. */
-#define IEEE802154_HW_RX_DROP_BAD_CKSUM        0x00000800
+#define IEEE802154_HW_RX_DROP_BAD_CKSUM        0x00000100
 
 /* Indicates that receiver omits FCS and the transmitter will add FCS on its own. */
 #define IEEE802154_HW_OMIT_CKSUM       (IEEE802154_HW_TX_OMIT_CKSUM | \
                                         IEEE802154_HW_RX_OMIT_CKSUM)
 
-/* This groups the most common CSMA support fields into one. */
-#define IEEE802154_HW_CSMA             (IEEE802154_HW_CCA_MODE | \
-                                        IEEE802154_HW_CCA_ED_LEVEL | \
-                                        IEEE802154_HW_CSMA_PARAMS)
-
-/* This groups the most common ARET support fields into one. */
-#define IEEE802154_HW_ARET             (IEEE802154_HW_CSMA | \
-                                        IEEE802154_HW_FRAME_RETRIES)
-
 /* struct ieee802154_ops - callbacks from mac802154 to the driver
  *
  * This structure contains various callbacks that the driver may
@@ -171,7 +156,7 @@ struct ieee802154_hw {
  *       Returns either zero, or negative errno.
  *
  * set_txpower:
- *       Set radio transmit power in dB. Called with pib_lock held.
+ *       Set radio transmit power in mBm. Called with pib_lock held.
  *       Returns either zero, or negative errno.
  *
  * set_lbt
@@ -184,7 +169,7 @@ struct ieee802154_hw {
  *       Returns either zero, or negative errno.
  *
  * set_cca_ed_level
- *       Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ *       Sets the CCA energy detection threshold in mBm. Called with pib_lock
  *       held.
  *       Returns either zero, or negative errno.
  *
@@ -213,12 +198,11 @@ struct ieee802154_ops {
        int             (*set_hw_addr_filt)(struct ieee802154_hw *hw,
                                            struct ieee802154_hw_addr_filt *filt,
                                            unsigned long changed);
-       int             (*set_txpower)(struct ieee802154_hw *hw, s8 dbm);
+       int             (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
        int             (*set_lbt)(struct ieee802154_hw *hw, bool on);
        int             (*set_cca_mode)(struct ieee802154_hw *hw,
                                        const struct wpan_phy_cca *cca);
-       int             (*set_cca_ed_level)(struct ieee802154_hw *hw,
-                                           s32 level);
+       int             (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
        int             (*set_csma_params)(struct ieee802154_hw *hw,
                                           u8 min_be, u8 max_be, u8 retries);
        int             (*set_frame_retries)(struct ieee802154_hw *hw,
index e6bcf55dcf2008b83a8a40896fcce2e011653e1b..3d6f48ca40a7493a4becce92c5d4abb1636adcc0 100644 (file)
@@ -819,6 +819,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt,
  *     @use: number of chain references to this table
  *     @flags: table flag (see enum nft_table_flags)
  *     @name: name of the table
+ *     @dev: this table is bound to this device (if any)
  */
 struct nft_table {
        struct list_head                list;
@@ -828,6 +829,11 @@ struct nft_table {
        u32                             use;
        u16                             flags;
        char                            name[NFT_TABLE_MAXNAMELEN];
+       struct net_device               *dev;
+};
+
+enum nft_af_flags {
+       NFT_AF_NEEDS_DEV        = (1 << 0),
 };
 
 /**
@@ -838,6 +844,7 @@ struct nft_table {
  *     @nhooks: number of hooks in this family
  *     @owner: module owner
  *     @tables: used internally
+ *     @flags: family flags
  *     @nops: number of hook ops in this family
  *     @hook_ops_init: initialization function for chain hook ops
  *     @hooks: hookfn overrides for packet validation
@@ -848,6 +855,7 @@ struct nft_af_info {
        unsigned int                    nhooks;
        struct module                   *owner;
        struct list_head                tables;
+       u32                             flags;
        unsigned int                    nops;
        void                            (*hook_ops_init)(struct nf_hook_ops *,
                                                         unsigned int);
index eee608b12cc95f3267a00eed85467f94f761d298..c807811460191dd20c1ba13eb5be8e6555c97167 100644 (file)
@@ -13,6 +13,7 @@ struct netns_nftables {
        struct nft_af_info      *inet;
        struct nft_af_info      *arp;
        struct nft_af_info      *bridge;
+       struct nft_af_info      *netdev;
        unsigned int            base_seq;
        u8                      gencursor;
 };
index f8b5bc997959f7ba171258bd31852c696873bc21..0badebd1de7fbe179542a440f31e9042821a2e3d 100644 (file)
@@ -100,6 +100,8 @@ enum nl802154_attrs {
 
        NL802154_ATTR_EXTENDED_ADDR,
 
+       NL802154_ATTR_WPAN_PHY_CAPS,
+
        /* add attributes here, update the policy in nl802154.c */
 
        __NL802154_ATTR_AFTER_LAST,
@@ -119,6 +121,61 @@ enum nl802154_iftype {
        NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
 };
 
+/**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ *     nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute for the supported
+ *     cca_ed_level values
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+       __NL802154_CAP_ATTR_INVALID,
+
+       NL802154_CAP_ATTR_IFTYPES,
+
+       NL802154_CAP_ATTR_CHANNELS,
+       NL802154_CAP_ATTR_TX_POWERS,
+
+       NL802154_CAP_ATTR_CCA_ED_LEVELS,
+       NL802154_CAP_ATTR_CCA_MODES,
+       NL802154_CAP_ATTR_CCA_OPTS,
+
+       NL802154_CAP_ATTR_MIN_MINBE,
+       NL802154_CAP_ATTR_MAX_MINBE,
+
+       NL802154_CAP_ATTR_MIN_MAXBE,
+       NL802154_CAP_ATTR_MAX_MAXBE,
+
+       NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+       NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+       NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+       NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+       NL802154_CAP_ATTR_LBT,
+
+       /* keep last */
+       __NL802154_CAP_ATTR_AFTER_LAST,
+       NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
 /**
  * enum nl802154_cca_modes - cca modes
  *
@@ -162,4 +219,26 @@ enum nl802154_cca_opts {
        NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
 };
 
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true
+ * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+       NL802154_SUPPORTED_BOOL_FALSE,
+       NL802154_SUPPORTED_BOOL_TRUE,
+       /* to handle them in a mask */
+       __NL802154_SUPPORTED_BOOL_INVALD,
+       NL802154_SUPPORTED_BOOL_BOTH,
+
+       /* keep last */
+       __NL802154_SUPPORTED_BOOL_AFTER_LAST,
+       NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
 #endif /* __NL802154_H */
index f0a9af8b4dae1149021a0e3f2494012050ede9b4..72f3080afa1e73ee3403144332d82674a1a88dc1 100644 (file)
@@ -236,6 +236,8 @@ struct __sk_buff {
        __u32 vlan_tci;
        __u32 vlan_proto;
        __u32 priority;
+       __u32 ingress_ifindex;
+       __u32 ifindex;
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
index ae832b45b44c01fe149547eb07734865dabcd478..0594933cdf55c715965db2ff46c88e272ca8114f 100644 (file)
@@ -796,6 +796,31 @@ struct ethtool_rx_flow_spec {
        __u32           location;
 };
 
+/* How rings are laid out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues, the ring cookie is partitioned
+ * into a 32 bit queue index with an 8 bit virtual function id.
+ * This also leaves 3 bytes for further specifiers. It is possible
+ * future devices may support more than 256 virtual functions if
+ * devices start supporting PCIe w/ARI. However at the moment I
+ * do not know of any devices that support this so I do not reserve
+ * space for this at this time. If a future patch consumes the next
+ * byte it should be aware of this possibility.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING      0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF   0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+       return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+};
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+       return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+                               ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+};
+
 /**
  * struct ethtool_rxnfc - command to get or set RX flow classification rules
  * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
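Worked example for the ring cookie helpers above: bits 31:0 carry the queue index and bits 39:32 the VF id, so a cookie steering a flow to queue 5 on VF 2 decomposes as follows:

    __u64 ring_cookie = (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5;

    ethtool_get_flow_spec_ring(ring_cookie);    /* == 5, queue index */
    ethtool_get_flow_spec_ring_vf(ring_cookie); /* == 2, VF id */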
index 5fa1cd04762e47ac1a1a60d95ed889c6b8afe2a8..89a671e0f5e7813a47939ec90c6364f76df7ed51 100644 (file)
@@ -146,12 +146,14 @@ enum nft_table_flags {
  * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
  * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
  * @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
+ * @NFTA_TABLE_DEV: net device name (NLA_STRING)
  */
 enum nft_table_attributes {
        NFTA_TABLE_UNSPEC,
        NFTA_TABLE_NAME,
        NFTA_TABLE_FLAGS,
        NFTA_TABLE_USE,
+       NFTA_TABLE_DEV,
        __NFTA_TABLE_MAX
 };
 #define NFTA_TABLE_MAX         (__NFTA_TABLE_MAX - 1)
index 614bcd4c1d74fbed64d1780a34abf41fe2a76d18..cb31229a6fa4ddd39c1d69038b998bc0c0dee4db 100644 (file)
@@ -202,7 +202,7 @@ static int prog_array_map_update_elem(struct bpf_map *map, void *key,
 
        old_prog = xchg(array->prog + index, prog);
        if (old_prog)
-               bpf_prog_put(old_prog);
+               bpf_prog_put_rcu(old_prog);
 
        return 0;
 }
@@ -218,7 +218,7 @@ static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
 
        old_prog = xchg(array->prog + index, NULL);
        if (old_prog) {
-               bpf_prog_put(old_prog);
+               bpf_prog_put_rcu(old_prog);
                return 0;
        } else {
                return -ENOENT;
index 98a69bd83069598487236d7c6cce9e6895f267cf..a1b14d197a4fc2da6acf2d812372284e78bc5af8 100644 (file)
@@ -432,6 +432,23 @@ static void free_used_maps(struct bpf_prog_aux *aux)
        kfree(aux->used_maps);
 }
 
+static void __prog_put_rcu(struct rcu_head *rcu)
+{
+       struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
+
+       free_used_maps(aux);
+       bpf_prog_free(aux->prog);
+}
+
+/* version of bpf_prog_put() that is called after a grace period */
+void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+       if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               prog->aux->prog = prog;
+               call_rcu(&prog->aux->rcu, __prog_put_rcu);
+       }
+}
+
 void bpf_prog_put(struct bpf_prog *prog)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
@@ -445,7 +462,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 {
        struct bpf_prog *prog = filp->private_data;
 
-       bpf_prog_put(prog);
+       bpf_prog_put_rcu(prog);
        return 0;
 }
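bpf_prog_put_rcu() is needed because prog array readers pick a program up under rcu_read_lock() without taking a reference; xchg() unpublishes the pointer, but a reader that fetched it just beforehand may still be executing the program, so the actual free must be deferred by one RCU grace period. A simplified fragment of the reader side this protects (run_prog() is a hypothetical placeholder for executing the program):

    rcu_read_lock();
    prog = READ_ONCE(array->prog[index]);
    if (prog)
            ret = run_prog(prog); /* must complete before rcu_read_unlock() */
    rcu_read_unlock();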
 
index c4802f3bd4c51086de62c048858fb7f6057f3bbb..f6c99098959f6b4c29a9f0f6f54254f6a5aafa4e 100644 (file)
@@ -94,7 +94,6 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
-       int err;
 
        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;
@@ -121,12 +120,8 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
-       err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);
 
-       if (err < 0)
-               return err;
-
        hci_dev_change_flag(hdev, HCI_DUT_MODE);
 
        return count;
index 7fd87e7135b52753c0bcefd58cb4a290c57c77ba..a6f21f8c2f984b42607c5f13de5c2eeed245c0ea 100644 (file)
@@ -7577,7 +7577,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        memset(&ev, 0, sizeof(ev));
 
        /* Devices using resolvable or non-resolvable random addresses
-        * without providing an indentity resolving key don't require
+        * without providing an identity resolving key don't require
         * to store long term keys. Their addresses will change the
         * next time around.
         *
@@ -7617,7 +7617,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
        /* For identity resolving keys from devices that are already
         * using a public address or static random address, do not
         * ask for storing this key. The identity resolving key really
-        * is only mandatory for devices using resovlable random
+        * is only mandatory for devices using resolvable random
         * addresses.
         *
         * Storing all identity resolving keys has the downside that
@@ -7646,7 +7646,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
        memset(&ev, 0, sizeof(ev));
 
        /* Devices using resolvable or non-resolvable random addresses
-        * without providing an indentity resolving key don't require
+        * without providing an identity resolving key don't require
         * to store signature resolving keys. Their addresses will change
         * the next time around.
         *
index 1ab3dc9c8f99bf425a2a24403cfe6e54ddbbd550..659371af39e44e0346d021ffba1fc6448830715a 100644 (file)
@@ -371,6 +371,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        uint8_t tmp[16], data[16];
        int err;
 
+       SMP_DBG("k %16phN r %16phN", k, r);
+
        if (!tfm) {
                BT_ERR("tfm %p", tfm);
                return -EINVAL;
@@ -400,6 +402,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        /* Most significant octet of encryptedData corresponds to data[0] */
        swap_buf(data, r, 16);
 
+       SMP_DBG("r %16phN", r);
+
        return err;
 }
 
@@ -410,6 +414,10 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        u8 p1[16], p2[16];
        int err;
 
+       SMP_DBG("k %16phN r %16phN", k, r);
+       SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra);
+       SMP_DBG("preq %7phN pres %7phN", preq, pres);
+
        memset(p1, 0, 16);
 
        /* p1 = pres || preq || _rat || _iat */
@@ -418,10 +426,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        memcpy(p1 + 2, preq, 7);
        memcpy(p1 + 9, pres, 7);
 
-       /* p2 = padding || ia || ra */
-       memcpy(p2, ra, 6);
-       memcpy(p2 + 6, ia, 6);
-       memset(p2 + 12, 0, 4);
+       SMP_DBG("p1 %16phN", p1);
 
        /* res = r XOR p1 */
        u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
@@ -433,6 +438,13 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
                return err;
        }
 
+       /* p2 = padding || ia || ra */
+       memcpy(p2, ra, 6);
+       memcpy(p2 + 6, ia, 6);
+       memset(p2 + 12, 0, 4);
+
+       SMP_DBG("p2 %16phN", p2);
+
        /* res = res XOR p2 */
        u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
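For reference, the function these debug hunks instrument computes the SMP confirm value; moving the p2 computation below the first encryption does not change the result. Sketch of the definition (Bluetooth Core Spec Vol 3, Part H, Section 2.2.3):

    /*
     * p1 = pres || preq || _rat || _iat
     * p2 = padding || ia || ra
     * c1 = e(k, e(k, r XOR p1) XOR p2)
     */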
 
index 3adcca6f17a42c538c4a5edfa256ec4a1c162593..2c30d6632d66234fd842e33e7866f4e36dbd7ef3 100644 (file)
@@ -1499,6 +1499,24 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
                                      offsetof(struct sk_buff, priority));
                break;
 
+       case offsetof(struct __sk_buff, ingress_ifindex):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, skb_iif));
+               break;
+
+       case offsetof(struct __sk_buff, ifindex):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+                                     dst_reg, src_reg,
+                                     offsetof(struct sk_buff, dev));
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+                                     offsetof(struct net_device, ifindex));
+               break;
+
        case offsetof(struct __sk_buff, mark):
                return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
 
index f17ccd291d3902a94ebd31653959851203402468..8b3bc4fac6136638de97124443fb6452b1d33fc2 100644 (file)
@@ -31,10 +31,7 @@ static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
  */
 int register_netevent_notifier(struct notifier_block *nb)
 {
-       int err;
-
-       err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
-       return err;
+       return atomic_notifier_chain_register(&netevent_notif_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_netevent_notifier);
 
index 0ae5822ef944fb0e5c74b22de0fe8d426032135c..f20a387a1011021347af182060f3b8f4ceda7183 100644 (file)
 LIST_HEAD(lowpan_devices);
 static int lowpan_open_count;
 
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
 static struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
 };
@@ -103,12 +82,6 @@ static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_start_xmit         = lowpan_xmit,
 };
 
-static struct ieee802154_mlme_ops lowpan_mlme = {
-       .get_pan_id = lowpan_get_pan_id,
-       .get_short_addr = lowpan_get_short_addr,
-       .get_dsn = lowpan_get_dsn,
-};
-
 static void lowpan_setup(struct net_device *dev)
 {
        dev->addr_len           = IEEE802154_ADDR_LEN;
@@ -124,7 +97,6 @@ static void lowpan_setup(struct net_device *dev)
 
        dev->netdev_ops         = &lowpan_netdev_ops;
        dev->header_ops         = &lowpan_header_ops;
-       dev->ml_priv            = &lowpan_mlme;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
 }
index 2349070bd534be9f12a4d8ea342be79fe89a8bcd..98acf7319754970f109a1c6f27d8ff9da161dac2 100644 (file)
@@ -207,7 +207,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
 
        /* prepare wpan address data */
        sa.mode = IEEE802154_ADDR_LONG;
-       sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       sa.pan_id = lowpan_dev_info(dev)->real_dev->ieee802154_ptr->pan_id;
        sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
 
        /* intra-PAN communications */
index 2ee00e8a03082aaebce7f017a5ca1e7219c6aade..b0248e934230d166c4e61a66a0ad18c268fa0a71 100644 (file)
@@ -121,8 +121,6 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
        /* atomic_inc_return makes it start at 1, make it start at 0 */
        rdev->wpan_phy_idx--;
 
-       mutex_init(&rdev->wpan_phy.pib_lock);
-
        INIT_LIST_HEAD(&rdev->wpan_dev_list);
        device_initialize(&rdev->wpan_phy.dev);
        dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
index 2b4955d7aae54b9b4e647e6b7de9b8fa7ef68c42..3503c38954f9f9e09e88706357c838a37b46ee17 100644 (file)
@@ -97,8 +97,10 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        BUG_ON(!phy);
        get_device(&phy->dev);
 
-       short_addr = ops->get_short_addr(dev);
-       pan_id = ops->get_pan_id(dev);
+       rtnl_lock();
+       short_addr = dev->ieee802154_ptr->short_addr;
+       pan_id = dev->ieee802154_ptr->pan_id;
+       rtnl_unlock();
 
        if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
            nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
@@ -117,12 +119,12 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
                rtnl_unlock();
 
                if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
-                              params.transmit_power) ||
+                              params.transmit_power / 100) ||
                    nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
                               params.cca.mode) ||
                    nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
-                               params.cca_ed_level) ||
+                               params.cca_ed_level / 100) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
                               params.csma_retries) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
@@ -166,10 +168,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
        if (!dev)
                return NULL;
 
-       /* Check on mtu is currently a hacked solution because lowpan
-        * and wpan have the same ARPHRD type.
-        */
-       if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) {
+       if (dev->type != ARPHRD_IEEE802154) {
                dev_put(dev);
                return NULL;
        }
@@ -244,7 +243,9 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
        addr.mode = IEEE802154_ADDR_LONG;
        addr.extended_addr = nla_get_hwaddr(
                        info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
-       addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       rtnl_lock();
+       addr.pan_id = dev->ieee802154_ptr->pan_id;
+       rtnl_unlock();
 
        ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
                nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
@@ -281,7 +282,9 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
                addr.short_addr = nla_get_shortaddr(
                                info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
        }
-       addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       rtnl_lock();
+       addr.pan_id = dev->ieee802154_ptr->pan_id;
+       rtnl_unlock();
 
        ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
                        nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
@@ -449,11 +452,7 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
 
        idx = 0;
        for_each_netdev(net, dev) {
-               /* Check on mtu is currently a hacked solution because lowpan
-                * and wpan have the same ARPHRD type.
-                */
-               if (idx < s_idx || dev->type != ARPHRD_IEEE802154 ||
-                   dev->mtu != IEEE802154_MTU)
+               if (idx < s_idx || dev->type != ARPHRD_IEEE802154)
                        goto cont;
 
                if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
@@ -510,7 +509,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
        ops->get_mac_params(dev, &params);
 
        if (info->attrs[IEEE802154_ATTR_TXPOWER])
-               params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+               params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100;
 
        if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
                params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
@@ -519,7 +518,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
                params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
 
        if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
-               params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+               params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100;
 
        if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
                params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
@@ -783,11 +782,7 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
        int rc;
 
        for_each_netdev(net, dev) {
-               /* Check on mtu is currently a hacked solution because lowpan
-                * and wpan have the same ARPHRD type.
-                */
-               if (idx < first_dev || dev->type != ARPHRD_IEEE802154 ||
-                   dev->mtu != IEEE802154_MTU)
+               if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
                        goto skip;
 
                data.ops = ieee802154_mlme_ops(dev);
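The *100 and /100 conversions above keep the legacy netlink interface in whole dBm while the PHY state is now held in mBm (1 dBm = 100 mBm). Worked example:

    /* userspace writes -30 (dBm) -> stored as  -30 * 100 = -3000 mBm */
    /* stored -7700 (mBm)         -> reported as -7700 / 100 = -77 dBm */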
index 346c6665d25e59bf372bacedc5a2ae6df30d227c..77d73014bde31ed285f3a11c40dd3ebaf82a8f2f 100644 (file)
@@ -50,26 +50,26 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
        if (!hdr)
                goto out;
 
-       mutex_lock(&phy->pib_lock);
+       rtnl_lock();
        if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
            nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
            nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
                goto nla_put_failure;
        for (i = 0; i < 32; i++) {
-               if (phy->channels_supported[i])
-                       buf[pages++] = phy->channels_supported[i] | (i << 27);
+               if (phy->supported.channels[i])
+                       buf[pages++] = phy->supported.channels[i] | (i << 27);
        }
        if (pages &&
            nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
                    pages * sizeof(uint32_t), buf))
                goto nla_put_failure;
-       mutex_unlock(&phy->pib_lock);
+       rtnl_unlock();
        kfree(buf);
        genlmsg_end(msg, hdr);
        return 0;
 
 nla_put_failure:
-       mutex_unlock(&phy->pib_lock);
+       rtnl_unlock();
        genlmsg_cancel(msg, hdr);
 out:
        kfree(buf);
index f3c12f6a4a392ad301e5a79fcabb2bb9f8521431..7dbb1f4ce7df6bebb28d202f8907e00f71bd497c 100644 (file)
@@ -207,10 +207,11 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
        [NL802154_ATTR_PAGE] = { .type = NLA_U8, },
        [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
 
-       [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
+       [NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
 
        [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
        [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
+       [NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
 
        [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
 
@@ -225,6 +226,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
        [NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
 
        [NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
+
+       [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
 };
 
 /* message building helper */
@@ -235,6 +238,28 @@ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
        return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd);
 }
 
+static int
+nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask)
+{
+       struct nlattr *nl_flags = nla_nest_start(msg, attr);
+       int i;
+
+       if (!nl_flags)
+               return -ENOBUFS;
+
+       i = 0;
+       while (mask) {
+               if ((mask & 1) && nla_put_flag(msg, i))
+                       return -ENOBUFS;
+
+               mask >>= 1;
+               i++;
+       }
+
+       nla_nest_end(msg, nl_flags);
+       return 0;
+}
+
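nl802154_put_flags() turns a capability bitmask into one zero-length flag attribute per set bit, using the bit position as the attribute type. Worked example:

    /* mask 0x6 has bits 1 and 2 set, so this nests exactly two flag
     * attributes, with nla_type 1 and nla_type 2:
     */
    nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES, 0x6);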
 static int
 nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
                                struct sk_buff *msg)
@@ -248,7 +273,7 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
 
        for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
                if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
-                               rdev->wpan_phy.channels_supported[page]))
+                               rdev->wpan_phy.supported.channels[page]))
                        return -ENOBUFS;
        }
        nla_nest_end(msg, nl_page);
@@ -256,6 +281,92 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
        return 0;
 }
 
+static int
+nl802154_put_capabilities(struct sk_buff *msg,
+                         struct cfg802154_registered_device *rdev)
+{
+       const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported;
+       struct nlattr *nl_caps, *nl_channels;
+       int i;
+
+       nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS);
+       if (!nl_caps)
+               return -ENOBUFS;
+
+       nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS);
+       if (!nl_channels)
+               return -ENOBUFS;
+
+       for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
+               if (caps->channels[i]) {
+                       if (nl802154_put_flags(msg, i, caps->channels[i]))
+                               return -ENOBUFS;
+               }
+       }
+
+       nla_nest_end(msg, nl_channels);
+
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+               struct nlattr *nl_ed_lvls;
+
+               nl_ed_lvls = nla_nest_start(msg,
+                                           NL802154_CAP_ATTR_CCA_ED_LEVELS);
+               if (!nl_ed_lvls)
+                       return -ENOBUFS;
+
+               for (i = 0; i < caps->cca_ed_levels_size; i++) {
+                       if (nla_put_s32(msg, i, caps->cca_ed_levels[i]))
+                               return -ENOBUFS;
+               }
+
+               nla_nest_end(msg, nl_ed_lvls);
+       }
+
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+               struct nlattr *nl_tx_pwrs;
+
+               nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS);
+               if (!nl_tx_pwrs)
+                       return -ENOBUFS;
+
+               for (i = 0; i < caps->tx_powers_size; i++) {
+                       if (nla_put_s32(msg, i, caps->tx_powers[i]))
+                               return -ENOBUFS;
+               }
+
+               nla_nest_end(msg, nl_tx_pwrs);
+       }
+
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+               if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES,
+                                      caps->cca_modes) ||
+                   nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS,
+                                      caps->cca_opts))
+                       return -ENOBUFS;
+       }
+
+       if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+                      caps->min_csma_backoffs) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+                      caps->max_csma_backoffs) ||
+           nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+                      caps->min_frame_retries) ||
+           nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+                      caps->max_frame_retries) ||
+           nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES,
+                              caps->iftypes) ||
+           nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt))
+               return -ENOBUFS;
+
+       nla_nest_end(msg, nl_caps);
+
+       return 0;
+}
+
 static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
                                  enum nl802154_commands cmd,
                                  struct sk_buff *msg, u32 portid, u32 seq,
@@ -286,23 +397,38 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
                       rdev->wpan_phy.current_channel))
                goto nla_put_failure;
 
-       /* supported channels array */
+       /* TODO remove this behaviour, we still keep support it for a while
+        * so users can change the behaviour to the new one.
+        */
        if (nl802154_send_wpan_phy_channels(rdev, msg))
                goto nla_put_failure;
 
        /* cca mode */
-       if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
-                       rdev->wpan_phy.cca.mode))
-               goto nla_put_failure;
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+               if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+                               rdev->wpan_phy.cca.mode))
+                       goto nla_put_failure;
+
+               if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+                       if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+                                       rdev->wpan_phy.cca.opt))
+                               goto nla_put_failure;
+               }
+       }
 
-       if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
-               if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
-                               rdev->wpan_phy.cca.opt))
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+               if (nla_put_s32(msg, NL802154_ATTR_TX_POWER,
+                               rdev->wpan_phy.transmit_power))
                        goto nla_put_failure;
        }
 
-       if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
-                      rdev->wpan_phy.transmit_power))
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+               if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL,
+                               rdev->wpan_phy.cca_ed_level))
+                       goto nla_put_failure;
+       }
+
+       if (nl802154_put_capabilities(msg, rdev))
                goto nla_put_failure;
 
 finish:
@@ -575,7 +701,8 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
 
        if (info->attrs[NL802154_ATTR_IFTYPE]) {
                type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
-               if (type > NL802154_IFTYPE_MAX)
+               if (type > NL802154_IFTYPE_MAX ||
+                   !(rdev->wpan_phy.supported.iftypes & BIT(type)))
                        return -EINVAL;
        }
 
@@ -625,7 +752,8 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
        channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
 
        /* check 802.15.4 constraints */
-       if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL)
+       if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
+           !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
                return -EINVAL;
 
        return rdev_set_channel(rdev, page, channel);
@@ -636,12 +764,17 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
        struct cfg802154_registered_device *rdev = info->user_ptr[0];
        struct wpan_phy_cca cca;
 
+       if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE))
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_CCA_MODE])
                return -EINVAL;
 
        cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
        /* checking 802.15.4 constraints */
-       if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+       if (cca.mode < NL802154_CCA_ENERGY ||
+           cca.mode > NL802154_CCA_ATTR_MAX ||
+           !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode)))
                return -EINVAL;
 
        if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
@@ -649,13 +782,58 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
-               if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+               if (cca.opt > NL802154_CCA_OPT_ATTR_MAX ||
+                   !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt)))
                        return -EINVAL;
        }
 
        return rdev_set_cca_mode(rdev, &cca);
 }
 
+static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       s32 ed_level;
+       int i;
+
+       if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL))
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL])
+               return -EINVAL;
+
+       ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]);
+
+       for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) {
+               if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i])
+                       return rdev_set_cca_ed_level(rdev, ed_level);
+       }
+
+       return -EINVAL;
+}
+
+static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       s32 power;
+       int i;
+
+       if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER))
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_TX_POWER])
+               return -EINVAL;
+
+       power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]);
+
+       for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) {
+               if (power == rdev->wpan_phy.supported.tx_powers[i])
+                       return rdev_set_tx_power(rdev, power);
+       }
+
+       return -EINVAL;
+}
+
 static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -668,14 +846,22 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
                return -EBUSY;
 
        /* don't change address fields on monitor */
-       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
-               return -EINVAL;
-
-       if (!info->attrs[NL802154_ATTR_PAN_ID])
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+           !info->attrs[NL802154_ATTR_PAN_ID])
                return -EINVAL;
 
        pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
 
+       /* TODO
+        * It is unclear whether we should reject the broadcast pan_id
+        * here. Broadcast is a valid setting; quoting 802.15.4:
+        * "If this value is 0xffff, the device is not associated."
+        *
+        * Allowing it could be useful to simply disassociate a device.
+        */
+       if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+               return -EINVAL;
+
        return rdev_set_pan_id(rdev, wpan_dev, pan_id);
 }
 
@@ -691,14 +877,27 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
                return -EBUSY;
 
        /* don't change address fields on monitor */
-       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
-               return -EINVAL;
-
-       if (!info->attrs[NL802154_ATTR_SHORT_ADDR])
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+           !info->attrs[NL802154_ATTR_SHORT_ADDR])
                return -EINVAL;
 
        short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
 
+       /* TODO
+        * It is unclear whether we should reject the reserved short_addr
+        * values here. They are valid settings; quoting 802.15.4:
+        * A value of 0xfffe indicates that the device has
+        * associated but has not been allocated an address. A
+        * value of 0xffff indicates that the device does not
+        * have a short address.
+        *
+        * We should probably allow setting these values, but
+        * disallow socket communication while they are in use.
+        */
+       if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
+           short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
+               return -EINVAL;
+
        return rdev_set_short_addr(rdev, wpan_dev, short_addr);
 }
 
@@ -722,7 +921,11 @@ nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
        max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
 
        /* check 802.15.4 constraints */
-       if (max_be < 3 || max_be > 8 || min_be > max_be)
+       if (min_be < rdev->wpan_phy.supported.min_minbe ||
+           min_be > rdev->wpan_phy.supported.max_minbe ||
+           max_be < rdev->wpan_phy.supported.min_maxbe ||
+           max_be > rdev->wpan_phy.supported.max_maxbe ||
+           min_be > max_be)
                return -EINVAL;
 
        return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
@@ -747,7 +950,8 @@ nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
                        info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
 
        /* check 802.15.4 constraints */
-       if (max_csma_backoffs > 5)
+       if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs ||
+           max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs)
                return -EINVAL;
 
        return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
@@ -771,7 +975,8 @@ nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
                        info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
 
        /* check 802.15.4 constraints */
-       if (max_frame_retries < -1 || max_frame_retries > 7)
+       if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries ||
+           max_frame_retries > rdev->wpan_phy.supported.max_frame_retries)
                return -EINVAL;
 
        return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
@@ -791,6 +996,9 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
 
        mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+       if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
+               return -EINVAL;
+
        return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
@@ -936,6 +1144,22 @@ static const struct genl_ops nl802154_ops[] = {
                .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
                                  NL802154_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL802154_CMD_SET_CCA_ED_LEVEL,
+               .doit = nl802154_set_cca_ed_level,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL802154_CMD_SET_TX_POWER,
+               .doit = nl802154_set_tx_power,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
        {
                .cmd = NL802154_CMD_SET_PAN_ID,
                .doit = nl802154_set_pan_id,
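[Editor's note] The new setters all follow one shape: return -EOPNOTSUPP when the PHY never advertised the capability at all, then accept only values found in the driver-supplied "supported" tables. A minimal standalone sketch of that pattern; the struct and flag names here are illustrative, not kernel API:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct phy_caps {
	uint32_t flags;               /* feature bits advertised by the driver */
	const int32_t *cca_ed_levels; /* supported ED thresholds, in mbm */
	size_t num_ed_levels;
};

#define CAP_CCA_ED_LEVEL (1U << 0)

/* Mirrors the nl802154_set_cca_ed_level() flow: feature gate first,
 * then whitelist lookup, and only then would the driver op run. */
static int set_cca_ed_level(const struct phy_caps *caps, int32_t ed_level)
{
	size_t i;

	if (!(caps->flags & CAP_CCA_ED_LEVEL))
		return -EOPNOTSUPP;	/* hardware cannot do it at all */

	for (i = 0; i < caps->num_ed_levels; i++) {
		if (ed_level == caps->cca_ed_levels[i])
			return 0;	/* accepted: call the driver here */
	}

	return -EINVAL;			/* value not advertised by the PHY */
}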
index 7b5a9dd94fe5a2b55d01103aa529261141ece521..b2155a123f6c88980c180eeb7b4ffdcf68bea4fb 100644 (file)
@@ -74,6 +74,29 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
        return ret;
 }
 
+static inline int
+rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level)
+{
+       int ret;
+
+       trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level);
+       ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
+static inline int
+rdev_set_tx_power(struct cfg802154_registered_device *rdev,
+                 s32 power)
+{
+       int ret;
+
+       trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power);
+       ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
 static inline int
 rdev_set_pan_id(struct cfg802154_registered_device *rdev,
                struct wpan_dev *wpan_dev, __le16 pan_id)
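[Editor's note] Each rdev_* helper wraps the driver op in an entry/exit pair of tracepoints, so every request and its return value land in the trace buffer. A hedged userspace sketch of the same idiom, with printf standing in for the tracepoints and a hypothetical ops table:

#include <stdio.h>

struct phy_ops {
	int (*set_tx_power)(void *phy, int mbm);
};

static int rdev_set_tx_power_sketch(const struct phy_ops *ops, void *phy,
				    int mbm)
{
	int ret;

	printf("trace: rdev_set_tx_power power=%d\n", mbm);	/* entry */
	ret = ops->set_tx_power(phy, mbm);
	printf("trace: rdev_return_int ret=%d\n", ret);		/* exit */
	return ret;
}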
index 7aaaf967df58d2339a03898065094ef1a30c51d2..02abef2c162187ad12be3f4e7d61fcca803883f8 100644 (file)
@@ -64,10 +64,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
                        if (tmp->type != ARPHRD_IEEE802154)
                                continue;
 
-                       pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
-                       short_addr =
-                               ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
+                       pan_id = tmp->ieee802154_ptr->pan_id;
+                       short_addr = tmp->ieee802154_ptr->short_addr;
                        if (pan_id == addr->pan_id &&
                            short_addr == addr->short_addr) {
                                dev = tmp;
@@ -228,15 +226,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
                goto out;
        }
 
-       if (dev->type != ARPHRD_IEEE802154) {
-               err = -ENODEV;
-               goto out_put;
-       }
-
        sk->sk_bound_dev_if = dev->ifindex;
        sk_dst_reset(sk);
 
-out_put:
        dev_put(dev);
 out:
        release_sock(sk);
@@ -286,7 +278,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
        if (size > mtu) {
                pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
+               err = -EMSGSIZE;
                goto out_dev;
        }
 
@@ -797,9 +789,9 @@ static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
        /* Data frame processing */
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
-       pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-       short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
-       hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+       pan_id = dev->ieee802154_ptr->pan_id;
+       short_addr = dev->ieee802154_ptr->short_addr;
+       hw_addr = dev->ieee802154_ptr->extended_addr;
 
        read_lock(&dgram_lock);
        sk_for_each(sk, &dgram_head) {
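[Editor's note] With pan_id, short_addr and the extended address now kept in the wpan_dev itself, the AF_IEEE802154 code can drop the ieee802154_mlme_ops() getter indirection and read the fields directly. A sketch of the resulting shape, with hypothetical struct names:

struct wpan_dev_sketch {
	unsigned short pan_id;
	unsigned short short_addr;
};

struct net_device_sketch {
	struct wpan_dev_sketch *ieee802154_ptr;
};

static unsigned short get_pan_id(const struct net_device_sketch *dev)
{
	/* no ops table, no per-call locking: one pointer dereference */
	return dev->ieee802154_ptr->pan_id;
}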
index 5ac25eb6ed17869821c83d5a360fcafbf41f4a4b..73eb7605c1eb64ec6d342498ff32a0be9c195c01 100644 (file)
@@ -1,4 +1,4 @@
-/* Based on net/wireless/tracing.h */
+/* Based on net/wireless/trace.h */
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM cfg802154
@@ -93,6 +93,21 @@ TRACE_EVENT(802154_rdev_set_channel,
                  __entry->page, __entry->channel)
 );
 
+TRACE_EVENT(802154_rdev_set_tx_power,
+       TP_PROTO(struct wpan_phy *wpan_phy, s32 power),
+       TP_ARGS(wpan_phy, power),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __field(s32, power)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __entry->power = power;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", power: %d", WPAN_PHY_PR_ARG,
+                 __entry->power)
+);
+
 TRACE_EVENT(802154_rdev_set_cca_mode,
        TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
        TP_ARGS(wpan_phy, cca),
@@ -108,6 +123,21 @@ TRACE_EVENT(802154_rdev_set_cca_mode,
                  WPAN_CCA_PR_ARG)
 );
 
+TRACE_EVENT(802154_rdev_set_cca_ed_level,
+       TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level),
+       TP_ARGS(wpan_phy, ed_level),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __field(s32, ed_level)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __entry->ed_level = ed_level;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", ed_level: %d", WPAN_PHY_PR_ARG,
+                 __entry->ed_level)
+);
+
 DECLARE_EVENT_CLASS(802154_le16_template,
        TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                 __le16 le16arg),
index 2d0e265fef6e7f2c657c54d4db0fd10e010fa68b..e7abf5145edc126b60524f0cecf179f7eef75d7c 100644 (file)
@@ -1444,7 +1444,6 @@ static int
 compat_find_calc_match(struct xt_entry_match *m,
                       const char *name,
                       const struct ipt_ip *ip,
-                      unsigned int hookmask,
                       int *size)
 {
        struct xt_match *match;
@@ -1513,8 +1512,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name,
-                                            &e->ip, e->comefrom, &off);
+               ret = compat_find_calc_match(ematch, name, &e->ip, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
index 0330ab2e2b6329ced120cd9b7100a5a34f50e82b..433231ccfb17fc6d01179247d1d81226803d18df 100644 (file)
@@ -41,6 +41,8 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -528,7 +530,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_wmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_sndbuf,
        },
        {
                .procname       = "tcp_notsent_lowat",
@@ -543,7 +545,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_rmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "tcp_app_win",
@@ -756,7 +758,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_rmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "udp_wmem_min",
@@ -764,7 +766,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_wmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one
+               .extra1         = &min_sndbuf,
        },
        { }
 };
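[Editor's note] The fix above replaces the shared lower bound of 1 with SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF, so the buffer sysctls can no longer be written below what the socket layer can actually work with. A hedged sketch of how extra1 feeds proc_dointvec_minmax, assuming the usual kernel sysctl context and an invented knob name:

static int my_min = 4096;		/* lower clamp, like min_rcvbuf */
static int my_value = 16384;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_knob",
		.data		= &my_value,
		.maxlen		= sizeof(my_value),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &my_min,	/* writes below my_min fail */
	},
	{ }
};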
index 62f5b0d0bc9bfbf19940ba0c70ef9da464bf467f..cdd085f8b77011644e502c73da7b43188a267817 100644 (file)
@@ -1459,7 +1459,6 @@ static int
 compat_find_calc_match(struct xt_entry_match *m,
                       const char *name,
                       const struct ip6t_ip6 *ipv6,
-                      unsigned int hookmask,
                       int *size)
 {
        struct xt_match *match;
@@ -1528,8 +1527,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name,
-                                            &e->ipv6, e->comefrom, &off);
+               ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
index 484a5c1440736ff6d95f38376a50f67683a94b01..ca4700cb26c4feec258c8e5034389522125b113e 100644 (file)
@@ -1327,13 +1327,7 @@ static struct inet_protosw rawv6_protosw = {
 
 int __init rawv6_init(void)
 {
-       int ret;
-
-       ret = inet6_register_protosw(&rawv6_protosw);
-       if (ret)
-               goto out;
-out:
-       return ret;
+       return inet6_register_protosw(&rawv6_protosw);
 }
 
 void rawv6_exit(void)
index aa462b480a394b0081ba73e2441d9cb4619e519b..fb45287ebac35308be9202d8b6061d764c24714a 100644 (file)
@@ -2,6 +2,7 @@ config MAC802154
        tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
        depends on IEEE802154
        select CRC_CCITT
+       select CRYPTO
        select CRYPTO_AUTHENC
        select CRYPTO_CCM
        select CRYPTO_CTR
index 70be9c799f8a81596a4e753b239849549d792dd0..317c4662e544679ab37dcc8cfa92fc8108c4820b 100644 (file)
@@ -73,9 +73,9 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
 
        ASSERT_RTNL();
 
-       /* check if phy support this setting */
-       if (!(wpan_phy->channels_supported[page] & BIT(channel)))
-               return -EINVAL;
+       if (wpan_phy->current_page == page &&
+           wpan_phy->current_channel == channel)
+               return 0;
 
        ret = drv_set_channel(local, page, channel);
        if (!ret) {
@@ -95,9 +95,8 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
 
        ASSERT_RTNL();
 
-       /* check if phy support this setting */
-       if (!(local->hw.flags & IEEE802154_HW_CCA_MODE))
-               return -EOPNOTSUPP;
+       if (wpan_phy_cca_cmp(&wpan_phy->cca, cca))
+               return 0;
 
        ret = drv_set_cca_mode(local, cca);
        if (!ret)
@@ -106,21 +105,50 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
        return ret;
 }
 
+static int
+ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       ASSERT_RTNL();
+
+       if (wpan_phy->cca_ed_level == ed_level)
+               return 0;
+
+       ret = drv_set_cca_ed_level(local, ed_level);
+       if (!ret)
+               wpan_phy->cca_ed_level = ed_level;
+
+       return ret;
+}
+
+static int
+ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       ASSERT_RTNL();
+
+       if (wpan_phy->transmit_power == power)
+               return 0;
+
+       ret = drv_set_tx_power(local, power);
+       if (!ret)
+               wpan_phy->transmit_power = power;
+
+       return ret;
+}
+
 static int
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                      __le16 pan_id)
 {
        ASSERT_RTNL();
 
-       /* TODO
-        * I am not sure about to check here on broadcast pan_id.
-        * Broadcast is a valid setting, comment from 802.15.4:
-        * If this value is 0xffff, the device is not associated.
-        *
-        * This could useful to simple deassociate an device.
-        */
-       if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
-               return -EINVAL;
+       if (wpan_dev->pan_id == pan_id)
+               return 0;
 
        wpan_dev->pan_id = pan_id;
        return 0;
@@ -131,12 +159,11 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
                                struct wpan_dev *wpan_dev,
                                u8 min_be, u8 max_be)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
-               return -EOPNOTSUPP;
+       if (wpan_dev->min_be == min_be &&
+           wpan_dev->max_be == max_be)
+               return 0;
 
        wpan_dev->min_be = min_be;
        wpan_dev->max_be = max_be;
@@ -149,20 +176,8 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
        ASSERT_RTNL();
 
-       /* TODO
-        * I am not sure about to check here on broadcast short_addr.
-        * Broadcast is a valid setting, comment from 802.15.4:
-        * A value of 0xfffe indicates that the device has
-        * associated but has not been allocated an address. A
-        * value of 0xffff indicates that the device does not
-        * have a short address.
-        *
-        * I think we should allow to set these settings but
-        * don't allow to allow socket communication with it.
-        */
-       if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
-           short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
-               return -EINVAL;
+       if (wpan_dev->short_addr == short_addr)
+               return 0;
 
        wpan_dev->short_addr = short_addr;
        return 0;
@@ -173,12 +188,10 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
                                 struct wpan_dev *wpan_dev,
                                 u8 max_csma_backoffs)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
-               return -EOPNOTSUPP;
+       if (wpan_dev->csma_retries == max_csma_backoffs)
+               return 0;
 
        wpan_dev->csma_retries = max_csma_backoffs;
        return 0;
@@ -189,12 +202,10 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
                                 struct wpan_dev *wpan_dev,
                                 s8 max_frame_retries)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES))
-               return -EOPNOTSUPP;
+       if (wpan_dev->frame_retries == max_frame_retries)
+               return 0;
 
        wpan_dev->frame_retries = max_frame_retries;
        return 0;
@@ -204,12 +215,10 @@ static int
 ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                        bool mode)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_LBT))
-               return -EOPNOTSUPP;
+       if (wpan_dev->lbt == mode)
+               return 0;
 
        wpan_dev->lbt = mode;
        return 0;
@@ -222,6 +231,8 @@ const struct cfg802154_ops mac802154_config_ops = {
        .del_virtual_intf = ieee802154_del_iface,
        .set_channel = ieee802154_set_channel,
        .set_cca_mode = ieee802154_set_cca_mode,
+       .set_cca_ed_level = ieee802154_set_cca_ed_level,
+       .set_tx_power = ieee802154_set_tx_power,
        .set_pan_id = ieee802154_set_pan_id,
        .set_short_addr = ieee802154_set_short_addr,
        .set_backoff_exponent = ieee802154_set_backoff_exponent,
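[Editor's note] A recurring change in this file: every setter now returns early when the requested value equals the current one, making repeated netlink writes idempotent and leaving the driver untouched. The idiom in isolation, as a trivial sketch:

/* Idempotent setter: succeed without side effects when nothing changes. */
static int set_short_addr_sketch(unsigned short *cur, unsigned short addr)
{
	if (*cur == addr)
		return 0;		/* already set: report success */

	*cur = addr;			/* a driver call would sit here */
	return 0;
}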
index a0533357b9eaca22df9e7b105f17ce3440b03d4e..caecd5f43aa730341c4e3e6556636ab5f13a285f 100644 (file)
@@ -58,7 +58,7 @@ drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
        return local->ops->set_channel(&local->hw, page, channel);
 }
 
-static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
+static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm)
 {
        might_sleep();
 
@@ -67,7 +67,7 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
                return -EOPNOTSUPP;
        }
 
-       return local->ops->set_txpower(&local->hw, dbm);
+       return local->ops->set_txpower(&local->hw, mbm);
 }
 
 static inline int drv_set_cca_mode(struct ieee802154_local *local,
@@ -96,7 +96,7 @@ static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
 }
 
 static inline int
-drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
+drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
 {
        might_sleep();
 
@@ -105,7 +105,7 @@ drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
                return -EOPNOTSUPP;
        }
 
-       return local->ops->set_cca_ed_level(&local->hw, ed_level);
+       return local->ops->set_cca_ed_level(&local->hw, mbm);
 }
 
 static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
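[Editor's note] The s8 dbm parameters become s32 mbm: transmit power and the ED threshold are now carried in millibel-milliwatts (dBm x 100), the convention cfg80211 already uses, which preserves fractional-dBm steps. A small standalone sketch of the conversion; the macro names mirror cfg80211's but are spelled out here as assumptions:

#include <stdio.h>

#define DBM_TO_MBM(gain) ((gain) * 100)
#define MBM_TO_DBM(gain) ((gain) / 100)

int main(void)
{
	int tx_mbm = DBM_TO_MBM(-12);	/* -1200 mbm == -12.00 dBm */

	printf("%d mbm is %d dBm\n", tx_mbm, MBM_TO_DBM(tx_mbm));
	return 0;
}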
index 127ba18386fc639aac4ccda482ebe4be11b8e6ee..eec668f3637ff37c9e12c1251cdca4cc59c75d7e 100644 (file)
@@ -86,8 +86,6 @@ struct ieee802154_sub_if_data {
        unsigned long state;
        char name[IFNAMSIZ];
 
-       spinlock_t mib_lock;
-
        /* protects sec from concurrent access by netlink. access by
         * encrypt/decrypt/header_create safe without additional protection.
         */
@@ -136,12 +134,7 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
 enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
 
 /* MIB callbacks */
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val);
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev);
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev);
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val);
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
-u8 mac802154_dev_get_dsn(const struct net_device *dev);
 
 int mac802154_get_params(struct net_device *dev,
                         struct ieee802154_llsec_params *params);
index 91b75abbd1a1d05b3219b9089232d9f67eb73ccd..b544b5dc4bfbd1968372c65dee771d0b59cb9671 100644 (file)
@@ -62,9 +62,10 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
        int err = -ENOIOCTLCMD;
 
-       ASSERT_RTNL();
+       if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
+               return err;
 
-       spin_lock_bh(&sdata->mib_lock);
+       rtnl_lock();
 
        switch (cmd) {
        case SIOCGIFADDR:
@@ -89,7 +90,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
        case SIOCSIFADDR:
                if (netif_running(dev)) {
-                       spin_unlock_bh(&sdata->mib_lock);
+                       rtnl_unlock();
                        return -EBUSY;
                }
 
@@ -111,7 +112,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
        }
 
-       spin_unlock_bh(&sdata->mib_lock);
+       rtnl_unlock();
        return err;
 }
 
@@ -241,7 +242,6 @@ static int mac802154_wpan_open(struct net_device *dev)
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        struct ieee802154_local *local = sdata->local;
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
-       struct wpan_phy *phy = sdata->local->phy;
 
        rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type);
        if (rc < 0)
@@ -251,8 +251,6 @@ static int mac802154_wpan_open(struct net_device *dev)
        if (rc < 0)
                return rc;
 
-       mutex_lock(&phy->pib_lock);
-
        if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
                rc = drv_set_promiscuous_mode(local,
                                              wpan_dev->promiscuous_mode);
@@ -294,11 +292,7 @@ static int mac802154_wpan_open(struct net_device *dev)
                        goto out;
        }
 
-       mutex_unlock(&phy->pib_lock);
-       return 0;
-
 out:
-       mutex_unlock(&phy->pib_lock);
        return rc;
 }
 
@@ -374,14 +368,12 @@ static int mac802154_header_create(struct sk_buff *skb,
        hdr.fc.type = cb->type;
        hdr.fc.security_enabled = cb->secen;
        hdr.fc.ack_request = cb->ackreq;
-       hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+       hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
 
        if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
                return -EINVAL;
 
        if (!saddr) {
-               spin_lock_bh(&sdata->mib_lock);
-
                if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
                    wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
                    wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
@@ -393,8 +385,6 @@ static int mac802154_header_create(struct sk_buff *skb,
                }
 
                hdr.source.pan_id = wpan_dev->pan_id;
-
-               spin_unlock_bh(&sdata->mib_lock);
        } else {
                hdr.source = *(const struct ieee802154_addr *)saddr;
        }
@@ -474,13 +464,16 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                       enum nl802154_iftype type)
 {
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+       u8 tmp;
 
        /* set some type-dependent values */
        sdata->vif.type = type;
        sdata->wpan_dev.iftype = type;
 
-       get_random_bytes(&wpan_dev->bsn, 1);
-       get_random_bytes(&wpan_dev->dsn, 1);
+       get_random_bytes(&tmp, sizeof(tmp));
+       atomic_set(&wpan_dev->bsn, tmp);
+       get_random_bytes(&tmp, sizeof(tmp));
+       atomic_set(&wpan_dev->dsn, tmp);
 
        /* defaults per 802.15.4-2011 */
        wpan_dev->min_be = 3;
@@ -503,7 +496,6 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                sdata->dev->ml_priv = &mac802154_mlme_wpan;
                wpan_dev->promiscuous_mode = false;
 
-               spin_lock_init(&sdata->mib_lock);
                mutex_init(&sdata->sec_mtx);
 
                mac802154_llsec_init(&sdata->sec);
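[Editor's note] The per-interface bsn/dsn counters move from plain u8 fields (guarded by the now-deleted mib_lock) into atomics, and header creation advances them with atomic_inc_return(...) & 0xFF. A standalone C11 sketch of the same wrap-around sequence counter:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dsn;

/* Advance atomically and keep the low 8 bits, matching the width of the
 * 802.15.4 sequence-number field. */
static unsigned char next_dsn(void)
{
	return (unsigned char)((atomic_fetch_add(&dsn, 1) + 1) & 0xFF);
}

int main(void)
{
	atomic_store(&dsn, 254);	/* the kernel seeds this randomly */
	printf("%u %u %u\n", (unsigned)next_dsn(), (unsigned)next_dsn(),
	       (unsigned)next_dsn());	/* prints: 255 0 1 */
	return 0;
}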
index bdccb4ecd30fed81fec03a00c1a3b6452024fcbb..8606da459ff3421f9528a059cf2d66ca98a4a579 100644 (file)
@@ -36,37 +36,30 @@ static int mac802154_mlme_start_req(struct net_device *dev,
                                    u8 pan_coord, u8 blx,
                                    u8 coord_realign)
 {
-       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
-       int rc = 0;
+       struct ieee802154_llsec_params params;
+       int changed = 0;
 
        ASSERT_RTNL();
 
        BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
-       mac802154_dev_set_pan_id(dev, addr->pan_id);
-       mac802154_dev_set_short_addr(dev, addr->short_addr);
+       dev->ieee802154_ptr->pan_id = addr->pan_id;
+       dev->ieee802154_ptr->short_addr = addr->short_addr;
        mac802154_dev_set_page_channel(dev, page, channel);
 
-       if (ops->llsec) {
-               struct ieee802154_llsec_params params;
-               int changed = 0;
+       params.pan_id = addr->pan_id;
+       changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
 
-               params.coord_shortaddr = addr->short_addr;
-               changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+       params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+       changed |= IEEE802154_LLSEC_PARAM_HWADDR;
 
-               params.pan_id = addr->pan_id;
-               changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+       params.coord_hwaddr = params.hwaddr;
+       changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
 
-               params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
-               changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+       params.coord_shortaddr = addr->short_addr;
+       changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
 
-               params.coord_hwaddr = params.hwaddr;
-               changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
-
-               rc = ops->llsec->set_params(dev, &params, changed);
-       }
-
-       return rc;
+       return mac802154_set_params(dev, &params, changed);
 }
 
 static int mac802154_set_mac_params(struct net_device *dev,
@@ -91,19 +84,19 @@ static int mac802154_set_mac_params(struct net_device *dev,
        wpan_dev->frame_retries = params->frame_retries;
        wpan_dev->lbt = params->lbt;
 
-       if (local->hw.flags & IEEE802154_HW_TXPOWER) {
+       if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) {
                ret = drv_set_tx_power(local, params->transmit_power);
                if (ret < 0)
                        return ret;
        }
 
-       if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
+       if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) {
                ret = drv_set_cca_mode(local, &params->cca);
                if (ret < 0)
                        return ret;
        }
 
-       if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) {
+       if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
                ret = drv_set_cca_ed_level(local, params->cca_ed_level);
                if (ret < 0)
                        return ret;
@@ -151,9 +144,6 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = {
 
 struct ieee802154_mlme_ops mac802154_mlme_wpan = {
        .start_req = mac802154_mlme_start_req,
-       .get_pan_id = mac802154_dev_get_pan_id,
-       .get_short_addr = mac802154_dev_get_short_addr,
-       .get_dsn = mac802154_dev_get_dsn,
 
        .llsec = &mac802154_llsec_ops,
 
index 08cb32dc8fd33e892e53f7f87f601b10ede8c38d..356b346e1ee86fdeadebf7be5d318c70dbc0d969 100644 (file)
@@ -107,6 +107,18 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
 
        skb_queue_head_init(&local->skb_queue);
 
+       /* init supported flags with 802.15.4 default ranges */
+       phy->supported.max_minbe = 8;
+       phy->supported.min_maxbe = 3;
+       phy->supported.max_maxbe = 8;
+       phy->supported.min_frame_retries = -1;
+       phy->supported.max_frame_retries = 7;
+       phy->supported.max_csma_backoffs = 5;
+       phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
+
+       /* always supported */
+       phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE);
+
        return &local->hw;
 }
 EXPORT_SYMBOL(ieee802154_alloc_hw);
@@ -155,6 +167,26 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
 
        ieee802154_setup_wpan_phy_pib(local->phy);
 
+       if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
+               local->phy->supported.min_csma_backoffs = 4;
+               local->phy->supported.max_csma_backoffs = 4;
+               local->phy->supported.min_maxbe = 5;
+               local->phy->supported.max_maxbe = 5;
+               local->phy->supported.min_minbe = 3;
+               local->phy->supported.max_minbe = 3;
+       }
+
+       if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
+               /* TODO should be 3, but our default value is -1 which means
+                * no ARET handling.
+                */
+               local->phy->supported.min_frame_retries = -1;
+               local->phy->supported.max_frame_retries = -1;
+       }
+
+       if (hw->flags & IEEE802154_HW_PROMISCUOUS)
+               local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR);
+
        rc = wpan_phy_register(local->phy);
        if (rc < 0)
                goto out_wq;
index 5cf019a57fd79cd601209971c349023e6c619ea0..73f94fbf87856a5b7d81ba0c27c50a0c6f5c54fb 100644 (file)
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       sdata->wpan_dev.short_addr = val;
-       spin_unlock_bh(&sdata->mib_lock);
-}
-
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-       __le16 ret;
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       ret = sdata->wpan_dev.short_addr;
-       spin_unlock_bh(&sdata->mib_lock);
-
-       return ret;
-}
-
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-       __le16 ret;
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       ret = sdata->wpan_dev.pan_id;
-       spin_unlock_bh(&sdata->mib_lock);
-
-       return ret;
-}
-
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       sdata->wpan_dev.pan_id = val;
-       spin_unlock_bh(&sdata->mib_lock);
-}
-
-u8 mac802154_dev_get_dsn(const struct net_device *dev)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       return sdata->wpan_dev.dsn++;
-}
-
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
 {
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        struct ieee802154_local *local = sdata->local;
        int res;
 
+       ASSERT_RTNL();
+
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
        res = drv_set_channel(local, page, chan);
        if (res) {
                pr_debug("set_channel failed\n");
        } else {
-               mutex_lock(&local->phy->pib_lock);
                local->phy->current_channel = chan;
                local->phy->current_page = page;
-               mutex_unlock(&local->phy->pib_lock);
        }
 }
 
index c0d67b2b4132b033ca28446556c5aefe7e047ee6..e0f10063cac3816fd6830376d65e955429445145 100644 (file)
@@ -47,8 +47,6 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
 
        pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
-       spin_lock_bh(&sdata->mib_lock);
-
        span = wpan_dev->pan_id;
        sshort = wpan_dev->short_addr;
 
@@ -83,13 +81,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
                        skb->pkt_type = PACKET_OTHERHOST;
                break;
        default:
-               spin_unlock_bh(&sdata->mib_lock);
                pr_debug("invalid dest mode\n");
                goto fail;
        }
 
-       spin_unlock_bh(&sdata->mib_lock);
-
        skb->dev = sdata->dev;
 
        rc = mac802154_llsec_decrypt(&sdata->sec, skb);
index 150bf807e572eb85458371d1c8e930e2cb7ec0b2..583435f3893037e45d4a5879b66b4cda3bb6f27f 100644 (file)
@@ -85,11 +85,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
                        hrtimer_start(&local->ifs_timer,
                                      ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
                                      HRTIMER_MODE_REL);
-
-               consume_skb(skb);
        } else {
                ieee802154_wake_queue(hw);
-               consume_skb(skb);
        }
+
+       dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
index 1c78d7fb1da781b503251e574c611c8cdd1b3dea..fbc8d15c7fda7644eeba4b35d50baf7cc93da7f5 100644 (file)
@@ -3,6 +3,7 @@ menu "Core Netfilter Configuration"
 
 config NETFILTER_INGRESS
        bool "Netfilter ingress support"
+       default y
        select NET_INGRESS
        help
          This allows you to classify packets from ingress using the Netfilter
@@ -455,6 +456,11 @@ config NF_TABLES_INET
        help
          This option enables support for a mixed IPv4/IPv6 "inet" table.
 
+config NF_TABLES_NETDEV
+       tristate "Netfilter nf_tables netdev tables support"
+       help
+         This option enables support for the "netdev" table.
+
 config NFT_EXTHDR
        tristate "Netfilter nf_tables IPv6 exthdr module"
        help
index a87d8b8ec730421403930c69061a2c7167db0a6a..70d026d46fe7d07a3ee1d942b7e877782bb37957 100644 (file)
@@ -75,6 +75,7 @@ nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
 
 obj-$(CONFIG_NF_TABLES)                += nf_tables.o
 obj-$(CONFIG_NF_TABLES_INET)   += nf_tables_inet.o
+obj-$(CONFIG_NF_TABLES_NETDEV) += nf_tables_netdev.o
 obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
 obj-$(CONFIG_NFT_EXTHDR)       += nft_exthdr.o
 obj-$(CONFIG_NFT_META)         += nft_meta.o
index 34ded09317e715cc94b80ce8d918006bbe1f714b..4528f122bcd2ff79806709efdeb9ae0faded50cc 100644 (file)
@@ -399,6 +399,8 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
        [NFTA_TABLE_NAME]       = { .type = NLA_STRING,
                                    .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_TABLE_FLAGS]      = { .type = NLA_U32 },
+       [NFTA_TABLE_DEV]        = { .type = NLA_STRING,
+                                   .len = IFNAMSIZ - 1 },
 };
 
 static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
@@ -423,6 +425,10 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
            nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
                goto nla_put_failure;
 
+       if (table->dev &&
+           nla_put_string(skb, NFTA_TABLE_DEV, table->dev->name))
+               goto nla_put_failure;
+
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -608,6 +614,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        if (flags == ctx->table->flags)
                return 0;
 
+       if ((ctx->afi->flags & NFT_AF_NEEDS_DEV) &&
+           ctx->nla[NFTA_TABLE_DEV] &&
+           nla_strcmp(ctx->nla[NFTA_TABLE_DEV], ctx->table->dev->name))
+               return -EOPNOTSUPP;
+
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
                                sizeof(struct nft_trans_table));
        if (trans == NULL)
@@ -645,6 +656,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        struct nft_table *table;
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
+       struct net_device *dev = NULL;
        u32 flags = 0;
        struct nft_ctx ctx;
        int err;
@@ -679,30 +691,50 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                        return -EINVAL;
        }
 
+       if (afi->flags & NFT_AF_NEEDS_DEV) {
+               char ifname[IFNAMSIZ];
+
+               if (!nla[NFTA_TABLE_DEV])
+                       return -EOPNOTSUPP;
+
+               nla_strlcpy(ifname, nla[NFTA_TABLE_DEV], IFNAMSIZ);
+               dev = dev_get_by_name(net, ifname);
+               if (!dev)
+                       return -ENOENT;
+       } else if (nla[NFTA_TABLE_DEV]) {
+               return -EOPNOTSUPP;
+       }
+
+       err = -EAFNOSUPPORT;
        if (!try_module_get(afi->owner))
-               return -EAFNOSUPPORT;
+               goto err1;
 
        err = -ENOMEM;
        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (table == NULL)
-               goto err1;
+               goto err2;
 
        nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
        INIT_LIST_HEAD(&table->chains);
        INIT_LIST_HEAD(&table->sets);
        table->flags = flags;
+       table->dev   = dev;
 
        nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
        err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
-err2:
+err3:
        kfree(table);
-err1:
+err2:
        module_put(afi->owner);
+err1:
+       if (dev != NULL)
+               dev_put(dev);
+
        return err;
 }
 
@@ -806,6 +838,9 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
 {
        BUG_ON(ctx->table->use > 0);
 
+       if (ctx->table->dev)
+               dev_put(ctx->table->dev);
+
        kfree(ctx->table);
        module_put(ctx->afi->owner);
 }
@@ -1361,6 +1396,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        ops->priority   = priority;
                        ops->priv       = chain;
                        ops->hook       = afi->hooks[ops->hooknum];
+                       ops->dev        = table->dev;
                        if (hookfn)
                                ops->hook = hookfn;
                        if (afi->hook_ops_init)
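[Editor's note] Supporting NFTA_TABLE_DEV adds one more resource (the device reference) that must be acquired before the table allocation, so the error path gains an extra rung; the goto ladder releases resources strictly in reverse acquisition order. A self-contained sketch of that shape, with malloc/free standing in for dev_get_by_name()/dev_put() and kzalloc()/kfree():

#include <errno.h>
#include <stdlib.h>

static int register_sketch(void *table)
{
	(void)table;
	return 0;			/* pretend registration succeeds */
}

static int new_table_sketch(int needs_dev)
{
	void *dev = NULL;
	void *table;
	int err;

	if (needs_dev) {
		dev = malloc(16);	/* stands in for dev_get_by_name() */
		if (!dev)
			return -ENOENT;
	}

	err = -ENOMEM;
	table = malloc(64);		/* stands in for kzalloc() */
	if (!table)
		goto err1;

	err = register_sketch(table);
	if (err < 0)
		goto err2;

	/* success: the kernel would list_add the table here; the sketch
	 * just cleans up */
	free(table);
	free(dev);
	return 0;

err2:
	free(table);			/* undo in reverse order */
err1:
	free(dev);			/* mirrors dev_put(); free(NULL) is ok */
	return err;
}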
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
new file mode 100644 (file)
index 0000000..04cb170
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <net/netfilter/nf_tables.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static inline void
+nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+                           const struct nf_hook_ops *ops, struct sk_buff *skb,
+                           const struct nf_hook_state *state)
+{
+       struct iphdr *iph, _iph;
+       u32 len, thoff;
+
+       nft_set_pktinfo(pkt, ops, skb, state);
+
+       iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+                                &_iph);
+       if (!iph)
+               return;
+
+       iph = ip_hdr(skb);
+       if (iph->ihl < 5 || iph->version != 4)
+               return;
+
+       len = ntohs(iph->tot_len);
+       thoff = iph->ihl * 4;
+       if (skb->len < len)
+               return;
+       else if (len < thoff)
+               return;
+
+       pkt->tprot = iph->protocol;
+       pkt->xt.thoff = thoff;
+       pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+}
+
+static inline void
+__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+                             const struct nf_hook_ops *ops,
+                             struct sk_buff *skb,
+                             const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+       struct ipv6hdr *ip6h, _ip6h;
+       unsigned int thoff = 0;
+       unsigned short frag_off;
+       int protohdr;
+       u32 pkt_len;
+
+       ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+                                 &_ip6h);
+       if (!ip6h)
+               return;
+
+       if (ip6h->version != 6)
+               return;
+
+       pkt_len = ntohs(ip6h->payload_len);
+       if (pkt_len + sizeof(*ip6h) > skb->len)
+               return;
+
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       if (protohdr < 0)
+                return;
+
+       pkt->tprot = protohdr;
+       pkt->xt.thoff = thoff;
+       pkt->xt.fragoff = frag_off;
+#endif
+}
+
+static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+                                              const struct nf_hook_ops *ops,
+                                              struct sk_buff *skb,
+                                              const struct nf_hook_state *state)
+{
+       nft_set_pktinfo(pkt, ops, skb, state);
+       __nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state);
+}
+
+static unsigned int
+nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb,
+                   const struct nf_hook_state *state)
+{
+       struct nft_pktinfo pkt;
+
+       switch (eth_hdr(skb)->h_proto) {
+       case htons(ETH_P_IP):
+               nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state);
+               break;
+       case htons(ETH_P_IPV6):
+               nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state);
+               break;
+       default:
+               nft_set_pktinfo(&pkt, ops, skb, state);
+               break;
+       }
+
+       return nft_do_chain(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_netdev __read_mostly = {
+       .family         = NFPROTO_NETDEV,
+       .nhooks         = NF_NETDEV_NUMHOOKS,
+       .owner          = THIS_MODULE,
+       .flags          = NFT_AF_NEEDS_DEV,
+       .nops           = 1,
+       .hooks          = {
+               [NF_NETDEV_INGRESS]     = nft_do_chain_netdev,
+       },
+};
+
+static int nf_tables_netdev_init_net(struct net *net)
+{
+       net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.netdev == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev));
+
+       if (nft_register_afinfo(net, net->nft.netdev) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.netdev);
+       return -ENOMEM;
+}
+
+static void nf_tables_netdev_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.netdev);
+       kfree(net->nft.netdev);
+}
+
+static struct pernet_operations nf_tables_netdev_net_ops = {
+       .init   = nf_tables_netdev_init_net,
+       .exit   = nf_tables_netdev_exit_net,
+};
+
+static const struct nf_chain_type nft_filter_chain_netdev = {
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .family         = NFPROTO_NETDEV,
+       .owner          = THIS_MODULE,
+       .hook_mask      = (1 << NF_NETDEV_INGRESS),
+};
+
+static int __init nf_tables_netdev_init(void)
+{
+       int ret;
+
+       nft_register_chain_type(&nft_filter_chain_netdev);
+       ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
+       if (ret < 0)
+               nft_unregister_chain_type(&nft_filter_chain_netdev);
+
+       return ret;
+}
+
+static void __exit nf_tables_netdev_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_netdev_net_ops);
+       nft_unregister_chain_type(&nft_filter_chain_netdev);
+}
+
+module_init(nf_tables_netdev_init);
+module_exit(nf_tables_netdev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */
index 9370f953e16fc2a05941375439c0e031f6af2cbf..30ea82a9b0f13b44d34767241586a3d9648b906e 100644 (file)
@@ -410,7 +410,7 @@ static int tipc_release(struct socket *sock)
        struct net *net;
        struct tipc_sock *tsk;
        struct sk_buff *skb;
-       u32 dnode, probing_state;
+       u32 dnode;
 
        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -448,10 +448,7 @@ static int tipc_release(struct socket *sock)
        }
 
        tipc_sk_withdraw(tsk, 0, NULL);
-       probing_state = tsk->probing_state;
-       if (del_timer_sync(&sk->sk_timer) &&
-           probing_state != TIPC_CONN_PROBING)
-               sock_put(sk);
+       sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);
        if (tsk->connected) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
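[Editor's note] tipc_release() now uses the generic sk_stop_timer() helper instead of pairing del_timer_sync() with a conditional sock_put(); sk_stop_timer() drops the reference the timer held on the socket if, and only if, the timer was still queued. A toy model of that contract, with plain fields standing in for the timer and the refcount:

#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	int refcnt;
	bool timer_pending;
};

/* Model of sk_stop_timer(): cancel the timer and release the reference
 * it held, but only if it was still queued. */
static void stop_timer_sketch(struct fake_sock *sk)
{
	if (sk->timer_pending) {	/* del_timer() returned true */
		sk->timer_pending = false;
		sk->refcnt--;		/* __sock_put() */
	}
}

int main(void)
{
	struct fake_sock sk = { .refcnt = 2, .timer_pending = true };

	stop_timer_sketch(&sk);
	stop_timer_sketch(&sk);		/* second call is a harmless no-op */
	printf("refcnt=%d pending=%d\n", sk.refcnt, sk.timer_pending);
	return 0;
}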