net/mlx5_core: Support MANAGE_PAGES and QUERY_PAGES firmware command changes
author Moshe Lazer <moshel@mellanox.com>
Wed, 14 Aug 2013 14:46:48 +0000 (17:46 +0300)
committer David S. Miller <davem@davemloft.net>
Thu, 15 Aug 2013 22:42:57 +0000 (15:42 -0700)
In the previous version of the QUERY_PAGES command we used a single
command to get the required number of boot, init and post-init pages.
The new version uses the op_mod field to specify whether the query is
for the required number of boot, init or post-init pages.  In addition,
the output field that carries the required number of pages has grown
from 16 to 32 bits.
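
As a sketch of the resulting driver-side flow (mirroring the
pagealloc.c hunk below), the op_mod selector and the widened output
field are used roughly as follows:

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	/* op_mod selects which page count is being queried */
	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) :
			      cpu_to_be16(MLX5_INIT_PAGES);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	...
	/* the required page count is now a 32-bit field */
	*npages  = be32_to_cpu(out.num_pages);
	*func_id = be16_to_cpu(out.func_id);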

In the MANAGE_PAGES command, the input_num_entries and
output_num_entries fields have grown from 16 to 32 bits, and the PAS
table offset has moved to 0x10.
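
For reference, the resulting input mailbox layout (offsets assuming
the 8-byte struct mlx5_inbox_hdr) is roughly:

	struct mlx5_manage_pages_inbox {
		struct mlx5_inbox_hdr	hdr;		/* 0x00 (hdr assumed 8 bytes) */
		__be16			rsvd;		/* 0x08 */
		__be16			func_id;	/* 0x0a */
		__be32			num_entries;	/* 0x0c, was __be16 */
		__be64			pas[0];		/* 0x10, was 0x20 */
	};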

In the pages request event, the num_pages field has likewise grown to
32 bits.

In the HCA capabilities layout, the size and location of the
max_qp_mcg field have changed so that it can hold a 24-bit value.
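
The driver now reads it as a 32-bit big-endian word and masks off the
reserved top byte, as in the fw.c hunk below:

	caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;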

This patch is not compatible with firmware versions < 5.  However, the
first GA firmware we will publish will not support earlier versions,
so this should be OK.

Signed-off-by: Moshe Lazer <moshel@mellanox.com>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
include/linux/mlx5/device.h
include/linux/mlx5/driver.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de85d0f995bb2c302210cfbd979b9e1bec9e..5472cbd34028d9038539824c4167a0c3010a3a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-       CMD_IF_REV = 4,
+       CMD_IF_REV = 5,
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb83a4b10d198888c88f67fc4670cc9..443cc4d7b024c02d2cc77861868e1c1b17ee2524 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-                               s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+                               s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
                                mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f558b3e4a118ccaac70de358af8c64..f012658b6a927baf1fca6a2d39d806346c61a1a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
        caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
        caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
        caps->log_max_mcg = out->hca_cap.log_max_mcg;
-       caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+       caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
        caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
        caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
        caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4a3e137931a38b898468061ae8334016e4e2f23e..3a2408d448203623754d0aa87e35c16e809da43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
        MLX5_PAGES_TAKE         = 2
 };
 
+enum {
+       MLX5_BOOT_PAGES         = 1,
+       MLX5_INIT_PAGES         = 2,
+       MLX5_POST_INIT_PAGES    = 3
+};
+
 struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u32     func_id;
-       s16     npages;
+       s32     npages;
        struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       __be16                  num_boot_pages;
+       __be16                  rsvd;
        __be16                  func_id;
-       __be16                  init_pages;
-       __be16                  num_pages;
+       __be32                  num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
-       __be16                  rsvd0;
+       __be16                  rsvd;
        __be16                  func_id;
-       __be16                  rsvd1;
-       __be16                  num_entries;
-       u8                      rsvd2[16];
+       __be32                  num_entries;
        __be64                  pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       u8                      rsvd0[2];
-       __be16                  num_entries;
-       u8                      rsvd1[20];
+       __be32                  num_entries;
+       u8                      rsvd[4];
        __be64                  pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-                               s16 *pages, s16 *init_pages, u16 *boot_pages)
+                               s32 *npages, int boot)
 {
        struct mlx5_query_pages_inbox   in;
        struct mlx5_query_pages_outbox  out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+       in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
 
-       if (pages)
-               *pages = be16_to_cpu(out.num_pages);
-
-       if (init_pages)
-               *init_pages = be16_to_cpu(out.init_pages);
-
-       if (boot_pages)
-               *boot_pages = be16_to_cpu(out.num_boot_pages);
-
+       *npages = be32_to_cpu(out.num_pages);
        *func_id = be16_to_cpu(out.func_id);
 
        return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
        in->func_id = cpu_to_be16(func_id);
-       in->num_entries = cpu_to_be16(npages);
+       in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        mlx5_core_dbg(dev, "err %d\n", err);
        if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
        in.func_id = cpu_to_be16(func_id);
-       in.num_entries = cpu_to_be16(npages);
+       in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                goto out_free;
        }
 
-       num_claimed = be16_to_cpu(out->num_entries);
+       num_claimed = be32_to_cpu(out->num_entries);
        if (nclaimed)
                *nclaimed = num_claimed;
 
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                s16 npages)
+                                s32 npages)
 {
        struct mlx5_pages_req *req;
 
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-       u16 uninitialized_var(boot_pages);
-       s16 uninitialized_var(init_pages);
        u16 uninitialized_var(func_id);
+       s32 uninitialized_var(npages);
        int err;
 
-       err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
-                                  &boot_pages);
+       err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;
 
+       mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+                     npages, boot ? "boot" : "init", func_id);
 
-       mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
-                     init_pages, boot_pages, func_id);
-       return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+       return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852e91fd93ccf142500c33773f42e71..68029b30c3dc89e2d2620fd41101b2ab05069344 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
        __be16  max_desc_sz_rq;
        u8      rsvd21[2];
        __be16  max_desc_sz_sq_dc;
-       u8      rsvd22[4];
-       __be16  max_qp_mcg;
-       u8      rsvd23;
+       __be32  max_qp_mcg;
+       u8      rsvd22[3];
        u8      log_max_mcg;
-       u8      rsvd24;
+       u8      rsvd23;
        u8      log_max_pd;
-       u8      rsvd25;
+       u8      rsvd24;
        u8      log_max_xrcd;
-       u8      rsvd26[42];
+       u8      rsvd25[42];
        __be16  log_uar_page_sz;
-       u8      rsvd27[28];
+       u8      rsvd26[28];
        u8      log_msx_atomic_size_qp;
-       u8      rsvd28[2];
+       u8      rsvd27[2];
        u8      log_msx_atomic_size_dc;
-       u8      rsvd29[76];
+       u8      rsvd28[76];
 };
 
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
        u8              rsvd0[2];
        __be16          func_id;
-       u8              rsvd1[2];
-       __be16          num_pages;
-       __be32          rsvd2[5];
+       __be32          num_pages;
+       __be32          rsvd1[5];
 };
 
 union ev_data {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 611e65e76b00185c30b6a4ec6a3157a56ec6e5d9..8888381fc150b8f3f852077407ee8187a54cb7aa 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
        u32     reserved_lkey;
        u8      local_ca_ack_delay;
        u8      log_max_mcg;
-       u16     max_qp_mcg;
+       u32     max_qp_mcg;
        int     min_page_sz;
 };
 
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                s16 npages);
+                                s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);