RDMA/mlx5: Introduce ODP prefetch counter
author    Maor Gottlieb <maorg@mellanox.com>
          Sun, 21 Jun 2020 10:41:47 +0000 (13:41 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
          Fri, 3 Jul 2020 12:16:25 +0000 (09:16 -0300)
For debugging purposes it is easier to tell whether prefetch is working
correctly if prefetch has its own counter. Introduce an ODP prefetch
counter and count, per MR, the total number of prefetched pages.

In addition, remove a comment which is no longer relevant and was not in
the correct place anyway.
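
The counter is bumped through the existing mlx5_update_odp_stats() helper.
As a rough sketch (not part of this patch, and assuming the helper keeps
its current atomic64_add() based definition in mlx5_ib.h), the accounting
amounts to:

    /* include/rdma/ib_verbs.h */
    struct ib_odp_counters {
            atomic64_t faults;
            atomic64_t invalidations;
            atomic64_t prefetch;
    };

    /* drivers/infiniband/hw/mlx5/mlx5_ib.h, existing helper shown for context */
    #define mlx5_update_odp_stats(mr, counter_name, value)          \
            atomic64_add(value, &((mr)->odp_stats.counter_name))

    /* prefetch paths: ret is the number of pages mapped by pagefault_mr() */
    if (ret > 0)
            mlx5_update_odp_stats(mr, prefetch, ret);

The accumulated value is then exported to userspace as the "page_prefetch"
entry next to the existing per-MR ODP counters in
mlx5_ib_fill_stat_mr_entry().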

Link: https://lore.kernel.org/r/20200621104147.53795-1-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/restrack.c
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097b0903da57d4209b76cfd6e9e354b..ee88b32d143d4e6938f0cc1cf2cfcd7b5b416766 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -913,11 +913,6 @@ next_mr:
                if (ret < 0)
                        goto srcu_unlock;
 
-               /*
-                * When prefetching a page, page fault is generated
-                * in order to bring the page to the main memory.
-                * In the current flow, page faults are being counted.
-                */
                mlx5_update_odp_stats(mr, faults, ret);
 
                npages += ret;
@@ -1755,12 +1750,17 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
        struct prefetch_mr_work *work =
                container_of(w, struct prefetch_mr_work, work);
        u32 bytes_mapped = 0;
+       int ret;
        u32 i;
 
-       for (i = 0; i < work->num_sge; ++i)
-               pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
-                            work->frags[i].length, &bytes_mapped,
-                            work->pf_flags);
+       for (i = 0; i < work->num_sge; ++i) {
+               ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+                                  work->frags[i].length, &bytes_mapped,
+                                  work->pf_flags);
+               if (ret <= 0)
+                       continue;
+               mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+       }
 
        destroy_prefetch_work(work);
 }
@@ -1818,6 +1818,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
                                   &bytes_mapped, pf_flags);
                if (ret < 0)
                        goto out;
+               mlx5_update_odp_stats(mr, prefetch, ret);
        }
        ret = 0;
 
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index 224a63975822867fb74a05caed5c6cfdb9bdac91..32c6d0397946af5aa9f1034a9ccaea16271383e0 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -99,6 +99,9 @@ int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg,
                    msg, "page_invalidations",
                    atomic64_read(&mr->odp_stats.invalidations)))
                goto err_table;
+       if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
+                                        atomic64_read(&mr->odp_stats.prefetch)))
+               goto err_table;
 
        nla_nest_end(msg, table_attr);
        return 0;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e902a8f1713bfae935a095bcba03e3f71603184..f6b51a709818e55b9b07aa455d26dca802acd0de 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2271,6 +2271,7 @@ struct rdma_netdev_alloc_params {
 struct ib_odp_counters {
        atomic64_t faults;
        atomic64_t invalidations;
+       atomic64_t prefetch;
 };
 
 struct ib_counters {