RDMA/mlx5: Introduce ODP prefetch counter

For debugging purposes, it is easier to understand whether prefetch works
correctly if it has its own counter. Introduce an ODP prefetch counter and
count, per MR, the total number of prefetched pages.

In addition, remove a comment which is no longer relevant and was not in
the correct place anyway.

Link: https://lore.kernel.org/r/20200621104147.53795-1-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit d473f4dc2f (parent 0cb42c0265)
Author:    Maor Gottlieb <maorg@mellanox.com>
Date:      2020-06-21 13:41:47 +03:00
Committer: Jason Gunthorpe <jgg@nvidia.com>
3 changed files with 14 additions and 9 deletions
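
For context, the counting itself goes through the mlx5_update_odp_stats()
helper. A minimal sketch of the mechanism, assuming the macro as defined in
drivers/infiniband/hw/mlx5/mlx5_ib.h (shown for illustration only, it is not
part of this diff):

    /*
     * Per-MR ODP counters are plain atomic64s; each call adds the number
     * of pages that the fault or prefetch path resolved.
     */
    #define mlx5_update_odp_stats(mr, counter_name, value)		\
    	atomic64_add(value, &((mr)->odp_stats.counter_name))

    /* The new call sites below therefore expand to roughly: */
    /* atomic64_add(ret, &mr->odp_stats.prefetch); */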

drivers/infiniband/hw/mlx5/odp.c

@@ -913,11 +913,6 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 		if (ret < 0)
 			goto srcu_unlock;
 
-		/*
-		 * When prefetching a page, page fault is generated
-		 * in order to bring the page to the main memory.
-		 * In the current flow, page faults are being counted.
-		 */
 		mlx5_update_odp_stats(mr, faults, ret);
 
 		npages += ret;
@@ -1755,12 +1750,17 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
 	struct prefetch_mr_work *work =
 		container_of(w, struct prefetch_mr_work, work);
 	u32 bytes_mapped = 0;
+	int ret;
 	u32 i;
 
-	for (i = 0; i < work->num_sge; ++i)
-		pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
-			     work->frags[i].length, &bytes_mapped,
-			     work->pf_flags);
+	for (i = 0; i < work->num_sge; ++i) {
+		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+				   work->frags[i].length, &bytes_mapped,
+				   work->pf_flags);
+		if (ret <= 0)
+			continue;
+		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+	}
 
 	destroy_prefetch_work(work);
 }
@@ -1818,6 +1818,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
 				   &bytes_mapped, pf_flags);
 		if (ret < 0)
 			goto out;
+		mlx5_update_odp_stats(mr, prefetch, ret);
 	}
 
 	ret = 0;
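
Note that pagefault_mr() returns the number of pages it mapped on success and
a negative errno on failure, so the "ret <= 0" check in the work handler skips
both errors and no-op prefetches: only pages that were actually prefetched are
counted, and errors in the asynchronous path (which has no caller to report
them to) are simply not accumulated.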

drivers/infiniband/hw/mlx5/restrack.c

@@ -99,6 +99,9 @@ int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg,
 		    msg, "page_invalidations",
 		    atomic64_read(&mr->odp_stats.invalidations)))
 		goto err_table;
+	if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
+					 atomic64_read(&mr->odp_stats.prefetch)))
+		goto err_table;
 
 	nla_nest_end(msg, table_attr);
 	return 0;
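
With this hunk the new counter is exported over the rdma netlink stat
interface next to page_faults and page_invalidations, so it should appear as
page_prefetch in the per-MR statistics dump of a recent iproute2 rdma tool
(something like "rdma stat show mr", depending on the tool version).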

include/rdma/ib_verbs.h

@@ -2271,6 +2271,7 @@ struct rdma_netdev_alloc_params {
 struct ib_odp_counters {
 	atomic64_t faults;
 	atomic64_t invalidations;
+	atomic64_t prefetch;
 };
 
 struct ib_counters {