[PATCH] IB/mlx5: Flow through a more detailed return code from get_prefetchable_mr()

From: Jason Gunthorpe
Date: Fri Sep 03 2021 - 04:48:15 EST


The error returns for the various cases detected by get_prefetchable_mr() get
confused as they flow back to userspace. Properly label each error path and
flow the resulting error code back to the system call.

Suggested-by: Li Zhijian <lizhijian@xxxxxxxxxxxxxx>
Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
---
drivers/infiniband/hw/mlx5/odp.c | 40 ++++++++++++++++++++------------
1 file changed, 25 insertions(+), 15 deletions(-)
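
A minimal userspace sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention the
patch switches to, in case it helps review (not intended for the commit
message). The helpers below are simplified stand-ins for the kernel's
<linux/err.h>, and lookup_mr()/example_mr/MAX_ERRNO here are hypothetical
names that only mirror the shape of the new get_prefetchable_mr() error
labels, not the driver code itself:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct mr {
        int writable;
};

/* Hypothetical lookup that mirrors the new error labels: the errno is
 * encoded in the returned pointer instead of collapsing to NULL. */
static struct mr *lookup_mr(unsigned int lkey, int want_write)
{
        static struct mr example_mr = { .writable = 0 };

        if (lkey != 42)
                return ERR_PTR(-ENOENT);        /* mkey not found */
        if (want_write && !example_mr.writable)
                return ERR_PTR(-EPERM);         /* MR does not allow writes */
        return &example_mr;
}

int main(void)
{
        /* Caller pattern now used by init_prefetch_work() and
         * mlx5_ib_prefetch_sg_list(): the exact errno reaches the caller. */
        struct mr *mr = lookup_mr(42, 1);

        if (IS_ERR(mr)) {
                printf("prefetch would fail with %ld\n", PTR_ERR(mr));
                return 1;
        }
        return 0;
}

The point is that a bare NULL carried no reason, so the callers had to guess
(-EINVAL from mlx5_ib_advise_mr_prefetch(), -ENOENT from
mlx5_ib_prefetch_sg_list()); encoding the errno in the pointer lets the real
cause, e.g. -EPERM for a PD or write-access mismatch, reach the system call.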

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebcc3..77890a85fc2dd3 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,

xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
- if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }

mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

if (mr->ibmr.pd != pd) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}

/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
!mr->umem->writable) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}

@@ -1753,7 +1759,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
destroy_prefetch_work(work);
}

-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct prefetch_mr_work *work,
struct ib_sge *sg_list, u32 num_sge)
@@ -1764,17 +1770,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->pf_flags = pf_flags;

for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
+ work->num_sge = i;
+ return PTR_ERR(mr);
+ }
work->frags[i].io_virt = sg_list[i].addr;
work->frags[i].length = sg_list[i].length;
- work->frags[i].mr =
- get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!work->frags[i].mr) {
- work->num_sge = i;
- return false;
- }
+ work->frags[i].mr = mr;
}
work->num_sge = num_sge;
- return true;
+ return 0;
}

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@ -1790,8 +1798,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
struct mlx5_ib_mr *mr;

mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr)
- return -ENOENT;
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
if (ret < 0) {
@@ -1811,6 +1819,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
u32 pf_flags = 0;
struct prefetch_mr_work *work;
+ int rc;

if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1826,9 +1835,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (!work)
return -ENOMEM;

- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
destroy_prefetch_work(work);
- return -EINVAL;
+ return rc;
}
queue_work(system_unbound_wq, &work->work);
return 0;
--
2.33.0