[PATCH 4.7 159/186] IB/core, RDMA RW API: Do not exceed QP SGE send limit

From: Greg Kroah-Hartman
Date: Thu Aug 18 2016 - 10:29:28 EST


4.7-stable review patch. If anyone has any objections, please let me know.

------------------

From: Bart Van Assche <bart.vanassche@xxxxxxxxxxx>

commit 632bc3f65081dd1e2e5394a9161580a0f78e8839 upstream.

Compute the SGE limits for RDMA READ and WRITE requests in
ib_create_qp(), and use those limits in the RDMA RW API
implementation instead of the device-wide attributes.
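
To make the new invariant concrete, here is a minimal, self-contained
C sketch of the two limits this patch computes at QP creation time.
The names below are stand-ins modeled on the patch, not kernel API:
dev_attrs, qp_caps, and min_u32() are hypothetical; the kernel uses
struct ib_device_attr, struct ib_qp_cap, and min_t().

	#include <stdint.h>

	/* Stand-ins for the device attributes and QP creation caps. */
	struct dev_attrs { uint32_t max_sge; uint32_t max_sge_rd; };
	struct qp_caps   { uint32_t max_send_sge; };

	static uint32_t min_u32(uint32_t a, uint32_t b)
	{
		return a < b ? a : b;
	}

	/*
	 * Per the patch: every hw driver keeps max_send_sge within the
	 * device's RDMA WRITE SGE limit, but not all of them guarantee
	 * max_send_sge <= max_sge_rd, so the READ limit is clamped.
	 */
	static void compute_qp_sge_limits(const struct dev_attrs *attrs,
					  const struct qp_caps *cap,
					  uint32_t *max_write_sge,
					  uint32_t *max_read_sge)
	{
		*max_write_sge = cap->max_send_sge;
		*max_read_sge = min_u32(cap->max_send_sge,
					attrs->max_sge_rd);
	}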

Signed-off-by: Bart Van Assche <bart.vanassche@xxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
Cc: Steve Wise <swise@xxxxxxxxxxxxxxxxxxxxx>
Cc: Parav Pandit <pandit.parav@xxxxxxxxx>
Cc: Nicholas Bellinger <nab@xxxxxxxxxxxxxxx>
Cc: Laurence Oberman <loberman@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Doug Ledford <dledford@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/infiniband/core/rw.c    |   10 ++--------
 drivers/infiniband/core/verbs.c |    9 +++++++++
 include/rdma/ib_verbs.h         |    6 ++++++
 3 files changed, 17 insertions(+), 8 deletions(-)

--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -58,13 +58,6 @@ static inline bool rdma_rw_io_needs_mr(s
 	return false;
 }
 
-static inline u32 rdma_rw_max_sge(struct ib_device *dev,
-		enum dma_data_direction dir)
-{
-	return dir == DMA_TO_DEVICE ?
-		dev->attrs.max_sge : dev->attrs.max_sge_rd;
-}
-
 static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
 {
 	/* arbitrary limit to avoid allocating gigantic resources */
@@ -186,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct r
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct ib_device *dev = qp->pd->device;
-	u32 max_sge = rdma_rw_max_sge(dev, dir);
+	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
+		      qp->max_read_sge;
 	struct ib_sge *sge;
 	u32 total_len = 0, i, j;
 
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -814,6 +814,15 @@ struct ib_qp *ib_create_qp(struct ib_pd
 		}
 	}
 
+	/*
+	 * Note: all hw drivers guarantee that max_send_sge is lower than
+	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
+	 * max_send_sge <= max_sge_rd.
+	 */
+	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
+	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+				 device->attrs.max_sge_rd);
+
 	return qp;
 }
 EXPORT_SYMBOL(ib_create_qp);
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1428,6 +1428,10 @@ struct ib_srq {
 	} ext;
 };
 
+/*
+ * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
+ * @max_read_sge: Maximum SGE elements per RDMA READ request.
+ */
 struct ib_qp {
 	struct ib_device       *device;
 	struct ib_pd	       *pd;
@@ -1449,6 +1453,8 @@ struct ib_qp {
 	void		       (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	u32			qp_num;
+	u32			max_write_sge;
+	u32			max_read_sge;
 	enum ib_qp_type		qp_type;
 };