Skip to content

Commit 632bc3f

Browse files
Bart Van Assche
Bart Van Assche
authored and committed
IB/core, RDMA RW API: Do not exceed QP SGE send limit
Compute the SGE limit for RDMA READ and WRITE requests in ib_create_qp(). Use that limit in the RDMA RW API implementation.

Signed-off-by: Bart Van Assche <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: Steve Wise <[email protected]>
Cc: Parav Pandit <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Laurence Oberman <[email protected]>
Cc: <[email protected]> #v4.7+
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
1 parent eaa74ec commit 632bc3f

File tree

3 files changed

+17
-8
lines changed

3 files changed

+17
-8
lines changed

drivers/infiniband/core/rw.c

+2-8
Original file line numberDiff line numberDiff line change
@@ -58,13 +58,6 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
5858
return false;
5959
}
6060

61-
static inline u32 rdma_rw_max_sge(struct ib_device *dev,
62-
enum dma_data_direction dir)
63-
{
64-
return dir == DMA_TO_DEVICE ?
65-
dev->attrs.max_sge : dev->attrs.max_sge_rd;
66-
}
67-
6861
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
6962
{
7063
/* arbitrary limit to avoid allocating gigantic resources */
@@ -186,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
186179
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
187180
{
188181
struct ib_device *dev = qp->pd->device;
189-
u32 max_sge = rdma_rw_max_sge(dev, dir);
182+
u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
183+
qp->max_read_sge;
190184
struct ib_sge *sge;
191185
u32 total_len = 0, i, j;
192186

drivers/infiniband/core/verbs.c

+9
Original file line numberDiff line numberDiff line change
@@ -814,6 +814,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
814814
}
815815
}
816816

817+
/*
818+
* Note: all hw drivers guarantee that max_send_sge is lower than
819+
* the device RDMA WRITE SGE limit but not all hw drivers ensure that
820+
* max_send_sge <= max_sge_rd.
821+
*/
822+
qp->max_write_sge = qp_init_attr->cap.max_send_sge;
823+
qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
824+
device->attrs.max_sge_rd);
825+
817826
return qp;
818827
}
819828
EXPORT_SYMBOL(ib_create_qp);

include/rdma/ib_verbs.h

+6
Original file line numberDiff line numberDiff line change
@@ -1428,6 +1428,10 @@ struct ib_srq {
14281428
} ext;
14291429
};
14301430

1431+
/*
1432+
* @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1433+
* @max_read_sge: Maximum SGE elements per RDMA READ request.
1434+
*/
14311435
struct ib_qp {
14321436
struct ib_device *device;
14331437
struct ib_pd *pd;
@@ -1449,6 +1453,8 @@ struct ib_qp {
14491453
void (*event_handler)(struct ib_event *, void *);
14501454
void *qp_context;
14511455
u32 qp_num;
1456+
u32 max_write_sge;
1457+
u32 max_read_sge;
14521458
enum ib_qp_type qp_type;
14531459
};
14541460

0 commit comments

Comments (0)