RDMA/rtrs-clt: Set minimum limit when creating QP
[ Upstream commit f47e4e3e71724f625958b0059f6c8ac5d44d27ef ]
Currently rtrs uses coarse numbers (bigger in general) when creating the QP,
which leads the hardware to create more resources than needed and only
wastes memory with no benefit.

- SERVICE con:
  For max_send_wr/max_recv_wr, it is SERVICE_CON_QUEUE_DEPTH * 2 + 2
- IO con:
  For max_send_wr/max_recv_wr, it is sess->queue_depth * 3 + 1
Fixes: 6a98d71dae ("RDMA/rtrs: client: main functionality")
Link: https://lore.kernel.org/r/20201217141915.56989-6-jinpu.wang@cloud.ionos.com
Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
Reviewed-by: Md Haris Iqbal <haris.iqbal@cloud.ionos.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
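
For illustration of the arithmetic in the message above, here is a minimal
stand-alone sketch in plain user-space C. The SERVICE_CON_QUEUE_DEPTH and
IO_QUEUE_DEPTH values below are made-up placeholders; the real values are
defined/negotiated inside the rtrs driver.

#include <stdio.h>

/* Hypothetical values, used only to show the formulas from the commit
 * message; the real SERVICE_CON_QUEUE_DEPTH and sess->queue_depth come
 * from the rtrs driver and session negotiation.
 */
#define SERVICE_CON_QUEUE_DEPTH 512
#define IO_QUEUE_DEPTH          128

int main(void)
{
	/* Old sizing: one coarse number used for send WRs, recv WRs and the CQ */
	unsigned int old_service_wr = SERVICE_CON_QUEUE_DEPTH * 3 + 2;

	/* New sizing: per direction, + 2 for drain and heartbeat */
	unsigned int service_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
	unsigned int service_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;

	/* IO con: QD * (REQ + RSP + FR REGS or INVS) + drain, per direction */
	unsigned int io_send_wr = IO_QUEUE_DEPTH * 3 + 1;
	unsigned int io_recv_wr = IO_QUEUE_DEPTH * 3 + 1;

	/* The CQ must hold completions for both directions */
	unsigned int io_cq_size = io_send_wr + io_recv_wr;

	printf("service con: old wr %u, new send/recv wr %u/%u\n",
	       old_service_wr, service_send_wr, service_recv_wr);
	printf("io con: send/recv wr %u/%u, cq size %u\n",
	       io_send_wr, io_recv_wr, io_cq_size);
	return 0;
}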
parent f8f1833d2a
commit 1a8e1385b2
drivers/infiniband/ulp/rtrs/rtrs-clt.c

@@ -1516,7 +1516,7 @@ static void destroy_con(struct rtrs_clt_con *con)
 static int create_con_cq_qp(struct rtrs_clt_con *con)
 {
 	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
-	u32 wr_queue_size;
+	u32 max_send_wr, max_recv_wr, cq_size;
 	int err, cq_vector;
 	struct rtrs_msg_rkey_rsp *rsp;
 
@@ -1536,7 +1536,8 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
 		 * + 2 for drain and heartbeat
 		 * in case qp gets into error state
 		 */
-		wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+		max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
 		/* We must be the first here */
 		if (WARN_ON(sess->s.dev))
 			return -EINVAL;
@@ -1568,25 +1569,29 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
 
 		/* Shared between connections */
 		sess->s.dev_ref++;
-		wr_queue_size =
+		max_send_wr =
 			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
 			      /* QD * (REQ + RSP + FR REGS or INVS) + drain */
 			      sess->queue_depth * 3 + 1);
+		max_recv_wr =
+			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+			      sess->queue_depth * 3 + 1);
 	}
 	/* alloc iu to recv new rkey reply when server reports flags set */
 	if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
-		con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+		con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
					      GFP_KERNEL, sess->s.dev->ib_dev,
					      DMA_FROM_DEVICE,
					      rtrs_clt_rdma_done);
 		if (!con->rsp_ius)
 			return -ENOMEM;
-		con->queue_size = wr_queue_size;
+		con->queue_size = max_recv_wr;
 	}
+	cq_size = max_send_wr + max_recv_wr;
 	cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
 	err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
-				cq_vector, wr_queue_size, wr_queue_size,
-				wr_queue_size, IB_POLL_SOFTIRQ);
+				cq_vector, cq_size, max_send_wr,
+				max_recv_wr, IB_POLL_SOFTIRQ);
 	/*
 	 * In case of error we do not bother to clean previous allocations,
 	 * since destroy_con_cq_qp() must be called.