5.3 rc RDMA pull request

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A few regression and bug fixes for the patches merged in the last
  cycle:

   - hns fixes a subtle crash from the ib core SGL rework

   - hfi1 fixes various error handling, oops and protocol errors

   - bnxt_re fixes a regression where nvmeof doesn't work on some
     configurations

   - mlx5 fixes a serious 'use after free' bug in how MR caching is
     handled

   - some edge case crashers in the new statistic core code

   - more siw static checker fixups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Fix RSS Toeplitz setup to be aligned with the HW specification
  IB/counters: Always initialize the port counter object
  IB/core: Fix querying total rdma stats
  IB/mlx5: Prevent concurrent MR updates during invalidation
  IB/mlx5: Fix clean_mr() to work in the expected order
  IB/mlx5: Move MRs to a kernel PD when freeing them to the MR cache
  IB/mlx5: Use direct mkey destroy command upon UMR unreg failure
  IB/mlx5: Fix unreg_umr to ignore the mkey state
  RDMA/siw: Remove set but not used variables 'rv'
  IB/mlx5: Replace kfree with kvfree
  RDMA/bnxt_re: Honor vlan_id in GID entry comparison
  IB/hfi1: Drop all TID RDMA READ RESP packets after r_next_psn
  IB/hfi1: Field not zero-ed when allocating TID flow memory
  IB/hfi1: Unreserve a flushed OPFN request
  IB/hfi1: Check for error on call to alloc_rsm_map_table
  RDMA/hns: Fix sg offset non-zero issue
  RDMA/siw: Fix error return code in siw_init_module()
commit 32a024b9a9
drivers/infiniband/core/counters.c:

@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
         u64 sum;
 
         port_counter = &dev->port_data[port].port_counter;
+        if (!port_counter->hstats)
+                return 0;
+
         sum = get_running_counters_hwstat_sum(dev, port, index);
         sum += port_counter->hstats->value[index];

@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
         struct rdma_port_counter *port_counter;
         u32 port;
 
-        if (!dev->ops.alloc_hw_stats || !dev->port_data)
+        if (!dev->port_data)
                 return;
 
         rdma_for_each_port(dev, port) {

@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
                 port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
                 mutex_init(&port_counter->lock);
 
+                if (!dev->ops.alloc_hw_stats)
+                        continue;
+
                 port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
                 if (!port_counter->hstats)
                         goto fail;

@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
         struct rdma_port_counter *port_counter;
         u32 port;
 
-        if (!dev->ops.alloc_hw_stats)
-                return;
-
         rdma_for_each_port(dev, port) {
                 port_counter = &dev->port_data[port].port_counter;
                 kfree(port_counter->hstats);
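The counters hunks above follow one pattern: per-port state (mode, lock) is now initialized unconditionally, the optional HW-stats allocation is skipped per port when the driver does not supply the hook, and readers must therefore tolerate a NULL hstats pointer. A minimal standalone sketch of that pattern, using illustrative types and names rather than the kernel's:

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's per-port counter state. */
struct ex_port_counter {
        int mode;               /* RDMA_COUNTER_MODE_NONE equivalent */
        unsigned long *hstats;  /* stays NULL when the driver has no HW stats */
};

/* Always set up the mode/lock-style state; only allocate stats when the
 * driver supports them (mirrors the new per-port "continue"). */
static bool ex_port_counter_init(struct ex_port_counter *pc, bool have_hw_stats)
{
        pc->mode = 0;
        pc->hstats = NULL;

        if (!have_hw_stats)
                return true;

        pc->hstats = calloc(16, sizeof(*pc->hstats));
        return pc->hstats != NULL;
}

/* Readers check for NULL, mirroring the new guard in
 * rdma_counter_get_hwstat_value(). */
static unsigned long ex_port_counter_read(const struct ex_port_counter *pc,
                                          unsigned int index)
{
        if (!pc->hstats)
                return 0;
        return pc->hstats[index];
}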
drivers/infiniband/hw/bnxt_re/ib_verbs.c:

@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
         struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
         struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
         struct bnxt_qplib_gid *gid_to_del;
+        u16 vlan_id = 0xFFFF;
 
         /* Delete the entry from the hardware */
         ctx = *context;

@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
         if (sgid_tbl && sgid_tbl->active) {
                 if (ctx->idx >= sgid_tbl->max)
                         return -EINVAL;
-                gid_to_del = &sgid_tbl->tbl[ctx->idx];
+                gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+                vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
                 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
                  * or via the ib_unregister_device path. In the former case QP1
                  * may not be destroyed yet, in which case just return as FW

@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
                 }
                 ctx->refcnt--;
                 if (!ctx->refcnt) {
-                        rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+                        rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+                                                 vlan_id, true);
                         if (rc) {
                                 dev_err(rdev_to_dev(rdev),
                                         "Failed to remove GID: %#x", rc);
drivers/infiniband/hw/bnxt_re/qplib_res.c:

@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                      struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                      u16 max)
 {
-        sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+        sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
         if (!sgid_tbl->tbl)
                 return -ENOMEM;

@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
         for (i = 0; i < sgid_tbl->max; i++) {
                 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                            sizeof(bnxt_qplib_gid_zero)))
-                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+                                            sgid_tbl->tbl[i].vlan_id, true);
         }
-        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+        memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
         memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
         memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
         sgid_tbl->active = 0;

@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                      struct net_device *netdev)
 {
-        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+        u32 i;
+
+        for (i = 0; i < sgid_tbl->max; i++)
+                sgid_tbl->tbl[i].vlan_id = 0xffff;
+
         memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
 }
drivers/infiniband/hw/bnxt_re/qplib_res.h:

@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
 };
 
 struct bnxt_qplib_sgid_tbl {
-        struct bnxt_qplib_gid *tbl;
+        struct bnxt_qplib_gid_info *tbl;
         u16 *hw_id;
         u16 max;
         u16 active;
drivers/infiniband/hw/bnxt_re/qplib_sp.c:

@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                         index, sgid_tbl->max);
                 return -EINVAL;
         }
-        memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+        memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
         return 0;
 }
 
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                        struct bnxt_qplib_gid *gid, bool update)
+                        struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
 {
         struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                    struct bnxt_qplib_res,

@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                 return -ENOMEM;
         }
         for (index = 0; index < sgid_tbl->max; index++) {
-                if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+                if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+                    vlan_id == sgid_tbl->tbl[index].vlan_id)
                         break;
         }
         if (index == sgid_tbl->max) {

@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                 if (rc)
                         return rc;
         }
-        memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+        memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
                sizeof(bnxt_qplib_gid_zero));
+        sgid_tbl->tbl[index].vlan_id = 0xFFFF;
         sgid_tbl->vlan[index] = 0;
         sgid_tbl->active--;
         dev_dbg(&res->pdev->dev,

@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
         }
         free_idx = sgid_tbl->max;
         for (i = 0; i < sgid_tbl->max; i++) {
-                if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+                if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+                    sgid_tbl->tbl[i].vlan_id == vlan_id) {
                         dev_dbg(&res->pdev->dev,
                                 "SGID entry already exist in entry %d!\n", i);
                         *index = i;

@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
         }
         /* Add GID to the sgid_tbl */
         memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+        sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
         sgid_tbl->active++;
         if (vlan_id != 0xFFFF)
                 sgid_tbl->vlan[free_idx] = 1;
drivers/infiniband/hw/bnxt_re/qplib_sp.h:

@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
         u8 data[16];
 };
 
+struct bnxt_qplib_gid_info {
+        struct bnxt_qplib_gid gid;
+        u16 vlan_id;
+};
+
 struct bnxt_qplib_ah {
         struct bnxt_qplib_gid dgid;
         struct bnxt_qplib_pd *pd;

@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
                         struct bnxt_qplib_gid *gid);
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                        struct bnxt_qplib_gid *gid, bool update);
+                        struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                         struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
                         bool update, u32 *index);
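Taken together, the bnxt_re hunks make the SGID table VLAN-aware: each slot is now a bnxt_qplib_gid_info carrying the GID plus its vlan_id, and both lookups and deletions compare the pair. As a hedged sketch (not taken from the patch itself, assuming the qplib headers above are included), a caller of the updated API would pass the VLAN from the same slot, roughly:

/* Hypothetical helper; it only illustrates the new bnxt_qplib_del_sgid()
 * signature from qplib_sp.h above. */
static int example_del_gid_slot(struct bnxt_qplib_sgid_tbl *sgid_tbl, u16 idx)
{
        struct bnxt_qplib_gid_info *ent = &sgid_tbl->tbl[idx];

        /* GID and VLAN come from the same entry, so two entries that differ
         * only in VLAN ID can no longer be confused. */
        return bnxt_qplib_del_sgid(sgid_tbl, &ent->gid, ent->vlan_id, true);
}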
drivers/infiniband/hw/hfi1/chip.c:

@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
         clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
 }
 
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
 {
         struct rsm_map_table *rmt;
         u64 val;

@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
         write_csr(dd, RCV_ERR_MASK, ~0ull);
 
         rmt = alloc_rsm_map_table(dd);
+        if (!rmt)
+                return -ENOMEM;
+
         /* set up QOS, including the QPN map table */
         init_qos(dd, rmt);
         init_fecn_handling(dd, rmt);

@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
                 RCV_BYPASS_HDR_SIZE_SHIFT);
         write_csr(dd, RCV_BYPASS, val);
+        return 0;
 }
 
 static void init_other(struct hfi1_devdata *dd)

@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
                 goto bail_cleanup;
 
         /* set initial RXE CSRs */
-        init_rxe(dd);
+        ret = init_rxe(dd);
+        if (ret)
+                goto bail_cleanup;
+
         /* set initial TXE CSRs */
         init_txe(dd);
         /* set initial non-RXE, non-TXE CSRs */
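The chip.c change is the usual void-to-int conversion: init_rxe() now reports when alloc_rsm_map_table() fails and hfi1_init_dd() bails out instead of continuing with a missing RSM map table. A tiny standalone sketch of that pattern (names are placeholders, not hfi1 code):

#include <errno.h>
#include <stdlib.h>

static int ex_init_step(void **table_out)
{
        *table_out = malloc(128);   /* stands in for alloc_rsm_map_table() */
        if (!*table_out)
                return -ENOMEM;
        return 0;
}

static int ex_device_init(void)
{
        void *table;
        int ret = ex_init_step(&table);

        if (ret)
                return ret;         /* propagate instead of ignoring */

        /* ... the rest of device init would use "table" ... */
        free(table);
        return 0;
}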
drivers/infiniband/hw/hfi1/rc.c:

@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
                     cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                         break;
                 trdma_clean_swqe(qp, wqe);
-                rvt_qp_wqe_unreserve(qp, wqe);
                 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
                 rvt_qp_complete_swqe(qp,
                                      wqe,

@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
         if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
             cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                 trdma_clean_swqe(qp, wqe);
-                rvt_qp_wqe_unreserve(qp, wqe);
                 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
                 rvt_qp_complete_swqe(qp,
                                      wqe,
drivers/infiniband/hw/hfi1/tid_rdma.c:

@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
                 flows[i].req = req;
                 flows[i].npagesets = 0;
                 flows[i].pagesets[0].mapped = 0;
+                flows[i].resync_npkts = 0;
         }
         req->flows = flows;
         return 0;

@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
         return NULL;
 }
 
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
-                   u32 psn, u16 *fidx)
-{
-        for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
-              tail = CIRC_NEXT(tail, MAX_FLOWS)) {
-                struct tid_rdma_flow *flow = &req->flows[tail];
-                u32 spsn, lpsn;
-
-                spsn = full_flow_psn(flow, flow->flow_state.spsn);
-                lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
-                if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
-                        if (fidx)
-                                *fidx = tail;
-                        return flow;
-                }
-        }
-        return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
-                                       u32 psn, u16 *fidx)
-{
-        return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
-                                  fidx);
-}
-
 /* TID RDMA READ functions */
 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
                                     struct ib_other_headers *ohdr, u32 *bth1,

@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                          * to prevent continuous Flow Sequence errors for any
                          * packets that could be still in the fabric.
                          */
-                        flow = find_flow(req, psn, NULL);
-                        if (!flow) {
-                                /*
-                                 * We can't find the IB PSN matching the
-                                 * received KDETH PSN. The only thing we can
-                                 * do at this point is report the error to
-                                 * the QP.
-                                 */
-                                hfi1_kern_read_tid_flow_free(qp);
-                                spin_unlock(&qp->s_lock);
-                                rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-                                return ret;
-                        }
+                        flow = &req->flows[req->clear_tail];
                         if (priv->s_flags & HFI1_R_TID_SW_PSN) {
                                 diff = cmp_psn(psn,
                                                flow->flow_state.r_next_psn);
drivers/infiniband/hw/hns/hns_roce_db.c:

@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
                          struct ib_udata *udata, unsigned long virt,
                          struct hns_roce_db *db)
 {
+        unsigned long page_addr = virt & PAGE_MASK;
         struct hns_roce_user_db_page *page;
+        unsigned int offset;
         int ret = 0;
 
         mutex_lock(&context->page_mutex);
 
         list_for_each_entry(page, &context->page_list, list)
-                if (page->user_virt == (virt & PAGE_MASK))
+                if (page->user_virt == page_addr)
                         goto found;
 
         page = kmalloc(sizeof(*page), GFP_KERNEL);

@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
         }
 
         refcount_set(&page->refcount, 1);
-        page->user_virt = (virt & PAGE_MASK);
-        page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+        page->user_virt = page_addr;
+        page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
         if (IS_ERR(page->umem)) {
                 ret = PTR_ERR(page->umem);
                 kfree(page);

@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
         list_add(&page->list, &context->page_list);
 
 found:
-        db->dma = sg_dma_address(page->umem->sg_head.sgl) +
-                  (virt & ~PAGE_MASK);
-        page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
-        db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+        offset = virt - page_addr;
+        db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+        db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
         db->u.user_page = page;
         refcount_inc(&page->refcount);
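The hns hunk stops writing the in-page offset into the scatterlist entry (which misbehaved after the ib core SGL rework) and instead keeps the page mapped at its aligned base, adding the offset to both the DMA and CPU addresses when the doorbell record is looked up. The arithmetic, as a plain C sketch with made-up names:

#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

struct ex_db {
        uint64_t dma;       /* bus address of the doorbell record */
        void *virt_addr;    /* CPU address of the doorbell record */
};

static void ex_map_db(struct ex_db *db, uint64_t page_dma, void *page_virt,
                      unsigned long user_virt)
{
        unsigned long page_addr = user_virt & EX_PAGE_MASK;
        unsigned int offset = user_virt - page_addr;

        /* The offset is applied here rather than stored into the SGL entry. */
        db->dma = page_dma + offset;
        db->virt_addr = (char *)page_virt + offset;
}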
drivers/infiniband/hw/mlx5/mlx5_ib.h:

@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
         u64 length;
         int access_flags;
         u32 mkey;
+        u8 ignore_free_state:1;
 };
 
 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
drivers/infiniband/hw/mlx5/mr.c:

@@ -545,14 +545,17 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                 return;
 
         c = order2idx(dev, mr->order);
-        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
-                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
-                return;
-        }
+        WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
 
-        if (unreg_umr(dev, mr))
-                return;
+        if (unreg_umr(dev, mr)) {
+                mr->allocated_from_cache = false;
+                destroy_mkey(dev, mr);
+                ent = &cache->ent[c];
+                if (ent->cur < ent->limit)
+                        queue_work(cache->wq, &ent->work);
+                return;
+        }
 
         ent = &cache->ent[c];
         spin_lock_irq(&ent->lock);
         list_add_tail(&mr->list, &ent->head);

@@ -1373,9 +1376,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                 return 0;
 
         umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-                              MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+                              MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
         umrwr.wr.opcode = MLX5_IB_WR_UMR;
+        umrwr.pd = dev->umrc.pd;
         umrwr.mkey = mr->mmkey.key;
+        umrwr.ignore_free_state = 1;
 
         return mlx5_ib_post_send_wait(dev, &umrwr);
 }

@@ -1577,10 +1582,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                 mr->sig = NULL;
         }
 
-        mlx5_free_priv_descs(mr);
-
-        if (!allocated_from_cache)
+        if (!allocated_from_cache) {
                 destroy_mkey(dev, mr);
+                mlx5_free_priv_descs(mr);
+        }
 }
 
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
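The mr.c hunks also change what happens when returning an MR to the cache fails at the UMR step: instead of just returning and leaving a stale mkey around, the MR is taken off the cache path and its mkey is destroyed directly. A standalone sketch of that fallback shape, with placeholder names rather than the mlx5 types:

#include <errno.h>
#include <stdbool.h>

struct ex_mr {
        bool allocated_from_cache;
};

static int ex_unreg_umr(struct ex_mr *mr) { (void)mr; return -EBUSY; /* simulate failure */ }
static void ex_destroy_mkey(struct ex_mr *mr) { (void)mr; }
static void ex_return_to_cache(struct ex_mr *mr) { (void)mr; }

static void ex_cache_free(struct ex_mr *mr)
{
        if (ex_unreg_umr(mr)) {
                /* UMR failed: never put a half-torn-down MR back in the cache */
                mr->allocated_from_cache = false;
                ex_destroy_mkey(mr);
                return;
        }
        ex_return_to_cache(mr);
}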
drivers/infiniband/hw/mlx5/odp.c:

@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
          * overwrite the same MTTs. Concurent invalidations might race us,
          * but they will write 0s as well, so no difference in the end result.
          */
-
+        mutex_lock(&umem_odp->umem_mutex);
         for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
                 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                 /*

@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
                                 idx - blk_start_idx + 1, 0,
                                 MLX5_IB_UPD_XLT_ZAP |
                                 MLX5_IB_UPD_XLT_ATOMIC);
+        mutex_unlock(&umem_odp->umem_mutex);
         /*
          * We are now sure that the device will not access the
          * memory. We can safely unmap it, and mark it as dirty if

@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 
         num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
                                  w->num_sge, 0);
-        kfree(w);
+        kvfree(w);
 }
 
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,

@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
         if (valid_req)
                 queue_work(system_unbound_wq, &work->work);
         else
-                kfree(work);
+                kvfree(work);
 
         srcu_read_unlock(&dev->mr_srcu, srcu_key);
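The first two odp.c hunks hold umem_mutex across the whole invalidation loop and the final XLT zap, so a concurrent MR update cannot interleave with the invalidation. The locking shape, as a small pthread-based sketch that deliberately uses none of the kernel's types:

#include <pthread.h>

struct ex_odp {
        pthread_mutex_t umem_mutex;
        int mapped[64];
};

/* Zap the whole range under the mutex so a concurrent updater cannot
 * re-populate entries in the middle of the loop. */
static void ex_invalidate_range(struct ex_odp *odp, int start, int end)
{
        pthread_mutex_lock(&odp->umem_mutex);
        for (int i = start; i < end; i++)
                odp->mapped[i] = 0;
        pthread_mutex_unlock(&odp->umem_mutex);
}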
drivers/infiniband/hw/mlx5/qp.c:

@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                 }
 
                 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
-                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                 memcpy(rss_key, ucmd.rx_hash_key, len);
                 break;
         }

@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 
         memset(umr, 0, sizeof(*umr));
 
-        if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-                umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
-        else
-                umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+        if (!umrwr->ignore_free_state) {
+                if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+                        /* fail if free */
+                        umr->flags = MLX5_UMR_CHECK_FREE;
+                else
+                        /* fail if not free */
+                        umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+        }
 
         umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
         if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
drivers/infiniband/sw/siw/siw_cm.c:

@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
 static void siw_cep_set_inuse(struct siw_cep *cep)
 {
         unsigned long flags;
-        int rv;
 retry:
         spin_lock_irqsave(&cep->lock, flags);
 
         if (cep->in_use) {
                 spin_unlock_irqrestore(&cep->lock, flags);
-                rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+                wait_event_interruptible(cep->waitq, !cep->in_use);
                 if (signal_pending(current))
                         flush_signals(current);
                 goto retry;
drivers/infiniband/sw/siw/siw_main.c:

@@ -612,6 +612,7 @@ static __init int siw_init_module(void)
 
         if (!siw_create_tx_threads()) {
                 pr_info("siw: Could not start any TX thread\n");
+                rv = -ENOMEM;
                 goto out_error;
         }
         /*
include/rdma/rdmavt_qp.h:

@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
 /**
  * rvt_qp_wqe_unreserve - clean reserved operation
  * @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
  *
  * This decrements the reserve use count.
  *

@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
  * the compiler does not juggle the order of the s_last
  * ring index and the decrementing of s_reserved_used.
  */
-static inline void rvt_qp_wqe_unreserve(
-        struct rvt_qp *qp,
-        struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
 {
-        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+        if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
                 atomic_dec(&qp->s_reserved_used);
                 /* insure no compiler re-order up to s_last change */
                 smp_mb__after_atomic();

@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
         u32 byte_len, last;
         int flags = wqe->wr.send_flags;
 
+        rvt_qp_wqe_unreserve(qp, flags);
         rvt_put_qp_swqe(qp, wqe);
 
         need_completion =
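After this header change, rvt_qp_wqe_unreserve() takes the saved send flags instead of the WQE pointer, and the hfi1 rc.c hunks above drop their direct calls because rvt_qp_complete_swqe() now performs the unreserve itself. A simplified sketch of the resulting caller shape, mirroring the rvt_qp_complete_swqe() hunk (not the full inline):

/* Simplified caller: sample the flags while the WQE is still valid, then
 * unreserve using only those flags. */
static inline void ex_complete_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        int flags = wqe->wr.send_flags;

        rvt_qp_wqe_unreserve(qp, flags);
        rvt_put_qp_swqe(qp, wqe);
        /* ... completion generation elided ... */
}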