Merge branch 'i40iw' into k.o/for-4.8
commit 6a89d89d85
@@ -1567,12 +1567,12 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
 				ret = i40iw_manage_qhash(iwdev, cm_info,
 							 I40IW_QHASH_TYPE_TCP_SYN,
 							 I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
-				kfree(child_listen_node);
-				cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
 				i40iw_debug(&iwdev->sc_dev,
 					    I40IW_DEBUG_CM,
 					    "freed pointer = %p\n",
 					    child_listen_node);
+				kfree(child_listen_node);
+				cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
 			}
 		spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
 
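The hunk above moves kfree(child_listen_node) and the destroyed-listener counter below the i40iw_debug() call, so the pointer is no longer handed to the debug statement after it has been freed. A minimal stand-alone sketch of the same ordering rule (plain printf() instead of the driver's debug macro; the names below are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct child_node {
	int id;
};

/* Log while the object is still alive, then release it and bump the counter. */
static void destroy_child(struct child_node *node, unsigned int *destroyed)
{
	printf("freed pointer = %p (id=%d)\n", (void *)node, node->id);
	kfree_equivalent: free(node);
	(*destroyed)++;
}

int main(void)
{
	unsigned int destroyed = 0;
	struct child_node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	n->id = 1;
	destroy_child(n, &destroyed);
	printf("listen nodes destroyed: %u\n", destroyed);
	return 0;
}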
@@ -1557,6 +1557,9 @@ enum i40iw_alignment {
 #define I40IW_RING_MOVE_TAIL(_ring) \
 	(_ring).tail = ((_ring).tail + 1) % (_ring).size
 
+#define I40IW_RING_MOVE_HEAD_NOCHECK(_ring) \
+	(_ring).head = ((_ring).head + 1) % (_ring).size
+
 #define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
 	(_ring).tail = ((_ring).tail + (_count)) % (_ring).size
 
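The new I40IW_RING_MOVE_HEAD_NOCHECK advances the ring head without the full-ring check (and ret_code output) that the existing I40IW_RING_MOVE_HEAD performs; like the tail macros, it is plain modular arithmetic. A stand-alone sketch, assuming only that the ring descriptor carries head, tail and size fields (the struct below is a reduced stand-in, not the driver's struct i40iw_ring):

#include <stdio.h>

struct ring {
	unsigned int head;
	unsigned int tail;
	unsigned int size;
};

#define RING_MOVE_TAIL(_ring) \
	(_ring).tail = ((_ring).tail + 1) % (_ring).size

#define RING_MOVE_HEAD_NOCHECK(_ring) \
	(_ring).head = ((_ring).head + 1) % (_ring).size

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .size = 4 };
	int i;

	/* Both indices wrap modulo the ring size: 1, 2, 3, 0, 1 */
	for (i = 0; i < 5; i++) {
		RING_MOVE_HEAD_NOCHECK(r);
		RING_MOVE_TAIL(r);
		printf("head=%u tail=%u\n", r.head, r.tail);
	}
	return 0;
}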
@@ -1025,6 +1025,8 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
 	u16 txoffset, bufoffset;
 
 	buf = i40iw_puda_get_listbuf(pbufl);
+	if (!buf)
+		return;
 	nextseqnum = buf->seqnum + fpdu_len;
 	txbuf->totallen = buf->hdrlen + fpdu_len;
 	txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
@@ -1048,6 +1050,8 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
 		fpdu_len -= buf->datalen;
 		i40iw_puda_ret_bufpool(ieq, buf);
 		buf = i40iw_puda_get_listbuf(pbufl);
+		if (!buf)
+			return;
 		bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
 	} while (1);
 
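Both hunks above add a NULL check after i40iw_puda_get_listbuf(), so the partial-FPDU completion path bails out instead of dereferencing a missing buffer when the list runs dry. A small sketch of the pattern with a hypothetical list-pop helper (get_listbuf() and the struct below are illustrative, not the driver's types):

#include <stddef.h>
#include <stdio.h>

struct buf {
	struct buf *next;
	size_t datalen;
};

/* Pop the first buffer; returns NULL once the list is exhausted. */
static struct buf *get_listbuf(struct buf **head)
{
	struct buf *b = *head;

	if (b)
		*head = b->next;
	return b;
}

static void consume(struct buf **head, size_t fpdu_len)
{
	struct buf *b;

	do {
		b = get_listbuf(head);
		if (!b)		/* list ran dry: stop, do not dereference */
			return;
		if (b->datalen >= fpdu_len)
			break;
		fpdu_len -= b->datalen;
	} while (1);
	printf("consumed fpdu\n");
}

int main(void)
{
	struct buf b2 = { NULL, 64 }, b1 = { &b2, 64 };
	struct buf *head = &b1;

	consume(&head, 200);	/* list too short: returns quietly */
	return 0;
}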
@@ -667,7 +667,7 @@ struct i40iw_tcp_offload_info {
 	bool time_stamp;
 	u8 cwnd_inc_limit;
 	bool drop_ooo_seg;
-	bool dup_ack_thresh;
+	u8 dup_ack_thresh;
 	u8 ttl;
 	u8 src_mac_addr_idx;
 	bool avoid_stretch_ack;
@@ -291,9 +291,9 @@ static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
 
 	i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
 
-	for (i = 1; i < op_info->num_lo_sges; i++) {
-		byte_off = 32 + (i - 1) * 16;
+	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
 		i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
+		byte_off += 16;
 	}
 
 	wmb(); /* make sure WQE is populated before valid bit is set */
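The rewritten loop above (and the identical rewrites in i40iw_send and i40iw_post_receive in the next two hunks) carries byte_off across iterations instead of recomputing 32 + (i - 1) * 16 on every pass; both forms place fragment i at the same offset within the WQE. A quick stand-alone check of the equivalence:

#include <stdio.h>

int main(void)
{
	unsigned int num_sges = 4;
	unsigned int i, byte_off;

	for (i = 1; i < num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		printf("old form: sge %u -> offset %u\n", i, byte_off);
	}

	/* Same offsets (32, 48, 64), byte_off just survives between iterations. */
	for (i = 1, byte_off = 32; i < num_sges; i++) {
		printf("new form: sge %u -> offset %u\n", i, byte_off);
		byte_off += 16;
	}
	return 0;
}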
@@ -401,9 +401,9 @@ static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
 
 	i40iw_set_fragment(wqe, 0, op_info->sg_list);
 
-	for (i = 1; i < op_info->num_sges; i++) {
-		byte_off = 32 + (i - 1) * 16;
+	for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
 		i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
+		byte_off += 16;
 	}
 
 	wmb(); /* make sure WQE is populated before valid bit is set */
@@ -685,9 +685,9 @@ static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
 
 	i40iw_set_fragment(wqe, 0, info->sg_list);
 
-	for (i = 1; i < info->num_sges; i++) {
-		byte_off = 32 + (i - 1) * 16;
+	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
 		i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
+		byte_off += 16;
 	}
 
 	wmb(); /* make sure WQE is populated before valid bit is set */
@@ -753,8 +753,7 @@ static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
  * @post_cq: update cq tail
  */
 static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
-						       struct i40iw_cq_poll_info *info,
-						       bool post_cq)
+						       struct i40iw_cq_poll_info *info)
 {
 	u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
 	u64 *cqe, *sw_wqe;
@@ -762,7 +761,6 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
 	struct i40iw_ring *pring = NULL;
 	u32 wqe_idx, q_type, array_idx = 0;
 	enum i40iw_status_code ret_code = 0;
-	enum i40iw_status_code ret_code2 = 0;
 	bool move_cq_head = true;
 	u8 polarity;
 	u8 addl_wqes = 0;
@@ -870,19 +868,14 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
 			move_cq_head = false;
 
 	if (move_cq_head) {
-		I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);
-
-		if (ret_code2 && !ret_code)
-			ret_code = ret_code2;
+		I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
 
 		if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
 			cq->polarity ^= 1;
 
-		if (post_cq) {
-			I40IW_RING_MOVE_TAIL(cq->cq_ring);
-			set_64bit_val(cq->shadow_area, 0,
-				      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
-		}
+		I40IW_RING_MOVE_TAIL(cq->cq_ring);
+		set_64bit_val(cq->shadow_area, 0,
+			      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
 	} else {
 		if (info->is_srq)
 			return ret_code;
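With ret_code2 and the post_cq flag gone, a successful poll now always advances the CQ head (the NOCHECK macro has no failure path), toggles the polarity bit when the head wraps back to index 0, and unconditionally moves the tail and publishes the current head to the shadow area. A stand-alone sketch of that wrap-and-toggle sequence, with simplified types and the shadow area reduced to a plain integer:

#include <stdio.h>

struct cq_sketch {
	unsigned int head;
	unsigned int tail;
	unsigned int size;
	unsigned int polarity;
	unsigned long long shadow_area;	/* stand-in for the set_64bit_val() target */
};

static void advance_cq(struct cq_sketch *cq)
{
	cq->head = (cq->head + 1) % cq->size;	/* MOVE_HEAD_NOCHECK */
	if (cq->head == 0)
		cq->polarity ^= 1;		/* toggle valid-bit polarity on wrap */
	cq->tail = (cq->tail + 1) % cq->size;	/* MOVE_TAIL */
	cq->shadow_area = cq->head;		/* publish current head */
}

int main(void)
{
	struct cq_sketch cq = { 0, 0, 4, 0, 0 };
	int i;

	for (i = 0; i < 8; i++) {
		advance_cq(&cq);
		printf("head=%u tail=%u polarity=%u shadow=%llu\n",
		       cq.head, cq.tail, cq.polarity, cq.shadow_area);
	}
	return 0;
}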
@@ -327,7 +327,7 @@ struct i40iw_cq_ops {
 	void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
 					   enum i40iw_completion_notify);
 	enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
-							struct i40iw_cq_poll_info *, bool);
+							struct i40iw_cq_poll_info *);
 	enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
 	void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
 };
 
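The iw_cq_poll_completion member of the ops table loses its bool argument to match the new i40iw_cq_poll_completion() signature, so every caller that reaches the function through the table (such as i40iw_poll_cq further below) changes with it. A minimal sketch of an ops table whose poll member takes only the CQ and the poll-info pointer (all types below are illustrative):

#include <stdio.h>

struct cq { int pending; };
struct cq_poll_info { int wr_id; };

struct cq_ops {
	/* post_cq bool dropped: polling always updates the tail now. */
	int (*iw_cq_poll_completion)(struct cq *cq, struct cq_poll_info *info);
};

static int poll_completion(struct cq *cq, struct cq_poll_info *info)
{
	if (!cq->pending)
		return -1;	/* queue empty */
	cq->pending--;
	info->wr_id = 42;
	return 0;
}

int main(void)
{
	struct cq cq = { .pending = 1 };
	struct cq_poll_info info;
	struct cq_ops ops = { .iw_cq_poll_completion = poll_completion };

	while (!ops.iw_cq_poll_completion(&cq, &info))
		printf("completed wr_id=%d\n", info.wr_id);
	return 0;
}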
@@ -528,7 +528,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
 	status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
 
 	if (status)
-		return -ENOSYS;
+		return -ENOMEM;
 
 	sqdepth = sq_size << sqshift;
 	rqdepth = rq_size << rqshift;
@@ -670,7 +670,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
 
 	if (init_attr->qp_type != IB_QPT_RC) {
-		err_code = -ENOSYS;
+		err_code = -EINVAL;
 		goto error;
 	}
 	if (iwdev->push_mode)
@@ -1838,6 +1838,7 @@ struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
 	iwmr->ibmr.lkey = stag;
 	iwmr->page_cnt = 1;
 	iwmr->pgaddrmem[0] = addr;
+	iwmr->length = size;
 	status = i40iw_hwreg_mr(iwdev, iwmr, access);
 	if (status) {
 		i40iw_free_stag(iwdev, stag);
@@ -1861,7 +1862,7 @@ static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	u64 kva = 0;
 
-	return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+	return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
 }
 
 /**
@@ -2075,8 +2076,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
 			}
 
-			if (ret)
-				err = -EIO;
+			if (ret) {
+				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+					err = -ENOMEM;
+				else
+					err = -EINVAL;
+			}
 			break;
 		case IB_WR_RDMA_WRITE:
 			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
@@ -2097,8 +2102,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
 			}
 
-			if (ret)
-				err = -EIO;
+			if (ret) {
+				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+					err = -ENOMEM;
+				else
+					err = -EINVAL;
+			}
 			break;
 		case IB_WR_RDMA_READ_WITH_INV:
 			inv_stag = true;
@@ -2116,15 +2125,19 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
 			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
 			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
-			if (ret)
-				err = -EIO;
+			if (ret) {
+				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+					err = -ENOMEM;
+				else
+					err = -EINVAL;
+			}
 			break;
 		case IB_WR_LOCAL_INV:
 			info.op_type = I40IW_OP_TYPE_INV_STAG;
 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
 			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
 			if (ret)
-				err = -EIO;
+				err = -ENOMEM;
 			break;
 		case IB_WR_REG_MR:
 		{
@@ -2153,7 +2166,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 
 			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
 			if (ret)
-				err = -EIO;
+				err = -ENOMEM;
 			break;
 		}
 		default:
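The post_send hunks above replace the blanket err = -EIO with a split on the hardware status: I40IW_ERR_QP_TOOMANY_WRS_POSTED (presumably a full work-request ring) becomes -ENOMEM, anything else -EINVAL; i40iw_post_recv below adopts the same mapping. A compact sketch of that mapping as a helper, using an illustrative status enum rather than the driver's enum i40iw_status_code (the driver itself open-codes the if/else at each call site):

#include <errno.h>
#include <stdio.h>

/* Illustrative status codes; not the driver's enum i40iw_status_code. */
enum hw_status {
	HW_OK = 0,
	HW_ERR_TOOMANY_WRS,	/* too many work requests posted       */
	HW_ERR_BAD_ATTR,	/* anything else: bad/unsupported args */
};

static int hw_status_to_errno(enum hw_status st)
{
	switch (st) {
	case HW_OK:
		return 0;
	case HW_ERR_TOOMANY_WRS:
		return -ENOMEM;		/* resource exhaustion */
	default:
		return -EINVAL;		/* invalid request     */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       hw_status_to_errno(HW_OK),
	       hw_status_to_errno(HW_ERR_TOOMANY_WRS),
	       hw_status_to_errno(HW_ERR_BAD_ATTR));
	return 0;
}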
@@ -2193,6 +2206,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
 	enum i40iw_status_code ret = 0;
 	unsigned long flags;
+	int err = 0;
 
 	iwqp = (struct i40iw_qp *)ibqp;
 	ukqp = &iwqp->sc_qp.qp_uk;
@@ -2207,6 +2221,10 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
 		if (ret) {
 			i40iw_pr_err(" post_recv err %d\n", ret);
+			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+				err = -ENOMEM;
+			else
+				err = -EINVAL;
 			*bad_wr = ib_wr;
 			goto out;
 		}
@@ -2214,9 +2232,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 	}
 out:
 	spin_unlock_irqrestore(&iwqp->lock, flags);
-	if (ret)
-		return -ENOSYS;
-	return 0;
+	return err;
 }
 
 /**
@@ -2243,7 +2259,7 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
 
 	spin_lock_irqsave(&iwcq->lock, flags);
 	while (cqe_count < num_entries) {
-		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
 		if (ret == I40IW_ERR_QUEUE_EMPTY) {
 			break;
 		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
@@ -2513,7 +2529,7 @@ static int i40iw_modify_port(struct ib_device *ibdev,
 			     int port_modify_mask,
 			     struct ib_port_modify *props)
 {
-	return 0;
+	return -ENOSYS;
 }
 
 /**
@@ -2709,7 +2725,7 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
 
 	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
 	if (!iwdev->iwibdev)
-		return -ENOSYS;
+		return -ENOMEM;
 	iwibdev = iwdev->iwibdev;
 
 	ret = ib_register_device(&iwibdev->ibdev, NULL);
@@ -2734,5 +2750,5 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
 	kfree(iwdev->iwibdev->ibdev.iwcm);
 	iwdev->iwibdev->ibdev.iwcm = NULL;
 	ib_dealloc_device(&iwdev->iwibdev->ibdev);
-	return -ENOSYS;
+	return ret;
 }