net/mlx5: Synchronize correct IRQ when destroying CQ

[ Upstream commit 563476ae0c5e48a028cbfa38fa9d2fc0418eb88f ]

The CQ destroy is performed based on the IRQ number that is stored in
cq->irqn. That number wasn't set explicitly during CQ creation, and,
predictably, some API users of mlx5_core_create_cq() forgot to update
it.

As a result, the synchronization was performed on the wrong IRQ, number
0, instead of the real one.

As a fix, set the IRQ number directly in mlx5_core_create_cq() and
update all users accordingly.

Fixes: 1a86b377aa ("vdpa/mlx5: Add VDPA driver for supported mlx5 devices")
Fixes: ef1659ade3 ("IB/mlx5: Add DEVX support for CQ events")
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Shay Drory 2021-04-11 15:32:55 +03:00 committed by Greg Kroah-Hartman
parent 00a0c11ddd
commit 303ba011f5
10 changed files with 27 additions and 30 deletions
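
The heart of the rework is in eq.c below: the old mlx5_vector2eqn(), which returned both the EQ number and the IRQ number, becomes a static helper vector2eqnirqn() that fills only the out-parameters the caller passes; the exported mlx5_vector2eqn() and a new driver-internal mlx5_vector2irqn() (declared in lib/eq.h) are thin wrappers around it. The other half of the fix is that mlx5_core_create_cq() now records eq->core.irqn into cq->irqn itself, so individual CQ users can no longer forget to set it and the synchronization done at CQ destroy targets the correct IRQ. The following is a minimal, self-contained userspace C sketch of that optional-out-parameter pattern, not the kernel code: the struct mlx5_core_dev argument is dropped, the mlx5_ prefixes are omitted, and struct fake_eq, comp_eqs[] and the -1 error value are invented purely for illustration.

/* Userspace sketch of the optional-out-parameter pattern used by the
 * patch; NOT the kernel implementation. The device argument is elided
 * and comp_eqs[] is made-up data for illustration only.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_eq { int eqn; unsigned int irqn; };

static const struct fake_eq comp_eqs[] = {
	{ .eqn = 4, .irqn = 32 },
	{ .eqn = 5, .irqn = 33 },
};

/* Fill only the out-parameters the caller actually asked for. */
static int vector2eqnirqn(int vector, int *eqn, unsigned int *irqn)
{
	if (vector < 0 ||
	    (size_t)vector >= sizeof(comp_eqs) / sizeof(comp_eqs[0]))
		return -1;	/* stand-in for the kernel's error code */
	if (irqn)
		*irqn = comp_eqs[vector].irqn;
	if (eqn)
		*eqn = comp_eqs[vector].eqn;
	return 0;
}

/* Wrapper used by CQ creators that only need the EQ number. */
static int vector2eqn(int vector, int *eqn)
{
	return vector2eqnirqn(vector, eqn, NULL);
}

/* Wrapper used where only the IRQ number matters, as mlx5e_open_channel()
 * does in the en_main.c hunk below.
 */
static int vector2irqn(int vector, unsigned int *irqn)
{
	return vector2eqnirqn(vector, NULL, irqn);
}

int main(void)
{
	int eqn;
	unsigned int irqn;

	if (!vector2eqn(1, &eqn))
		printf("vector 1 -> eqn %d\n", eqn);
	if (!vector2irqn(1, &irqn))
		printf("vector 1 -> irqn %u\n", irqn);
	return 0;
}

In the kernel patch the same NULL checks let a single walk of comp_eqs_list serve both lookups, so no caller has to receive an output it does not need.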

drivers/infiniband/hw/mlx5/cq.c

@@ -930,7 +930,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
-	unsigned int irqn;
 	int eqn;
 	int err;
@@ -969,7 +968,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
-	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
 	if (err)
 		goto err_cqb;
@@ -992,7 +991,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		goto err_cqb;
 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-	cq->mcq.irqn = irqn;
 	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else

drivers/infiniband/hw/mlx5/devx.c

@@ -904,7 +904,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 	struct mlx5_ib_dev *dev;
 	int user_vector;
 	int dev_eqn;
-	unsigned int irqn;
 	int err;
 	if (uverbs_copy_from(&user_vector, attrs,
@@ -916,7 +915,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 		return PTR_ERR(c);
 	dev = to_mdev(c->ibucontext.device);
-	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
 	if (err < 0)
 		return err;

drivers/net/ethernet/mellanox/mlx5/core/cq.c

@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 		      cq->cqn);
 	cq->uar = dev->priv.uar;
+	cq->irqn = eq->core.irqn;
 	return 0;

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -1547,15 +1547,9 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 			       struct mlx5e_cq *cq)
 {
 	struct mlx5_core_cq *mcq = &cq->mcq;
-	int eqn_not_used;
-	unsigned int irqn;
 	int err;
 	u32 i;
-	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-	if (err)
-		return err;
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
 			       &cq->wq_ctrl);
 	if (err)
@@ -1569,7 +1563,6 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 	mcq->vector = param->eq_ix;
 	mcq->comp = mlx5e_completion_event;
 	mcq->event = mlx5e_cq_error_event;
-	mcq->irqn = irqn;
 	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1615,11 +1608,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	void *in;
 	void *cqc;
 	int inlen;
-	unsigned int irqn_not_used;
 	int eqn;
 	int err;
-	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
 	if (err)
 		return err;
@@ -1977,9 +1969,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	struct mlx5e_channel *c;
 	unsigned int irq;
 	int err;
-	int eqn;
-	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+	err = mlx5_vector2irqn(priv->mdev, ix, &irq);
 	if (err)
 		return err;

drivers/net/ethernet/mellanox/mlx5/core/eq.c

@@ -859,8 +859,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	return err;
 }
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+			  unsigned int *irqn)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 	struct mlx5_eq_comp *eq, *n;
@@ -869,8 +869,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
 		if (i++ == vector) {
-			*eqn = eq->core.eqn;
-			*irqn = eq->core.irqn;
+			if (irqn)
+				*irqn = eq->core.irqn;
+			if (eqn)
+				*eqn = eq->core.eqn;
 			err = 0;
 			break;
 		}
@@ -878,8 +880,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 	return err;
 }
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+	return vector2eqnirqn(dev, vector, eqn, NULL);
+}
 EXPORT_SYMBOL(mlx5_vector2eqn);
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+	return vector2eqnirqn(dev, vector, NULL, irqn);
+}
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 {
 	return dev->priv.eq_table->num_comp_eqs;

drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c

@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	struct mlx5_wq_param wqp;
 	struct mlx5_cqe64 *cqe;
 	int inlen, err, eqn;
-	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
 	u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 		goto err_cqwq;
 	}
-	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
 	if (err) {
 		kvfree(in);
 		goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	*conn->cq.mcq.arm_db = 0;
 	conn->cq.mcq.vector = 0;
 	conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
-	conn->cq.mcq.irqn = irqn;
 	conn->cq.mcq.uar = fdev->conn_res.uar;
 	tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);

drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h

@@ -98,4 +98,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
 #endif
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
 #endif

drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c

@@ -711,7 +711,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	struct mlx5_cqe64 *cqe;
 	struct mlx5dr_cq *cq;
 	int inlen, err, eqn;
-	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
 	int vector;
@@ -744,7 +743,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 		goto err_cqwq;
 	vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
-	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(mdev, vector, &eqn);
 	if (err) {
 		kvfree(in);
 		goto err_cqwq;
@@ -780,7 +779,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	*cq->mcq.arm_db = cpu_to_be32(2 << 28);
 	cq->mcq.vector = 0;
-	cq->mcq.irqn = irqn;
 	cq->mcq.uar = uar;
 	return cq;

drivers/vdpa/mlx5/net/mlx5_vnet.c

@@ -511,7 +511,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
 	void __iomem *uar_page = ndev->mvdev.res.uar->map;
 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	struct mlx5_vdpa_cq *vcq = &mvq->cq;
-	unsigned int irqn;
 	__be64 *pas;
 	int inlen;
 	void *cqc;
@@ -551,7 +550,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
 	/* Use vector 0 by default. Consider adding code to choose least used
 	 * vector.
 	 */
-	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+	err = mlx5_vector2eqn(mdev, 0, &eqn);
 	if (err)
 		goto err_vec;

include/linux/mlx5/driver.h

@@ -981,8 +981,7 @@ void mlx5_unregister_debugfs(void);
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);