RDMA: Convert RWQ tables logic to ib_core allocation scheme
Move struct ib_rwq_ind_table allocation to ib_core.

Link: https://lore.kernel.org/r/20200902081623.746359-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent d18bb3e152
commit c0a6b5ecc5
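(For context: in the ib_core allocation scheme, the driver no longer kzallocs its own object and returns an ERR_PTR. It embeds the ib_core struct as the first member of its private struct, reports the total size through INIT_RDMA_OBJ_SIZE(), and ib_core allocates the memory with rdma_zalloc_drv_obj() and fills the common fields before calling the driver. A minimal sketch of the converted contract follows; the demo_* names and the hw_rqtn field are hypothetical, not part of this patch.)

/* Hypothetical "demo" driver, illustrating the converted contract. */
struct demo_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;	/* must be the first member */
	u32 hw_rqtn;				/* driver-private HW handle */
};

static int demo_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
				     struct ib_rwq_ind_table_init_attr *init_attr,
				     struct ib_udata *udata)
{
	/*
	 * Memory was zero-allocated by ib_core via rdma_zalloc_drv_obj();
	 * the common fields (device, uobject, ind_tbl, log_ind_tbl_size,
	 * usecnt) are already set.  The driver only programs hardware and
	 * fills its private state.
	 */
	struct demo_rwq_ind_table *tbl = container_of(ib_rwq_ind_tbl,
			struct demo_rwq_ind_table, ib_rwq_ind_tbl);

	tbl->hw_rqtn = 0;	/* would come from a firmware command */
	return 0;		/* plain errno on failure, no ERR_PTR */
}

static const struct ib_device_ops demo_dev_ops = {
	.create_rwq_ind_table = demo_create_rwq_ind_table,
	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, demo_rwq_ind_table,
			   ib_rwq_ind_tbl),
};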
drivers/infiniband/core/device.c
@@ -2697,6 +2697,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_OBJ_SIZE(dev_ops, ib_cq);
 	SET_OBJ_SIZE(dev_ops, ib_mw);
 	SET_OBJ_SIZE(dev_ops, ib_pd);
+	SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
 	SET_OBJ_SIZE(dev_ops, ib_srq);
 	SET_OBJ_SIZE(dev_ops, ib_ucontext);
 	SET_OBJ_SIZE(dev_ops, ib_xrcd);
drivers/infiniband/core/uverbs_cmd.c
@@ -2993,11 +2993,11 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 {
 	struct ib_uverbs_ex_create_rwq_ind_table cmd;
 	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
-	struct ib_uobject *uobj;
+	struct ib_uobject *uobj;
 	int err;
 	struct ib_rwq_ind_table_init_attr init_attr = {};
 	struct ib_rwq_ind_table *rwq_ind_tbl;
-	struct ib_wq **wqs = NULL;
+	struct ib_wq **wqs = NULL;
 	u32 *wqs_handles = NULL;
 	struct ib_wq *wq = NULL;
 	int i, num_read_wqs;
@@ -3055,17 +3055,15 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 		goto put_wqs;
 	}
 
-	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
-	init_attr.ind_tbl = wqs;
-
-	rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
-						       &attrs->driver_udata);
-
-	if (IS_ERR(rwq_ind_tbl)) {
-		err = PTR_ERR(rwq_ind_tbl);
+	rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
+	if (!rwq_ind_tbl) {
+		err = -ENOMEM;
 		goto err_uobj;
 	}
 
+	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+	init_attr.ind_tbl = wqs;
+
 	rwq_ind_tbl->ind_tbl = wqs;
 	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
 	rwq_ind_tbl->uobject = uobj;
@@ -3073,6 +3071,11 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 	rwq_ind_tbl->device = ib_dev;
 	atomic_set(&rwq_ind_tbl->usecnt, 0);
 
+	err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
+					       &attrs->driver_udata);
+	if (err)
+		goto err_create;
+
 	for (i = 0; i < num_wq_handles; i++)
 		rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
 					UVERBS_LOOKUP_READ);
@@ -3084,6 +3087,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 	return uverbs_response(attrs, &resp, sizeof(resp));
 
+err_create:
+	kfree(rwq_ind_tbl);
 err_uobj:
 	uobj_alloc_abort(uobj, attrs);
 put_wqs:
drivers/infiniband/core/uverbs_std_types.c
@@ -81,12 +81,20 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
 {
 	struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
 	struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
-	int ret;
+	u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size);
+	int ret, i;
 
-	ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
+	if (atomic_read(&rwq_ind_tbl->usecnt))
+		return -EBUSY;
+
+	ret = rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
 	if (ib_is_destroy_retryable(ret, why, uobject))
 		return ret;
 
+	for (i = 0; i < table_size; i++)
+		atomic_dec(&ind_tbl[i]->usecnt);
+
+	kfree(rwq_ind_tbl);
 	kfree(ind_tbl);
 	return ret;
 }
drivers/infiniband/core/verbs.c
@@ -2443,29 +2443,6 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 }
 EXPORT_SYMBOL(ib_modify_wq);
 
-/*
- * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
- * @wq_ind_table: The Indirection Table to destroy.
- */
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
-{
-	int err, i;
-	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
-	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
-
-	if (atomic_read(&rwq_ind_table->usecnt))
-		return -EBUSY;
-
-	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
-	if (!err) {
-		for (i = 0; i < table_size; i++)
-			atomic_dec(&ind_tbl[i]->usecnt);
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
-
 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 		       struct ib_mr_status *mr_status)
 {
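(The destroy side inverts the same contract: ib_destroy_rwq_ind_table() is removed from verbs.c, and uverbs_free_rwq_ind_tbl() above now performs the usecnt check, drops the WQ references, and kfrees both the object and the WQ array. A driver's destroy callback is left with only the hardware teardown. Continuing the hypothetical demo driver sketched earlier; demo_fw_destroy_rqt is an invented stand-in for a firmware call, not a real API.)

static int demo_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct demo_rwq_ind_table *tbl = container_of(ib_rwq_ind_tbl,
			struct demo_rwq_ind_table, ib_rwq_ind_tbl);

	/* HW teardown only; ib_core owns and frees the memory */
	return demo_fw_destroy_rqt(tbl->hw_rqtn);
}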
drivers/infiniband/hw/mlx4/main.c
@@ -2577,6 +2577,9 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
 	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
 	.destroy_wq = mlx4_ib_destroy_wq,
 	.modify_wq = mlx4_ib_modify_wq,
+
+	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
+			   ib_rwq_ind_tbl),
 };
 
 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -367,6 +367,10 @@ struct mlx4_ib_ah {
 	union mlx4_ext_av av;
 };
 
+struct mlx4_ib_rwq_ind_table {
+	struct ib_rwq_ind_table ib_rwq_ind_tbl;
+};
+
 /****************************************/
 /* alias guid support */
 /****************************************/
@@ -902,11 +906,14 @@ int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
 
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-			      struct ib_rwq_ind_table_init_attr *init_attr,
-			      struct ib_udata *udata);
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata);
+static inline int
+mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
+{
+	return 0;
+}
 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 				       int *num_of_mtts);
 
drivers/infiniband/hw/mlx4/qp.c
@@ -4339,34 +4339,32 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 	return 0;
 }
 
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-			      struct ib_rwq_ind_table_init_attr *init_attr,
-			      struct ib_udata *udata)
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata)
 {
-	struct ib_rwq_ind_table *rwq_ind_table;
 	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
 	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+	struct ib_device *device = rwq_ind_table->device;
 	unsigned int base_wqn;
 	size_t min_resp_len;
-	int i;
-	int err;
+	int i, err = 0;
 
 	if (udata->inlen > 0 &&
 	    !ib_is_udata_cleared(udata, 0,
 				 udata->inlen))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (ind_tbl_size >
 	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
 		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
 			 ind_tbl_size,
 			 device->attrs.rss_caps.max_rwq_indirection_table_size);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	base_wqn = init_attr->ind_tbl[0]->wq_num;
@@ -4374,39 +4372,23 @@ struct ib_rwq_ind_table
 	if (base_wqn % ind_tbl_size) {
 		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
 			 base_wqn);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	for (i = 1; i < ind_tbl_size; i++) {
 		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
 			pr_debug("indirection table's WQNs aren't consecutive\n");
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		}
 	}
 
-	rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
-	if (!rwq_ind_table)
-		return ERR_PTR(-ENOMEM);
-
 	if (udata->outlen) {
 		resp.response_length = offsetof(typeof(resp), response_length) +
 					sizeof(resp.response_length);
 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
-		if (err)
-			goto err;
 	}
 
-	return rwq_ind_table;
-
-err:
-	kfree(rwq_ind_table);
-	return ERR_PTR(err);
-}
-
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
-{
-	kfree(ib_rwq_ind_tbl);
-	return 0;
-}
+	return err;
 }
 
 struct mlx4_ib_drain_cqe {
drivers/infiniband/hw/mlx5/cmd.c
@@ -168,14 +168,14 @@ void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
 	mlx5_cmd_exec_in(dev, destroy_tis, in);
 }
 
-void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
 {
 	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
 
 	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
 	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
 	MLX5_SET(destroy_rqt_in, in, uid, uid);
-	mlx5_cmd_exec_in(dev, destroy_rqt, in);
+	return mlx5_cmd_exec_in(dev, destroy_rqt, in);
 }
 
 int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
drivers/infiniband/hw/mlx5/cmd.h
@@ -47,7 +47,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
 int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
-void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
 int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
 				    u16 uid);
 void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
drivers/infiniband/hw/mlx5/main.c
@@ -4270,6 +4270,9 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
 	.destroy_wq = mlx5_ib_destroy_wq,
 	.get_netdev = mlx5_ib_get_netdev,
 	.modify_wq = mlx5_ib_modify_wq,
+
+	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
+			   ib_rwq_ind_tbl),
 };
 
 static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1243,9 +1243,9 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
-						      struct ib_rwq_ind_table_init_attr *init_attr,
-						      struct ib_udata *udata);
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata);
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
 			       struct ib_ucontext *context,
drivers/infiniband/hw/mlx5/qp.c
@@ -5099,12 +5099,13 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 	return 0;
 }
 
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
-						      struct ib_rwq_ind_table_init_attr *init_attr,
-						      struct ib_udata *udata)
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata)
 {
-	struct mlx5_ib_dev *dev = to_mdev(device);
-	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
+	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
+		to_mrwq_ind_table(ib_rwq_ind_table);
+	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
 	int sz = 1 << init_attr->log_ind_tbl_size;
 	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
 	size_t min_resp_len;
@@ -5117,31 +5118,25 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 	if (udata->inlen > 0 &&
 	    !ib_is_udata_cleared(udata, 0,
 				 udata->inlen))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	if (init_attr->log_ind_tbl_size >
 	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
 		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
 			    init_attr->log_ind_tbl_size,
 			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	min_resp_len =
 		offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
-		return ERR_PTR(-EINVAL);
-
-	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
-	if (!rwq_ind_tbl)
-		return ERR_PTR(-ENOMEM);
+		return -EINVAL;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
 	in = kvzalloc(inlen, GFP_KERNEL);
-	if (!in) {
-		err = -ENOMEM;
-		goto err;
-	}
+	if (!in)
+		return -ENOMEM;
 
 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
 
@@ -5156,9 +5151,8 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 
 	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
 	kvfree(in);
-
 	if (err)
-		goto err;
+		return err;
 
 	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
 	if (udata->outlen) {
@@ -5170,13 +5164,11 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 		goto err_copy;
 	}
 
-	return &rwq_ind_tbl->ib_rwq_ind_tbl;
+	return 0;
 
 err_copy:
 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
-err:
-	kfree(rwq_ind_tbl);
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
@@ -5184,10 +5176,7 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
 	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
 
-	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
-
-	kfree(rwq_ind_tbl);
-	return 0;
+	return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
 }
 
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
include/rdma/ib_verbs.h
@@ -2484,10 +2484,9 @@ struct ib_device_ops {
 	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
 			 u32 wq_attr_mask, struct ib_udata *udata);
-	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
-		struct ib_device *device,
-		struct ib_rwq_ind_table_init_attr *init_attr,
-		struct ib_udata *udata);
+	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
+				    struct ib_rwq_ind_table_init_attr *init_attr,
+				    struct ib_udata *udata);
 	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
 	struct ib_dm *(*alloc_dm)(struct ib_device *device,
 				  struct ib_ucontext *context,
@@ -2611,6 +2610,7 @@ struct ib_device_ops {
 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
 	DECLARE_RDMA_OBJ_SIZE(ib_mw);
 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
+	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
@@ -4297,7 +4297,6 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
 		 u32 wq_attr_mask);
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
 
 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
 		 unsigned int *sg_offset, unsigned int page_size);
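(For reference, the size plumbing these hunks rely on, paraphrased and simplified from include/rdma/ib_verbs.h of this era; the real INIT_RDMA_OBJ_SIZE also emits compile-time checks that the named member is a struct of the matching ib_core type placed at offset zero of the driver struct.)

/* Simplified paraphrase, not the verbatim kernel macros: */
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct

#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
	.size_##ib_struct = sizeof(struct drv_struct)

#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
	((struct ib_type *)kzalloc((ib_dev)->ops.size_##ib_type, GFP_KERNEL))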