net: Fix offloading indirect devices dependency on qdisc order creation

[ Upstream commit 74fc4f828769cca1c3be89ea92cb88feaa27ef52 ]

Currently, when creating an ingress qdisc on an indirect device before
the driver has registered for callbacks, the driver will not have a
chance to register its filter configuration callbacks.

To fix that, modify the code such that it keeps track of all the
ingress qdiscs that call flow_indr_dev_setup_offload(). When a driver
calls flow_indr_dev_register(), go through the list of tracked ingress
qdiscs and call the driver's callback entry point so as to give it a
chance to register its callback.
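
As a rough sketch of the flow this enables (my_driver_probe(),
my_indr_setup_cb() and my_priv are illustrative names, not part of this
patch; flow_indr_dev_register() is the real entry point), a driver that
loads after the qdisc already exists now gets the bind replayed:

/* Illustrative driver probe; only flow_indr_dev_register() is real API. */
static int my_driver_probe(void *my_priv)
{
	/* Before this patch, an ingress qdisc created on an indirect
	 * (e.g. tunnel) device before this point was never offered to
	 * the driver, so its filters could not be offloaded. Now the
	 * core walks flow_indir_dev_list and replays the bind for each
	 * tracked qdisc, invoking my_indr_setup_cb() as if the qdisc
	 * had just been created.
	 */
	return flow_indr_dev_register(my_indr_setup_cb, my_priv);
}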

Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 5 files changed, 92 insertions(+), 1 deletion(-)

--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h

@@ -444,6 +444,7 @@ struct flow_block_offload {
 	struct list_head *driver_block_list;
 	struct netlink_ext_ack *extack;
 	struct Qdisc *sch;
+	struct list_head *cb_list_head;
 };
 
 enum tc_setup_type;

--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c

@@ -321,6 +321,7 @@ EXPORT_SYMBOL(flow_block_cb_setup_simple);
 static DEFINE_MUTEX(flow_indr_block_lock);
 static LIST_HEAD(flow_block_indr_list);
 static LIST_HEAD(flow_block_indr_dev_list);
+static LIST_HEAD(flow_indir_dev_list);
 
 struct flow_indr_dev {
 	struct list_head		list;
@@ -346,6 +347,33 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
 	return indr_dev;
 }
 
+struct flow_indir_dev_info {
+	void *data;
+	struct net_device *dev;
+	struct Qdisc *sch;
+	enum tc_setup_type type;
+	void (*cleanup)(struct flow_block_cb *block_cb);
+	struct list_head list;
+	enum flow_block_command command;
+	enum flow_block_binder_type binder_type;
+	struct list_head *cb_list;
+};
+
+static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
+{
+	struct flow_block_offload bo;
+	struct flow_indir_dev_info *cur;
+
+	list_for_each_entry(cur, &flow_indir_dev_list, list) {
+		memset(&bo, 0, sizeof(bo));
+		bo.command = cur->command;
+		bo.binder_type = cur->binder_type;
+		INIT_LIST_HEAD(&bo.cb_list);
+		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
+		list_splice(&bo.cb_list, cur->cb_list);
+	}
+}
+
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 {
 	struct flow_indr_dev *indr_dev;
@@ -367,6 +395,7 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 	}
 
 	list_add(&indr_dev->list, &flow_block_indr_dev_list);
+	existing_qdiscs_register(cb, cb_priv);
 	mutex_unlock(&flow_indr_block_lock);
 
 	return 0;
@@ -463,7 +492,59 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
 }
 EXPORT_SYMBOL(flow_indr_block_cb_alloc);
 
-int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
+static struct flow_indir_dev_info *find_indir_dev(void *data)
+{
+	struct flow_indir_dev_info *cur;
+
+	list_for_each_entry(cur, &flow_indir_dev_list, list) {
+		if (cur->data == data)
+			return cur;
+	}
+	return NULL;
+}
+
+static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
+			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
+			 struct flow_block_offload *bo)
+{
+	struct flow_indir_dev_info *info;
+
+	info = find_indir_dev(data);
+	if (info)
+		return -EEXIST;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->data = data;
+	info->dev = dev;
+	info->sch = sch;
+	info->type = type;
+	info->cleanup = cleanup;
+	info->command = bo->command;
+	info->binder_type = bo->binder_type;
+	info->cb_list = bo->cb_list_head;
+
+	list_add(&info->list, &flow_indir_dev_list);
+	return 0;
+}
+
+static int indir_dev_remove(void *data)
+{
+	struct flow_indir_dev_info *info;
+
+	info = find_indir_dev(data);
+	if (!info)
+		return -ENOENT;
+
+	list_del(&info->list);
+	kfree(info);
+
+	return 0;
+}
+
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
 				enum tc_setup_type type, void *data,
 				struct flow_block_offload *bo,
 				void (*cleanup)(struct flow_block_cb *block_cb))
@@ -471,6 +552,12 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
 	struct flow_indr_dev *this;
 
 	mutex_lock(&flow_indr_block_lock);
+
+	if (bo->command == FLOW_BLOCK_BIND)
+		indir_dev_add(data, dev, sch, type, cleanup, bo);
+	else if (bo->command == FLOW_BLOCK_UNBIND)
+		indir_dev_remove(data);
+
 	list_for_each_entry(this, &flow_block_indr_dev_list, list)
 		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
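
For orientation, a hedged sketch of the driver-side bind callback that
existing_qdiscs_register() replays (the my_* names are illustrative;
flow_indr_block_cb_alloc() and flow_block_cb_add() are the existing core
helpers). The entries it queues on bo->cb_list are what the
list_splice() above moves onto the tracked cb_list:

static int my_indr_setup_cb(struct net_device *dev, struct Qdisc *sch,
			    void *cb_priv, enum tc_setup_type type,
			    void *type_data, void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_offload *bo = type_data;
	struct flow_block_cb *block_cb;

	if (type != TC_SETUP_BLOCK || bo->command != FLOW_BLOCK_BIND)
		return -EOPNOTSUPP;

	/* my_setup_tc_cb()/my_release() are driver-private hooks. */
	block_cb = flow_indr_block_cb_alloc(my_setup_tc_cb, dev, cb_priv,
					    my_release, bo, dev, sch, data,
					    cb_priv, cleanup);
	if (IS_ERR(block_cb))
		return PTR_ERR(block_cb);

	/* Queue on bo->cb_list; for a replayed bind, the core then
	 * splices this onto the qdisc owner's flow_block.cb_list.
	 */
	flow_block_cb_add(block_cb, bo);
	return 0;
}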

--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c

@@ -940,6 +940,7 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
 	bo->command = cmd;
 	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	bo->extack = extack;
+	bo->cb_list_head = &flowtable->flow_block.cb_list;
 	INIT_LIST_HEAD(&bo->cb_list);
 }
 

--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c

@@ -323,6 +323,7 @@ static void nft_flow_block_offload_init(struct flow_block_offload *bo,
 	bo->command = cmd;
 	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	bo->extack = extack;
+	bo->cb_list_head = &basechain->flow_block.cb_list;
 	INIT_LIST_HEAD(&bo->cb_list);
 }
 

--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c

@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
 	bo->block_shared = shared;
 	bo->extack = extack;
 	bo->sch = sch;
+	bo->cb_list_head = &flow_block->cb_list;
 	INIT_LIST_HEAD(&bo->cb_list);
 }
 