bpf: move knowledge about post-translation offsets out of verifier

Use the fact that verifier ops are now separate from program
ops to define a separate set of callbacks for verification of
already translated programs.
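
Concretely (this is just the verifier change below, condensed for
illustration, not a new interface), the two entry points now select
different per-type callback tables:

	/* bpf_check(): program still uses abstract ctx offsets */
	env->ops = bpf_verifier_ops[env->prog->type];

	/* bpf_analyzer(): ctx accesses already rewritten to real offsets */
	env->ops = bpf_analyzer_ops[env->prog->type];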

Since we expect the analyzer ops to be defined only for
a small subset of all program types, initialize their array
by hand (don't use linux/bpf_types.h).
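
For context, the analyzer path is driven by code that works on an
already translated program, e.g. a hardware offload translator calling
bpf_analyzer(). A minimal sketch of such a caller follows; the my_*
names are made up for illustration, and the insn_hook callback is
assumed to be the one struct bpf_ext_analyzer_ops exposes at this
point in the tree.

	/* illustrative only: my_* names are not part of this patch */
	static int my_check_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
	{
		/* inspect verifier state for the instruction being walked */
		return 0;
	}

	static const struct bpf_ext_analyzer_ops my_analyzer_ops = {
		.insn_hook	= my_check_insn,	/* assumed callback name */
	};

	static int my_translator_verify(struct bpf_prog *prog, void *priv)
	{
		/* -EOPNOTSUPP for program types without analyzer ops */
		return bpf_analyzer(prog, &my_analyzer_ops, priv);
	}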

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jakub Kicinski, 2017-10-16 16:40:55 -07:00 (committed by David S. Miller)
parent 00176a34d9
commit 4f9218aaf8
3 changed files with 59 additions and 39 deletions

include/linux/bpf.h

@@ -291,6 +291,9 @@ DECLARE_PER_CPU(int, bpf_prog_active);
 #undef BPF_PROG_TYPE
 #undef BPF_MAP_TYPE
 
+extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
+extern const struct bpf_verifier_ops xdp_analyzer_ops;
+
 struct bpf_prog *bpf_prog_get(u32 ufd);
 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
 struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);

kernel/bpf/verifier.c

@@ -822,36 +822,6 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 	return err;
 }
 
-static bool analyzer_is_valid_access(struct bpf_verifier_env *env, int off,
-				     struct bpf_insn_access_aux *info)
-{
-	switch (env->prog->type) {
-	case BPF_PROG_TYPE_XDP:
-		switch (off) {
-		case offsetof(struct xdp_buff, data):
-			info->reg_type = PTR_TO_PACKET;
-			return true;
-		case offsetof(struct xdp_buff, data_end):
-			info->reg_type = PTR_TO_PACKET_END;
-			return true;
-		}
-		return false;
-	case BPF_PROG_TYPE_SCHED_CLS:
-		switch (off) {
-		case offsetof(struct sk_buff, data):
-			info->reg_type = PTR_TO_PACKET;
-			return true;
-		case offsetof(struct sk_buff, cb) +
-		     offsetof(struct bpf_skb_data_end, data_end):
-			info->reg_type = PTR_TO_PACKET_END;
-			return true;
-		}
-		return false;
-	default:
-		return false;
-	}
-}
-
 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
@@ -860,13 +830,8 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 		.reg_type = *reg_type,
 	};
 
-	if (env->analyzer_ops) {
-		if (analyzer_is_valid_access(env, off, &info)) {
-			*reg_type = info.reg_type;
-			return 0;
-		}
-	} else if (env->ops->is_valid_access &&
-		   env->ops->is_valid_access(off, size, t, &info)) {
+	if (env->ops->is_valid_access &&
+	    env->ops->is_valid_access(off, size, t, &info)) {
 		/* A non zero info.ctx_field_size indicates that this field is a
 		 * candidate for later verifier transformation to load the whole
 		 * field and then apply a mask when accessed with a narrower
@@ -874,9 +839,12 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 		 * will only allow for whole field access and rejects any other
 		 * type of narrower access.
 		 */
-		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		*reg_type = info.reg_type;
 
+		if (env->analyzer_ops)
+			return 0;
+
+		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		/* remember the offset of last byte accessed in ctx */
 		if (env->prog->aux->max_ctx_offset < off + size)
 			env->prog->aux->max_ctx_offset = off + size;
@@ -4400,12 +4368,21 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	return ret;
 }
 
+static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
+	[BPF_PROG_TYPE_XDP]		= &xdp_analyzer_ops,
+	[BPF_PROG_TYPE_SCHED_CLS]	= &tc_cls_act_analyzer_ops,
+};
+
 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 		 void *priv)
 {
 	struct bpf_verifier_env *env;
 	int ret;
 
+	if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
+	    !bpf_analyzer_ops[prog->type])
+		return -EOPNOTSUPP;
+
 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
 	if (!env)
 		return -ENOMEM;
@@ -4416,7 +4393,7 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 	if (!env->insn_aux_data)
 		goto err_free_env;
 	env->prog = prog;
-	env->ops = bpf_verifier_ops[env->prog->type];
+	env->ops = bpf_analyzer_ops[env->prog->type];
 	env->analyzer_ops = ops;
 	env->analyzer_priv = priv;

net/core/filter.c

@@ -3732,6 +3732,23 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	return bpf_skb_is_valid_access(off, size, type, info);
 }
 
+static bool
+tc_cls_act_is_valid_access_analyzer(int off, int size,
+				    enum bpf_access_type type,
+				    struct bpf_insn_access_aux *info)
+{
+	switch (off) {
+	case offsetof(struct sk_buff, data):
+		info->reg_type = PTR_TO_PACKET;
+		return true;
+	case offsetof(struct sk_buff, cb) +
+	     offsetof(struct bpf_skb_data_end, data_end):
+		info->reg_type = PTR_TO_PACKET_END;
+		return true;
+	}
+	return false;
+}
+
 static bool __is_valid_xdp_access(int off, int size)
 {
 	if (off < 0 || off >= sizeof(struct xdp_md))
@@ -3766,6 +3783,21 @@ static bool xdp_is_valid_access(int off, int size,
 	return __is_valid_xdp_access(off, size);
 }
 
+static bool xdp_is_valid_access_analyzer(int off, int size,
+					 enum bpf_access_type type,
+					 struct bpf_insn_access_aux *info)
+{
+	switch (off) {
+	case offsetof(struct xdp_buff, data):
+		info->reg_type = PTR_TO_PACKET;
+		return true;
+	case offsetof(struct xdp_buff, data_end):
+		info->reg_type = PTR_TO_PACKET_END;
+		return true;
+	}
+	return false;
+}
+
 void bpf_warn_invalid_xdp_action(u32 act)
 {
 	const u32 act_max = XDP_REDIRECT;
@@ -4411,6 +4443,10 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
 	.gen_prologue		= tc_cls_act_prologue,
 };
 
+const struct bpf_verifier_ops tc_cls_act_analyzer_ops = {
+	.is_valid_access	= tc_cls_act_is_valid_access_analyzer,
+};
+
 const struct bpf_prog_ops tc_cls_act_prog_ops = {
 	.test_run		= bpf_prog_test_run_skb,
 };
@@ -4421,6 +4457,10 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
 	.convert_ctx_access	= xdp_convert_ctx_access,
 };
 
+const struct bpf_verifier_ops xdp_analyzer_ops = {
+	.is_valid_access	= xdp_is_valid_access_analyzer,
+};
+
 const struct bpf_prog_ops xdp_prog_ops = {
 	.test_run		= bpf_prog_test_run_xdp,
 };