bpf: remove stubs for cBPF from arch code
Remove the dummy bpf_jit_compile() stubs for eBPF JITs and make that a
single __weak function in the core that can be overridden similarly to
the eBPF one. Also remove stale pr_err() mentions of bpf_jit_compile.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c78f8bdfa1
commit 9383191da4
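The cleanup leans on weak symbol linkage: kernel/bpf/core.c now carries the one __weak default for each hook, and an architecture that provides a strong (non-weak) definition of the same symbol automatically wins at link time. Below is a minimal user-space sketch of that behaviour, using GCC/Clang's __attribute__((weak)) and a made-up jit_compile() symbol rather than the kernel's:

/* Illustration only (plain user-space C, not kernel code): a weak default
 * definition and how a strong definition elsewhere overrides it, which is
 * the mechanism the __weak stubs in kernel/bpf/core.c rely on.
 *
 * core.c -- weak fallback, analogous to the core's bpf_int_jit_compile() stub.
 */
#include <stdio.h>

__attribute__((weak)) const char *jit_compile(void)
{
        return "weak stub: program stays interpreted";
}

int main(void)
{
        /* Built alone (cc core.c), this prints the weak stub's message.
         * Linked together with arch_jit.c below, the strong definition
         * there is chosen instead -- no #ifdef or registration needed.
         */
        printf("%s\n", jit_compile());
        return 0;
}

/* arch_jit.c -- strong definition, analogous to an arch JIT:
 *
 *   const char *jit_compile(void)
 *   {
 *           return "arch JIT: program compiled to native code";
 *   }
 */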
@@ -813,11 +813,6 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-	/* Nothing to do here. We support Internal BPF. */
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
@@ -961,8 +961,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 	return 0;
 }
 
-void bpf_jit_compile(struct bpf_prog *fp) { }
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
 	u32 proglen;
@@ -1262,14 +1262,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 	return 0;
 }
 
-/*
- * Classic BPF function stub. BPF programs will be converted into
- * eBPF and then bpf_int_jit_compile() will be called.
- */
-void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
 /*
  * Compile eBPF program "fp"
  */
@@ -1067,13 +1067,13 @@ xadd: if (is_imm8(insn->off))
 
 		ilen = prog - temp;
 		if (ilen > BPF_MAX_INSN_SIZE) {
-			pr_err("bpf_jit_compile fatal insn size error\n");
+			pr_err("bpf_jit: fatal insn size error\n");
 			return -EFAULT;
 		}
 
 		if (image) {
 			if (unlikely(proglen + ilen > oldproglen)) {
-				pr_err("bpf_jit_compile fatal error\n");
+				pr_err("bpf_jit: fatal error\n");
 				return -EFAULT;
 			}
 			memcpy(image + proglen, temp, ilen);
@@ -1085,10 +1085,6 @@ xadd: if (is_imm8(insn->off))
 	return proglen;
 }
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
@@ -607,6 +607,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -625,7 +626,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
 void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 
-void bpf_jit_compile(struct bpf_prog *fp);
 void bpf_jit_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
@@ -669,10 +669,6 @@ static inline bool bpf_jit_blinding_enabled(void)
 	return true;
 }
 #else
-static inline void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
 static inline void bpf_jit_free(struct bpf_prog *fp)
 {
 	bpf_prog_unlock_free(fp);
@@ -1154,12 +1154,22 @@ const struct bpf_func_proto bpf_tail_call_proto = {
 	.arg3_type = ARG_ANYTHING,
 };
 
-/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
+ * It is encouraged to implement bpf_int_jit_compile() instead, so that
+ * eBPF and implicitly also cBPF can get JITed!
+ */
 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	return prog;
 }
 
+/* Stub for JITs that support eBPF. All cBPF code gets transformed into
+ * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
+ */
+void __weak bpf_jit_compile(struct bpf_prog *prog)
+{
+}
+
 bool __weak bpf_helper_changes_pkt_data(void *func)
 {
 	return false;
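For reference, a schematic of what an architecture's strong override of the weak bpf_int_jit_compile() stub tends to look like after this change: on success it marks the program as JITed, on failure it simply returns the program it was handed so the interpreter keeps running it. The stand-in struct bpf_prog, bpf_jit_enable flag and arch_emit_insns() helper below are illustrative only, not any particular arch's code:

/* Schematic only: stand-in types/helpers so this sketch is self-contained;
 * in the kernel they come from <linux/filter.h> and the arch JIT itself.
 */
struct bpf_prog {
        unsigned int jited:1;   /* set once native code has been emitted */
        /* ... instructions, length, aux data ... */
};

static int bpf_jit_enable = 1;  /* stands in for the real JIT sysctl */

/* Hypothetical code generator standing in for the arch's emit loop. */
static int arch_emit_insns(struct bpf_prog *prog)
{
        (void)prog;
        return 0;               /* pretend code generation succeeded */
}

/* Strong definition: at link time this takes precedence over the __weak
 * stub that this commit adds to kernel/bpf/core.c.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
        if (!bpf_jit_enable)
                return prog;    /* JIT disabled: keep interpreting */

        if (arch_emit_insns(prog))
                return prog;    /* emit failed: fall back to the interpreter */

        prog->jited = 1;        /* success: program now runs as native code */
        return prog;
}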
|
||||
|
|
Loading…
Reference in New Issue
Block a user