bpf: Allow for map-in-map with dynamic inner array map entries
Recent work in f4d0525921 ("bpf: Add map_meta_equal map ops") and
134fede4ee ("bpf: Relax max_entries check for most of the inner map
types") added support for dynamic inner max elements for most
map-in-map types. Exceptions were maps like array or prog array where
the map_gen_lookup() callback uses the maps' max_entries field as a
constant when emitting instructions.

We recently implemented Maglev consistent hashing into Cilium's load
balancer, which uses map-in-map with an outer hash map and inner array
maps holding the Maglev backend table for each service. It was
designed this way to reduce overall memory consumption, since the
outer hash map avoids preallocating a large, flat memory area for all
services. Also, the number of service mappings is not always known a
priori.

The use case for dynamic inner array map entries is to further reduce
memory overhead: some services might have just a small number of
backends while others could have a large number. Right now the Maglev
backend tables for small and large numbers of backends would need the
same number of inner array map entries, which adds a lot of unneeded
overhead.

Dynamic inner array map entries can be realized by avoiding the
inlined code generation for their lookup. The lookup will still be
efficient since it calls into array_map_lookup_elem() directly and
thus avoids a retpoline. The patch adds a BPF_F_INNER_MAP flag to map
creation which skips inline code generation and relaxes the
array_map_meta_equal() check to ignore both maps' max_entries. This
still allows faster lookups for map-in-map when BPF_F_INNER_MAP is not
specified and dynamic max_entries is therefore not needed.

Example code generation where the inner map is a dynamically sized array:

  # bpftool p d x i 125
  int handle__sys_enter(void * ctx):
  ; int handle__sys_enter(void *ctx)
     0: (b4) w1 = 0
  ; int key = 0;
     1: (63) *(u32 *)(r10 -4) = r1
     2: (bf) r2 = r10
  ;
     3: (07) r2 += -4
  ; inner_map = bpf_map_lookup_elem(&outer_arr_dyn, &key);
     4: (18) r1 = map[id:468]
     6: (07) r1 += 272
     7: (61) r0 = *(u32 *)(r2 +0)
     8: (35) if r0 >= 0x3 goto pc+5
     9: (67) r0 <<= 3
    10: (0f) r0 += r1
    11: (79) r0 = *(u64 *)(r0 +0)
    12: (15) if r0 == 0x0 goto pc+1
    13: (05) goto pc+1
    14: (b7) r0 = 0
    15: (b4) w6 = -1
  ; if (!inner_map)
    16: (15) if r0 == 0x0 goto pc+6
    17: (bf) r2 = r10
  ;
    18: (07) r2 += -4
  ; val = bpf_map_lookup_elem(inner_map, &key);
    19: (bf) r1 = r0                               | No inlining but instead
    20: (85) call array_map_lookup_elem#149280     | call to array_map_lookup_elem()
  ; return val ? *val : -1;                        | for inner array lookup.
    21: (15) if r0 == 0x0 goto pc+1
  ; return val ? *val : -1;
    22: (61) r6 = *(u32 *)(r0 +0)
  ; }
    23: (bc) w0 = w6
    24: (95) exit

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20201010234006.7075-4-daniel@iogearbox.net
parent 9aa1206e8f
commit 4a8f87e60f
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -82,7 +82,7 @@ struct bpf_map_ops {
         void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
                                 int fd);
         void (*map_fd_put_ptr)(void *ptr);
-        u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+        int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
         u32 (*map_fd_sys_lookup_elem)(void *ptr);
         void (*map_seq_show_elem)(struct bpf_map *map, void *key,
                                   struct seq_file *m);
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -435,6 +435,9 @@ enum {
 
         /* Share perf_event among processes */
         BPF_F_PRESERVE_ELEMS    = (1U << 11),
+
+        /* Create a map that is suitable to be an inner map with dynamic max entries */
+        BPF_F_INNER_MAP         = (1U << 12),
 };
 
 /* Flags for BPF_PROG_QUERY. */
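As a usage note (not part of this diff), a hedged userspace sketch of what the new flag enables: inner arrays with differing max_entries living under one outer map, mirroring the Cilium use case from the commit message. It assumes a libbpf version that provides bpf_map_create() and bpf_map_create_opts (older libbpf exposed bpf_create_map_in_map() instead); map names, sizes and service IDs are made up, and error handling is omitted:

  #include <linux/bpf.h>
  #include <bpf/bpf.h>

  /* Create two backend tables of different sizes and one outer hash map
   * keyed by service ID whose values are the inner map fds.
   */
  int setup_service_tables(void)
  {
          LIBBPF_OPTS(bpf_map_create_opts, inner_opts,
                      .map_flags = BPF_F_INNER_MAP);
          int small_fd, large_fd, outer_fd;
          __u32 svc_id;

          small_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "backends_small",
                                    sizeof(__u32), sizeof(__u32), 16, &inner_opts);
          large_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "backends_large",
                                    sizeof(__u32), sizeof(__u32), 16384, &inner_opts);

          /* The template passed via inner_map_fd fixes key/value sizes and
           * map_flags for all inner maps, but with BPF_F_INNER_MAP their
           * max_entries no longer has to match.
           */
          LIBBPF_OPTS(bpf_map_create_opts, outer_opts, .inner_map_fd = small_fd);
          outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "services",
                                    sizeof(__u32), sizeof(__u32), 1024, &outer_opts);

          svc_id = 1;
          bpf_map_update_elem(outer_fd, &svc_id, &small_fd, BPF_ANY);
          svc_id = 2;
          bpf_map_update_elem(outer_fd, &svc_id, &large_fd, BPF_ANY);
          return outer_fd;
  }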
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -16,7 +16,7 @@
 
 #define ARRAY_CREATE_FLAG_MASK \
         (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
-         BPF_F_PRESERVE_ELEMS)
+         BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
 
 static void bpf_array_free_percpu(struct bpf_array *array)
 {
@@ -62,7 +62,7 @@ int array_map_alloc_check(union bpf_attr *attr)
                 return -EINVAL;
 
         if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
-            attr->map_flags & BPF_F_MMAPABLE)
+            attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
                 return -EINVAL;
 
         if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
@@ -214,7 +214,7 @@ static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 }
 
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
-static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
         struct bpf_array *array = container_of(map, struct bpf_array, map);
         struct bpf_insn *insn = insn_buf;
@@ -223,6 +223,9 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
         const int map_ptr = BPF_REG_1;
         const int index = BPF_REG_2;
 
+        if (map->map_flags & BPF_F_INNER_MAP)
+                return -EOPNOTSUPP;
+
         *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
         *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
         if (!map->bypass_spec_v1) {
@@ -496,8 +499,10 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 static bool array_map_meta_equal(const struct bpf_map *meta0,
                                  const struct bpf_map *meta1)
 {
-        return meta0->max_entries == meta1->max_entries &&
-               bpf_map_meta_equal(meta0, meta1);
+        if (!bpf_map_meta_equal(meta0, meta1))
+                return false;
+        return meta0->map_flags & BPF_F_INNER_MAP ? true :
+               meta0->max_entries == meta1->max_entries;
 }
 
 struct bpf_iter_seq_array_map_info {
@@ -1251,7 +1256,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
         return READ_ONCE(*inner_map);
 }
 
-static u32 array_of_map_gen_lookup(struct bpf_map *map,
+static int array_of_map_gen_lookup(struct bpf_map *map,
                                    struct bpf_insn *insn_buf)
 {
         struct bpf_array *array = container_of(map, struct bpf_array, map);
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -612,7 +612,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
  * bpf_prog
  *   __htab_map_lookup_elem
  */
-static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
         struct bpf_insn *insn = insn_buf;
         const int ret = BPF_REG_0;
@@ -651,7 +651,7 @@ static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
         return __htab_lru_map_lookup_elem(map, key, false);
 }
 
-static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
+static int htab_lru_map_gen_lookup(struct bpf_map *map,
                                    struct bpf_insn *insn_buf)
 {
         struct bpf_insn *insn = insn_buf;
@@ -2070,7 +2070,7 @@ static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
         return READ_ONCE(*inner_map);
 }
 
-static u32 htab_of_map_gen_lookup(struct bpf_map *map,
+static int htab_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
 {
         struct bpf_insn *insn = insn_buf;
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11049,7 +11049,9 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                 if (insn->imm == BPF_FUNC_map_lookup_elem &&
                     ops->map_gen_lookup) {
                         cnt = ops->map_gen_lookup(map_ptr, insn_buf);
-                        if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+                        if (cnt == -EOPNOTSUPP)
+                                goto patch_map_ops_generic;
+                        if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
                                 verbose(env, "bpf verifier is misconfigured\n");
                                 return -EINVAL;
                         }
@@ -11079,7 +11081,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                                      (int (*)(struct bpf_map *map, void *value))NULL));
                         BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
                                      (int (*)(struct bpf_map *map, void *value))NULL));
-
+patch_map_ops_generic:
                         switch (insn->imm) {
                         case BPF_FUNC_map_lookup_elem:
                                 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -132,7 +132,7 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
         return 0;
 }
 
-static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
         const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
         struct bpf_insn *insn = insn_buf;
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -435,6 +435,9 @@ enum {
 
         /* Share perf_event among processes */
         BPF_F_PRESERVE_ELEMS    = (1U << 11),
+
+        /* Create a map that is suitable to be an inner map with dynamic max entries */
+        BPF_F_INNER_MAP         = (1U << 12),
 };
 
 /* Flags for BPF_PROG_QUERY. */