Merge branch 'fix-bpf-docs'
Quentin Monnet says:

====================
Hi,

This set is an update to the documentation for the BPF helper functions in
the UAPI header bpf.h, used to generate the bpf-helpers(7) man page.

First patch contains fixes to the current documentation. The second patch
adds documentation for the two helpers bpf_spin_lock() and bpf_spin_unlock().
The last patch simply reports the changes to the version of that header in
tools/.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 895383983e
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -502,16 +502,6 @@ union bpf_attr {
  * 	Return
  * 		0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- * 	Description
- * 		Push an element *value* in *map*. *flags* is one of:
- *
- * 		**BPF_EXIST**
- * 		If the queue/stack is full, the oldest element is removed to
- * 		make room for this.
- * 	Return
- * 		0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  * 	Description
  * 		For tracing programs, safely attempt to read *size* bytes from
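As context for the bpf_probe_read() entry above, a minimal tracing sketch (an assumption-laden illustration, not part of this series: it presumes libbpf's bpf_helpers.h/bpf_tracing.h, a target-arch define for PT_REGS_PARM2(), and an arbitrary probe target and buffer size):

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
	/* Second argument of do_sys_open() is the filename pointer. */
	const char *filename = (const char *)PT_REGS_PARM2(ctx);
	char buf[64] = {};

	/* Copy the string into a program-local buffer before touching it;
	 * the helper returns 0 on success, or a negative error on failure. */
	if (bpf_probe_read(buf, sizeof(buf), filename) < 0)
		return 0;

	bpf_printk("open: %s\n", buf);
	return 0;
}

char _license[] SEC("license") = "GPL";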
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  * 	Description
  * 		Equivalent to bpf_get_socket_cookie() helper that accepts
- * 		*skb*, but gets socket from **struct bpf_sock_addr** contex.
+ * 		*skb*, but gets socket from **struct bpf_sock_addr** context.
  * 	Return
  * 		A 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  * 	Description
  * 		Equivalent to bpf_get_socket_cookie() helper that accepts
- * 		*skb*, but gets socket from **struct bpf_sock_ops** contex.
+ * 		*skb*, but gets socket from **struct bpf_sock_ops** context.
  * 	Return
  * 		A 8-byte long non-decreasing number.
  *
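The sock_ops flavour documented here can be exercised like this (a sketch assuming libbpf's bpf_helpers.h; the bpf_printk() logging is only for illustration):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int log_cookie(struct bpf_sock_ops *skops)
{
	/* An 8-byte, non-decreasing identifier for the underlying socket,
	 * stable for the socket's lifetime, so it is usable as a map key. */
	__u64 cookie = bpf_get_socket_cookie(skops);

	bpf_printk("sock_ops op=%d cookie=%llu\n", skops->op, cookie);
	return 1;
}

char _license[] SEC("license") = "GPL";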
@@ -2098,6 +2088,25 @@ union bpf_attr {
  * 	Return
  * 		0 on success, or a negative error in case of failure.
  *
+ * int bpf_rc_repeat(void *ctx)
+ * 	Description
+ * 		This helper is used in programs implementing IR decoding, to
+ * 		report a successfully decoded repeat key message. This delays
+ * 		the generation of a key up event for previously generated
+ * 		key down event.
+ *
+ * 		Some IR protocols like NEC have a special IR message for
+ * 		repeating last button, for when a button is held down.
+ *
+ * 		The *ctx* should point to the lirc sample as passed into
+ * 		the program.
+ *
+ * 		This helper is only available is the kernel was compiled with
+ * 		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * 		"**y**".
+ * 	Return
+ * 		0
+ *
  * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  * 	Description
  * 		This helper is used in programs implementing IR decoding, to
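To make the relocated bpf_rc_repeat() entry concrete, a skeleton lirc_mode2 decoder might look like this (a sketch assuming libbpf's bpf_helpers.h; the actual pulse/space protocol decoding is elided):

#include <linux/bpf.h>
#include <linux/lirc.h>
#include <bpf/bpf_helpers.h>

SEC("lirc_mode2")
int ir_decoder(unsigned int *sample)
{
	if (LIRC_IS_PULSE(*sample)) {
		/* ... real decoding of the pulse/space timing goes here ... */

		/* A NEC-style repeat frame was recognized: delay the key-up
		 * event for the previously reported key instead of emitting
		 * a fresh key-down. */
		bpf_rc_repeat(sample);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";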
@@ -2124,26 +2133,7 @@ union bpf_attr {
  * 	Return
  * 		0
  *
- * int bpf_rc_repeat(void *ctx)
- * 	Description
- * 		This helper is used in programs implementing IR decoding, to
- * 		report a successfully decoded repeat key message. This delays
- * 		the generation of a key up event for previously generated
- * 		key down event.
- *
- * 		Some IR protocols like NEC have a special IR message for
- * 		repeating last button, for when a button is held down.
- *
- * 		The *ctx* should point to the lirc sample as passed into
- * 		the program.
- *
- * 		This helper is only available is the kernel was compiled with
- * 		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
- * 		"**y**".
- * 	Return
- * 		0
- *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  * 	Description
  * 		Return the cgroup v2 id of the socket associated with the *skb*.
  * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  * 	Return
  * 		The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- * 	Description
- * 		Return id of cgroup v2 that is ancestor of cgroup associated
- * 		with the *skb* at the *ancestor_level*. The root cgroup is at
- * 		*ancestor_level* zero and each step down the hierarchy
- * 		increments the level. If *ancestor_level* == level of cgroup
- * 		associated with *skb*, then return value will be same as that
- * 		of **bpf_skb_cgroup_id**\ ().
- *
- * 		The helper is useful to implement policies based on cgroups
- * 		that are upper in hierarchy than immediate cgroup associated
- * 		with *skb*.
- *
- * 		The format of returned id and helper limitations are same as in
- * 		**bpf_skb_cgroup_id**\ ().
- * 	Return
- * 		The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  * 	Return
  * 		A 64-bit integer containing the current cgroup id based
  * 		on the cgroup within which the current task is running.
  *
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  * 	Description
  * 		Get the pointer to the local storage area.
  * 		The type and the size of the local storage is defined
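The renamed bpf_get_local_storage() prototype pairs with cgroup storage maps; a per-cgroup packet counter could be sketched as follows (the BTF-style map definition assumes libbpf conventions and is not part of this series):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} pkt_count SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	/* Returns the pointer to this cgroup's storage slot; *flags* must
	 * currently be 0, and for cgroup storage the pointer is always valid. */
	__u64 *counter = bpf_get_local_storage(&pkt_count, 0);

	__sync_fetch_and_add(counter, 1);
	return 1;
}

char _license[] SEC("license") = "GPL";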
@@ -2209,6 +2181,24 @@ union bpf_attr {
  * 	Return
  * 		0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * 	Description
+ * 		Return id of cgroup v2 that is ancestor of cgroup associated
+ * 		with the *skb* at the *ancestor_level*. The root cgroup is at
+ * 		*ancestor_level* zero and each step down the hierarchy
+ * 		increments the level. If *ancestor_level* == level of cgroup
+ * 		associated with *skb*, then return value will be same as that
+ * 		of **bpf_skb_cgroup_id**\ ().
+ *
+ * 		The helper is useful to implement policies based on cgroups
+ * 		that are upper in hierarchy than immediate cgroup associated
+ * 		with *skb*.
+ *
+ * 		The format of returned id and helper limitations are same as in
+ * 		**bpf_skb_cgroup_id**\ ().
+ * 	Return
+ * 		The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  * 	Description
  * 		Look for TCP socket matching *tuple*, optionally in a child
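The two cgroup-id helpers touched above, bpf_skb_cgroup_id() and the relocated bpf_skb_ancestor_cgroup_id(), support subtree-wide policies; a tc-level sketch (ALLOWED_SUBTREE_ID is a hypothetical constant that user space would supply in a real deployment):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

#define ALLOWED_SUBTREE_ID 42	/* hypothetical cgroup v2 id, for illustration */

SEC("tc")
int subtree_gate(struct __sk_buff *skb)
{
	/* Ancestor at level 2: two steps below the cgroup v2 root, so one
	 * comparison covers every descendant cgroup in that subtree. */
	__u64 ancestor = bpf_skb_ancestor_cgroup_id(skb, 2);

	return ancestor == ALLOWED_SUBTREE_ID ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] SEC("license") = "GPL";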
@@ -2289,6 +2279,16 @@ union bpf_attr {
  * 	Return
  * 		0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * 	Description
+ * 		Push an element *value* in *map*. *flags* is one of:
+ *
+ * 		**BPF_EXIST**
+ * 			If the queue/stack is full, the oldest element is
+ * 			removed to make room for this.
+ * 	Return
+ * 		0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  * 	Description
  * 		Pop an element from *map*.
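The relocated bpf_map_push_elem() documentation reads naturally with a queue map; a sketch (queue maps take no key, hence value_size only; the XDP attach point and field sampled are illustrative assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 1024);
	__uint(value_size, sizeof(__u32));
} rxq_samples SEC(".maps");

SEC("xdp")
int sample_rxq(struct xdp_md *ctx)
{
	__u32 rxq = ctx->rx_queue_index;

	/* BPF_EXIST: when the queue is full, evict the oldest element
	 * to make room instead of failing the push. */
	bpf_map_push_elem(&rxq_samples, &rxq, BPF_EXIST);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";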
@@ -2343,36 +2343,93 @@ union bpf_attr {
  * 	Return
  * 		0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * 	Description
+ * 		Acquire a spinlock represented by the pointer *lock*, which is
+ * 		stored as part of a value of a map. Taking the lock allows to
+ * 		safely update the rest of the fields in that value. The
+ * 		spinlock can (and must) later be released with a call to
+ * 		**bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * 		Spinlocks in BPF programs come with a number of restrictions
+ * 		and constraints:
+ *
+ * 		* **bpf_spin_lock** objects are only allowed inside maps of
+ * 		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * 		  list could be extended in the future).
+ * 		* BTF description of the map is mandatory.
+ * 		* The BPF program can take ONE lock at a time, since taking two
+ * 		  or more could cause dead locks.
+ * 		* Only one **struct bpf_spin_lock** is allowed per map element.
+ * 		* When the lock is taken, calls (either BPF to BPF or helpers)
+ * 		  are not allowed.
+ * 		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * 		  allowed inside a spinlock-ed region.
+ * 		* The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * 		  the lock, on all execution paths, before it returns.
+ * 		* The BPF program can access **struct bpf_spin_lock** only via
+ * 		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * 		  helpers. Loading or storing data into the **struct
+ * 		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * 		* To use the **bpf_spin_lock**\ () helper, the BTF description
+ * 		  of the map value must be a struct and have **struct
+ * 		  bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * 		  Nested lock inside another struct is not allowed.
+ * 		* The **struct bpf_spin_lock** *lock* field in a map value must
+ * 		  be aligned on a multiple of 4 bytes in that value.
+ * 		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * 		  the **bpf_spin_lock** field to user space.
+ * 		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * 		  a BPF program, do not update the **bpf_spin_lock** field.
+ * 		* **bpf_spin_lock** cannot be on the stack or inside a
+ * 		  networking packet (it can only be inside of a map values).
+ * 		* **bpf_spin_lock** is available to root only.
+ * 		* Tracing programs and socket filter programs cannot use
+ * 		  **bpf_spin_lock**\ () due to insufficient preemption checks
+ * 		  (but this may change in the future).
+ * 		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ * 	Return
+ * 		0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * 	Description
+ * 		Release the *lock* previously locked by a call to
+ * 		**bpf_spin_lock**\ (\ *lock*\ ).
+ * 	Return
+ * 		0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  * 	Description
  * 		This helper gets a **struct bpf_sock** pointer such
- * 		that all the fields in bpf_sock can be accessed.
+ * 		that all the fields in this **bpf_sock** can be accessed.
  * 	Return
- * 		A **struct bpf_sock** pointer on success, or NULL in
+ * 		A **struct bpf_sock** pointer on success, or **NULL** in
  * 		case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  * 	Description
  * 		This helper gets a **struct bpf_tcp_sock** pointer from a
  * 		**struct bpf_sock** pointer.
- *
  * 	Return
- * 		A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * 		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  * 		case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
  * 	Description
- * 		Sets ECN of IP header to ce (congestion encountered) if
- * 		current value is ect (ECN capable). Works with IPv6 and IPv4.
+ * 		Set ECN (Explicit Congestion Notification) field of IP header
+ * 		to **CE** (Congestion Encountered) if current value is **ECT**
+ * 		(ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * 		and IPv4.
  * 	Return
- * 		1 if set, 0 if not set.
+ * 		1 if the **CE** flag is set (either by the current helper call
+ * 		or because it was already present), 0 if it is not set.
  *
  * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
  * 	Description
- * 		Return a **struct bpf_sock** pointer in TCP_LISTEN state.
- * 		bpf_sk_release() is unnecessary and not allowed.
+ * 		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * 		**bpf_sk_release**\ () is unnecessary and not allowed.
  * 	Return
- * 		A **struct bpf_sock** pointer on success, or NULL in
+ * 		A **struct bpf_sock** pointer on success, or **NULL** in
  * 		case of failure.
  */
  #define __BPF_FUNC_MAPPER(FN) \
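The long list of bpf_spin_lock() constraints above boils down to a small usage pattern; a sketch (map layout and tc section follow libbpf conventions; the flow-stats struct is an illustrative assumption, not from the series):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct flow_stats {
	struct bpf_spin_lock lock;	/* one lock, top level, 4-byte aligned */
	__u64 bytes;
	__u64 packets;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, struct flow_stats);
} flows SEC(".maps");

SEC("tc")
int account(struct __sk_buff *skb)
{
	__u32 key = skb->hash;
	struct flow_stats *val = bpf_map_lookup_elem(&flows, &key);

	if (!val)
		return TC_ACT_OK;

	/* Critical section: no helper or BPF-to-BPF calls inside, and
	 * bpf_spin_unlock() is reached on every path before returning. */
	bpf_spin_lock(&val->lock);
	val->bytes += skb->len;
	val->packets++;
	bpf_spin_unlock(&val->lock);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";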
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
(same documentation changes as above, mirrored into the tools/ copy of the header)