Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
 "Some merge window fallout, some longer term fixes:

   1) Handle headroom properly in lapbether and x25_asy drivers, from
      Xie He.

   2) Fetch MAC address from correct r8152 device node, from Thierry
      Reding.

   3) In the sw kTLS path we should allow MSG_CMSG_COMPAT in sendmsg,
      from Rouven Czerwinski.

   4) Correct fdputs in socket layer, from Miaohe Lin.

   5) Revert troublesome sockptr_t optimization, from Christoph
      Hellwig.

   6) Fix TCP TFO key reading on big endian, from Jason Baron.

   7) Missing CAP_NET_RAW check in nfc, from Qingyu Li.

   8) Fix inet fastreuse optimization with tproxy sockets, from Tim
      Froidcoeur.

   9) Fix 64-bit divide in new SFC driver, from Edward Cree.

  10) Add a tracepoint for prandom_u32 so that we can more easily
      perform usage analysis, from Eric Dumazet.

  11) Fix rwlock imbalance in AF_PACKET, from John Ogness"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (49 commits)
  net: openvswitch: introduce common code for flushing flows
  af_packet: TPACKET_V3: fix fill status rwlock imbalance
  random32: add a tracepoint for prandom_u32()
  Revert "ipv4: tunnel: fix compilation on ARCH=um"
  net: accept an empty mask in /sys/class/net/*/queues/rx-*/rps_cpus
  net: ethernet: stmmac: Disable hardware multicast filter
  net: stmmac: dwmac1000: provide multicast filter fallback
  ipv4: tunnel: fix compilation on ARCH=um
  vsock: fix potential null pointer dereference in vsock_poll()
  sfc: fix ef100 design-param checking
  net: initialize fastreuse on inet_inherit_port
  net: refactor bind_bucket fastreuse into helper
  net: phy: marvell10g: fix null pointer dereference
  net: Fix potential memory leak in proto_register()
  net: qcom/emac: add missed clk_disable_unprepare in error path of emac_clks_phase1_init
  ionic_lif: Use devm_kcalloc() in ionic_qcq_alloc()
  net/nfc/rawsock.c: add CAP_NET_RAW check.
  hinic: fix strncpy output truncated compile warnings
  drivers/net/wan/x25_asy: Added needed_headroom and a skb->len check
  net/tls: Fix kmap usage
  ...
commit a1d21081a6
@@ -246,17 +246,6 @@ program is loaded the kernel will print warning message, so
 this helper is only useful for experiments and prototypes.
 Tracing BPF programs are root only.
 
-Q: bpf_trace_printk() helper warning
-------------------------------------
-Q: When bpf_trace_printk() helper is used the kernel prints nasty
-warning message. Why is that?
-
-A: This is done to nudge program authors into better interfaces when
-programs need to pass data to user space. Like bpf_perf_event_output()
-can be used to efficiently stream data via perf ring buffer.
-BPF maps can be used for asynchronous data sharing between kernel
-and user space. bpf_trace_printk() should only be used for debugging.
-
 Q: New functionality via kernel modules?
 ----------------------------------------
 Q: Can BPF functionality such as new program or map types, new
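For context, the deleted FAQ answer points at bpf_perf_event_output() as the better interface for passing data to user space. A minimal sketch of that pattern, assuming libbpf-style map definitions (the map name, section, and payload below are illustrative, not part of this commit):

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("kprobe/do_sys_open")
int probe(struct pt_regs *ctx)
{
	__u32 val = 42;	/* illustrative payload */

	/* stream the value to user space via the perf ring buffer */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &val, sizeof(val));
	return 0;
}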
@@ -11986,7 +11986,8 @@ F: include/uapi/linux/netrom.h
 F: net/netrom/
 
 NETRONOME ETHERNET DRIVERS
-M: Jakub Kicinski <kuba@kernel.org>
+M: Simon Horman <simon.horman@netronome.com>
+R: Jakub Kicinski <kuba@kernel.org>
 L: oss-drivers@netronome.com
 S: Maintained
 F: drivers/net/ethernet/netronome/
@@ -1728,7 +1728,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 /* hardware completion status should be available by this time */
 if (ret) {
 dev_err(&hdev->pdev->dev,
-"could'nt get reset done status from h/w, timeout!\n");
+"couldn't get reset done status from h/w, timeout!\n");
 return ret;
 }
 
@@ -334,19 +334,14 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
 static int chip_fault_show(struct devlink_fmsg *fmsg,
 struct hinic_fault_event *event)
 {
-char fault_level[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
-"fatal", "reset", "flr", "general", "suggestion"};
-char level_str[FAULT_SHOW_STR_LEN + 1] = {0};
-u8 level;
+const char * const level_str[FAULT_LEVEL_MAX + 1] = {
+"fatal", "reset", "flr", "general", "suggestion", "Unknown"};
+u8 fault_level;
 int err;
 
-level = event->event.chip.err_level;
-if (level < FAULT_LEVEL_MAX)
-strncpy(level_str, fault_level[level], strlen(fault_level[level]));
-else
-strncpy(level_str, "Unknown", strlen("Unknown"));
-
-if (level == FAULT_LEVEL_SERIOUS_FLR) {
+fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ?
+event->event.chip.err_level : FAULT_LEVEL_MAX;
+if (fault_level == FAULT_LEVEL_SERIOUS_FLR) {
 err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
 (u32)event->event.chip.func_id);
 if (err)
@@ -361,7 +356,7 @@ static int chip_fault_show(struct devlink_fmsg *fmsg,
 if (err)
 return err;
 
-err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str);
+err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
 if (err)
 return err;
 
@@ -381,18 +376,15 @@ static int chip_fault_show(struct devlink_fmsg *fmsg,
 static int fault_report_show(struct devlink_fmsg *fmsg,
 struct hinic_fault_event *event)
 {
-char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+const char * const type_str[FAULT_TYPE_MAX + 1] = {
 "chip", "ucode", "mem rd timeout", "mem wr timeout",
-"reg rd timeout", "reg wr timeout", "phy fault"};
-char type_str[FAULT_SHOW_STR_LEN + 1] = {0};
+"reg rd timeout", "reg wr timeout", "phy fault", "Unknown"};
+u8 fault_type;
 int err;
 
-if (event->type < FAULT_TYPE_MAX)
-strncpy(type_str, fault_type[event->type], strlen(fault_type[event->type]));
-else
-strncpy(type_str, "Unknown", strlen("Unknown"));
+fault_type = (event->type < FAULT_TYPE_MAX) ? event->type : FAULT_TYPE_MAX;
 
-err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str);
+err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
 if (err)
 return err;
 
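On the warning these hinic hunks silence: with a bound of strlen(src), strncpy() never writes a NUL terminator, so the result is only terminated because the destination happened to be zero-initialized, and newer GCC flags that as possible truncation. A hedged sketch of the before/after idea (variable names shortened for illustration):

char buf[16 + 1] = {0};
strncpy(buf, "fatal", strlen("fatal"));	/* copies no NUL; relies on the {0} init */

/* Indexing a const string table needs no copy at all and cannot truncate: */
static const char * const level_str[FAULT_LEVEL_MAX + 1] = {
	"fatal", "reset", "flr", "general", "suggestion", "Unknown"
};
const char *s = level_str[level < FAULT_LEVEL_MAX ? level : FAULT_LEVEL_MAX];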
@@ -504,8 +504,6 @@ enum hinic_fault_type {
 FAULT_TYPE_MAX,
 };
 
-#define FAULT_SHOW_STR_LEN 16
-
 enum hinic_fault_err_level {
 FAULT_LEVEL_FATAL,
 FAULT_LEVEL_SERIOUS_RESET,
@@ -412,7 +412,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 
 new->flags = flags;
 
-new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
+new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
 GFP_KERNEL);
 if (!new->q.info) {
 netdev_err(lif->netdev, "Cannot allocate queue info\n");
@@ -462,7 +462,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
 }
 
-new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
+new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
 GFP_KERNEL);
 if (!new->cq.info) {
 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
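The point of the ionic change: devm_kcalloc() checks the count * size multiplication for overflow and returns NULL instead of silently allocating a short buffer, while an open-coded sizeof(x) * n passed to devm_kzalloc() does not. A minimal sketch of the pattern (names illustrative):

info = devm_kcalloc(dev, num_descs, sizeof(*info), GFP_KERNEL);
if (!info)
	return -ENOMEM;	/* also taken if num_descs * size would overflow */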
@@ -474,13 +474,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
 
 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
 if (ret)
-return ret;
+goto disable_clk_axi;
 
 ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
 if (ret)
-return ret;
+goto disable_clk_cfg_ahb;
 
-return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+if (ret)
+goto disable_clk_cfg_ahb;
+
+return 0;
+
+disable_clk_cfg_ahb:
+clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
+disable_clk_axi:
+clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
+
+return ret;
 }
 
 /* Enable clocks; needs emac_clks_phase1_init to be called before */
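The emac fix converts early returns into the usual goto-unwind shape for sequential resource acquisition: each failure point releases everything enabled before it, in reverse order. A skeleton of that shape, with illustrative names:

static int enable_two_clks(struct clk *a, struct clk *b)
{
	int ret;

	ret = clk_prepare_enable(a);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = clk_prepare_enable(b);
	if (ret)
		goto disable_a;		/* undo only what succeeded */

	return 0;

disable_a:
	clk_disable_unprepare(a);
	return ret;
}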
@@ -979,7 +979,8 @@ static int ef100_process_design_param(struct efx_nic *efx,
 * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
 * This is very unlikely to fail.
 */
-if (EFX_MIN_DMAQ_SIZE % reader->value) {
+if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
+EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
 netif_err(efx, probe, efx->net_dev,
 "%s size granularity is %llu, can't guarantee safety\n",
 reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
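Two things happen in the sfc check above: a zero or oversized granularity is rejected explicitly, and the modulo operand is cast to u32. On 32-bit kernels a 64-bit '%' is emitted as a call to a compiler helper that the kernel does not always provide; once the value is known to fit within EFX_MIN_DMAQ_SIZE, the cast keeps it a native 32-bit operation. A sketch (the error code here is illustrative):

u64 gran = reader->value;	/* 64-bit value from the design params */

if (!gran || gran > EFX_MIN_DMAQ_SIZE)
	return -EOPNOTSUPP;
if (EFX_MIN_DMAQ_SIZE % (u32)gran)	/* safe: gran <= EFX_MIN_DMAQ_SIZE */
	return -EOPNOTSUPP;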
@@ -351,6 +351,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
 plat_dat->has_gmac = true;
 plat_dat->bsp_priv = gmac;
 plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+plat_dat->multicast_filter_bins = 0;
 
 err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 if (err)
@@ -164,6 +164,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
 value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
 } else if (dev->flags & IFF_ALLMULTI) {
 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+} else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
+/* Fall back to all multicast if we've no filter */
+value = GMAC_FRAME_FILTER_PM;
 } else if (!netdev_mc_empty(dev)) {
 struct netdev_hw_addr *ha;
 
@@ -208,13 +208,6 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
 MV_V2_TEMP_CTRL_MASK, val);
 }
 
-static void mv3310_hwmon_disable(void *data)
-{
-struct phy_device *phydev = data;
-
-mv3310_hwmon_config(phydev, false);
-}
-
 static int mv3310_hwmon_probe(struct phy_device *phydev)
 {
 struct device *dev = &phydev->mdio.dev;
@@ -238,10 +231,6 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
 if (ret)
 return ret;
 
-ret = devm_add_action_or_reset(dev, mv3310_hwmon_disable, phydev);
-if (ret)
-return ret;
-
 priv->hwmon_dev = devm_hwmon_device_register_with_info(dev,
 priv->hwmon_name, phydev,
 &mv3310_hwmon_chip_info, NULL);
@@ -426,6 +415,11 @@ static int mv3310_probe(struct phy_device *phydev)
 return phy_sfp_probe(phydev, &mv3310_sfp_ops);
 }
 
+static void mv3310_remove(struct phy_device *phydev)
+{
+mv3310_hwmon_config(phydev, false);
+}
+
 static int mv3310_suspend(struct phy_device *phydev)
 {
 return mv3310_power_down(phydev);
@@ -784,6 +778,7 @@ static struct phy_driver mv3310_drivers[] = {
 .read_status = mv3310_read_status,
 .get_tunable = mv3310_get_tunable,
 .set_tunable = mv3310_set_tunable,
+.remove = mv3310_remove,
 },
 {
 .phy_id = MARVELL_PHY_ID_88E2110,
@@ -798,6 +793,7 @@ static struct phy_driver mv3310_drivers[] = {
 .read_status = mv3310_read_status,
 .get_tunable = mv3310_get_tunable,
 .set_tunable = mv3310_set_tunable,
+.remove = mv3310_remove,
 },
 };
 
@@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
 if (c45_ids)
 dev->c45_ids = *c45_ids;
 dev->irq = bus->irq[addr];
+
 dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
+device_initialize(&mdiodev->dev);
 
 dev->state = PHY_DOWN;
 
@@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
 ret = phy_request_driver_module(dev, phy_id);
 }
 
-if (!ret) {
-device_initialize(&mdiodev->dev);
-} else {
-kfree(dev);
+if (ret) {
+put_device(&mdiodev->dev);
 dev = ERR_PTR(ret);
 }
 
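The phy_device_create() change is a lifetime fix: once device_initialize() has run, the embedded struct device's refcount owns the allocation, so error paths must drop it with put_device() (which ends in the release callback freeing the memory) rather than kfree() directly. A sketch of the rule:

device_initialize(&mdiodev->dev);	/* refcount now owns the allocation */
/* ... further setup that may fail ... */
if (ret) {
	put_device(&mdiodev->dev);	/* NOT kfree(dev): release() frees it */
	dev = ERR_PTR(ret);
}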
@@ -1504,7 +1504,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
 
 sa->sa_family = dev->type;
 
-ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data);
+ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data);
 if (ret < 0) {
 if (tp->version == RTL_VER_01) {
 ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
@@ -886,7 +886,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 switch (protocol) {
 case IPPROTO_TCP:
-ctx->l4_hdr_size = tcp_hdrlen(skb);
+ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
+tcp_hdrlen(skb);
 break;
 case IPPROTO_UDP:
 ctx->l4_hdr_size = sizeof(struct udphdr);
@@ -157,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
 if (!netif_running(dev))
 goto drop;
 
+/* There should be a pseudo header of 1 byte added by upper layers.
+* Check to make sure it is there before reading it.
+*/
+if (skb->len < 1)
+goto drop;
+
 switch (skb->data[0]) {
 case X25_IFACE_DATA:
 break;
@@ -305,6 +311,7 @@ static void lapbeth_setup(struct net_device *dev)
 dev->netdev_ops = &lapbeth_netdev_ops;
 dev->needs_free_netdev = true;
 dev->type = ARPHRD_X25;
+dev->hard_header_len = 0;
 dev->mtu = 1000;
 dev->addr_len = 0;
 }
@@ -331,7 +338,8 @@ static int lapbeth_new_device(struct net_device *dev)
 * then this driver prepends a length field of 2 bytes,
 * then the underlying Ethernet device prepends its own header.
 */
-ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
++ dev->needed_headroom;
 
 lapbeth = netdev_priv(ndev);
 lapbeth->axdev = ndev;
@@ -307,6 +307,14 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
 return NETDEV_TX_OK;
 }
 
+/* There should be a pseudo header of 1 byte added by upper layers.
+* Check to make sure it is there before reading it.
+*/
+if (skb->len < 1) {
+kfree_skb(skb);
+return NETDEV_TX_OK;
+}
+
 switch (skb->data[0]) {
 case X25_IFACE_DATA:
 break;
@@ -752,6 +760,12 @@ static void x25_asy_setup(struct net_device *dev)
 dev->type = ARPHRD_X25;
 dev->tx_queue_len = 10;
 
+/* When transmitting data:
+* first this driver removes a pseudo header of 1 byte,
+* then the lapb module prepends an LAPB header of at most 3 bytes.
+*/
+dev->needed_headroom = 3 - 1;
+
 /* New-style flags. */
 dev->flags = IFF_NOARP;
 }
@@ -77,7 +77,7 @@
 */
 #define FIELD_FIT(_mask, _val) \
 ({ \
-__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
 !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
 })
 
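The FIELD_FIT fix drops the compile-time check on _val so the macro can be used with runtime values; only the mask still has to be constant. Typical usage, with an assumed example register field:

#define MY_REG_SPEED	GENMASK(7, 4)	/* illustrative 4-bit field */

	if (!FIELD_FIT(MY_REG_SPEED, speed))	/* now fine for a runtime 'speed' */
		return -ERANGE;
	reg |= FIELD_PREP(MY_REG_SPEED, speed);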
@@ -1214,15 +1214,17 @@ struct bpf_iter_aux_info {
 struct bpf_map *map;
 };
 
-typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog,
-struct bpf_iter_aux_info *aux);
+typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
+union bpf_iter_link_info *linfo,
+struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
 
 #define BPF_ITER_CTX_ARG_MAX 2
 struct bpf_iter_reg {
 const char *target;
-bpf_iter_check_target_t check_target;
+bpf_iter_attach_target_t attach_target;
+bpf_iter_detach_target_t detach_target;
 u32 ctx_arg_info_size;
-enum bpf_iter_link_info req_linfo;
 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
 const struct bpf_iter_seq_info *seq_info;
 };
@@ -8,26 +8,9 @@
 #ifndef _LINUX_SOCKPTR_H
 #define _LINUX_SOCKPTR_H
 
-#include <linux/compiler.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
-typedef union {
-void *kernel;
-void __user *user;
-} sockptr_t;
-
-static inline bool sockptr_is_kernel(sockptr_t sockptr)
-{
-return (unsigned long)sockptr.kernel >= TASK_SIZE;
-}
-
-static inline sockptr_t KERNEL_SOCKPTR(void *p)
-{
-return (sockptr_t) { .kernel = p };
-}
-#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 typedef struct {
 union {
 void *kernel;
@@ -45,15 +28,10 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p)
 {
 return (sockptr_t) { .kernel = p, .is_kernel = true };
 }
-#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 
-static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p,
-size_t size)
+static inline sockptr_t USER_SOCKPTR(void __user *p)
 {
-if (!access_ok(p, size))
-return -EFAULT;
-*sp = (sockptr_t) { .user = p };
-return 0;
+return (sockptr_t) { .user = p };
 }
 
 static inline bool sockptr_is_null(sockptr_t sockptr)
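After this revert, sockptr_t always carries an explicit is_kernel flag and callers construct it directly; the user pointer is validated at copy time instead of at init time. A minimal usage sketch (variable names assumed):

sockptr_t p = in_kernel ? KERNEL_SOCKPTR(kbuf) : USER_SOCKPTR(ubuf);

if (copy_from_sockptr(&val, p, sizeof(val)))
	return -EFAULT;	/* access/copy failure is reported here */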
@@ -304,6 +304,10 @@ void inet_csk_listen_stop(struct sock *sk);
 
 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+struct sock *sk);
+
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
 #define TCP_PINGPONG_THRESH 3
@@ -1672,6 +1672,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk);
 void tcp_fastopen_ctx_destroy(struct net *net);
 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
 void *primary_key, void *backup_key);
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+u64 *key);
 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 struct request_sock *req,
@@ -307,6 +307,23 @@ TRACE_EVENT(urandom_read,
 __entry->pool_left, __entry->input_left)
 );
 
+TRACE_EVENT(prandom_u32,
+
+TP_PROTO(unsigned int ret),
+
+TP_ARGS(ret),
+
+TP_STRUCT__entry(
+__field( unsigned int, ret)
+),
+
+TP_fast_assign(
+__entry->ret = ret;
+),
+
+TP_printk("ret=%u" , __entry->ret)
+);
+
 #endif /* _TRACE_RANDOM_H */
 
 /* This part must be outside protection */
@@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
 __u32 attach_type; /* program attach type */
 };
 
+union bpf_iter_link_info {
+struct {
+__u32 map_fd;
+} map;
+};
+
 /* BPF syscall commands, see bpf(2) man-page for details. */
 enum bpf_cmd {
 BPF_MAP_CREATE,
@@ -249,13 +255,6 @@ enum bpf_link_type {
 MAX_BPF_LINK_TYPE,
 };
 
-enum bpf_iter_link_info {
-BPF_ITER_LINK_UNSPEC = 0,
-BPF_ITER_LINK_MAP_FD = 1,
-
-MAX_BPF_ITER_LINK_INFO,
-};
-
 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
@@ -623,6 +622,8 @@ union bpf_attr {
 };
 __u32 attach_type; /* attach type */
 __u32 flags; /* extra flags */
+__aligned_u64 iter_info; /* extra bpf_iter_link_info */
+__u32 iter_info_len; /* iter_info length */
 } link_create;
 
 struct { /* struct used by BPF_LINK_UPDATE command */
@@ -338,8 +338,8 @@ static void bpf_iter_link_release(struct bpf_link *link)
 struct bpf_iter_link *iter_link =
 container_of(link, struct bpf_iter_link, link);
 
-if (iter_link->aux.map)
-bpf_map_put_with_uref(iter_link->aux.map);
+if (iter_link->tinfo->reg_info->detach_target)
+iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
 }
 
 static void bpf_iter_link_dealloc(struct bpf_link *link)
@@ -390,15 +390,35 @@ bool bpf_link_is_iter(struct bpf_link *link)
 
 int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
+union bpf_iter_link_info __user *ulinfo;
 struct bpf_link_primer link_primer;
 struct bpf_iter_target_info *tinfo;
-struct bpf_iter_aux_info aux = {};
+union bpf_iter_link_info linfo;
 struct bpf_iter_link *link;
-u32 prog_btf_id, target_fd;
+u32 prog_btf_id, linfo_len;
 bool existed = false;
-struct bpf_map *map;
 int err;
 
+if (attr->link_create.target_fd || attr->link_create.flags)
+return -EINVAL;
+
+memset(&linfo, 0, sizeof(union bpf_iter_link_info));
+
+ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
+linfo_len = attr->link_create.iter_info_len;
+if (!ulinfo ^ !linfo_len)
+return -EINVAL;
+
+if (ulinfo) {
+err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
+linfo_len);
+if (err)
+return err;
+linfo_len = min_t(u32, linfo_len, sizeof(linfo));
+if (copy_from_user(&linfo, ulinfo, linfo_len))
+return -EFAULT;
+}
+
 prog_btf_id = prog->aux->attach_btf_id;
 mutex_lock(&targets_mutex);
 list_for_each_entry(tinfo, &targets, list) {
@@ -411,13 +431,6 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 if (!existed)
 return -ENOENT;
 
-/* Make sure user supplied flags are target expected. */
-target_fd = attr->link_create.target_fd;
-if (attr->link_create.flags != tinfo->reg_info->req_linfo)
-return -EINVAL;
-if (!attr->link_create.flags && target_fd)
-return -EINVAL;
-
 link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
 if (!link)
 return -ENOMEM;
@@ -431,28 +444,15 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 return err;
 }
 
-if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) {
-map = bpf_map_get_with_uref(target_fd);
-if (IS_ERR(map)) {
-err = PTR_ERR(map);
-goto cleanup_link;
-}
-
-aux.map = map;
-err = tinfo->reg_info->check_target(prog, &aux);
+if (tinfo->reg_info->attach_target) {
+err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
 if (err) {
-bpf_map_put_with_uref(map);
-goto cleanup_link;
+bpf_link_cleanup(&link_primer);
+return err;
 }
-
-link->aux.map = map;
 }
 
 return bpf_link_settle(&link_primer);
-
-cleanup_link:
-bpf_link_cleanup(&link_primer);
-return err;
 }
 
 static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
@@ -1966,7 +1966,7 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
 * @index: the index of the program to replace
 *
 * Skips over dummy programs, by not counting them, when calculating
-* the the position of the program to replace.
+* the position of the program to replace.
 *
 * Return:
 * * 0 - Success
@@ -98,12 +98,21 @@ static struct bpf_iter_reg bpf_map_reg_info = {
 .seq_info = &bpf_map_seq_info,
 };
 
-static int bpf_iter_check_map(struct bpf_prog *prog,
-struct bpf_iter_aux_info *aux)
+static int bpf_iter_attach_map(struct bpf_prog *prog,
+union bpf_iter_link_info *linfo,
+struct bpf_iter_aux_info *aux)
 {
 u32 key_acc_size, value_acc_size, key_size, value_size;
-struct bpf_map *map = aux->map;
+struct bpf_map *map;
 bool is_percpu = false;
+int err = -EINVAL;
 
+if (!linfo->map.map_fd)
+return -EBADF;
+
+map = bpf_map_get_with_uref(linfo->map.map_fd);
+if (IS_ERR(map))
+return PTR_ERR(map);
+
 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
@@ -112,7 +121,7 @@ static int bpf_iter_check_map(struct bpf_prog *prog,
 else if (map->map_type != BPF_MAP_TYPE_HASH &&
 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
 map->map_type != BPF_MAP_TYPE_ARRAY)
-return -EINVAL;
+goto put_map;
 
 key_acc_size = prog->aux->max_rdonly_access;
 value_acc_size = prog->aux->max_rdwr_access;
@@ -122,10 +131,22 @@ static int bpf_iter_check_map(struct bpf_prog *prog,
 else
 value_size = round_up(map->value_size, 8) * num_possible_cpus();
 
-if (key_acc_size > key_size || value_acc_size > value_size)
-return -EACCES;
+if (key_acc_size > key_size || value_acc_size > value_size) {
+err = -EACCES;
+goto put_map;
+}
 
+aux->map = map;
 return 0;
+
+put_map:
+bpf_map_put_with_uref(map);
+return err;
 }
 
+static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
+{
+bpf_map_put_with_uref(aux->map);
+}
+
 DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
@@ -133,8 +154,8 @@ DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
 
 static const struct bpf_iter_reg bpf_map_elem_reg_info = {
 .target = "bpf_map_elem",
-.check_target = bpf_iter_check_map,
-.req_linfo = BPF_ITER_LINK_MAP_FD,
+.attach_target = bpf_iter_attach_map,
+.detach_target = bpf_iter_detach_map,
 .ctx_arg_info_size = 2,
 .ctx_arg_info = {
 { offsetof(struct bpf_iter__bpf_map_elem, key),
@@ -3883,7 +3883,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *
 return -EINVAL;
 }
 
-#define BPF_LINK_CREATE_LAST_FIELD link_create.flags
+#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
 static int link_create(union bpf_attr *attr)
 {
 enum bpf_prog_type ptype;
@@ -8294,7 +8294,7 @@ static bool stacksafe(struct bpf_func_state *old,
 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
 /* Ex: old explored (safe) state has STACK_SPILL in
-* this stack slot, but current has has STACK_MISC ->
+* this stack slot, but current has STACK_MISC ->
 * this verifier states are not equivalent,
 * return false to continue verification of this path
 */
@@ -383,7 +383,7 @@ static DEFINE_RAW_SPINLOCK(trace_printk_lock);
 
 #define BPF_TRACE_PRINTK_SIZE 1024
 
-static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
+static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
 {
 static char buf[BPF_TRACE_PRINTK_SIZE];
 unsigned long flags;
@@ -39,6 +39,7 @@
 #include <linux/random.h>
 #include <linux/sched.h>
 #include <asm/unaligned.h>
+#include <trace/events/random.h>
 
 #ifdef CONFIG_RANDOM32_SELFTEST
 static void __init prandom_state_selftest(void);
@@ -82,6 +83,7 @@ u32 prandom_u32(void)
 u32 res;
 
 res = prandom_u32_state(state);
+trace_prandom_u32(res);
 put_cpu_var(net_rand_state);
 
 return res;
@@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
 return 0;
 }
 
-static int bpf_iter_check_map(struct bpf_prog *prog,
-struct bpf_iter_aux_info *aux)
+static int bpf_iter_attach_map(struct bpf_prog *prog,
+union bpf_iter_link_info *linfo,
+struct bpf_iter_aux_info *aux)
 {
-struct bpf_map *map = aux->map;
+struct bpf_map *map;
+int err = -EINVAL;
 
+if (!linfo->map.map_fd)
+return -EBADF;
+
+map = bpf_map_get_with_uref(linfo->map.map_fd);
+if (IS_ERR(map))
+return PTR_ERR(map);
+
 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
-return -EINVAL;
+goto put_map;
 
-if (prog->aux->max_rdonly_access > map->value_size)
-return -EACCES;
+if (prog->aux->max_rdonly_access > map->value_size) {
+err = -EACCES;
+goto put_map;
+}
 
+aux->map = map;
 return 0;
+
+put_map:
+bpf_map_put_with_uref(map);
+return err;
 }
 
+static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
+{
+bpf_map_put_with_uref(aux->map);
+}
+
 static const struct seq_operations bpf_sk_storage_map_seq_ops = {
@@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 
 static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
 .target = "bpf_sk_storage_map",
-.check_target = bpf_iter_check_map,
-.req_linfo = BPF_ITER_LINK_MAP_FD,
+.attach_target = bpf_iter_attach_map,
+.detach_target = bpf_iter_detach_map,
 .ctx_arg_info_size = 2,
 .ctx_arg_info = {
 { offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
@@ -757,11 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 return err;
 }
 
-hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
-cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
-if (cpumask_empty(mask)) {
-free_cpumask_var(mask);
-return -EINVAL;
+if (!cpumask_empty(mask)) {
+hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+if (cpumask_empty(mask)) {
+free_cpumask_var(mask);
+return -EINVAL;
+}
 }
 
 map = kzalloc(max_t(unsigned int,
@@ -4853,7 +4853,7 @@ static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
 if (err < 0)
 goto out;
 
-if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+if (ip_is_fragment(ip_hdr(skb)))
 fragment = true;
 
 off = ip_hdrlen(skb);
@@ -3414,6 +3414,16 @@ static void sock_inuse_add(struct net *net, int val)
 }
 #endif
 
+static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
+{
+if (!twsk_prot)
+return;
+kfree(twsk_prot->twsk_slab_name);
+twsk_prot->twsk_slab_name = NULL;
+kmem_cache_destroy(twsk_prot->twsk_slab);
+twsk_prot->twsk_slab = NULL;
+}
+
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
 {
 if (!rsk_prot)
@@ -3484,7 +3494,7 @@ int proto_register(struct proto *prot, int alloc_slab)
 prot->slab_flags,
 NULL);
 if (prot->twsk_prot->twsk_slab == NULL)
-goto out_free_timewait_sock_slab_name;
+goto out_free_timewait_sock_slab;
 }
 }
 
@@ -3492,15 +3502,15 @@ int proto_register(struct proto *prot, int alloc_slab)
 ret = assign_proto_idx(prot);
 if (ret) {
 mutex_unlock(&proto_list_mutex);
-goto out_free_timewait_sock_slab_name;
+goto out_free_timewait_sock_slab;
 }
 list_add(&prot->node, &proto_list);
 mutex_unlock(&proto_list_mutex);
 return ret;
 
-out_free_timewait_sock_slab_name:
+out_free_timewait_sock_slab:
 if (alloc_slab && prot->twsk_prot)
-kfree(prot->twsk_prot->twsk_slab_name);
+tw_prot_cleanup(prot->twsk_prot);
 out_free_request_sock_slab:
 if (alloc_slab) {
 req_prot_cleanup(prot->rsk_prot);
@@ -3524,12 +3534,7 @@ void proto_unregister(struct proto *prot)
 prot->slab = NULL;
 
 req_prot_cleanup(prot->rsk_prot);
 
-if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
-kmem_cache_destroy(prot->twsk_prot->twsk_slab);
-kfree(prot->twsk_prot->twsk_slab_name);
-prot->twsk_prot->twsk_slab = NULL;
-}
+tw_prot_cleanup(prot->twsk_prot);
 }
 EXPORT_SYMBOL(proto_unregister);
 
@@ -57,18 +57,16 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
 return bpfilter_mbox_request(sk, optname, optval, optlen, true);
 }
 
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname,
-char __user *user_optval, int __user *optlen)
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+int __user *optlen)
 {
-sockptr_t optval;
-int err, len;
+int len;
 
 if (get_user(len, optlen))
 return -EFAULT;
-err = init_user_sockptr(&optval, user_optval, len);
-if (err)
-return err;
-return bpfilter_mbox_request(sk, optname, optval, len, false);
+
+return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len,
+false);
 }
 
 static int __init bpfilter_sockopt_init(void)
@@ -296,55 +296,12 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 ipv6_only_sock(sk), true, false);
 }
 
-/* Obtain a reference to a local port for the given sock,
-* if snum is zero it means select any available local port.
-* We try to allocate an odd port (and leave even ports for connect())
-*/
-int inet_csk_get_port(struct sock *sk, unsigned short snum)
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+struct sock *sk)
 {
-bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
-struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-int ret = 1, port = snum;
-struct inet_bind_hashbucket *head;
-struct net *net = sock_net(sk);
-struct inet_bind_bucket *tb = NULL;
 kuid_t uid = sock_i_uid(sk);
-int l3mdev;
+bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 
-l3mdev = inet_sk_bound_l3mdev(sk);
-
-if (!port) {
-head = inet_csk_find_open_port(sk, &tb, &port);
-if (!head)
-return ret;
-if (!tb)
-goto tb_not_found;
-goto success;
-}
-head = &hinfo->bhash[inet_bhashfn(net, port,
-hinfo->bhash_size)];
-spin_lock_bh(&head->lock);
-inet_bind_bucket_for_each(tb, &head->chain)
-if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
-tb->port == port)
-goto tb_found;
-tb_not_found:
-tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-net, head, port, l3mdev);
-if (!tb)
-goto fail_unlock;
-tb_found:
-if (!hlist_empty(&tb->owners)) {
-if (sk->sk_reuse == SK_FORCE_REUSE)
-goto success;
-
-if ((tb->fastreuse > 0 && reuse) ||
-sk_reuseport_match(tb, sk))
-goto success;
-if (inet_csk_bind_conflict(sk, tb, true, true))
-goto fail_unlock;
-}
-success:
 if (hlist_empty(&tb->owners)) {
 tb->fastreuse = reuse;
 if (sk->sk_reuseport) {
@@ -388,6 +345,58 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 tb->fastreuseport = 0;
 }
 }
+}
+
+/* Obtain a reference to a local port for the given sock,
+* if snum is zero it means select any available local port.
+* We try to allocate an odd port (and leave even ports for connect())
+*/
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
+{
+bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+int ret = 1, port = snum;
+struct inet_bind_hashbucket *head;
+struct net *net = sock_net(sk);
+struct inet_bind_bucket *tb = NULL;
+int l3mdev;
+
+l3mdev = inet_sk_bound_l3mdev(sk);
+
+if (!port) {
+head = inet_csk_find_open_port(sk, &tb, &port);
+if (!head)
+return ret;
+if (!tb)
+goto tb_not_found;
+goto success;
+}
+head = &hinfo->bhash[inet_bhashfn(net, port,
+hinfo->bhash_size)];
+spin_lock_bh(&head->lock);
+inet_bind_bucket_for_each(tb, &head->chain)
+if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+tb->port == port)
+goto tb_found;
+tb_not_found:
+tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
+net, head, port, l3mdev);
+if (!tb)
+goto fail_unlock;
+tb_found:
+if (!hlist_empty(&tb->owners)) {
+if (sk->sk_reuse == SK_FORCE_REUSE)
+goto success;
+
+if ((tb->fastreuse > 0 && reuse) ||
+sk_reuseport_match(tb, sk))
+goto success;
+if (inet_csk_bind_conflict(sk, tb, true, true))
+goto fail_unlock;
+}
+success:
+inet_csk_update_fastreuse(tb, sk);
+
 if (!inet_csk(sk)->icsk_bind_hash)
 inet_bind_hash(sk, tb, port);
 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 return -ENOMEM;
 }
 }
+inet_csk_update_fastreuse(tb, child);
 }
 inet_bind_hash(child, tb, port);
 spin_unlock(&head->lock);
@@ -301,24 +301,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
 struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
 2 * TCP_FASTOPEN_KEY_MAX) +
 (TCP_FASTOPEN_KEY_MAX * 5)) };
-struct tcp_fastopen_context *ctx;
-u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
-__le32 key[TCP_FASTOPEN_KEY_MAX * 4];
+u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
+__le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
 char *backup_data;
-int ret, i = 0, off = 0, n_keys = 0;
+int ret, i = 0, off = 0, n_keys;
 
 tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
 if (!tbl.data)
 return -ENOMEM;
 
-rcu_read_lock();
-ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
-if (ctx) {
-n_keys = tcp_fastopen_context_len(ctx);
-memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
-}
-rcu_read_unlock();
-
+n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
 if (!n_keys) {
 memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
 n_keys = 1;
@@ -3685,22 +3685,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 return 0;
 
 case TCP_FASTOPEN_KEY: {
-__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
-struct tcp_fastopen_context *ctx;
-unsigned int key_len = 0;
+u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
+unsigned int key_len;
 
 if (get_user(len, optlen))
 return -EFAULT;
 
-rcu_read_lock();
-ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
-if (ctx) {
-key_len = tcp_fastopen_context_len(ctx) *
-TCP_FASTOPEN_KEY_LENGTH;
-memcpy(&key[0], &ctx->key[0], key_len);
-}
-rcu_read_unlock();
-
+key_len = tcp_fastopen_get_cipher(net, icsk, key) *
+TCP_FASTOPEN_KEY_LENGTH;
 len = min_t(unsigned int, len, key_len);
 if (put_user(len, optlen))
 return -EFAULT;
@@ -108,6 +108,29 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
 return err;
 }
 
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+u64 *key)
+{
+struct tcp_fastopen_context *ctx;
+int n_keys = 0, i;
+
+rcu_read_lock();
+if (icsk)
+ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
+else
+ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+if (ctx) {
+n_keys = tcp_fastopen_context_len(ctx);
+for (i = 0; i < n_keys; i++) {
+put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
+put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
+}
+}
+rcu_read_unlock();
+
+return n_keys;
+}
+
 static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
 struct sk_buff *syn,
 const siphash_key_t *key,
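Why the new helper serializes with put_unaligned_le64(): the sysctl and getsockopt readers interpret the key buffer as little-endian words, so each siphash key half is written out in a fixed byte order; on a big-endian host a plain memcpy() of the u64s would present the bytes reversed, which was the TFO key-reading bug. A sketch:

u64 half = ctx->key[i].key[0];		/* CPU-endian in memory */
put_unaligned_le64(half, key + (i * 2));	/* identical bytes on LE and BE */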
@@ -423,12 +423,12 @@ static void mptcp_sock_destruct(struct sock *sk)
 * also remove the mptcp socket, via
 * sock_put(ctx->conn).
 *
-* Problem is that the mptcp socket will not be in
-* SYN_RECV state and doesn't have SOCK_DEAD flag.
+* Problem is that the mptcp socket will be in
+* ESTABLISHED state and will not have the SOCK_DEAD flag.
 * Both result in warnings from inet_sock_destruct.
 */
 
-if (sk->sk_state == TCP_SYN_RECV) {
+if (sk->sk_state == TCP_ESTABLISHED) {
 sk->sk_state = TCP_CLOSE;
 WARN_ON_ONCE(sk->sk_socket);
 sock_orphan(sk);
@@ -328,10 +328,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
 if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
 return -ESOCKTNOSUPPORT;
 
-if (sock->type == SOCK_RAW)
+if (sock->type == SOCK_RAW) {
+if (!capable(CAP_NET_RAW))
+return -EPERM;
 sock->ops = &rawsock_raw_ops;
-else
+} else {
 sock->ops = &rawsock_ops;
+}
 
 sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
 if (!sk)
@@ -1756,6 +1756,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 /* Called with ovs_mutex. */
 static void __dp_destroy(struct datapath *dp)
 {
+struct flow_table *table = &dp->table;
 int i;
 
 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
@@ -1774,7 +1775,14 @@ static void __dp_destroy(struct datapath *dp)
 */
 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
-/* RCU destroy the flow table */
+/* Flush sw_flow in the tables. RCU cb only releases resource
+* such as dp, ports and tables. That may avoid some issues
+* such as RCU usage warning.
+*/
+table_instance_flow_flush(table, ovsl_dereference(table->ti),
+ovsl_dereference(table->ufid_ti));
+
+/* RCU destroy the ports, meters and flow tables. */
 call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
@@ -473,19 +473,15 @@ static void table_instance_flow_free(struct flow_table *table,
 flow_mask_remove(table, flow->mask);
 }
 
-static void table_instance_destroy(struct flow_table *table,
-struct table_instance *ti,
-struct table_instance *ufid_ti,
-bool deferred)
+/* Must be called with OVS mutex held. */
+void table_instance_flow_flush(struct flow_table *table,
+struct table_instance *ti,
+struct table_instance *ufid_ti)
 {
 int i;
 
-if (!ti)
-return;
-
-BUG_ON(!ufid_ti);
 if (ti->keep_flows)
-goto skip_flows;
+return;
 
 for (i = 0; i < ti->n_buckets; i++) {
 struct sw_flow *flow;
@@ -497,18 +493,16 @@ static void table_instance_destroy(struct flow_table *table,
 
 table_instance_flow_free(table, ti, ufid_ti,
 flow, false);
-ovs_flow_free(flow, deferred);
+ovs_flow_free(flow, true);
 }
 }
 }
 
-skip_flows:
-if (deferred) {
-call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
-call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
-} else {
-__table_instance_destroy(ti);
-__table_instance_destroy(ufid_ti);
-}
+static void table_instance_destroy(struct table_instance *ti,
+struct table_instance *ufid_ti)
+{
+call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
 }
 
 /* No need for locking this function is called from RCU callback or
@@ -523,7 +517,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 
 call_rcu(&mc->rcu, mask_cache_rcu_cb);
 call_rcu(&ma->rcu, mask_array_rcu_cb);
-table_instance_destroy(table, ti, ufid_ti, false);
+table_instance_destroy(ti, ufid_ti);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -641,7 +635,8 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
 flow_table->count = 0;
 flow_table->ufid_count = 0;
 
-table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
+table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
+table_instance_destroy(old_ti, old_ufid_ti);
 return 0;
 
 err_free_ti:
@@ -105,5 +105,8 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 bool full, const struct sw_flow_mask *mask);
 
 void ovs_flow_masks_rebalance(struct flow_table *table);
+void table_instance_flow_flush(struct flow_table *table,
+struct table_instance *ti,
+struct table_instance *ufid_ti);
 
 #endif /* flow_table.h */
@@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 }
 
 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+__releases(&pkc->blk_fill_in_prog_lock)
 {
 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
 
@@ -989,6 +990,7 @@ static void prb_fill_curr_block(char *curr,
 struct tpacket_kbdq_core *pkc,
 struct tpacket_block_desc *pbd,
 unsigned int len)
+__acquires(&pkc->blk_fill_in_prog_lock)
 {
 struct tpacket3_hdr *ppd;
 
@@ -2286,8 +2288,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 if (do_vnet &&
 virtio_net_hdr_from_skb(skb, h.raw + macoff -
 sizeof(struct virtio_net_hdr),
-vio_le(), true, 0))
+vio_le(), true, 0)) {
+if (po->tp_version == TPACKET_V3)
+prb_clear_blk_fill_status(&po->rx_ring);
 goto drop_n_account;
+}
 
 if (po->tp_version <= TPACKET_V2) {
 packet_increment_rx_head(po, &po->rx_ring);
@@ -2393,7 +2398,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
 spin_unlock(&sk->sk_receive_queue.lock);
 sk->sk_data_ready(sk);
-} else {
+} else if (po->tp_version == TPACKET_V3) {
 prb_clear_blk_fill_status(&po->rx_ring);
 }
 
net/socket.c: 23 lines changed
@@ -500,7 +500,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
 if (f.file) {
 sock = sock_from_file(f.file, err);
 if (likely(sock)) {
-*fput_needed = f.flags;
+*fput_needed = f.flags & FDPUT_FPUT;
 return sock;
 }
 fdput(f);
@@ -1325,7 +1325,7 @@ int sock_wake_async(struct socket_wq *wq, int how, int band)
 case SOCK_WAKE_SPACE:
 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
 break;
-/* fall through */
+fallthrough;
 case SOCK_WAKE_IO:
 call_kill:
 kill_fasync(&wq->fasync_list, SIGIO, band);
@@ -1804,8 +1804,7 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
 ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
 upeer_addrlen, flags,
 rlimit(RLIMIT_NOFILE));
-if (f.flags)
-fput(f.file);
+fdput(f);
 }
 
 return ret;
@@ -1868,8 +1867,7 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
 ret = move_addr_to_kernel(uservaddr, addrlen, &address);
 if (!ret)
 ret = __sys_connect_file(f.file, &address, addrlen, 0);
-if (f.flags)
-fput(f.file);
+fdput(f);
 }
 
 return ret;
@@ -2097,7 +2095,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock)
 int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
 int optlen)
 {
-sockptr_t optval;
+sockptr_t optval = USER_SOCKPTR(user_optval);
 char *kernel_optval = NULL;
 int err, fput_needed;
 struct socket *sock;
@@ -2105,10 +2103,6 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
 if (optlen < 0)
 return -EINVAL;
 
-err = init_user_sockptr(&optval, user_optval, optlen);
-if (err)
-return err;
-
 sock = sockfd_lookup_light(fd, &err, &fput_needed);
 if (!sock)
 return err;
@@ -3065,7 +3059,7 @@ static int __init sock_init(void)
 
 err = register_filesystem(&sock_fs_type);
 if (err)
-goto out_fs;
+goto out;
 sock_mnt = kern_mount(&sock_fs_type);
 if (IS_ERR(sock_mnt)) {
 err = PTR_ERR(sock_mnt);
@@ -3088,7 +3082,6 @@ static int __init sock_init(void)
 
 out_mount:
 unregister_filesystem(&sock_fs_type);
-out_fs:
 goto out;
 }
 
@@ -3161,13 +3154,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
 return -ENOMEM;
 buf_size += rule_cnt * sizeof(u32);
-/* fall through */
+fallthrough;
 case ETHTOOL_GRXRINGS:
 case ETHTOOL_GRXCLSRLCNT:
 case ETHTOOL_GRXCLSRULE:
 case ETHTOOL_SRXCLSRLINS:
 convert_out = true;
-/* fall through */
+fallthrough;
 case ETHTOOL_SRXCLSRLDEL:
 buf_size += sizeof(struct ethtool_rxnfc);
 convert_in = true;
@@ -561,7 +561,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 {
 struct tls_context *tls_ctx = tls_get_ctx(sk);
 struct iov_iter msg_iter;
-char *kaddr = kmap(page);
+char *kaddr;
 struct kvec iov;
 int rc;
 
@@ -576,6 +576,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 goto out;
 }
 
+kaddr = kmap(page);
 iov.iov_base = kaddr + offset;
 iov.iov_len = size;
 iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
@@ -935,7 +935,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 int ret = 0;
 int pending;
 
-if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+MSG_CMSG_COMPAT))
 return -EOPNOTSUPP;
 
 mutex_lock(&tls_ctx->tx_lock);
@@ -1032,7 +1032,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
 }
 
 /* Connected sockets that can produce data can be written. */
-if (sk->sk_state == TCP_ESTABLISHED) {
+if (transport && sk->sk_state == TCP_ESTABLISHED) {
 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 bool space_avail_now = false;
 int ret = transport->notify_poll_out(
@@ -11,6 +11,7 @@
 static int do_pin(int argc, char **argv)
 {
 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
+union bpf_iter_link_info linfo;
 const char *objfile, *path;
 struct bpf_program *prog;
 struct bpf_object *obj;
@@ -36,6 +37,11 @@ static int do_pin(int argc, char **argv)
 map_fd = map_parse_fd(&argc, &argv);
 if (map_fd < 0)
 return -1;
+
+memset(&linfo, 0, sizeof(linfo));
+linfo.map.map_fd = map_fd;
+iter_opts.link_info = &linfo;
+iter_opts.link_info_len = sizeof(linfo);
 }
 }
 
@@ -57,9 +63,6 @@ static int do_pin(int argc, char **argv)
 goto close_obj;
 }
 
-if (map_fd >= 0)
-iter_opts.map_fd = map_fd;
-
 link = bpf_program__attach_iter(prog, &iter_opts);
 if (IS_ERR(link)) {
 err = PTR_ERR(link);
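Putting the bpftool and libbpf pieces together: attaching a map-element iterator now goes through bpf_iter_link_info instead of the removed map_fd opt. A minimal sketch (error handling elided; map_fd assumed to be a valid BPF map fd):

DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo = {};
struct bpf_link *link;

linfo.map.map_fd = map_fd;		/* target map for the bpf_map_elem iterator */
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(prog, &opts);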
@@ -566,6 +566,7 @@ static int sets_patch(struct object *obj)
 
 next = rb_next(next);
 }
+return 0;
 }
 
 static int symbols_patch(struct object *obj)
@@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
 __u32 attach_type; /* program attach type */
 };
 
+union bpf_iter_link_info {
+struct {
+__u32 map_fd;
+} map;
+};
+
 /* BPF syscall commands, see bpf(2) man-page for details. */
 enum bpf_cmd {
 BPF_MAP_CREATE,
@@ -249,13 +255,6 @@ enum bpf_link_type {
 MAX_BPF_LINK_TYPE,
 };
 
-enum bpf_iter_link_info {
-BPF_ITER_LINK_UNSPEC = 0,
-BPF_ITER_LINK_MAP_FD = 1,
-
-MAX_BPF_ITER_LINK_INFO,
-};
-
 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
@@ -623,6 +622,8 @@ union bpf_attr {
 };
 __u32 attach_type; /* attach type */
 __u32 flags; /* extra flags */
+__aligned_u64 iter_info; /* extra bpf_iter_link_info */
+__u32 iter_info_len; /* iter_info length */
 } link_create;
 
 struct { /* struct used by BPF_LINK_UPDATE command */
@@ -599,6 +599,9 @@ int bpf_link_create(int prog_fd, int target_fd,
 attr.link_create.target_fd = target_fd;
 attr.link_create.attach_type = attach_type;
 attr.link_create.flags = OPTS_GET(opts, flags, 0);
+attr.link_create.iter_info =
+ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
+attr.link_create.iter_info_len = OPTS_GET(opts, iter_info_len, 0);
 
 return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
 }
@@ -168,11 +168,14 @@ LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
 LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
 enum bpf_attach_type type);
 
+union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
 struct bpf_link_create_opts {
 size_t sz; /* size of this struct for forward/backward compatibility */
 __u32 flags;
+union bpf_iter_link_info *iter_info;
+__u32 iter_info_len;
 };
-#define bpf_link_create_opts__last_field flags
+#define bpf_link_create_opts__last_field iter_info_len
 
 LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
 enum bpf_attach_type attach_type,
@@ -564,8 +564,8 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
 
 struct btf *btf__parse_raw(const char *path)
 {
-struct btf *btf = NULL;
-void *data = NULL;
+struct btf *btf;
 FILE *f = NULL;
 __u16 magic;
 int err = 0;
@@ -8306,10 +8306,8 @@ bpf_program__attach_iter(struct bpf_program *prog,
 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
 return ERR_PTR(-EINVAL);
 
-if (OPTS_HAS(opts, map_fd)) {
-target_fd = opts->map_fd;
-link_create_opts.flags = BPF_ITER_LINK_MAP_FD;
-}
+link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
+link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
 
 prog_fd = bpf_program__fd(prog);
 if (prog_fd < 0) {
@@ -267,9 +267,10 @@ LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
 
 struct bpf_iter_attach_opts {
 size_t sz; /* size of this struct for forward/backward compatibility */
-__u32 map_fd;
+union bpf_iter_link_info *link_info;
+__u32 link_info_len;
 };
-#define bpf_iter_attach_opts__last_field map_fd
+#define bpf_iter_attach_opts__last_field link_info_len
 
 LIBBPF_API struct bpf_link *
 bpf_program__attach_iter(struct bpf_program *prog,
@ -102,7 +102,7 @@ endif
|
|||
OVERRIDE_TARGETS := 1
|
||||
override define CLEAN
|
||||
$(call msg,CLEAN)
|
||||
$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
|
||||
$(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
|
||||
endef
|
||||
|
||||
include ../lib.mk
|
||||
|
@@ -123,17 +123,21 @@ $(notdir $(TEST_GEN_PROGS) \
 	 $(TEST_GEN_PROGS_EXTENDED) \
 	 $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
 
+$(OUTPUT)/%.o: %.c
+	$(call msg,CC,,$@)
+	$(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
+
 $(OUTPUT)/%:%.c
 	$(call msg,BINARY,,$@)
-	$(LINK.c) $^ $(LDLIBS) -o $@
+	$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
 
 $(OUTPUT)/urandom_read: urandom_read.c
 	$(call msg,BINARY,,$@)
-	$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
+	$(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
 
 $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
 	$(call msg,CC,,$@)
-	$(CC) -c $(CFLAGS) -o $@ $<
+	$(Q)$(CC) -c $(CFLAGS) -o $@ $<
 
 VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
 		     $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
@@ -142,7 +146,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
 		     /boot/vmlinux-$(shell uname -r)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
 
-$(OUTPUT)/runqslower: $(BPFOBJ)
+DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
+
+$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL)
 	$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
 		    OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \
 		    BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
@@ -164,7 +170,6 @@ $(OUTPUT)/test_netcnt: cgroup_helpers.c
 $(OUTPUT)/test_sock_fields: cgroup_helpers.c
 $(OUTPUT)/test_sysctl: cgroup_helpers.c
 
-DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
 BPFTOOL ?= $(DEFAULT_BPFTOOL)
 $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
 		    $(BPFOBJ) | $(BUILD_DIR)/bpftool
@@ -180,15 +185,15 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
 
 $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
 	$(call msg,MKDIR,,$@)
-	mkdir -p $@
+	$(Q)mkdir -p $@
 
 $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
 ifeq ($(VMLINUX_H),)
 	$(call msg,GEN,,$@)
-	$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+	$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
 else
 	$(call msg,CP,,$@)
-	cp "$(VMLINUX_H)" $@
+	$(Q)cp "$(VMLINUX_H)" $@
 endif
 
 $(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \
@@ -237,28 +242,28 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
 # $4 - LDFLAGS
 define CLANG_BPF_BUILD_RULE
 	$(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
-	($(CLANG) $3 -O2 -target bpf -emit-llvm \
+	$(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm \
 		-c $1 -o - || echo "BPF obj compilation failed") | \
 	$(LLC) -mattr=dwarfris -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
 define CLANG_NOALU32_BPF_BUILD_RULE
 	$(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
-	($(CLANG) $3 -O2 -target bpf -emit-llvm \
+	$(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm \
 		-c $1 -o - || echo "BPF obj compilation failed") | \
 	$(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
 define CLANG_NATIVE_BPF_BUILD_RULE
 	$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
-	($(CLANG) $3 -O2 -emit-llvm \
+	$(Q)($(CLANG) $3 -O2 -emit-llvm \
 		-c $1 -o - || echo "BPF obj compilation failed") | \
 	$(LLC) -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
 endef
 # Build BPF object using GCC
 define GCC_BPF_BUILD_RULE
 	$(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
-	$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
+	$(Q)$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
 endef
 
 SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
@@ -300,7 +305,7 @@ ifeq ($($(TRUNNER_OUTPUT)-dir),)
 $(TRUNNER_OUTPUT)-dir := y
 $(TRUNNER_OUTPUT):
 	$$(call msg,MKDIR,,$$@)
-	mkdir -p $$@
+	$(Q)mkdir -p $$@
 endif
 
 # ensure we set up BPF objects generation rule just once for a given
@@ -320,7 +325,7 @@ $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
 		      $(TRUNNER_OUTPUT)/%.o \
 		      | $(BPFTOOL) $(TRUNNER_OUTPUT)
 	$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
-	$$(BPFTOOL) gen skeleton $$< > $$@
+	$(Q)$$(BPFTOOL) gen skeleton $$< > $$@
 endif
 
 # ensure we set up tests.h header generation rule just once
@@ -344,7 +349,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
 		      $(TRUNNER_BPF_SKELS) \
 		      $$(BPFOBJ) | $(TRUNNER_OUTPUT)
 	$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
-	cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+	$(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
 
 $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
 		       %.c \
@@ -352,13 +357,13 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
 		       $(TRUNNER_TESTS_HDR) \
 		       $$(BPFOBJ) | $(TRUNNER_OUTPUT)
 	$$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@)
-	$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+	$(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
 
 # only copy extra resources if in flavored build
 $(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
 ifneq ($2,)
 	$$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES))
-	cp -a $$^ $(TRUNNER_OUTPUT)/
+	$(Q)cp -a $$^ $(TRUNNER_OUTPUT)/
 endif
 
 $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
@@ -366,8 +371,8 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
 			     $(RESOLVE_BTFIDS) \
 			     | $(TRUNNER_BINARY)-extras
 	$$(call msg,BINARY,,$$@)
-	$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
-	$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
+	$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
+	$(Q)$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
 
 endef
 
@@ -420,17 +425,17 @@ verifier/tests.h: verifier/*.c
 		  ) > verifier/tests.h)
 $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
 	$(call msg,BINARY,,$@)
-	$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+	$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
 
 # Make sure we are able to include and link libbpf against c++.
 $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
 	$(call msg,CXX,,$@)
-	$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
+	$(Q)$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
 
 # Benchmark runner
 $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h
 	$(call msg,CC,,$@)
-	$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
+	$(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
 $(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h
 $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h
 $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
@@ -443,7 +448,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
 		 $(OUTPUT)/bench_trigger.o \
 		 $(OUTPUT)/bench_ringbufs.o
 	$(call msg,BINARY,,$@)
-	$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
+	$(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
 
 EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) \
 	prog_tests/tests.h map_tests/tests.h verifier/tests.h \
@@ -468,6 +468,7 @@ static void test_bpf_hash_map(void)
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct bpf_iter_bpf_hash_map *skel;
 	int err, i, len, map_fd, iter_fd;
+	union bpf_iter_link_info linfo;
 	__u64 val, expected_val = 0;
 	struct bpf_link *link;
 	struct key_t {
@@ -490,13 +491,16 @@ static void test_bpf_hash_map(void)
 		goto out;
 
 	/* iterator with hashmap2 and hashmap3 should fail */
-	opts.map_fd = bpf_map__fd(skel->maps.hashmap2);
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 	if (CHECK(!IS_ERR(link), "attach_iter",
 		  "attach_iter for hashmap2 unexpected succeeded\n"))
 		goto out;
 
-	opts.map_fd = bpf_map__fd(skel->maps.hashmap3);
+	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 	if (CHECK(!IS_ERR(link), "attach_iter",
 		  "attach_iter for hashmap3 unexpected succeeded\n"))
@@ -519,7 +523,7 @@ static void test_bpf_hash_map(void)
 		goto out;
 	}
 
-	opts.map_fd = map_fd;
+	linfo.map.map_fd = map_fd;
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
 		goto out;
@@ -562,6 +566,7 @@ static void test_bpf_percpu_hash_map(void)
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct bpf_iter_bpf_percpu_hash_map *skel;
 	int err, i, j, len, map_fd, iter_fd;
+	union bpf_iter_link_info linfo;
 	__u32 expected_val = 0;
 	struct bpf_link *link;
 	struct key_t {
@@ -606,7 +611,10 @@ static void test_bpf_percpu_hash_map(void)
 		goto out;
 	}
 
-	opts.map_fd = map_fd;
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
 		goto out;
@@ -649,6 +657,7 @@ static void test_bpf_array_map(void)
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	__u32 expected_key = 0, res_first_key;
 	struct bpf_iter_bpf_array_map *skel;
+	union bpf_iter_link_info linfo;
 	int err, i, map_fd, iter_fd;
 	struct bpf_link *link;
 	char buf[64] = {};
@@ -673,7 +682,10 @@ static void test_bpf_array_map(void)
 		goto out;
 	}
 
-	opts.map_fd = map_fd;
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
 		goto out;
@@ -730,6 +742,7 @@ static void test_bpf_percpu_array_map(void)
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct bpf_iter_bpf_percpu_array_map *skel;
 	__u32 expected_key = 0, expected_val = 0;
+	union bpf_iter_link_info linfo;
 	int err, i, j, map_fd, iter_fd;
 	struct bpf_link *link;
 	char buf[64];
@@ -765,7 +778,10 @@ static void test_bpf_percpu_array_map(void)
 		goto out;
 	}
 
-	opts.map_fd = map_fd;
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
 		goto out;
@@ -803,6 +819,7 @@ static void test_bpf_sk_storage_map(void)
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	int err, i, len, map_fd, iter_fd, num_sockets;
 	struct bpf_iter_bpf_sk_storage_map *skel;
+	union bpf_iter_link_info linfo;
 	int sock_fd[3] = {-1, -1, -1};
 	__u32 val, expected_val = 0;
 	struct bpf_link *link;
@@ -829,7 +846,10 @@ static void test_bpf_sk_storage_map(void)
 		goto out;
 	}
 
-	opts.map_fd = map_fd;
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
 		goto out;
@@ -871,6 +891,7 @@ static void test_rdonly_buf_out_of_bound(void)
 {
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct bpf_iter_test_kern5 *skel;
+	union bpf_iter_link_info linfo;
 	struct bpf_link *link;
 
 	skel = bpf_iter_test_kern5__open_and_load();
@@ -878,7 +899,10 @@ static void test_rdonly_buf_out_of_bound(void)
 		  "skeleton open_and_load failed\n"))
 		return;
 
-	opts.map_fd = bpf_map__fd(skel->maps.hashmap1);
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
 	if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
 		bpf_link__destroy(link);
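Once attached, the tests consume the iterator by creating an iterator fd from the link and reading it to EOF; a trimmed sketch of that pattern (error handling mostly omitted):

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int drain_iter(struct bpf_link *link)
{
	char buf[64];
	int iter_fd, len;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		return -1;
	/* each read() runs the iterator BPF program over more elements */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	close(iter_fd);
	return len;
}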
@@ -48,21 +48,19 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 		close(pipe_p2c[1]); /* close write */
 
 		/* notify parent signal handler is installed */
-		write(pipe_c2p[1], buf, 1);
+		CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
 		/* make sure parent enabled bpf program to send_signal */
-		read(pipe_p2c[0], buf, 1);
+		CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
 
 		/* wait a little for signal handler */
 		sleep(1);
 
-		if (sigusr1_received)
-			write(pipe_c2p[1], "2", 1);
-		else
-			write(pipe_c2p[1], "0", 1);
+		buf[0] = sigusr1_received ? '2' : '0';
+		CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
 		/* wait for parent notification and exit */
-		read(pipe_p2c[0], buf, 1);
+		CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
 
 		close(pipe_c2p[1]);
 		close(pipe_p2c[0]);
@@ -99,7 +97,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 	}
 
 	/* wait until child signal handler installed */
-	read(pipe_c2p[0], buf, 1);
+	CHECK(read(pipe_c2p[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
 
 	/* trigger the bpf send_signal */
 	skel->bss->pid = pid;
@@ -107,7 +105,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 	skel->bss->signal_thread = signal_thread;
 
 	/* notify child that bpf program can send_signal now */
-	write(pipe_p2c[1], buf, 1);
+	CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
 	/* wait for result */
 	err = read(pipe_c2p[0], buf, 1);
@@ -121,7 +119,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 	CHECK(buf[0] != '2', test_name, "incorrect result\n");
 
 	/* notify child safe to exit */
-	write(pipe_p2c[1], buf, 1);
+	CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
 disable_pmu:
 	close(pmu_fd);
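The CHECK() wrappers above treat any short pipe transfer as a failure, which is fine for these 1-byte handshakes. A more general variant (an assumption, not part of the patch) would retry on EINTR and on short writes:

#include <errno.h>
#include <unistd.h>

static int write_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;

	while (len) {
		ssize_t n = write(fd, p, len);

		if (n < 0) {
			if (errno == EINTR)
				continue; /* retry interrupted writes */
			return -1;
		}
		p += n;
		len -= n;
	}
	return 0;
}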
@@ -6,11 +6,13 @@ static __u64 read_perf_max_sample_freq(void)
 {
 	__u64 sample_freq = 5000; /* fallback to 5000 on error */
 	FILE *f;
+	__u32 duration = 0;
 
 	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
 	if (f == NULL)
 		return sample_freq;
-	fscanf(f, "%llu", &sample_freq);
+	CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
+	      "return default value: 5000,err %d\n", -errno);
 	fclose(f);
 	return sample_freq;
 }
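The same pattern, generalized: read one numeric value from a procfs file and fall back to a default when the file is missing or unparsable (path handling mirrors the selftest; the helper name is made up):

#include <stdio.h>

static unsigned long long read_ull_or(const char *path,
				      unsigned long long dflt)
{
	unsigned long long val = dflt;
	FILE *f = fopen(path, "r");

	if (!f)
		return dflt;
	if (fscanf(f, "%llu", &val) != 1)
		val = dflt; /* keep the fallback on parse failure */
	fclose(f);
	return val;
}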
tools/testing/selftests/bpf/settings (new file)
@@ -0,0 +1 @@
+timeout=0
@@ -124,17 +124,24 @@ int main(int argc, char **argv)
 	sprintf(test_script,
 		"iptables -A INPUT -p tcp --dport %d -j DROP",
 		TESTPORT);
-	system(test_script);
+	if (system(test_script)) {
+		printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
+		goto err;
+	}
 
 	sprintf(test_script,
 		"nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
 		TESTPORT);
-	system(test_script);
+	if (system(test_script))
+		printf("execute command: %s, err %d\n", test_script, -errno);
 
 	sprintf(test_script,
 		"iptables -D INPUT -p tcp --dport %d -j DROP",
 		TESTPORT);
-	system(test_script);
+	if (system(test_script)) {
+		printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
+		goto err;
+	}
 
 	rv = bpf_map_lookup_elem(bpf_map__fd(global_map), &key, &g);
 	if (rv != 0) {
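Note that system() returns a wait status rather than an exit code, so `if (system(cmd))` also trips when the command is killed by a signal or the shell cannot start. A stricter wrapper (an illustration, not part of the patch) would decode the status:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int run_cmd(const char *cmd)
{
	int status = system(cmd);

	if (status < 0) {
		printf("FAILED to run: %s, err %d\n", cmd, -errno);
		return -1;
	}
	if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
		return 0;
	printf("FAILED: %s, wait status 0x%x\n", cmd, status);
	return -1;
}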
@@ -1,4 +1,6 @@
 CONFIG_MPTCP=y
 CONFIG_MPTCP_IPV6=y
+CONFIG_INET_DIAG=m
+CONFIG_INET_MPTCP_DIAG=m
 CONFIG_VETH=y
 CONFIG_NET_SCH_NETEM=m
@@ -406,10 +406,11 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
 
 			/* ... but we still receive.
 			 * Close our write side, ev. give some time
-			 * for address notification
+			 * for address notification and/or checking
+			 * the current status
 			 */
-			if (cfg_join)
-				usleep(400000);
+			if (cfg_wait)
+				usleep(cfg_wait);
 			shutdown(peerfd, SHUT_WR);
 		} else {
 			if (errno == EINTR)

@@ -427,7 +428,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
 	}
 
 	/* leave some time for late join/announce */
-	if (cfg_wait)
+	if (cfg_join)
 		usleep(cfg_wait);
 
 	close(peerfd);