Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
 "Another week, another set of bug fixes:

   1) Fix pskb_pull length in __xfrm_transport_prep(), from Xin Long.

   2) Fix double xfrm_state put in esp{4,6}_gro_receive(), also from
      Xin Long.

   3) Re-arm discovery timer properly in mac80211 mesh code, from
      Linus Lüssing.

   4) Prevent buffer overflows in nf_conntrack_pptp debug code, from
      Pablo Neira Ayuso.

   5) Fix race in ktls code between tls_sw_recvmsg() and
      tls_decrypt_done(), from Vinay Kumar Yadav.

   6) Fix crashes on TCP fallback in MPTCP code, from Paolo Abeni.

   7) More validation is necessary of untrusted GSO packets coming
      from virtualization devices, from Willem de Bruijn.

   8) Fix endianness of bnxt_en firmware message length accesses, from
      Edwin Peer.

   9) Fix infinite loop in sch_fq_pie, from Davide Caratti.

  10) Fix lockdep splat in DSA by setting lockless TX in netdev
      features for slave ports, from Vladimir Oltean.

  11) Fix suspend/resume crashes in mlx5, from Mark Bloch.

  12) Fix use after free in bpf fmod_ret, from Alexei Starovoitov.

  13) ARP retransmit timer guard uses wrong offset, from Hongbin Liu.

  14) Fix leak in inetdev_init(), from Yang Yingliang.

  15) Don't try to use inet hash and unhash in l2tp code, results in
      crashes. From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (77 commits)
  l2tp: add sk_family checks to l2tp_validate_socket
  l2tp: do not use inet_hash()/inet_unhash()
  net: qrtr: Allocate workqueue before kernel_bind
  mptcp: remove msk from the token container at destruction time.
  mptcp: fix race between MP_JOIN and close
  mptcp: fix unblocking connect()
  net/sched: act_ct: add nat mangle action only for NAT-conntrack
  devinet: fix memleak in inetdev_init()
  virtio_vsock: Fix race condition in virtio_transport_recv_pkt
  drivers/net/ibmvnic: Update VNIC protocol version reporting
  NFC: st21nfca: add missed kfree_skb() in an error path
  neigh: fix ARP retransmit timer guard
  bpf, selftests: Add a verifier test for assigning 32bit reg states to 64bit ones
  bpf, selftests: Verifier bounds tests need to be updated
  bpf: Fix a verifier issue when assigning 32bit reg states to 64bit ones
  bpf: Fix use-after-free in fmod_ret check
  net/mlx5e: replace EINVAL in mlx5e_flower_parse_meta()
  net/mlx5e: Fix MLX5_TC_CT dependencies
  net/mlx5e: Properly set default values when disabling adaptive moderation
  net/mlx5e: Fix arch depending casting issue in FEC
  ...
commit 19835b1ba6
@@ -126,6 +126,7 @@ config PPC
 	select ARCH_HAS_MMIOWB			if PPC64
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PTE_DEVMAP		if PPC_BOOK3S_64
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
 			make_tx_data_wr(sk, skb, immdlen, len,
 					credits_needed, completion);
 			tp->snd_nxt += len;
-			tp->lsndtime = tcp_time_stamp(tp);
+			tp->lsndtime = tcp_jiffies32;
 			if (completion)
 				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
 		} else {
@@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave)
 
 	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
 				   &(slave->dev->dev.kobj), "bonding_slave");
-	if (err)
+	if (err) {
+		kobject_put(&slave->kobj);
 		return err;
+	}
 
 	for (a = slave_attrs; *a; ++a) {
 		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
@@ -102,13 +102,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
 			   const struct switchdev_obj_port_vlan *vlan)
 {
 	struct ocelot *ocelot = ds->priv;
+	u16 flags = vlan->flags;
 	u16 vid;
 	int err;
 
+	if (dsa_is_cpu_port(ds, port))
+		flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
+
 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
 		err = ocelot_vlan_add(ocelot, port, vid,
-				      vlan->flags & BRIDGE_VLAN_INFO_PVID,
-				      vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+				      flags & BRIDGE_VLAN_INFO_PVID,
+				      flags & BRIDGE_VLAN_INFO_UNTAGGED);
 		if (err) {
 			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
 				vid, port, err);
@@ -4176,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 	int i, intr_process, rc, tmo_count;
 	struct input *req = msg;
 	u32 *data = msg;
-	__le32 *resp_len;
 	u8 *valid;
 	u16 cp_ring_id, len = 0;
 	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
 	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
 	struct hwrm_short_input short_input = {0};
 	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
-	u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
 	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
 	u16 dst = BNXT_HWRM_CHNL_CHIMP;
@@ -4201,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
 		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
 		resp = bp->hwrm_cmd_kong_resp_addr;
-		resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
 	}
 
 	memset(resp, 0, PAGE_SIZE);
@@ -4270,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
 	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
 	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
-	resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
 
 	if (intr_process) {
 		u16 seq_id = bp->hwrm_intr_seq_id;
@@ -4298,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 				   le16_to_cpu(req->req_type));
 			return -EBUSY;
 		}
-		len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
-		      HWRM_RESP_LEN_SFT;
-		valid = resp_addr + len - 1;
+		len = le16_to_cpu(resp->resp_len);
+		valid = ((u8 *)resp) + len - 1;
 	} else {
 		int j;
 
@@ -4311,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 			 */
 			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
 				return -EBUSY;
-			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
-			      HWRM_RESP_LEN_SFT;
+			len = le16_to_cpu(resp->resp_len);
 			if (len)
 				break;
 			/* on first few passes, just barely sleep */
@@ -4334,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 		}
 
 		/* Last byte of resp contains valid bit */
-		valid = resp_addr + len - 1;
+		valid = ((u8 *)resp) + len - 1;
 		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
 			/* make sure we read from updated DMA memory */
 			dma_rmb();
@@ -9310,7 +9304,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
 	bnxt_free_skbs(bp);
 
 	/* Save ring stats before shutdown */
-	if (bp->bnapi)
+	if (bp->bnapi && irq_re_init)
 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
 	if (irq_re_init) {
 		bnxt_free_irq(bp);
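Note on the bnxt_en hunks above: the driver stops overlaying a 32-bit load plus mask/shift on the response buffer and converts the `__le16 resp_len` field directly. The following userspace sketch (not driver code; the buffer layout is a hypothetical stand-in for a firmware response header) shows why the direct 16-bit conversion is the portable form:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* endian-safe read of a little-endian 16-bit field */
static unsigned int le16_read(const uint8_t *p)
{
	return (unsigned int)p[0] | ((unsigned int)p[1] << 8);
}

int main(void)
{
	/* mock response header: the length 0x0128 (296) is stored
	 * little-endian at byte offset 4 */
	uint8_t resp[8] = { 0, 0, 0, 0, 0x28, 0x01, 0, 0 };
	uint32_t word;

	/* fragile pattern: a 32-bit load plus mask bakes the host's
	 * byte order into the result */
	memcpy(&word, resp + 4, sizeof(word));
	printf("mask/shift read: %u (296 only on little-endian)\n",
	       (unsigned int)(word & 0xffff));

	/* robust pattern: convert exactly the 16-bit field */
	printf("le16 read:       %u (296 everywhere)\n", le16_read(resp + 4));
	return 0;
}
```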
@@ -656,11 +656,6 @@ struct nqe_cn {
 #define HWRM_CMD_TIMEOUT		(bp->hwrm_cmd_timeout)
 #define HWRM_RESET_TIMEOUT		((HWRM_CMD_TIMEOUT) * 4)
 #define HWRM_COREDUMP_TIMEOUT		((HWRM_CMD_TIMEOUT) * 12)
-#define HWRM_RESP_ERR_CODE_MASK		0xffff
-#define HWRM_RESP_LEN_OFFSET		4
-#define HWRM_RESP_LEN_MASK		0xffff0000
-#define HWRM_RESP_LEN_SFT		16
-#define HWRM_RESP_VALID_MASK		0xff000000
 #define BNXT_HWRM_REQ_MAX_SIZE		128
 #define BNXT_HWRM_REQS_PER_PAGE		(BNXT_PAGE_SIZE /	\
 					 BNXT_HWRM_REQ_MAX_SIZE)
@@ -2012,11 +2012,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
 
 	bnxt_hwrm_fw_set_time(bp);
 
-	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
-				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
-				 &index, &item_len, NULL) != 0) {
+	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+				  &index, &item_len, NULL);
+	if (rc) {
 		netdev_err(dev, "PKG update area not created in nvram\n");
-		return -ENOBUFS;
+		return rc;
 	}
 
 	rc = request_firmware(&fw, filename, &dev->dev);
@@ -2914,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	}
 
 	/* Do this here, so we can be verbose early */
-	SET_NETDEV_DEV(net_dev, dev);
+	SET_NETDEV_DEV(net_dev, dev->parent);
 	dev_set_drvdata(dev, net_dev);
 
 	priv = netdev_priv(net_dev);
@@ -4678,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
 			break;
 		}
-		dev_info(dev, "Partner protocol version is %d\n",
-			 crq->version_exchange_rsp.version);
-		if (be16_to_cpu(crq->version_exchange_rsp.version) <
-		    ibmvnic_version)
-			ibmvnic_version =
+		ibmvnic_version =
 			be16_to_cpu(crq->version_exchange_rsp.version);
+		dev_info(dev, "Partner protocol version is %d\n",
+			 ibmvnic_version);
 		send_cap_queries(adapter);
 		break;
 	case QUERY_CAPABILITY_RSP:
@@ -80,7 +80,7 @@ config MLX5_ESWITCH
 
 config MLX5_TC_CT
 	bool "MLX5 TC connection tracking offload support"
-	depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+	depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
 	default y
 	help
 	  Say Y here if you want to support offloading connection tracking rules
@@ -1068,10 +1068,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 
 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
 				   int num_channels);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
-				 u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
-				 u8 cq_period_mode);
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 			       struct mlx5e_params *params);
@@ -369,17 +369,19 @@ enum mlx5e_fec_supported_link_mode {
 	*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
 } while (0)
 
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)	\
-	do {								\
-		u16 *__policy = &(policy);				\
-		bool _write = (write);					\
-									\
-		if (_write && *__policy)				\
-			*__policy = find_first_bit((u_long *)__policy,	\
-						   sizeof(u16) * BITS_PER_BYTE);\
-		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
-		if (!_write && *__policy)				\
-			*__policy = 1 << *__policy;			\
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)	\
+	do {								\
+		unsigned long policy_long;				\
+		u16 *__policy = &(policy);				\
+		bool _write = (write);					\
+									\
+		policy_long = *__policy;				\
+		if (_write && *__policy)				\
+			*__policy = find_first_bit(&policy_long,	\
+						   sizeof(policy_long) * BITS_PER_BYTE);\
+		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
+		if (!_write && *__policy)				\
+			*__policy = 1 << *__policy;			\
 } while (0)
 
 /* get/set FEC admin field for a given speed */
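The FEC hunks above (and the ethtool ones below) fix a bug class worth calling out: find_first_bit() operates on unsigned long words, so casting a u16 or u32 pointer into it reads bytes beyond the variable and scans the wrong bits on big-endian machines. A minimal userspace sketch of the safe pattern; the find_first_bit() stand-in below is illustrative, not the kernel's implementation:

```c
#include <stdint.h>
#include <stdio.h>

/* minimal stand-in for the kernel's find_first_bit() */
static unsigned int find_first_bit(const unsigned long *addr, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (addr[i / (8 * sizeof(long))] &
		    (1UL << (i % (8 * sizeof(long)))))
			return i;
	return size;
}

int main(void)
{
	uint16_t policy = 0x0010;		/* bit 4 set */
	unsigned long policy_long = policy;	/* safe: widen first */

	/* Broken: find_first_bit((unsigned long *)&policy, 16) would read
	 * sizeof(long) bytes, only 2 of which belong to the variable --
	 * out-of-bounds and endian-dependent. Copying into an unsigned
	 * long first, as the fix does, is well-defined everywhere. */
	printf("first bit: %u\n",
	       find_first_bit(&policy_long, 8 * sizeof(policy_long)));
	return 0;
}
```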
@@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	struct dim_cq_moder *rx_moder, *tx_moder;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_channels new_channels = {};
+	bool reset_rx, reset_tx;
 	int err = 0;
-	bool reset;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
 		return -EOPNOTSUPP;
@@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	}
 	/* we are opened */
 
-	reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
-		(!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+	reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+	reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
 
-	if (!reset) {
+	if (!reset_rx && !reset_tx) {
 		mlx5e_set_priv_channels_coalesce(priv, coal);
 		priv->channels.params = new_channels.params;
 		goto out;
 	}
 
+	if (reset_rx) {
+		u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+					  MLX5E_PFLAG_RX_CQE_BASED_MODER);
+
+		mlx5e_reset_rx_moderation(&new_channels.params, mode);
+	}
+	if (reset_tx) {
+		u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+					  MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
+		mlx5e_reset_tx_moderation(&new_channels.params, mode);
+	}
+
 	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 
 out:
@@ -665,11 +678,12 @@ static const u32 pplm_fec_2_ethtool_linkmodes[] = {
 static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 					struct ethtool_link_ksettings *link_ksettings)
 {
-	u_long active_fec = 0;
+	unsigned long active_fec_long;
+	u32 active_fec;
 	u32 bitn;
 	int err;
 
-	err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
+	err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
 	if (err)
 		return (err == -EOPNOTSUPP) ? 0 : err;
 
@@ -682,10 +696,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 	MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
 				      ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
 
+	active_fec_long = active_fec;
 	/* active fec is a bit set, find out which bit is set and
 	 * advertise the corresponding ethtool bit
 	 */
-	bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
+	bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
 	if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
 		__set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
 			  link_ksettings->link_modes.advertising);
@@ -1517,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 fec_configured = 0;
-	u32 fec_active = 0;
+	u16 fec_configured;
+	u32 fec_active;
 	int err;
 
 	err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
@@ -1526,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
 	if (err)
 		return err;
 
-	fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
-						sizeof(u32) * BITS_PER_BYTE);
+	fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
+						sizeof(unsigned long) * BITS_PER_BYTE);
 
 	if (!fecparam->active_fec)
 		return -EOPNOTSUPP;
 
-	fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
-					 sizeof(u16) * BITS_PER_BYTE);
+	fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
+					 sizeof(unsigned long) * BITS_PER_BYTE);
 
 	return 0;
 }
@@ -4716,7 +4716,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
 		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 }
 
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
 {
 	if (params->tx_dim_enabled) {
 		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4725,13 +4725,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 	} else {
 		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
 	}
-
-	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
-			params->tx_cq_moderation.cq_period_mode ==
-			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
 }
 
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
 {
 	if (params->rx_dim_enabled) {
 		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4740,7 +4736,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 	} else {
 		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
 	}
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+	mlx5e_reset_tx_moderation(params, cq_period_mode);
+	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+			params->tx_cq_moderation.cq_period_mode ==
+			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
 
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+	mlx5e_reset_rx_moderation(params, cq_period_mode);
 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
 			params->rx_cq_moderation.cq_period_mode ==
 			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -2068,7 +2068,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
 	flow_rule_match_meta(rule, &match);
 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
-		return -EINVAL;
+		return -EOPNOTSUPP;
 	}
 
 	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
@@ -2076,13 +2076,13 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
 	if (!ingress_dev) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Can't find the ingress port to match on");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	if (ingress_dev != filter_dev) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Can't match on the ingress filter port");
-		return -EINVAL;
+		return -EOPNOTSUPP;
 	}
 
 	return 0;
@@ -3849,10 +3849,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
 				NL_SET_ERR_MSG_MOD(extack,
 						   "devices are not on same switch HW, can't offload forwarding");
-				netdev_warn(priv->netdev,
-					    "devices %s %s not on same switch HW, can't offload forwarding\n",
-					    priv->netdev->name,
-					    out_dev->name);
 				return -EOPNOTSUPP;
 			}
 
@@ -4614,7 +4610,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
 	rpriv->prev_vf_vport_stats = cur_stats;
-	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
+	flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
 			  FLOW_ACTION_HW_STATS_DELAYED);
 }
 
@@ -1549,6 +1549,22 @@ static void shutdown(struct pci_dev *pdev)
 	mlx5_pci_disable_device(dev);
 }
 
+static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+	mlx5_unload_one(dev, false);
+
+	return 0;
+}
+
+static int mlx5_resume(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+	return mlx5_load_one(dev, false);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
 	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
@@ -1592,6 +1608,8 @@ static struct pci_driver mlx5_core_driver = {
 	.id_table	= mlx5_core_pci_table,
 	.probe		= init_one,
 	.remove		= remove_one,
+	.suspend	= mlx5_suspend,
+	.resume		= mlx5_resume,
 	.shutdown	= shutdown,
 	.err_handler	= &mlx5_err_handler,
 	.sriov_configure = mlx5_core_sriov_configure,
@@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
 		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
 		priv->stats[ctx_id].pkts += pkts;
 		priv->stats[ctx_id].bytes += bytes;
-		max_t(u64, priv->stats[ctx_id].used, used);
+		priv->stats[ctx_id].used = max_t(u64, used,
+						 priv->stats[ctx_id].used);
 	}
 }
 
@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 	ahw->diag_cnt = 0;
 	ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
 	if (ret)
-		goto fail_diag_irq;
+		goto fail_mbx_args;
 
 	if (adapter->flags & QLCNIC_MSIX_ENABLED)
 		intrpt_id = ahw->intr_tbl[0].id;
@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 
 done:
 	qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
 	qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
 
 fail_diag_irq:
@@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		ptp_v2 = PTP_TCR_TSVER2ENA;
 		snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-		ts_event_en = PTP_TCR_TSEVNTENA;
+		if (priv->synopsys_id != DWMAC_CORE_5_10)
+			ts_event_en = PTP_TCR_TSEVNTENA;
 		ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 		ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 		ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)},	/* Telit LE910C1-EUX */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)},	/* Telit FN980 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
@@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
 		memcpy(atr_res->gbi, atr_req->gbi, gb_len);
 		r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
 						 gb_len);
-		if (r < 0)
+		if (r < 0) {
+			kfree_skb(skb);
 			return r;
+		}
 	}
 
 	info->dep_info.curr_nfc_dep_pni = 0;
@@ -2047,7 +2047,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
 }
 
 /* HE Operation defines */
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK		0x00000003
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK		0x00000007
 #define IEEE80211_HE_OPERATION_TWT_REQUIRED			0x00000008
 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK		0x00003ff0
 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET		4
@@ -10,7 +10,7 @@
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
 
-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);
 
 /* state of the control session */
 enum pptp_ctrlsess_state {
@@ -31,6 +31,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 {
 	unsigned int gso_type = 0;
 	unsigned int thlen = 0;
+	unsigned int p_off = 0;
 	unsigned int ip_proto;
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
@@ -68,7 +69,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 		if (!skb_partial_csum_set(skb, start, off))
 			return -EINVAL;
 
-		if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
+		p_off = skb_transport_offset(skb) + thlen;
+		if (p_off > skb_headlen(skb))
 			return -EINVAL;
 	} else {
 		/* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -92,23 +94,32 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 				return -EINVAL;
 			}
 
-			if (keys.control.thoff + thlen > skb_headlen(skb) ||
+			p_off = keys.control.thoff + thlen;
+			if (p_off > skb_headlen(skb) ||
 			    keys.basic.ip_proto != ip_proto)
 				return -EINVAL;
 
 			skb_set_transport_header(skb, keys.control.thoff);
+		} else if (gso_type) {
+			p_off = thlen;
+			if (p_off > skb_headlen(skb))
+				return -EINVAL;
 		}
 	}
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-		skb_shinfo(skb)->gso_size = gso_size;
-		skb_shinfo(skb)->gso_type = gso_type;
+		/* Too small packets are not really GSO ones. */
+		if (skb->len - p_off > gso_size) {
+			shinfo->gso_size = gso_size;
+			shinfo->gso_type = gso_type;
 
-		/* Header must be checked, and gso_segs computed. */
-		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-		skb_shinfo(skb)->gso_segs = 0;
+			/* Header must be checked, and gso_segs computed. */
+			shinfo->gso_type |= SKB_GSO_DODGY;
+			shinfo->gso_segs = 0;
+		}
 	}
 
 	return 0;
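The virtio_net change validates untrusted GSO metadata from the guest or device: a packet only keeps its GSO marking when the payload past the transport header actually exceeds the claimed segment size. A sketch of that check with illustrative numbers:

```c
#include <stdbool.h>
#include <stdio.h>

/* p_off = transport header offset + transport header length,
 * as computed in virtio_net_hdr_to_skb() above */
static bool is_real_gso(unsigned int skb_len, unsigned int p_off,
			unsigned int gso_size)
{
	/* too-small packets are not really GSO ones */
	return skb_len - p_off > gso_size;
}

int main(void)
{
	/* 1514-byte frame, headers end at byte 54, claimed 1448-byte segs */
	printf("%d\n", is_real_gso(1514, 54, 1448));	/* 1: keep GSO */
	/* tiny packet claiming GSO: ignore the gso fields entirely */
	printf("%d\n", is_real_gso(100, 54, 1448));	/* 0 */
	return 0;
}
```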
@@ -25,6 +25,7 @@ struct espintcp_ctx {
 	struct espintcp_msg partial;
 	void (*saved_data_ready)(struct sock *sk);
 	void (*saved_write_space)(struct sock *sk);
+	void (*saved_destruct)(struct sock *sk);
 	struct work_struct work;
 	bool tx_running;
 };
@@ -447,6 +447,16 @@ static inline int fib_num_tclassid_users(struct net *net)
 #endif
 int fib_unmerge(struct net *net);
 
+static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
+					  const struct net_device *dev)
+{
+	if (nhc->nhc_dev == dev ||
+	    l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
+		return true;
+
+	return false;
+}
+
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
@@ -479,6 +489,8 @@ void fib_nh_common_release(struct fib_nh_common *nhc);
 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
 void fib_trie_init(void);
 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+			 const struct flowi4 *flp);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
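The new nhc_l3mdev_matches_dev() helper consolidates the two-way device match that several FIB call sites used to open-code. A userspace sketch, with a stubbed master-device lookup standing in for l3mdev_master_ifindex_rcu():

```c
#include <stdbool.h>
#include <stdio.h>

struct net_device { int ifindex; int master_ifindex; };

/* stub: returns the L3 master's ifindex, 0 when there is none */
static int l3mdev_master_ifindex(const struct net_device *dev)
{
	return dev->master_ifindex;
}

static bool nhc_matches_dev(const struct net_device *nhc_dev,
			    const struct net_device *dev)
{
	/* direct match, or the candidate is the nexthop device's master */
	return nhc_dev == dev ||
	       l3mdev_master_ifindex(nhc_dev) == dev->ifindex;
}

int main(void)
{
	struct net_device vrf = { .ifindex = 10 };
	struct net_device eth = { .ifindex = 3, .master_ifindex = 10 };

	printf("%d %d\n",
	       nhc_matches_dev(&eth, &eth),	/* 1: same device */
	       nhc_matches_dev(&eth, &vrf));	/* 1: enslaved to the VRF */
	return 0;
}
```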
@@ -70,6 +70,7 @@ struct nh_grp_entry {
 };
 
 struct nh_group {
+	struct nh_group		*spare; /* spare group for removals */
 	u16			num_nh;
 	bool			mpath;
 	bool			has_v4;
@@ -136,21 +137,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 {
 	unsigned int rc = 1;
 
-	if (nexthop_is_multipath(nh)) {
+	if (nh->is_group) {
 		struct nh_group *nh_grp;
 
 		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
-		rc = nh_grp->num_nh;
+		if (nh_grp->mpath)
+			rc = nh_grp->num_nh;
 	}
 
 	return rc;
 }
 
 static inline
-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
+struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
 {
-	const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
-
 	/* for_nexthops macros in fib_semantics.c grabs a pointer to
 	 * the nexthop before checking nhsel
 	 */
@@ -185,12 +185,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh)
 {
 	const struct nh_info *nhi;
 
-	if (nexthop_is_multipath(nh)) {
-		if (nexthop_num_path(nh) > 1)
-			return false;
-		nh = nexthop_mpath_select(nh, 0);
-		if (!nh)
+	if (nh->is_group) {
+		struct nh_group *nh_grp;
+
+		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+		if (nh_grp->num_nh > 1)
 			return false;
+
+		nh = nh_grp->nh_entries[0].nh;
 	}
 
 	nhi = rcu_dereference_rtnl(nh->nh_info);
@@ -216,16 +218,79 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
 	BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
 	BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
 
-	if (nexthop_is_multipath(nh)) {
-		nh = nexthop_mpath_select(nh, nhsel);
-		if (!nh)
-			return NULL;
+	if (nh->is_group) {
+		struct nh_group *nh_grp;
+
+		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+		if (nh_grp->mpath) {
+			nh = nexthop_mpath_select(nh_grp, nhsel);
+			if (!nh)
+				return NULL;
+		}
 	}
 
 	nhi = rcu_dereference_rtnl(nh->nh_info);
 	return &nhi->fib_nhc;
 }
 
+/* called from fib_table_lookup with rcu_lock */
+static inline
+struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
+					     int fib_flags,
+					     const struct flowi4 *flp,
+					     int *nhsel)
+{
+	struct nh_info *nhi;
+
+	if (nh->is_group) {
+		struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+		int i;
+
+		for (i = 0; i < nhg->num_nh; i++) {
+			struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+			nhi = rcu_dereference(nhe->nh_info);
+			if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+				*nhsel = i;
+				return &nhi->fib_nhc;
+			}
+		}
+	} else {
+		nhi = rcu_dereference(nh->nh_info);
+		if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+			*nhsel = 0;
+			return &nhi->fib_nhc;
+		}
+	}
+
+	return NULL;
+}
+
+static inline bool nexthop_uses_dev(const struct nexthop *nh,
+				    const struct net_device *dev)
+{
+	struct nh_info *nhi;
+
+	if (nh->is_group) {
+		struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+		int i;
+
+		for (i = 0; i < nhg->num_nh; i++) {
+			struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+			nhi = rcu_dereference(nhe->nh_info);
+			if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+				return true;
+		}
+	} else {
+		nhi = rcu_dereference(nh->nh_info);
+		if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+			return true;
+	}
+
+	return false;
+}
+
 static inline unsigned int fib_info_num_path(const struct fib_info *fi)
 {
 	if (unlikely(fi->nh))
@@ -263,8 +328,11 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
 {
 	struct nh_info *nhi;
 
-	if (nexthop_is_multipath(nh)) {
-		nh = nexthop_mpath_select(nh, 0);
+	if (nh->is_group) {
+		struct nh_group *nh_grp;
+
+		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+		nh = nexthop_mpath_select(nh_grp, 0);
 		if (!nh)
 			return NULL;
 	}
@@ -135,6 +135,8 @@ struct tls_sw_context_tx {
 	struct tls_rec *open_rec;
 	struct list_head tx_list;
 	atomic_t encrypt_pending;
+	/* protect crypto_wait with encrypt_pending */
+	spinlock_t encrypt_compl_lock;
 	int async_notify;
 	u8 async_capable:1;
@@ -155,6 +157,8 @@ struct tls_sw_context_rx {
 	u8 async_capable:1;
 	u8 decrypted:1;
 	atomic_t decrypt_pending;
+	/* protect crypto_wait with decrypt_pending*/
+	spinlock_t decrypt_compl_lock;
 	bool async_notify;
 };
 
@@ -304,7 +304,7 @@ enum xfrm_attr_type_t {
 	XFRMA_PROTO,		/* __u8 */
 	XFRMA_ADDRESS_FILTER,	/* struct xfrm_address_filter */
 	XFRMA_PAD,
-	XFRMA_OFFLOAD_DEV,	/* struct xfrm_state_offload */
+	XFRMA_OFFLOAD_DEV,	/* struct xfrm_user_offload */
 	XFRMA_SET_MARK,		/* __u32 */
 	XFRMA_SET_MARK_MASK,	/* __u32 */
 	XFRMA_IF_ID,		/* __u32 */
@@ -1168,14 +1168,14 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
 	 * but must be positive otherwise set to worse case bounds
	 * and refine later from tnum.
 	 */
-	if (reg->s32_min_value > 0)
-		reg->smin_value = reg->s32_min_value;
-	else
-		reg->smin_value = 0;
-	if (reg->s32_max_value > 0)
+	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
 		reg->smax_value = reg->s32_max_value;
 	else
 		reg->smax_value = U32_MAX;
+	if (reg->s32_min_value >= 0)
+		reg->smin_value = reg->s32_min_value;
+	else
+		reg->smin_value = 0;
 }
 
 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
@@ -10428,22 +10428,13 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
 }
 #define SECURITY_PREFIX "security_"
 
-static int check_attach_modify_return(struct bpf_verifier_env *env)
+static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
 {
-	struct bpf_prog *prog = env->prog;
-	unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
-
-	/* This is expected to be cleaned up in the future with the KRSI effort
-	 * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h.
-	 */
 	if (within_error_injection_list(addr) ||
 	    !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
 		     sizeof(SECURITY_PREFIX) - 1))
 		return 0;
 
-	verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
-		prog->aux->attach_btf_id, prog->aux->attach_func_name);
-
 	return -EINVAL;
 }
 
@@ -10654,11 +10645,18 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 			goto out;
 		}
 	}
+
+	if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
+		ret = check_attach_modify_return(prog, addr);
+		if (ret)
+			verbose(env, "%s() is not modifiable\n",
+				prog->aux->attach_func_name);
+	}
+
+	if (ret)
+		goto out;
 	tr->func.addr = (void *)addr;
 	prog->aux->trampoline = tr;
-
-	if (prog->expected_attach_type == BPF_MODIFY_RETURN)
-		ret = check_attach_modify_return(env);
 out:
 	mutex_unlock(&tr->mutex);
 	if (ret)
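The verifier fix above changes how 32-bit signed bounds propagate into 64-bit bounds: smax may only be taken from s32_max when the whole 32-bit range is known non-negative, because a negative 32-bit value is a very large number when reinterpreted as unsigned 64-bit. A standalone sketch of the corrected transfer:

```c
#include <stdint.h>
#include <stdio.h>

struct reg_state {
	int32_t s32_min, s32_max;	/* known 32-bit signed bounds */
	int64_t smin, smax;		/* derived 64-bit signed bounds */
};

static void assign_32_into_64(struct reg_state *r)
{
	/* smax is safe only if the entire 32-bit range is non-negative */
	if (r->s32_min >= 0 && r->s32_max >= 0)
		r->smax = r->s32_max;
	else
		r->smax = UINT32_MAX;	/* worst case, refined later */
	if (r->s32_min >= 0)
		r->smin = r->s32_min;
	else
		r->smin = 0;
}

int main(void)
{
	struct reg_state r = { .s32_min = -1, .s32_max = 5 };

	assign_32_into_64(&r);
	/* -1 as a u32 is 0xffffffff, so smax must widen to U32_MAX */
	printf("smin=%lld smax=%lld\n",
	       (long long)r.smin, (long long)r.smax);
	return 0;
}
```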
@@ -2413,7 +2413,8 @@ void br_multicast_uninit_stats(struct net_bridge *br)
 	free_percpu(br->mcast_stats);
 }
 
-static void mcast_stats_add_dir(u64 *dst, u64 *src)
+/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
+static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
 {
 	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
 	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
@@ -31,6 +31,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
 	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
 	eth->h_proto = eth_hdr(oldskb)->h_proto;
 	skb_pull(nskb, ETH_HLEN);
+
+	if (skb_vlan_tag_present(oldskb)) {
+		u16 vid = skb_vlan_tag_get(oldskb);
+
+		__vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
+	}
 }
 
 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
@@ -1082,8 +1082,8 @@ static void neigh_timer_handler(struct timer_list *t)
 	}
 
 	if (neigh->nud_state & NUD_IN_TIMER) {
-		if (time_before(next, jiffies + HZ/2))
-			next = jiffies + HZ/2;
+		if (time_before(next, jiffies + HZ/100))
+			next = jiffies + HZ/100;
 		if (!mod_timer(&neigh->timer, next))
 			neigh_hold(neigh);
 	}
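The neigh change shrinks the floor applied when re-arming the retransmit timer from HZ/2 to HZ/100, so that sub-half-second ARP retransmit intervals are actually honored instead of being stretched. A sketch with an illustrative tick rate:

```c
#include <stdio.h>

#define HZ 1000	/* illustrative tick rate, not a kernel constant here */

static unsigned long clamp_next(unsigned long next, unsigned long jiffies)
{
	/* was HZ/2: any timer sooner than half a second got pushed out */
	if (next < jiffies + HZ / 100)
		next = jiffies + HZ / 100;
	return next;
}

int main(void)
{
	/* a 100ms retransmit (HZ/10) is now honored, not stretched */
	printf("%lu\n", clamp_next(1000 + HZ / 10, 1000));	/* 1100 */
	return 0;
}
```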
@@ -1736,6 +1736,7 @@ int dsa_slave_create(struct dsa_port *port)
 	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
 		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	slave_dev->hw_features |= NETIF_F_HW_TC;
+	slave_dev->features |= NETIF_F_LLTX;
 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
 	if (!IS_ERR_OR_NULL(port->mac))
 		ether_addr_copy(slave_dev->dev_addr, port->mac);
@@ -276,6 +276,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
 	err = devinet_sysctl_register(in_dev);
 	if (err) {
 		in_dev->dead = 1;
+		neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 		in_dev_put(in_dev);
 		in_dev = NULL;
 		goto out;
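The devinet fix follows the standard error-unwind rule: every resource acquired before the failing step must be released on the error path, in reverse order of acquisition. A userspace sketch of the shape of the fix; the allocation calls and struct are illustrative stand-ins, not the kernel's:

```c
#include <stdlib.h>

struct in_dev_sketch {
	void *arp_parms;
	void *sysctl;
};

static struct in_dev_sketch *inetdev_init_sketch(void)
{
	struct in_dev_sketch *in_dev = calloc(1, sizeof(*in_dev));

	if (!in_dev)
		return NULL;
	in_dev->arp_parms = malloc(64);		/* step 1 */
	if (!in_dev->arp_parms)
		goto err_free;
	in_dev->sysctl = malloc(64);		/* step 2: may fail */
	if (!in_dev->sysctl)
		goto err_release_parms;		/* must undo step 1 too */
	return in_dev;

err_release_parms:
	free(in_dev->arp_parms);	/* counterpart of the added
					 * neigh_parms_release() call */
err_free:
	free(in_dev);
	return NULL;
}

int main(void)
{
	struct in_dev_sketch *d = inetdev_init_sketch();

	if (d) {
		free(d->sysctl);
		free(d->arp_parms);
		free(d);
	}
	return 0;
}
```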
@@ -63,10 +63,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
 		sp->olen++;
 
 		xo = xfrm_offload(skb);
-		if (!xo) {
-			xfrm_state_put(x);
+		if (!xo)
 			goto out_reset;
-		}
 	}
 
 	xo->flags |= XFRM_GRO;
@@ -139,19 +137,27 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
 	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	const struct net_offload *ops;
-	int proto = xo->proto;
+	u8 proto = xo->proto;
 
 	skb->transport_header += x->props.header_len;
 
-	if (proto == IPPROTO_BEETPH) {
-		struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
+	if (x->sel.family != AF_INET6) {
+		if (proto == IPPROTO_BEETPH) {
+			struct ip_beet_phdr *ph =
+				(struct ip_beet_phdr *)skb->data;
 
-		skb->transport_header += ph->hdrlen * 8;
-		proto = ph->nexthdr;
-	} else if (x->sel.family != AF_INET6) {
-		skb->transport_header -= IPV4_BEET_PHMAXLEN;
-	} else if (proto == IPPROTO_TCP) {
-		skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+			skb->transport_header += ph->hdrlen * 8;
+			proto = ph->nexthdr;
+		} else {
+			skb->transport_header -= IPV4_BEET_PHMAXLEN;
+		}
+	} else {
+		__be16 frag;
+
+		skb->transport_header +=
+			ipv6_skip_exthdr(skb, 0, &proto, &frag);
+		if (proto == IPPROTO_TCP)
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 	}
 
 	__skb_pull(skb, skb_transport_offset(skb));
@@ -309,17 +309,18 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
 {
 	bool dev_match = false;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	int ret;
+	if (unlikely(fi->nh)) {
+		dev_match = nexthop_uses_dev(fi->nh, dev);
+	} else {
+		int ret;
 
-	for (ret = 0; ret < fib_info_num_path(fi); ret++) {
-		const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
+		for (ret = 0; ret < fib_info_num_path(fi); ret++) {
+			const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
 
-		if (nhc->nhc_dev == dev) {
-			dev_match = true;
-			break;
-		} else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) {
-			dev_match = true;
-			break;
+			if (nhc_l3mdev_matches_dev(nhc, dev)) {
+				dev_match = true;
+				break;
+			}
 		}
 	}
 #else
@@ -1371,6 +1371,26 @@ static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
 	return (key ^ prefix) & (prefix | -prefix);
 }
 
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+			 const struct flowi4 *flp)
+{
+	if (nhc->nhc_flags & RTNH_F_DEAD)
+		return false;
+
+	if (ip_ignore_linkdown(nhc->nhc_dev) &&
+	    nhc->nhc_flags & RTNH_F_LINKDOWN &&
+	    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
+		return false;
+
+	if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
+		if (flp->flowi4_oif &&
+		    flp->flowi4_oif != nhc->nhc_oif)
+			return false;
+	}
+
+	return true;
+}
+
 /* should be called with rcu_read_lock */
 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 		     struct fib_result *res, int fib_flags)
@@ -1503,6 +1523,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 	/* Step 3: Process the leaf, if that fails fall back to backtracing */
 	hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
 		struct fib_info *fi = fa->fa_info;
+		struct fib_nh_common *nhc;
 		int nhsel, err;
 
 		if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
@@ -1528,26 +1549,25 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 		if (fi->fib_flags & RTNH_F_DEAD)
 			continue;
 
-		if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) {
-			err = fib_props[RTN_BLACKHOLE].error;
-			goto out_reject;
+		if (unlikely(fi->nh)) {
+			if (nexthop_is_blackhole(fi->nh)) {
+				err = fib_props[RTN_BLACKHOLE].error;
+				goto out_reject;
+			}
+
+			nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
+						     &nhsel);
+			if (nhc)
+				goto set_result;
+			goto miss;
 		}
 
 		for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
-			struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+			nhc = fib_info_nhc(fi, nhsel);
 
-			if (nhc->nhc_flags & RTNH_F_DEAD)
+			if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
 				continue;
-			if (ip_ignore_linkdown(nhc->nhc_dev) &&
-			    nhc->nhc_flags & RTNH_F_LINKDOWN &&
-			    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
-				continue;
-			if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
-				if (flp->flowi4_oif &&
-				    flp->flowi4_oif != nhc->nhc_oif)
-					continue;
-			}
-
+set_result:
 			if (!(fib_flags & FIB_LOOKUP_NOREF))
 				refcount_inc(&fi->fib_clntref);
 
@@ -1568,6 +1588,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 			return err;
 		}
 	}
+miss:
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 	this_cpu_inc(stats->semantic_match_miss);
 #endif
@@ -93,7 +93,28 @@ static int vti_rcv_proto(struct sk_buff *skb)
 
 static int vti_rcv_tunnel(struct sk_buff *skb)
 {
-	return vti_rcv(skb, ip_hdr(skb)->saddr, true);
+	struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
+	const struct iphdr *iph = ip_hdr(skb);
+	struct ip_tunnel *tunnel;
+
+	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+				  iph->saddr, iph->daddr, 0);
+	if (tunnel) {
+		struct tnl_ptk_info tpi = {
+			.proto = htons(ETH_P_IP),
+		};
+
+		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+			goto drop;
+		if (iptunnel_pull_header(skb, 0, tpi.proto, false))
+			goto drop;
+		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
+	}
+
+	return -EINVAL;
+drop:
+	kfree_skb(skb);
+	return 0;
 }
 
 static int vti_rcv_cb(struct sk_buff *skb, int err)
@@ -166,8 +166,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
 		break;
 	default:
 		pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
-			 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
-					       pptp_msg_name[0]);
+			 pptp_msg_name(msg));
 		fallthrough;
 	case PPTP_SET_LINK_INFO:
 		/* only need to NAT in case PAC is behind NAT box */
@@ -268,9 +267,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
 		pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
 		break;
 	default:
-		pr_debug("unknown inbound packet %s\n",
-			 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
-					       pptp_msg_name[0]);
+		pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
 		fallthrough;
 	case PPTP_START_SESSION_REQUEST:
 	case PPTP_START_SESSION_REPLY:
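The conntrack/NAT pptp hunks replace direct indexing of a message-name table with a bounds-checked accessor, since the message type comes straight off the wire. A userspace sketch of the pattern; the table contents are illustrative:

```c
#include <stdio.h>

static const char * const msg_name[] = {
	"UNKNOWN", "START_SESSION_REQUEST", "START_SESSION_REPLY",
	"STOP_SESSION_REQUEST", "STOP_SESSION_REPLY",
};
#define MSG_MAX 4

static const char *pptp_msg_name(unsigned int msg)
{
	/* never index the table with an unchecked wire value */
	if (msg > MSG_MAX)
		return msg_name[0];
	return msg_name[msg];
}

int main(void)
{
	printf("%s\n", pptp_msg_name(2));	/* START_SESSION_REPLY */
	printf("%s\n", pptp_msg_name(999));	/* UNKNOWN, no overflow */
	return 0;
}
```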
@@ -63,9 +63,16 @@ static void nexthop_free_mpath(struct nexthop *nh)
 	int i;
 
 	nhg = rcu_dereference_raw(nh->nh_grp);
-	for (i = 0; i < nhg->num_nh; ++i)
-		WARN_ON(nhg->nh_entries[i].nh);
+	for (i = 0; i < nhg->num_nh; ++i) {
+		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+		WARN_ON(!list_empty(&nhge->nh_list));
+		nexthop_put(nhge->nh);
+	}
+
+	WARN_ON(nhg->spare == nhg);
 
+	kfree(nhg->spare);
 	kfree(nhg);
 }
 
@@ -694,41 +701,56 @@ static void nh_group_rebalance(struct nh_group *nhg)
 	}
 }
 
-static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
-				struct nh_group *nhg,
+static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
 				struct nl_info *nlinfo)
 {
+	struct nh_grp_entry *nhges, *new_nhges;
+	struct nexthop *nhp = nhge->nh_parent;
 	struct nexthop *nh = nhge->nh;
-	struct nh_grp_entry *nhges;
-	bool found = false;
-	int i;
+	struct nh_group *nhg, *newg;
+	int i, j;
 
 	WARN_ON(!nh);
 
-	nhges = nhg->nh_entries;
-	for (i = 0; i < nhg->num_nh; ++i) {
-		if (found) {
-			nhges[i-1].nh = nhges[i].nh;
-			nhges[i-1].weight = nhges[i].weight;
-			list_del(&nhges[i].nh_list);
-			list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
-		} else if (nhg->nh_entries[i].nh == nh) {
-			found = true;
-		}
+	nhg = rtnl_dereference(nhp->nh_grp);
+	newg = nhg->spare;
+
+	/* last entry, keep it visible and remove the parent */
+	if (nhg->num_nh == 1) {
+		remove_nexthop(net, nhp, nlinfo);
+		return;
 	}
 
-	if (WARN_ON(!found))
-		return;
+	newg->has_v4 = nhg->has_v4;
+	newg->mpath = nhg->mpath;
+	newg->num_nh = nhg->num_nh;
 
-	nhg->num_nh--;
-	nhg->nh_entries[nhg->num_nh].nh = NULL;
+	/* copy old entries to new except the one getting removed */
+	nhges = nhg->nh_entries;
+	new_nhges = newg->nh_entries;
+	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
+		/* current nexthop getting removed */
+		if (nhg->nh_entries[i].nh == nh) {
+			newg->num_nh--;
+			continue;
+		}
 
-	nh_group_rebalance(nhg);
+		list_del(&nhges[i].nh_list);
+		new_nhges[j].nh_parent = nhges[i].nh_parent;
+		new_nhges[j].nh = nhges[i].nh;
+		new_nhges[j].weight = nhges[i].weight;
+		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
+		j++;
+	}
 
-	nexthop_put(nh);
+	nh_group_rebalance(newg);
+	rcu_assign_pointer(nhp->nh_grp, newg);
+
+	list_del(&nhge->nh_list);
+	nexthop_put(nhge->nh);
 
 	if (nlinfo)
-		nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
+		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
 }
 
 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
@@ -736,17 +758,11 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
 {
 	struct nh_grp_entry *nhge, *tmp;
 
-	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
-		struct nh_group *nhg;
+	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
+		remove_nh_grp_entry(net, nhge, nlinfo);
 
-		list_del(&nhge->nh_list);
-		nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
-		remove_nh_grp_entry(nhge, nhg, nlinfo);
-
-		/* if this group has no more entries then remove it */
-		if (!nhg->num_nh)
-			remove_nexthop(net, nhge->nh_parent, nlinfo);
-	}
+	/* make sure all see the newly published array before releasing rtnl */
+	synchronize_rcu();
 }
 
 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
@@ -760,10 +776,7 @@ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
 		if (WARN_ON(!nhge->nh))
 			continue;
 
-		list_del(&nhge->nh_list);
-		nexthop_put(nhge->nh);
-		nhge->nh = NULL;
-		nhg->num_nh--;
+		list_del_init(&nhge->nh_list);
 	}
 }
 
@@ -1086,6 +1099,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
 {
 	struct nlattr *grps_attr = cfg->nh_grp;
 	struct nexthop_grp *entry = nla_data(grps_attr);
+	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
 	struct nh_group *nhg;
 	struct nexthop *nh;
 	int i;
@@ -1096,12 +1110,21 @@ static struct nexthop *nexthop_create_group(struct net *net,
 
 	nh->is_group = 1;
 
-	nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
+	nhg = nexthop_grp_alloc(num_nh);
 	if (!nhg) {
 		kfree(nh);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/* spare group used for removals */
+	nhg->spare = nexthop_grp_alloc(num_nh);
+	if (!nhg) {
+		kfree(nhg);
+		kfree(nh);
+		return NULL;
+	}
+	nhg->spare->spare = nhg;
+
 	for (i = 0; i < nhg->num_nh; ++i) {
 		struct nexthop *nhe;
 		struct nh_info *nhi;
@@ -1133,6 +1156,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
 	for (; i >= 0; --i)
 		nexthop_put(nhg->nh_entries[i].nh);
 
+	kfree(nhg->spare);
 	kfree(nhg);
 	kfree(nh);
 
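The nexthop hunks above stop mutating a group that RCU readers may still be walking: removals now fill in a preallocated "spare" twin, publish it with rcu_assign_pointer(), and wait for a grace period before the old array can be reused. A userspace sketch of the pattern, with the RCU primitives stubbed out (types and names are illustrative, not the kernel's):

```c
#include <stdio.h>

struct grp {
	struct grp *spare;	/* preallocated twin used for removals */
	int num;
	int entries[4];
};

/* stand-ins for the kernel primitives; real code must use RCU */
#define rcu_assign_pointer(p, v) ((p) = (v))
static void synchronize_rcu(void) { /* wait for readers: stubbed */ }

static struct grp *current_grp;

static void remove_entry(int victim)
{
	struct grp *old = current_grp, *new = old->spare;
	int i, j;

	new->num = old->num;
	for (i = 0, j = 0; i < old->num; i++) {
		if (old->entries[i] == victim) {
			new->num--;
			continue;	/* skip the entry being removed */
		}
		new->entries[j++] = old->entries[i];
	}
	new->spare = old;		/* old becomes the next spare */
	rcu_assign_pointer(current_grp, new);
	synchronize_rcu();		/* all readers now see 'new' */
}

int main(void)
{
	struct grp a = { .num = 3, .entries = { 1, 2, 3 } }, b;

	a.spare = &b;
	b.spare = &a;
	current_grp = &a;
	remove_entry(2);
	for (int i = 0; i < current_grp->num; i++)
		printf("%d ", current_grp->entries[i]);	/* 1 3 */
	printf("\n");
	return 0;
}
```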
@@ -85,10 +85,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
 		sp->olen++;
 
 		xo = xfrm_offload(skb);
-		if (!xo) {
-			xfrm_state_put(x);
+		if (!xo)
 			goto out_reset;
-		}
 	}
 
 	xo->flags |= XFRM_GRO;
@@ -123,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 	struct ip_esp_hdr *esph;
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct xfrm_offload *xo = xfrm_offload(skb);
-	int proto = iph->nexthdr;
+	u8 proto = iph->nexthdr;
 
 	skb_push(skb, -skb_network_offset(skb));
 
+	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
+		__be16 frag;
+
+		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
+	}
+
 	esph = ip_esp_hdr(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;
 
@@ -166,23 +171,31 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
 	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	const struct net_offload *ops;
-	int proto = xo->proto;
+	u8 proto = xo->proto;
 
 	skb->transport_header += x->props.header_len;
 
-	if (proto == IPPROTO_BEETPH) {
-		struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
-
-		skb->transport_header += ph->hdrlen * 8;
-		proto = ph->nexthdr;
-	}
-
 	if (x->sel.family != AF_INET6) {
 		skb->transport_header -=
 			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));
 
+		if (proto == IPPROTO_BEETPH) {
+			struct ip_beet_phdr *ph =
+				(struct ip_beet_phdr *)skb->data;
+
+			skb->transport_header += ph->hdrlen * 8;
+			proto = ph->nexthdr;
+		} else {
+			skb->transport_header -= IPV4_BEET_PHMAXLEN;
+		}
+
 		if (proto == IPPROTO_TCP)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+	} else {
+		__be16 frag;
+
+		skb->transport_header +=
+			ipv6_skip_exthdr(skb, 0, &proto, &frag);
 	}
 
 	__skb_pull(skb, skb_transport_offset(skb));
@@ -1458,6 +1458,9 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
 	if (sk->sk_type != SOCK_DGRAM)
 		return -EPROTONOSUPPORT;
 
+	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+		return -EPROTONOSUPPORT;
+
 	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
 	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
 		return -EPROTONOSUPPORT;
@@ -20,7 +20,6 @@
 #include <net/icmp.h>
 #include <net/udp.h>
 #include <net/inet_common.h>
-#include <net/inet_hashtables.h>
 #include <net/tcp_states.h>
 #include <net/protocol.h>
 #include <net/xfrm.h>
@@ -209,15 +208,31 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	return 0;
 }
 
+static int l2tp_ip_hash(struct sock *sk)
+{
+	if (sk_unhashed(sk)) {
+		write_lock_bh(&l2tp_ip_lock);
+		sk_add_node(sk, &l2tp_ip_table);
+		write_unlock_bh(&l2tp_ip_lock);
+	}
+	return 0;
+}
+
+static void l2tp_ip_unhash(struct sock *sk)
+{
+	if (sk_unhashed(sk))
+		return;
+	write_lock_bh(&l2tp_ip_lock);
+	sk_del_node_init(sk);
+	write_unlock_bh(&l2tp_ip_lock);
+}
+
 static int l2tp_ip_open(struct sock *sk)
 {
 	/* Prevent autobind. We don't have ports. */
 	inet_sk(sk)->inet_num = IPPROTO_L2TP;
 
-	write_lock_bh(&l2tp_ip_lock);
-	sk_add_node(sk, &l2tp_ip_table);
-	write_unlock_bh(&l2tp_ip_lock);
-
+	l2tp_ip_hash(sk);
 	return 0;
 }
 
@@ -594,8 +609,8 @@ static struct proto l2tp_ip_prot = {
 	.sendmsg	   = l2tp_ip_sendmsg,
 	.recvmsg	   = l2tp_ip_recvmsg,
 	.backlog_rcv	   = l2tp_ip_backlog_recv,
-	.hash		   = inet_hash,
-	.unhash		   = inet_unhash,
+	.hash		   = l2tp_ip_hash,
+	.unhash		   = l2tp_ip_unhash,
 	.obj_size	   = sizeof(struct l2tp_ip_sock),
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_ip_setsockopt,
@@ -20,8 +20,6 @@
 #include <net/icmp.h>
 #include <net/udp.h>
 #include <net/inet_common.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
 #include <net/tcp_states.h>
 #include <net/protocol.h>
 #include <net/xfrm.h>
@@ -222,15 +220,31 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	return 0;
 }
 
+static int l2tp_ip6_hash(struct sock *sk)
+{
+	if (sk_unhashed(sk)) {
+		write_lock_bh(&l2tp_ip6_lock);
+		sk_add_node(sk, &l2tp_ip6_table);
+		write_unlock_bh(&l2tp_ip6_lock);
+	}
+	return 0;
+}
+
+static void l2tp_ip6_unhash(struct sock *sk)
+{
+	if (sk_unhashed(sk))
+		return;
+	write_lock_bh(&l2tp_ip6_lock);
+	sk_del_node_init(sk);
+	write_unlock_bh(&l2tp_ip6_lock);
+}
+
 static int l2tp_ip6_open(struct sock *sk)
 {
 	/* Prevent autobind. We don't have ports. */
 	inet_sk(sk)->inet_num = IPPROTO_L2TP;
 
-	write_lock_bh(&l2tp_ip6_lock);
-	sk_add_node(sk, &l2tp_ip6_table);
-	write_unlock_bh(&l2tp_ip6_lock);
-
+	l2tp_ip6_hash(sk);
 	return 0;
 }
 
@@ -728,8 +742,8 @@ static struct proto l2tp_ip6_prot = {
 	.sendmsg	   = l2tp_ip6_sendmsg,
 	.recvmsg	   = l2tp_ip6_recvmsg,
 	.backlog_rcv	   = l2tp_ip6_backlog_recv,
-	.hash		   = inet6_hash,
-	.unhash		   = inet_unhash,
+	.hash		   = l2tp_ip6_hash,
+	.unhash		   = l2tp_ip6_unhash,
 	.obj_size	   = sizeof(struct l2tp_ip6_sock),
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_ipv6_setsockopt,
@@ -1103,7 +1103,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
 	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
 			       target_flags, mpath->dst, mpath->sn, da, 0,
 			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);
+
+	spin_lock_bh(&mpath->state_lock);
+	if (mpath->flags & MESH_PATH_DELETED) {
+		spin_unlock_bh(&mpath->state_lock);
+		goto enddiscovery;
+	}
 	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+	spin_unlock_bh(&mpath->state_lock);
 
 enddiscovery:
 	rcu_read_unlock();
@@ -954,7 +954,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,

        pr_debug("block timeout %ld", timeo);
        mptcp_wait_data(sk, &timeo);
        if (unlikely(__mptcp_tcp_fallback(msk)))
        ssock = __mptcp_tcp_fallback(msk);
        if (unlikely(ssock))
            goto fallback;
    }

@@ -1262,11 +1263,14 @@ static void mptcp_close(struct sock *sk, long timeout)

    lock_sock(sk);

    mptcp_token_destroy(msk->token);
    inet_sk_state_store(sk, TCP_CLOSE);

    __mptcp_flush_join_list(msk);

    /* be sure to always acquire the join list lock, to sync vs
     * mptcp_finish_join().
     */
    spin_lock_bh(&msk->join_list_lock);
    list_splice_tail_init(&msk->join_list, &msk->conn_list);
    spin_unlock_bh(&msk->join_list_lock);
    list_splice_init(&msk->conn_list, &conn_list);

    data_fin_tx_seq = msk->write_seq;

@@ -1456,6 +1460,7 @@ static void mptcp_destroy(struct sock *sk)
{
    struct mptcp_sock *msk = mptcp_sk(sk);

    mptcp_token_destroy(msk->token);
    if (msk->cached_ext)
        __skb_ext_put(msk->cached_ext);

@@ -1622,22 +1627,30 @@ bool mptcp_finish_join(struct sock *sk)
    if (!msk->pm.server_side)
        return true;

    /* passive connection, attach to msk socket */
    if (!mptcp_pm_allow_new_subflow(msk))
        return false;

    /* active connections are already on conn_list, and we can't acquire
     * msk lock here.
     * use the join list lock as synchronization point and double-check
     * msk status to avoid racing with mptcp_close()
     */
    spin_lock_bh(&msk->join_list_lock);
    ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
    if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
        list_add_tail(&subflow->node, &msk->join_list);
    spin_unlock_bh(&msk->join_list_lock);
    if (!ret)
        return false;

    /* attach to msk socket only after we are sure he will deal with us
     * at close time
     */
    parent_sock = READ_ONCE(parent->sk_socket);
    if (parent_sock && !sk->sk_socket)
        mptcp_sock_graft(sk, parent_sock);

    ret = mptcp_pm_allow_new_subflow(msk);
    if (ret) {
        subflow->map_seq = msk->ack_seq;

        /* active connections are already on conn_list */
        spin_lock_bh(&msk->join_list_lock);
        if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
            list_add_tail(&subflow->node, &msk->join_list);
        spin_unlock_bh(&msk->join_list_lock);
    }
    return ret;
    subflow->map_seq = msk->ack_seq;
    return true;
}

bool mptcp_sk_is_subflow(const struct sock *sk)

@@ -1711,6 +1724,14 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
    int err;

    lock_sock(sock->sk);
    if (sock->state != SS_UNCONNECTED && msk->subflow) {
        /* pending connection or invalid state, let existing subflow
         * cope with that
         */
        ssock = msk->subflow;
        goto do_connect;
    }

    ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
    if (IS_ERR(ssock)) {
        err = PTR_ERR(ssock);

@@ -1725,9 +1746,17 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
    mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

do_connect:
    err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
    inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
    mptcp_copy_inaddrs(sock->sk, ssock->sk);
    sock->state = ssock->state;

    /* on successful connect, the msk state will be moved to established by
     * subflow_finish_connect()
     */
    if (!err || err == EINPROGRESS)
        mptcp_copy_inaddrs(sock->sk, ssock->sk);
    else
        inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
    release_sock(sock->sk);
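The MP_JOIN race fix uses the join-list spinlock as the single synchronization point: close() flips the state and harvests the list under the lock, and a joining subflow re-checks the parent's state under the same lock before linking itself, so no subflow can be added after close has swept the list. A compact userspace analogue of that double-check (illustrative names; compilable fragment):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *next; };

    struct msk_like {
        pthread_mutex_t join_lock;
        struct node *join_list;     /* singly linked, head insert */
        bool established;           /* stand-in for TCP_ESTABLISHED */
    };

    /* Joining side: succeed only if the parent is still established,
     * checked under the same lock that close() takes. */
    static bool finish_join(struct msk_like *msk, struct node *subflow)
    {
        bool ok;

        pthread_mutex_lock(&msk->join_lock);
        ok = msk->established;
        if (ok) {
            subflow->next = msk->join_list;
            msk->join_list = subflow;
        }
        pthread_mutex_unlock(&msk->join_lock);
        return ok;
    }

    /* Closing side: flip the state and harvest the list atomically,
     * so no joiner can slip in afterwards. */
    static struct node *close_harvest(struct msk_like *msk)
    {
        struct node *list;

        pthread_mutex_lock(&msk->join_lock);
        msk->established = false;
        list = msk->join_list;
        msk->join_list = NULL;
        pthread_mutex_unlock(&msk->join_lock);
        return list;
    }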
@@ -59,7 +59,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
    /* Don't lookup sub-counters at all */
    opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
    if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
        opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
    opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
    list_for_each_entry_rcu(e, &map->members, list) {
        ret = ip_set_test(e->id, skb, par, opt);
        if (ret <= 0)
@@ -2016,22 +2016,18 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
    nf_conntrack_get(skb_nfct(nskb));
}

static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo)
{
    struct nf_conntrack_tuple_hash *h;
    struct nf_conntrack_tuple tuple;
    enum ip_conntrack_info ctinfo;
    struct nf_nat_hook *nat_hook;
    unsigned int status;
    struct nf_conn *ct;
    int dataoff;
    u16 l3num;
    u8 l4num;

    ct = nf_ct_get(skb, &ctinfo);
    if (!ct || nf_ct_is_confirmed(ct))
        return 0;

    l3num = nf_ct_l3num(ct);

    dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);

@@ -2088,6 +2084,76 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
    return 0;
}

/* This packet is coming from userspace via nf_queue, complete the packet
 * processing after the helper invocation in nf_confirm().
 */
static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
                               enum ip_conntrack_info ctinfo)
{
    const struct nf_conntrack_helper *helper;
    const struct nf_conn_help *help;
    int protoff;

    help = nfct_help(ct);
    if (!help)
        return 0;

    helper = rcu_dereference(help->helper);
    if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
        return 0;

    switch (nf_ct_l3num(ct)) {
    case NFPROTO_IPV4:
        protoff = skb_network_offset(skb) + ip_hdrlen(skb);
        break;
#if IS_ENABLED(CONFIG_IPV6)
    case NFPROTO_IPV6: {
        __be16 frag_off;
        u8 pnum;

        pnum = ipv6_hdr(skb)->nexthdr;
        protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
                                   &frag_off);
        if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
            return 0;
        break;
    }
#endif
    default:
        return 0;
    }

    if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
        !nf_is_loopback_packet(skb)) {
        if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
            NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
            return -1;
        }
    }

    /* We've seen it coming out the other side: confirm it */
    return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
}

static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
    enum ip_conntrack_info ctinfo;
    struct nf_conn *ct;
    int err;

    ct = nf_ct_get(skb, &ctinfo);
    if (!ct)
        return 0;

    if (!nf_ct_is_confirmed(ct)) {
        err = __nf_conntrack_update(net, skb, ct, ctinfo);
        if (err < 0)
            return err;
    }

    return nf_confirm_cthelper(skb, ct, ctinfo);
}

static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                                       const struct sk_buff *skb)
{
@@ -72,24 +72,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);

#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
/* PptpControlMessageType names */
const char *const pptp_msg_name[] = {
    "UNKNOWN_MESSAGE",
    "START_SESSION_REQUEST",
    "START_SESSION_REPLY",
    "STOP_SESSION_REQUEST",
    "STOP_SESSION_REPLY",
    "ECHO_REQUEST",
    "ECHO_REPLY",
    "OUT_CALL_REQUEST",
    "OUT_CALL_REPLY",
    "IN_CALL_REQUEST",
    "IN_CALL_REPLY",
    "IN_CALL_CONNECT",
    "CALL_CLEAR_REQUEST",
    "CALL_DISCONNECT_NOTIFY",
    "WAN_ERROR_NOTIFY",
    "SET_LINK_INFO"
static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
    [0]                           = "UNKNOWN_MESSAGE",
    [PPTP_START_SESSION_REQUEST]  = "START_SESSION_REQUEST",
    [PPTP_START_SESSION_REPLY]    = "START_SESSION_REPLY",
    [PPTP_STOP_SESSION_REQUEST]   = "STOP_SESSION_REQUEST",
    [PPTP_STOP_SESSION_REPLY]     = "STOP_SESSION_REPLY",
    [PPTP_ECHO_REQUEST]           = "ECHO_REQUEST",
    [PPTP_ECHO_REPLY]             = "ECHO_REPLY",
    [PPTP_OUT_CALL_REQUEST]       = "OUT_CALL_REQUEST",
    [PPTP_OUT_CALL_REPLY]         = "OUT_CALL_REPLY",
    [PPTP_IN_CALL_REQUEST]        = "IN_CALL_REQUEST",
    [PPTP_IN_CALL_REPLY]          = "IN_CALL_REPLY",
    [PPTP_IN_CALL_CONNECT]        = "IN_CALL_CONNECT",
    [PPTP_CALL_CLEAR_REQUEST]     = "CALL_CLEAR_REQUEST",
    [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY",
    [PPTP_WAN_ERROR_NOTIFY]       = "WAN_ERROR_NOTIFY",
    [PPTP_SET_LINK_INFO]          = "SET_LINK_INFO"
};

const char *pptp_msg_name(u_int16_t msg)
{
    if (msg > PPTP_MSG_MAX)
        return pptp_msg_name_array[0];

    return pptp_msg_name_array[msg];
}
EXPORT_SYMBOL(pptp_msg_name);
#endif

@@ -276,7 +284,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
    typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;

    msg = ntohs(ctlh->messageType);
    pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
    pr_debug("inbound control message %s\n", pptp_msg_name(msg));

    switch (msg) {
    case PPTP_START_SESSION_REPLY:

@@ -311,7 +319,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
        pcid = pptpReq->ocack.peersCallID;
        if (info->pns_call_id != pcid)
            goto invalid;
        pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
        pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
                 ntohs(cid), ntohs(pcid));

        if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {

@@ -328,7 +336,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
            goto invalid;

        cid = pptpReq->icreq.callID;
        pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
        pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
        info->cstate = PPTP_CALL_IN_REQ;
        info->pac_call_id = cid;
        break;

@@ -347,7 +355,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
        if (info->pns_call_id != pcid)
            goto invalid;

        pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
        pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
        info->cstate = PPTP_CALL_IN_CONF;

        /* we expect a GRE connection from PAC to PNS */

@@ -357,7 +365,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
    case PPTP_CALL_DISCONNECT_NOTIFY:
        /* server confirms disconnect */
        cid = pptpReq->disc.callID;
        pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
        pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
        info->cstate = PPTP_CALL_NONE;

        /* untrack this call id, unexpect GRE packets */

@@ -384,7 +392,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
invalid:
    pr_debug("invalid %s: type=%d cid=%u pcid=%u "
             "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
             msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
             pptp_msg_name(msg),
             msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
             ntohs(info->pns_call_id), ntohs(info->pac_call_id));
    return NF_ACCEPT;

@@ -404,7 +412,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
    typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;

    msg = ntohs(ctlh->messageType);
    pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
    pr_debug("outbound control message %s\n", pptp_msg_name(msg));

    switch (msg) {
    case PPTP_START_SESSION_REQUEST:

@@ -426,7 +434,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
        info->cstate = PPTP_CALL_OUT_REQ;
        /* track PNS call id */
        cid = pptpReq->ocreq.callID;
        pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
        pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
        info->pns_call_id = cid;
        break;

@@ -440,7 +448,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
        pcid = pptpReq->icack.peersCallID;
        if (info->pac_call_id != pcid)
            goto invalid;
        pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
        pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
                 ntohs(cid), ntohs(pcid));

        if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {

@@ -480,7 +488,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
invalid:
    pr_debug("invalid %s: type=%d cid=%u pcid=%u "
             "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
             msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
             pptp_msg_name(msg),
             msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
             ntohs(info->pns_call_id), ntohs(info->pac_call_id));
    return NF_ACCEPT;
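The pptp overflow came from indexing pptp_msg_name[] with an attacker-influenced wire value; the fix replaces scattered open-coded `msg <= PPTP_MSG_MAX ? ...` guards with a single accessor that clamps out-of-range values to the "UNKNOWN_MESSAGE" slot. A standalone sketch of the same accessor pattern (constants illustrative; the NULL check is an addition for sparse designated initializers, which the kernel version does not need):

    #include <stdint.h>
    #include <stdio.h>

    #define MSG_MAX 15   /* stands in for PPTP_MSG_MAX */

    static const char *const msg_name_array[MSG_MAX + 1] = {
        [0] = "UNKNOWN_MESSAGE",
        [1] = "START_SESSION_REQUEST",
        /* ... remaining slots; unset entries stay NULL ... */
    };

    /* Every caller goes through this accessor, so no call site can
     * index past the end of the table no matter what the wire says. */
    static const char *msg_name(uint16_t msg)
    {
        if (msg > MSG_MAX || !msg_name_array[msg])
            return msg_name_array[0];
        return msg_name_array[msg];
    }

    int main(void)
    {
        printf("%s\n", msg_name(1));      /* START_SESSION_REQUEST */
        printf("%s\n", msg_name(0xffff)); /* UNKNOWN_MESSAGE */
        return 0;
    }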
@@ -103,7 +103,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
    if (help->helper->data_len == 0)
        return -EINVAL;

    nla_memcpy(help->data, nla_data(attr), sizeof(help->data));
    nla_memcpy(help->data, attr, sizeof(help->data));
    return 0;
}

@@ -240,6 +240,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
        ret = -ENOMEM;
        goto err2;
    }
    helper->data_len = size;

    helper->flags |= NF_CT_HELPER_F_USERSPACE;
    memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
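The first hunk works because nla_memcpy() takes the attribute itself, not its payload: it locates the payload and bounds the copy by the attribute length internally, so passing nla_data(attr) treated the payload bytes as another attribute header and read past it. A rough sketch of that internal behavior (field layout and names here are illustrative, not the kernel's struct nlattr):

    #include <string.h>

    struct nlattr_like {
        unsigned short len;   /* header + payload length */
        unsigned short type;
        /* payload follows the header */
    };

    static void *nla_data_like(struct nlattr_like *nla)
    {
        return (char *)nla + sizeof(*nla);
    }

    static int nla_len_like(const struct nlattr_like *nla)
    {
        return nla->len - (int)sizeof(*nla);
    }

    /* The copy is clamped to min(count, payload length), and the
     * payload is found from the attribute header; callers therefore
     * must hand over the attribute, never nla_data() of it. */
    static int nla_memcpy_like(void *dest, struct nlattr_like *src, int count)
    {
        int minlen = count < nla_len_like(src) ? count : nla_len_like(src);

        memcpy(dest, nla_data_like(src), minlen);
        return minlen;
    }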
@@ -712,6 +712,10 @@ void qrtr_ns_init(void)
        goto err_sock;
    }

    qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
    if (!qrtr_ns.workqueue)
        goto err_sock;

    qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;

    sq.sq_port = QRTR_PORT_CTRL;

@@ -720,17 +724,13 @@ void qrtr_ns_init(void)
    ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq));
    if (ret < 0) {
        pr_err("failed to bind to socket\n");
        goto err_sock;
        goto err_wq;
    }

    qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR;
    qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST;
    qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL;

    qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
    if (!qrtr_ns.workqueue)
        goto err_sock;

    ret = say_hello(&qrtr_ns.bcast_sq);
    if (ret < 0)
        goto err_wq;
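Once kernel_bind() succeeds, sk_data_ready can fire and the handler tries to queue work, so the workqueue must exist before the bind rather than after it; the fix moves the allocation up and retargets the bind error path to err_wq. A userspace rendering of the same ordering rule (illustrative names):

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int dummy; };

    struct service {
        struct queue *wq;                 /* must exist before events arrive */
        void (*on_data)(struct service *);
    };

    static void on_data(struct service *s)
    {
        /* If start() had enabled delivery before allocating s->wq,
         * this would dereference NULL on the very first event. */
        printf("queueing work on %p\n", (void *)s->wq);
    }

    static int start(struct service *s)
    {
        s->wq = malloc(sizeof(*s->wq));   /* allocate first... */
        if (!s->wq)
            return -1;

        s->on_data = on_data;             /* ...only then allow events */
        return 0;
    }

    int main(void)
    {
        struct service s = {0};

        if (start(&s) == 0)
            s.on_data(&s);                /* simulate the first event */
        free(s.wq);
        return 0;
    }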
@@ -199,6 +199,9 @@ static int tcf_ct_flow_table_add_action_nat(struct net *net,
    const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
    struct nf_conntrack_tuple target;

    if (!(ct->status & IPS_NAT_MASK))
        return 0;

    nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

    switch (tuple->src.l3num) {
@@ -297,9 +297,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
            goto flow_error;
        }
        q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
        if (!q->flows_cnt || q->flows_cnt > 65536) {
        if (!q->flows_cnt || q->flows_cnt >= 65536) {
            NL_SET_ERR_MSG_MOD(extack,
                               "Number of flows must be < 65536");
                               "Number of flows must range in [1..65535]");
            goto flow_error;
        }
    }
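The old bound admitted flows_cnt == 65536, and the per-flow scan uses a 16-bit index, for which `idx < 65536` can never become false, hence the infinite loop; capping at 65535 keeps the bound representable in a u16. The wraparound in two lines of plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t flows_cnt = 65536;   /* the value the old check let through */
        uint32_t visited = 0;
        uint16_t idx;

        /* idx wraps 65535 -> 0, so idx < 65536 always holds;
         * break out manually just to demonstrate the wrap. */
        for (idx = 0; idx < flows_cnt; idx++) {
            if (++visited > 70000) {
                printf("still looping after %u iterations\n", visited);
                break;
            }
        }
        return 0;
    }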
@@ -31,7 +31,7 @@ menuconfig IP_SCTP
          homing at either or both ends of an association."

          To compile this protocol support as a module, choose M here: the
          module will be called sctp. Debug messages are handeled by the
          module will be called sctp. Debug messages are handled by the
          kernel's dynamic debugging framework.

          If in doubt, say N.
@@ -343,6 +343,9 @@ void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport,
    struct sockaddr_storage addr;
    struct sctp_ulpevent *event;

    if (asoc->state < SCTP_STATE_ESTABLISHED)
        return;

    memset(&addr, 0, sizeof(struct sockaddr_storage));
    memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);

@@ -206,10 +206,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)

    kfree(aead_req);

    spin_lock_bh(&ctx->decrypt_compl_lock);
    pending = atomic_dec_return(&ctx->decrypt_pending);

    if (!pending && READ_ONCE(ctx->async_notify))
    if (!pending && ctx->async_notify)
        complete(&ctx->async_wait.completion);
    spin_unlock_bh(&ctx->decrypt_compl_lock);
}

static int tls_do_decryption(struct sock *sk,

@@ -467,10 +469,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
        ready = true;
    }

    spin_lock_bh(&ctx->encrypt_compl_lock);
    pending = atomic_dec_return(&ctx->encrypt_pending);

    if (!pending && READ_ONCE(ctx->async_notify))
    if (!pending && ctx->async_notify)
        complete(&ctx->async_wait.completion);
    spin_unlock_bh(&ctx->encrypt_compl_lock);

    if (!ready)
        return;

@@ -929,6 +933,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
    int num_zc = 0;
    int orig_size;
    int ret = 0;
    int pending;

    if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
        return -EOPNOTSUPP;

@@ -1095,13 +1100,19 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        goto send_end;
    } else if (num_zc) {
        /* Wait for pending encryptions to get completed */
        smp_store_mb(ctx->async_notify, true);
        spin_lock_bh(&ctx->encrypt_compl_lock);
        ctx->async_notify = true;

        if (atomic_read(&ctx->encrypt_pending))
        pending = atomic_read(&ctx->encrypt_pending);
        spin_unlock_bh(&ctx->encrypt_compl_lock);
        if (pending)
            crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
        else
            reinit_completion(&ctx->async_wait.completion);

        /* There can be no concurrent accesses, since we have no
         * pending encrypt operations
         */
        WRITE_ONCE(ctx->async_notify, false);

        if (ctx->async_wait.err) {

@@ -1732,6 +1743,7 @@ int tls_sw_recvmsg(struct sock *sk,
    bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
    bool is_peek = flags & MSG_PEEK;
    int num_async = 0;
    int pending;

    flags |= nonblock;

@@ -1894,8 +1906,11 @@ int tls_sw_recvmsg(struct sock *sk,
recv_end:
    if (num_async) {
        /* Wait for all previously submitted records to be decrypted */
        smp_store_mb(ctx->async_notify, true);
        if (atomic_read(&ctx->decrypt_pending)) {
        spin_lock_bh(&ctx->decrypt_compl_lock);
        ctx->async_notify = true;
        pending = atomic_read(&ctx->decrypt_pending);
        spin_unlock_bh(&ctx->decrypt_compl_lock);
        if (pending) {
            err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
            if (err) {
                /* one of async decrypt failed */

@@ -1907,6 +1922,10 @@ int tls_sw_recvmsg(struct sock *sk,
        } else {
            reinit_completion(&ctx->async_wait.completion);
        }

        /* There can be no concurrent accesses, since we have no
         * pending decrypt operations
         */
        WRITE_ONCE(ctx->async_notify, false);

        /* Drain records from the rx_list & copy if required */

@@ -2293,6 +2312,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)

    if (tx) {
        crypto_init_wait(&sw_ctx_tx->async_wait);
        spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
        crypto_info = &ctx->crypto_send.info;
        cctx = &ctx->tx;
        aead = &sw_ctx_tx->aead_send;

@@ -2301,6 +2321,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
        sw_ctx_tx->tx_work.sk = sk;
    } else {
        crypto_init_wait(&sw_ctx_rx->async_wait);
        spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
        crypto_info = &ctx->crypto_recv.info;
        cctx = &ctx->rx;
        skb_queue_head_init(&sw_ctx_rx->rx_list);
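The ktls race was between the waiter arming async_notify and sampling the pending counter, and the crypto callback decrementing that counter: with only smp_store_mb(), the callback could see pending hit zero before async_notify became visible, so nobody completed the waiter. The fix brackets both sides with a spinlock so "decrement, then test notify" and "set notify, then read pending" each run atomically with respect to the other. A userspace rendering of the repaired pattern, using a mutex and condition variable in place of the spinlock and completion (illustrative names; compilable fragment):

    #include <pthread.h>
    #include <stdbool.h>

    struct compl_ctx {
        pthread_mutex_t lock;   /* plays the role of decrypt_compl_lock */
        pthread_cond_t done;
        int pending;            /* in-flight async operations */
        bool notify;            /* a waiter is interested */
    };

    /* Completion callback: decrement and test-notify under the lock. */
    static void op_done(struct compl_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        if (--c->pending == 0 && c->notify)
            pthread_cond_signal(&c->done);
        pthread_mutex_unlock(&c->lock);
    }

    /* Waiter: arm notify and sample pending under the same lock, so a
     * callback can no longer slip between the two steps unseen. */
    static void wait_all(struct compl_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        c->notify = true;
        while (c->pending > 0)
            pthread_cond_wait(&c->done, &c->lock);
        c->notify = false;
        pthread_mutex_unlock(&c->lock);
    }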
@@ -1408,7 +1408,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
    /* Wait for children sockets to appear; these are the new sockets
     * created upon connection establishment.
     */
    timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
    timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
    prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

    while ((connected = vsock_dequeue_accept(listener)) == NULL &&
@@ -1132,6 +1132,14 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,

    lock_sock(sk);

    /* Check if sk has been released before lock_sock */
    if (sk->sk_shutdown == SHUTDOWN_MASK) {
        (void)virtio_transport_reset_no_sock(t, pkt);
        release_sock(sk);
        sock_put(sk);
        goto free_pkt;
    }

    /* Update CID in case it has changed after a transport reset event */
    vsk->local_addr.svm_cid = dst.svm_cid;

@@ -142,7 +142,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
    if (result)
        return result;

    if (rdev->wiphy.debugfsdir)
    if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
        debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
                       rdev->wiphy.debugfsdir,
                       rdev->wiphy.debugfsdir->d_parent, newname);
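debugfs creation can hand back an ERR_PTR() rather than NULL, and a plain truthiness test happily dereferences that encoded error; IS_ERR_OR_NULL() rejects both cases. A compact sketch of how errors get packed into pointers (this re-implements the idiom in userspace, it is not the kernel's include/linux/err.h):

    #include <stdio.h>

    /* Pointer-encoded errors live at the very top of the address space. */
    #define MAX_ERRNO 4095

    static void *err_ptr(long error)      { return (void *)error; }
    static long  ptr_err(const void *ptr) { return (long)ptr; }

    static int is_err(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int is_err_or_null(const void *ptr)
    {
        return !ptr || is_err(ptr);
    }

    int main(void)
    {
        void *dir = err_ptr(-12);   /* e.g. -ENOMEM from a failed create */

        if (dir)                    /* plain NULL check: passes, then boom */
            printf("NULL test wrongly accepts %ld\n", ptr_err(dir));
        if (!is_err_or_null(dir))
            printf("never reached for an ERR_PTR\n");
        return 0;
    }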
@@ -341,8 +341,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
    bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
    u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
    u64 npgs, addr = mr->addr, size = mr->len;
    unsigned int chunks, chunks_per_page;
    u64 addr = mr->addr, size = mr->len;
    int err;

    if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {

@@ -372,6 +372,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
    if ((addr + size) < addr)
        return -EINVAL;

    npgs = div_u64(size, PAGE_SIZE);
    if (npgs > U32_MAX)
        return -EINVAL;

    chunks = (unsigned int)div_u64(size, chunk_size);
    if (chunks == 0)
        return -EINVAL;

@@ -391,7 +395,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
    umem->size = size;
    umem->headroom = headroom;
    umem->chunk_size_nohr = chunk_size - headroom;
    umem->npgs = size / PAGE_SIZE;
    umem->npgs = (u32)npgs;
    umem->pgs = NULL;
    umem->user = NULL;
    umem->flags = mr->flags;
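npgs is stored in a u32, but a 64-bit size divided by PAGE_SIZE can exceed U32_MAX, so the count is now computed in 64 bits and range-checked before the narrowing store. The same guard in portable C (constants illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Compute a 32-bit page count from a 64-bit size, refusing values
     * that would silently truncate on the narrowing store. */
    static int size_to_npgs(uint64_t size, uint32_t *out)
    {
        uint64_t npgs = size / PAGE_SIZE;

        if (npgs > UINT32_MAX)
            return -1;              /* would not fit in a u32 */
        *out = (uint32_t)npgs;
        return 0;
    }

    int main(void)
    {
        uint64_t huge = (uint64_t)UINT32_MAX * PAGE_SIZE + PAGE_SIZE;
        uint32_t npgs;

        if (size_to_npgs(huge, &npgs) < 0)
            printf("rejected: page count overflows u32\n");
        return 0;
    }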
@@ -379,6 +379,7 @@ static void espintcp_destruct(struct sock *sk)
{
    struct espintcp_ctx *ctx = espintcp_getctx(sk);

    ctx->saved_destruct(sk);
    kfree(ctx);
}

@@ -419,6 +420,7 @@ static int espintcp_init_sk(struct sock *sk)
    sk->sk_socket->ops = &espintcp_ops;
    ctx->saved_data_ready = sk->sk_data_ready;
    ctx->saved_write_space = sk->sk_write_space;
    ctx->saved_destruct = sk->sk_destruct;
    sk->sk_data_ready = espintcp_data_ready;
    sk->sk_write_space = espintcp_write_space;
    sk->sk_destruct = espintcp_destruct;
@@ -25,12 +25,10 @@ static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
    struct xfrm_offload *xo = xfrm_offload(skb);

    skb_reset_mac_len(skb);
    pskb_pull(skb, skb->mac_len + hsize + x->props.header_len);

    if (xo->flags & XFRM_GSO_SEGMENT) {
        skb_reset_transport_header(skb);
    if (xo->flags & XFRM_GSO_SEGMENT)
        skb->transport_header -= x->props.header_len;
    }

    pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
@@ -644,7 +644,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
            dev_put(skb->dev);

        spin_lock(&x->lock);
        if (nexthdr <= 0) {
        if (nexthdr < 0) {
            if (nexthdr == -EBADMSG) {
                xfrm_audit_state_icvfail(x, skb,
                                         x->type->proto);
@@ -750,7 +750,28 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
    .get_link_net = xfrmi_get_link_net,
};

static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
{
    struct net *net;
    LIST_HEAD(list);

    rtnl_lock();
    list_for_each_entry(net, net_exit_list, exit_list) {
        struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
        struct xfrm_if __rcu **xip;
        struct xfrm_if *xi;

        for (xip = &xfrmn->xfrmi[0];
             (xi = rtnl_dereference(*xip)) != NULL;
             xip = &xi->next)
            unregister_netdevice_queue(xi->dev, &list);
    }
    unregister_netdevice_many(&list);
    rtnl_unlock();
}

static struct pernet_operations xfrmi_net_ops = {
    .exit_batch = xfrmi_exit_batch_net,
    .id = &xfrmi_net_id,
    .size = sizeof(struct xfrmi_net),
};
@@ -583,18 +583,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
        xfrm_state_hold(x);

        if (skb_is_gso(skb)) {
            skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
            if (skb->inner_protocol)
                return xfrm_output_gso(net, sk, skb);

            return xfrm_output2(net, sk, skb);
            skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
            goto out;
        }

        if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
            goto out;
    } else {
        if (skb_is_gso(skb))
            return xfrm_output_gso(net, sk, skb);
    }

    if (skb_is_gso(skb))
        return xfrm_output_gso(net, sk, skb);

    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        err = skb_checksum_help(skb);
        if (err) {

@@ -640,7 +642,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)

    if (skb->protocol == htons(ETH_P_IP))
        proto = AF_INET;
    else if (skb->protocol == htons(ETH_P_IPV6))
    else if (skb->protocol == htons(ETH_P_IPV6) &&
             skb->sk->sk_family == AF_INET6)
        proto = AF_INET6;
    else
        return;
@@ -1436,12 +1436,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
                                   struct xfrm_policy *pol)
{
    u32 mark = policy->mark.v & policy->mark.m;

    if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
        return true;

    if ((mark & pol->mark.m) == pol->mark.v &&
    if (policy->mark.v == pol->mark.v &&
        policy->priority == pol->priority)
        return true;

@@ -238,7 +238,7 @@
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    BPF_LD_MAP_FD(BPF_REG_1, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
    /* r1 = [0x00, 0xff] */
    BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),

@@ -253,10 +253,6 @@
     * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
     */
    BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
    /* r1 = 0 or
     * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
     */
    BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
    /* error on OOB pointer computation */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
    /* exit */

@@ -265,8 +261,10 @@
    },
    .fixup_map_hash_8b = { 3 },
    /* not actually fully unbounded, but the bound is very high */
    .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
    .result = REJECT
    .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
    .result_unpriv = REJECT,
    .errstr = "value -4294967168 makes map_value pointer be out of bounds",
    .result = REJECT,
},
{
    "bounds check after truncation of boundary-crossing range (2)",

@@ -276,7 +274,7 @@
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    BPF_LD_MAP_FD(BPF_REG_1, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
    /* r1 = [0x00, 0xff] */
    BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),

@@ -293,10 +291,6 @@
     * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
     */
    BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
    /* r1 = 0 or
     * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
     */
    BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
    /* error on OOB pointer computation */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
    /* exit */

@@ -305,8 +299,10 @@
    },
    .fixup_map_hash_8b = { 3 },
    /* not actually fully unbounded, but the bound is very high */
    .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
    .result = REJECT
    .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
    .result_unpriv = REJECT,
    .errstr = "value -4294967168 makes map_value pointer be out of bounds",
    .result = REJECT,
},
{
    "bounds check after wrapping 32-bit addition",

@@ -539,3 +535,25 @@
    },
    .result = ACCEPT
},
{
    "assigning 32bit bounds to 64bit for wA = 0, wB = wA",
    .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
                    offsetof(struct __sk_buff, data_end)),
        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
                    offsetof(struct __sk_buff, data)),
        BPF_MOV32_IMM(BPF_REG_9, 0),
        BPF_MOV32_REG(BPF_REG_2, BPF_REG_9),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1),
        BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    },
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
    .result = ACCEPT,
    .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
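The new selftest encodes exactly the pattern the verifier fix targets: a 32-bit register move (`wB = wA`) zero-extends into the full 64-bit destination, so when wA is a known constant the destination's 64-bit bounds are precisely [wA, wA] and the subsequent packet-pointer arithmetic is provably safe; the verifier previously lost that precision when transferring 32-bit state to 64-bit. The zero-extension itself, in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wA = 0;     /* BPF_MOV32_IMM(w9, 0) */
        uint64_t rB = wA;    /* a 32-bit mov clears the upper 32 bits,
                              * so the 64-bit value is bounded by [0, 0] */

        printf("rB = %llu\n", (unsigned long long)rB);
        return 0;
    }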
@@ -0,0 +1,21 @@
[
    {
        "id": "83be",
        "name": "Create FQ-PIE with invalid number of flows",
        "category": [
            "qdisc",
            "fq_pie"
        ],
        "setup": [
            "$IP link add dev $DUMMY type dummy || /bin/true"
        ],
        "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
        "expExitCode": "2",
        "verifyCmd": "$TC qdisc show dev $DUMMY",
        "matchPattern": "qdisc",
        "matchCount": "0",
        "teardown": [
            "$IP link del dev $DUMMY"
        ]
    }
]