netfilter: drop bridge nf reset from nf_reset
commit 174e23810c ("sk_buff: drop all skb extensions on free and skb scrubbing")
made napi recycle always drop skb extensions. The additional skb_ext_del()
that is performed via nf_reset() on napi skb recycle is therefore no longer
needed.
Most nf_reset() calls in the stack are there so that a queued skb does not
hold a conntrack reference and block 'rmmod nf_conntrack' indefinitely.
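For reference, the step those call sites actually rely on is the conntrack
release; a minimal sketch of just that part, mirroring the helper body shown
in the diff below:

    /* Release the skb's conntrack reference so a queued packet
     * cannot pin the nf_conntrack module.
     */
    nf_conntrack_put(skb_nfct(skb));
    skb->_nfct = 0;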
This removes the skb_ext_del() from nf_reset() and renames it to the more
fitting nf_reset_ct().
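Condensed, the change to the helper itself (see the nf_reset_ct() hunk in the
diff below) amounts to this before/after; shown here only as a summary of that
hunk:

    /* before: also dropped the bridge netfilter extension */
    static inline void nf_reset(struct sk_buff *skb)
    {
    #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
            nf_conntrack_put(skb_nfct(skb));
            skb->_nfct = 0;
    #endif
    #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
            skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
    #endif
    }

    /* after: conntrack-only reset; extension cleanup is left to the
     * skb extension handling (skb_ext_reset and friends)
     */
    static inline void nf_reset_ct(struct sk_buff *skb)
    {
    #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
            nf_conntrack_put(skb_nfct(skb));
            skb->_nfct = 0;
    #endif
    }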
In a few selected places, add a call to skb_ext_reset to make sure that
no active extensions remain.
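Those call sites (the mac80211_hwsim_tx_frame_no_nl() and cvm_oct_xmit()
hunks below) end up with the following pattern; a sketch of the replacement,
with the old calls noted for comparison:

    /* was: secpath_reset(skb); nf_reset(skb); */
    skb_ext_reset(skb);     /* drop all skb extensions, secpath included */
    nf_reset_ct(skb);       /* drop the conntrack reference */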
I am submitting this for "net", because we're still early in the release
cycle. The patch applies to net-next too, but I think the rename causes
needless divergence between those trees.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 9cfc370240
commit 895b5c9f20
@@ -238,7 +238,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->dst);
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	skb->ip_summed = CHECKSUM_NONE;
 	ip_select_ident(net, skb, NULL);

@@ -358,7 +358,7 @@ static int pptp_rcv(struct sk_buff *skb)
 	po = lookup_chan(htons(header->call_id), iph->saddr);
 	if (po) {
 		skb_dst_drop(skb);
-		nf_reset(skb);
+		nf_reset_ct(skb);
 		return sk_receive_skb(sk_pppox(po), skb, 0);
 	}
 drop:

@@ -1104,7 +1104,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	skb_orphan(skb);
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	if (ptr_ring_produce(&tfile->tx_ring, skb))
 		goto drop;

@@ -1585,7 +1585,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Don't wait up for transmitted skbs to be freed. */
 	if (!use_napi) {
 		skb_orphan(skb);
-		nf_reset(skb);
+		nf_reset_ct(skb);
 	}
 
 	/* If running out of space, stop queue to avoid getting packets that we

@@ -366,7 +366,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
 	struct neighbour *neigh;
 	int ret;
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;

@@ -459,7 +459,7 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
 
 	/* reset skb device */
 	if (likely(err == 1))
-		nf_reset(skb);
+		nf_reset_ct(skb);
 	else
 		skb = NULL;
 

@@ -560,7 +560,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 	bool is_v6gw = false;
 	int ret = -EINVAL;
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	/* Be paranoid, rather than too clever. */
 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {

@@ -670,7 +670,7 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
 
 	/* reset skb device */
 	if (likely(err == 1))
-		nf_reset(skb);
+		nf_reset_ct(skb);
 	else
 		skb = NULL;
 

@@ -1261,8 +1261,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
 	skb_orphan(skb);
 	skb_dst_drop(skb);
 	skb->mark = 0;
-	secpath_reset(skb);
-	nf_reset(skb);
+	skb_ext_reset(skb);
+	nf_reset_ct(skb);
 
 	/*
 	 * Get absolute mactime here so all HWs RX at the "same time", and

@@ -349,10 +349,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	dst_release(skb_dst(skb));
 	skb_dst_set(skb, NULL);
-#ifdef CONFIG_XFRM
-	secpath_reset(skb);
-#endif
-	nf_reset(skb);
+	skb_ext_reset(skb);
+	nf_reset_ct(skb);
 
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;

@@ -4160,15 +4160,12 @@ static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
 #endif /* CONFIG_SKB_EXTENSIONS */
 
-static inline void nf_reset(struct sk_buff *skb)
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb_nfct(skb));
 	skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
-#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)

@@ -436,7 +436,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	/* clean the netfilter state now that the batman-adv header has been
 	 * removed
 	 */
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 		goto dropped;

@@ -5120,7 +5120,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
 	skb_ext_reset(skb);
-	nf_reset(skb);
+	nf_reset_ct(skb);
 	nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV

@@ -871,7 +871,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
 

@@ -199,7 +199,7 @@ void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
 			kfree_skb(skb);
 			return;
 		}
-		nf_reset(skb);
+		nf_reset_ct(skb);
 	}
 	ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
 			      skb);

@@ -1794,7 +1794,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
 	ip_send_check(iph);
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-	nf_reset(skb);
+	nf_reset_ct(skb);
 }
 
 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,

@@ -2140,7 +2140,7 @@ int ip_mr_input(struct sk_buff *skb)
 
 		mroute_sk = rcu_dereference(mrt->mroute_sk);
 		if (mroute_sk) {
-			nf_reset(skb);
+			nf_reset_ct(skb);
 			raw_rcv(mroute_sk, skb);
 			return 0;
 		}

@@ -65,7 +65,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	/* Avoid counting cloned packets towards the original connection. */
-	nf_reset(skb);
+	nf_reset_ct(skb);
 	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
 	/*

@@ -332,7 +332,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	skb_push(skb, skb->data - skb_network_header(skb));
 

@@ -1916,7 +1916,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (tcp_v4_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	if (tcp_filter(sk, skb))
 		goto discard_and_relse;

@@ -1969,7 +1969,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto drop;
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

@@ -2298,7 +2298,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto drop;
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	/* No socket. Drop packet silently, if checksum is wrong */
 	if (udp_lib_checksum_complete(skb))

@@ -371,7 +371,7 @@ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
 		/* Free reference early: we don't need it any more,
 		   and it may hold ip_conntrack module loaded
 		   indefinitely. */
-		nf_reset(skb);
+		nf_reset_ct(skb);
 
 		skb_postpull_rcsum(skb, skb_network_header(skb),
 				   skb_network_header_len(skb));

@@ -54,7 +54,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 		return;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	nf_reset(skb);
+	nf_reset_ct(skb);
 	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
 	if (hooknum == NF_INET_PRE_ROUTING ||

@@ -215,7 +215,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 
 			/* Not releasing hash table! */
 			if (clone) {
-				nf_reset(clone);
+				nf_reset_ct(clone);
 				rawv6_rcv(sk, clone);
 			}
 		}

@@ -1078,7 +1078,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {

@@ -151,7 +151,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 	skb->ip_summed = CHECKSUM_NONE;
 
 	skb_dst_drop(skb);
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	rcu_read_lock();
 	dev = rcu_dereference(spriv->dev);

@@ -193,7 +193,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	return sk_receive_skb(sk, skb, 1);
 

@@ -206,7 +206,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;
 
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	return sk_receive_skb(sk, skb, 1);
 

@@ -613,7 +613,7 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
 		if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
 			ret = ip_vs_confirm_conntrack(skb);
 		if (ret == NF_ACCEPT) {
-			nf_reset(skb);
+			nf_reset_ct(skb);
 			skb_forward_csum(skb);
 		}
 		return ret;

@@ -237,7 +237,7 @@ static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
 	}
 
 	skb_dst_drop(skb);
-	nf_reset(skb);
+	nf_reset_ct(skb);
 	secpath_reset(skb);
 
 	skb->pkt_type = PACKET_HOST;

@@ -1821,7 +1821,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
 	skb_dst_drop(skb);
 
 	/* drop conntrack reference */
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
 

@@ -2121,7 +2121,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	skb_dst_drop(skb);
 
 	/* drop conntrack reference */
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.stats1.tp_packets++;

@@ -201,7 +201,7 @@ int sctp_rcv(struct sk_buff *skb)
 
 	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
 		goto discard_release;
-	nf_reset(skb);
+	nf_reset_ct(skb);
 
 	if (sk_filter(sk, skb))
 		goto discard_release;

@@ -706,7 +706,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 		if (err)
 			goto drop;
 
-		nf_reset(skb);
+		nf_reset_ct(skb);
 
 		if (decaps) {
 			sp = skb_sec_path(skb);

@@ -185,7 +185,7 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->skb_iif = 0;
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
-	nf_reset(skb);
+	nf_reset_ct(skb);
 	nf_reset_trace(skb);
 
 	if (!xnet)

@@ -502,7 +502,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
 	struct net *net = xs_net(skb_dst(skb)->xfrm);
 
 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
-		nf_reset(skb);
+		nf_reset_ct(skb);
 
 		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
 		if (unlikely(err != 1))

@@ -2808,7 +2808,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 			continue;
 		}
 
-		nf_reset(skb);
+		nf_reset_ct(skb);
 		skb_dst_drop(skb);
 		skb_dst_set(skb, dst);
 