net: Fix CONFIG_NET_CLS_ACT=n and CONFIG_NFT_FWD_NETDEV={y, m} build
net/netfilter/nft_fwd_netdev.c: In function ‘nft_fwd_netdev_eval’:
net/netfilter/nft_fwd_netdev.c:32:10: error: ‘struct sk_buff’ has no member named ‘tc_redirected’
pkt->skb->tc_redirected = 1;
^~
net/netfilter/nft_fwd_netdev.c:33:10: error: ‘struct sk_buff’ has no member named ‘tc_from_ingress’
pkt->skb->tc_from_ingress = 1;
^~
To avoid a direct dependency on tc actions from netfilter, wrap the
redirect bits around CONFIG_NET_REDIRECT and move the helpers to
include/linux/skbuff.h. Turn on this toggle from the ifb driver, the
only existing client of these bits in the tree.

This patch adds skb_set_redirected(), which sets the redirected bit on
the skbuff, records whether the packet was redirected from ingress, and
resets the timestamp (the timestamp reset was originally missing from
the netfilter bugfix).
Fixes: bcfabee1af ("netfilter: nft_fwd_netdev: allow to redirect to ifb via ingress")
Reported-by: noreply@ellerman.id.au
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2c64605b59, parent 428c491332
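For quick orientation before the diff: the new helper's behaviour boils down to the sketch below. This is a minimal, self-contained userspace mock, for illustration only; the field and function names mirror the patch, but the real definitions live behind CONFIG_NET_REDIRECT in include/linux/skbuff.h (see the skbuff.h hunk further down).

/* Minimal userspace mock of the new helper, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct sk_buff {
	unsigned int redirected:1;	/* packet was redirected by a classifier */
	unsigned int from_ingress:1;	/* redirect happened on the ingress path */
	unsigned long long tstamp;	/* delivery/arrival timestamp */
};

/* Mark the skb as redirected, remember whether it came from ingress,
 * and clear the timestamp for ingress redirects (the reset that the
 * original netfilter fix was missing).
 */
static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
	skb->redirected = 1;
	skb->from_ingress = from_ingress;
	if (skb->from_ingress)
		skb->tstamp = 0;
}

int main(void)
{
	struct sk_buff skb = { .tstamp = 12345ULL };

	/* In the patch, nft_fwd_netdev_eval() calls
	 * skb_set_redirected(pkt->skb, true) and act_mirred calls
	 * skb_set_redirected(skb2, skb2->tc_at_ingress).
	 */
	skb_set_redirected(&skb, true);
	printf("redirected=%u from_ingress=%u tstamp=%llu\n",
	       skb.redirected, skb.from_ingress, skb.tstamp);
	return 0;
}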
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,6 +149,7 @@ config NET_FC
 config IFB
 	tristate "Intermediate Functional Block support"
 	depends on NET_CLS_ACT
+	select NET_REDIRECT
 	---help---
 	  This is an intermediate driver that allows sharing of
 	  resources.
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
 	}
 
 	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-		skb->tc_redirected = 0;
+		skb->redirected = 0;
 		skb->tc_skip_classify = 1;
 
 		u64_stats_update_begin(&txp->tsync);
@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
 		rcu_read_unlock();
 		skb->skb_iif = txp->dev->ifindex;
 
-		if (!skb->tc_from_ingress) {
+		if (!skb->from_ingress) {
 			dev_queue_xmit(skb);
 		} else {
 			skb_pull_rcsum(skb, skb->mac_len);
@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 	txp->rx_bytes += skb->len;
 	u64_stats_update_end(&txp->rsync);
 
-	if (!skb->tc_redirected || !skb->skb_iif) {
+	if (!skb->redirected || !skb->skb_iif) {
 		dev_kfree_skb(skb);
 		dev->stats.rx_dropped++;
 		return NETDEV_TX_OK;
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -100,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb)
 	skb->dev = NULL;
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
-	skb_reset_tc(skb);
 #endif
+	skb_reset_redirect(skb);
 	skb->hdr_len = skb_headroom(skb);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
  *	@tc_skip_classify: do not classify packet. set by IFB device
  *	@tc_at_ingress: used within tc_classify to distinguish in/egress
- *	@tc_redirected: packet was redirected by a tc action
- *	@tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ *	@redirected: packet was redirected by packet classifier
+ *	@from_ingress: packet was redirected from the ingress path
  *	@peeked: this packet has been seen already, so stats have been
  *		done for it, don't do them again
  *	@nf_trace: netfilter packet trace flag
@@ -848,8 +848,10 @@ struct sk_buff {
 #ifdef CONFIG_NET_CLS_ACT
 	__u8			tc_skip_classify:1;
 	__u8			tc_at_ingress:1;
-	__u8			tc_redirected:1;
-	__u8			tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+	__u8			redirected:1;
+	__u8			from_ingress:1;
 #endif
 #ifdef CONFIG_TLS_DEVICE
 	__u8			decrypted:1;
@@ -4579,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
 	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	return skb->redirected;
+#else
+	return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 1;
+	skb->from_ingress = from_ingress;
+	if (skb->from_ingress)
+		skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 0;
+#endif
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
 
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	return skb->tc_redirected;
-#else
-	return false;
-#endif
-}
-
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -52,6 +52,9 @@ config NET_INGRESS
 config NET_EGRESS
 	bool
 
+config NET_REDIRECT
+	bool
+
 config SKB_EXTENSIONS
 	bool
 
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4516,7 +4516,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	/* Reinjected packets coming from act_mirred or similar should
 	 * not get XDP generic processing.
 	 */
-	if (skb_is_tc_redirected(skb))
+	if (skb_is_redirected(skb))
 		return XDP_PASS;
 
 	/* XDP packets must be linear and must have sufficient headroom
@@ -5063,7 +5063,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
 			goto out;
 	}
 #endif
-	skb_reset_tc(skb);
+	skb_reset_redirect(skb);
 skip_classify:
 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 			/* skb was 'freed' by stack, so clean few
 			 * bits and reuse it
 			 */
-			skb_reset_tc(skb);
+			skb_reset_redirect(skb);
 		} while (--burst > 0);
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -28,9 +28,8 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
 	struct nft_fwd_netdev *priv = nft_expr_priv(expr);
 	int oif = regs->data[priv->sreg_dev];
 
-	/* These are used by ifb only. */
-	pkt->skb->tc_redirected = 1;
-	pkt->skb->tc_from_ingress = 1;
+	/* This is used by ifb only. */
+	skb_set_redirected(pkt->skb, true);
 
 	nf_fwd_netdev_egress(pkt, oif);
 	regs->verdict.code = NF_STOLEN;
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 
 	/* mirror is always swallowed */
 	if (is_redirect) {
-		skb2->tc_redirected = 1;
-		skb2->tc_from_ingress = skb2->tc_at_ingress;
-		if (skb2->tc_from_ingress)
-			skb2->tstamp = 0;
+		skb_set_redirected(skb2, skb2->tc_at_ingress);
+
 		/* let's the caller reinsert the packet, if possible */
 		if (use_reinsert) {
 			res->ingress = want_ingress;