tmp_suning_uos_patched/net/dsa/dsa_priv.h
Vladimir Oltean 412a1526d0 net: dsa: untag the bridge pvid from rx skbs
Currently the bridge untags VLANs present in its VLAN groups in
__allowed_ingress() only when VLAN filtering is enabled.

But when a skb is seen on the RX path as tagged with the bridge's pvid,
and that bridge has vlan_filtering=0, and there isn't any 8021q upper
with that VLAN either, then we have a problem. The bridge will not untag
it (since it is supposed to remain VLAN-unaware), and pvid-tagged
communication will be broken.

There are 2 situations where we can end up like that:

1. When installing a pvid in egress-tagged mode, like this:

ip link add dev br0 type bridge vlan_filtering 0
ip link set swp0 master br0
bridge vlan del dev swp0 vid 1
bridge vlan add dev swp0 vid 1 pvid

This happens because DSA configures the VLAN membership of the CPU port
using the same flags as swp0 (in this case "pvid and not untagged"), in
an attempt to copy the frame as-is from ingress to the CPU.
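
In rough C terms, the bridge VLAN add path in DSA does something along
these lines (just a sketch of the idea, not the exact call chain; dp,
vlan and trans stand for the usual slave port, switchdev VLAN object
and switchdev transaction):

        /* Program the VLAN on the user port first... */
        err = dsa_port_vlan_add(dp, &vlan, trans);
        if (err)
                return err;

        /* ...then on its dedicated CPU port, reusing the same flags */
        err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);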

However, in this case, the packet may arrive untagged on ingress; it
will be pvid-tagged by the ingress port and sent egress-tagged towards
the CPU. Otherwise stated, the CPU will see a VLAN
tag where there was none to speak of on ingress.

When vlan_filtering is 1, this is not a problem, as stated in the first
paragraph, because __allowed_ingress() will pop it. But currently, when
vlan_filtering is 0 and we have such a VLAN configuration, we need an
8021q upper (br0.1) to be able to ping over that VLAN, which is not
symmetrical with the vlan_filtering=1 case, and therefore, confusing for
users.

Basically what DSA attempts to do is simply an approximation: try to
copy the skb with (or without) the same VLAN all the way up to the CPU.
But DSA drivers treat CPU port VLAN membership in various ways (which is
a good segue into situation 2). And some of those drivers simply tell
the CPU port to copy the frame unmodified, which is the gold standard
when it comes to VLAN processing (therefore, any driver which can
configure the hardware to do that, should do that, and discard the VLAN
flags requested by DSA on the CPU port).

2. Some DSA drivers always configure the CPU port as egress-tagged, in
an attempt to recover the classified VLAN from the skb. These drivers
cannot work at all with untagged traffic when bridged in
vlan_filtering=0 mode. And they can't go for the easy "just keep the
pvid as egress-untagged towards the CPU" route, because each front port
can have its own pvid, and that might require conflicting VLAN
membership settings on the CPU port (swp1 is pvid for VID 1 and
egress-tagged for VID 2; swp2 is egress-tagged for VID 1 and pvid for
VID 2; with this simplistic approach, the CPU port, which is really a
separate hardware entity and has its own VLAN membership settings, would
end up being egress-untagged in both VID 1 and VID 2, therefore losing
the VLAN tags of ingress traffic).

So the only thing we can do is to create a helper function for resolving
the problematic case (that is, a function which untags the bridge pvid
when the bridge is in vlan_filtering=0 mode), which taggers in need
should call. It isn't called from the generic DSA receive path because
there are drivers that fall into neither the first nor the second
category.
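
For example, a tagger in need of this would end its rcv hook along the
lines below (a sketch only: the tagger name and the tag parsing are
made up, while dsa_master_find_slave() and dsa_untag_bridge_pvid() are
the real helpers from dsa_priv.h):

static struct sk_buff *example_tag_rcv(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct packet_type *pt)
{
        int source_device = 0, source_port = 0;

        /* ... parse and strip the switch-specific frame tag, filling in
         * source_device and source_port ...
         */

        skb->dev = dsa_master_find_slave(dev, source_device, source_port);
        if (!skb->dev)
                return NULL;

        /* Strip the bridge pvid from skbs received on a vlan_filtering=0
         * bridge port, since the bridge itself will not do it.
         */
        return dsa_untag_bridge_pvid(skb);
}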

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-23 18:13:45 -07:00

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

enum {
        DSA_NOTIFIER_AGEING_TIME,
        DSA_NOTIFIER_BRIDGE_JOIN,
        DSA_NOTIFIER_BRIDGE_LEAVE,
        DSA_NOTIFIER_FDB_ADD,
        DSA_NOTIFIER_FDB_DEL,
        DSA_NOTIFIER_MDB_ADD,
        DSA_NOTIFIER_MDB_DEL,
        DSA_NOTIFIER_VLAN_ADD,
        DSA_NOTIFIER_VLAN_DEL,
        DSA_NOTIFIER_MTU,
};

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
        struct switchdev_trans *trans;
        unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
        struct net_device *br;
        int tree_index;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
        int sw_index;
        int port;
        const unsigned char *addr;
        u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
        const struct switchdev_obj_port_mdb *mdb;
        struct switchdev_trans *trans;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
        const struct switchdev_obj_port_vlan *vlan;
        struct switchdev_trans *trans;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
        bool propagate_upstream;
        int sw_index;
        int port;
        int mtu;
};

struct dsa_slave_priv {
        /* Copy of CPU port xmit for faster access in slave transmit hot path */
        struct sk_buff *(*xmit)(struct sk_buff *skb,
                                struct net_device *dev);

        struct pcpu_sw_netstats __percpu *stats64;

        struct gro_cells gcells;

        /* DSA port data, such as switch, port index, etc. */
        struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *netpoll;
#endif

        /* TC context */
        struct list_head mall_tc_list;
};
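
/* The cached xmit pointer above is what the slave transmit hot path uses;
 * roughly, and only as a sketch of the idea (not the exact dsa_slave_xmit()
 * code):
 *
 *      struct dsa_slave_priv *p = netdev_priv(dev);
 *      struct sk_buff *nskb = p->xmit(skb, dev);
 *
 * i.e. the tagger's xmit callback is reached through one cached pointer
 * rather than being looked up through the CPU port for every packet.
 */
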
/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);

bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                       struct net_device *dev,
                       const unsigned char *addr, u16 vid,
                       u16 flags,
                       struct netlink_ext_ack *extack);
int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                       struct net_device *dev,
                       const unsigned char *addr, u16 vid);

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
                                                        int device, int port)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        struct dsa_switch_tree *dst = cpu_dp->dst;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds->index == device && dp->index == port &&
                    dp->type == DSA_PORT_TYPE_USER)
                        return dp->slave;

        return NULL;
}

/* port.c */
int dsa_port_set_state(struct dsa_port *dp, u8 state,
                       struct switchdev_trans *trans);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct switchdev_trans *trans);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
                         struct switchdev_trans *trans);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool propagate_upstream);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb,
                     struct switchdev_trans *trans);
int dsa_port_mdb_del(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
                              struct switchdev_trans *trans);
int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
                          struct switchdev_trans *trans);
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
                     struct switchdev_trans *trans);
int dsa_port_vlan_add(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan,
                      struct switchdev_trans *trans);
int dsa_port_vlan_del(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
bool dsa_slave_dev_check(const struct net_device *dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);

static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);

        return p->dp;
}

static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return dp->cpu_dp->master;
}

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
        struct dsa_port *dp = dsa_slave_to_port(skb->dev);
        struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
        struct net_device *br = dp->bridge_dev;
        struct net_device *dev = skb->dev;
        struct net_device *upper_dev;
        struct list_head *iter;
        u16 vid, pvid, proto;
        int err;

        if (!br || br_vlan_enabled(br))
                return skb;

        err = br_vlan_get_proto(br, &proto);
        if (err)
                return skb;

        /* Move VLAN tag from data to hwaccel */
        if (!skb_vlan_tag_present(skb) && hdr->h_vlan_proto == htons(proto)) {
                skb = skb_vlan_untag(skb);
                if (!skb)
                        return NULL;
        }

        if (!skb_vlan_tag_present(skb))
                return skb;

        vid = skb_vlan_tag_get_id(skb);

        /* We already run under an RCU read-side critical section since
         * we are called from netif_receive_skb_list_internal().
         */
        err = br_vlan_get_pvid_rcu(dev, &pvid);
        if (err)
                return skb;

        if (vid != pvid)
                return skb;

        /* The sad part about attempting to untag from DSA is that we
         * don't know, unless we check, if the skb will end up in
         * the bridge's data path - br_allowed_ingress() - or not.
         * For example, there might be an 8021q upper for the
         * default_pvid of the bridge, which will steal VLAN-tagged traffic
         * from the bridge's data path. This is a configuration that DSA
         * supports because vlan_filtering is 0. In that case, we should
         * definitely keep the tag, to make sure it keeps working.
         */
        netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
                if (!is_vlan_dev(upper_dev))
                        continue;

                if (vid == vlan_dev_vlan_id(upper_dev))
                        return skb;
        }

        __vlan_hwaccel_clear_tag(skb);

        return skb;
}

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
extern struct list_head dsa_tree_list;

#endif