netpoll: check netpoll tx status on the right device
Although this doesn't actually matter, because netpoll_tx_running()
doesn't use its parameter, checking the master device makes the code
more readable. For team_dev_queue_xmit() we have to move the function
below the definition of struct team to avoid compile errors.

Cc: David Miller <davem@davemloft.net>
Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 4e3828c4bf
commit e15c3c2294
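For context: at the time, netpoll_tx_running() was a trivial inline that
ignored its net_device argument entirely, which is why the message calls
the change purely cosmetic. A sketch of its shape, paraphrasing the
include/linux/netpoll.h of that era (the exact config guards are an
assumption):

#ifdef CONFIG_NETPOLL
/* Sketch, not verbatim kernel code: the dev parameter is unused. */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();	/* netpoll transmits with IRQs disabled */
}
#else
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;	/* without CONFIG_NETPOLL, netpoll never runs */
}
#endif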
drivers/net/bonding/bond_main.c
@@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
 	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
 
-	if (unlikely(netpoll_tx_running(slave_dev)))
+	if (unlikely(netpoll_tx_running(bond->dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 	else
 		dev_queue_xmit(skb);
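In the bonding hunk above, the check now names the master (bond->dev)
rather than the slave, while delivery still goes through the slave's
netpoll state via bond_get_slave_by_dev(). A minimal sketch of the
delivery side, assuming the era's struct slave carries a netpoll
pointer np (paraphrased, not verbatim bond_main.c):

/* Sketch: hand the skb to the slave's netpoll instance, if attached. */
static inline void bond_netpoll_send_skb(const struct slave *slave,
					 struct sk_buff *skb)
{
	struct netpoll *np = slave->np;	/* assumption: np field on struct slave */

	if (np)
		netpoll_send_skb(np, skb);
}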
include/linux/if_team.h
@@ -96,21 +96,6 @@ static inline void team_netpoll_send_skb(struct team_port *port,
 }
 #endif
 
-static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
-				      struct sk_buff *skb)
-{
-	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
-		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
-	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
-
-	skb->dev = port->dev;
-	if (unlikely(netpoll_tx_running(port->dev))) {
-		team_netpoll_send_skb(port, skb);
-		return 0;
-	}
-	return dev_queue_xmit(skb);
-}
-
 struct team_mode_ops {
 	int (*init)(struct team *team);
 	void (*exit)(struct team *team);
@@ -200,6 +185,21 @@ struct team {
 	long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
+static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
+				      struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+	skb->dev = port->dev;
+	if (unlikely(netpoll_tx_running(team->dev))) {
+		team_netpoll_send_skb(port, skb);
+		return 0;
+	}
+	return dev_queue_xmit(skb);
+}
+
 static inline struct hlist_head *team_port_index_hash(struct team *team,
						      int port_index)
 {
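The two if_team.h hunks above are a single move: once
team_dev_queue_xmit() dereferences team->dev, it can no longer sit
above struct team, whose full definition only appears later in the
header, because dereferencing a pointer to an incomplete type does not
compile. A hypothetical minimal reproduction of the ordering problem
(the helper name is invented for illustration):

struct team;	/* forward declaration: the type is incomplete here */

/* Hypothetical helper, placed too early in the header: */
static inline struct net_device *team_master_dev(struct team *team)
{
	return team->dev;	/* error: dereferencing pointer to incomplete type */
}

struct team {
	struct net_device *dev;
	/* ... */
};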
net/bridge/br_forward.c
@@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
 	skb->dev = to->dev;
 
-	if (unlikely(netpoll_tx_running(to->dev))) {
+	if (unlikely(netpoll_tx_running(to->br->dev))) {
 		if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
 			kfree_skb(skb);
 		else {
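In the bridge hunk, to is a struct net_bridge_port: to->dev is the
port's underlying device, while to->br->dev is the bridge master
itself, i.e. the device netpoll actually runs on. Abbreviated sketch of
the relevant fields, paraphrasing net/bridge/br_private.h of that era:

/* Sketch: only the fields needed to read the diff are shown. */
struct net_bridge_port {
	struct net_bridge	*br;	/* owning bridge */
	struct net_device	*dev;	/* underlying port device */
	/* ... */
};

struct net_bridge {
	/* ... */
	struct net_device	*dev;	/* the bridge master device */
	/* ... */
};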