net: decreasing real_num_tx_queues needs to flush qdisc
Reducing real_num_queues needs to flush the qdisc otherwise skbs with queue_mappings greater than real_num_tx_queues can be sent to the underlying driver. The flow for this is, dev_queue_xmit() dev_pick_tx() skb_tx_hash() => hash using real_num_tx_queues skb_set_queue_mapping() ... qdisc_enqueue_root() => enqueue skb on txq from hash ... dev->real_num_tx_queues -= n ... sch_direct_xmit() dev_hard_start_xmit() ndo_start_xmit(skb,dev) => skb queue set with old hash skbs are enqueued on the qdisc with skb->queue_mapping set 0 < queue_mappings < real_num_tx_queues. When the driver decreases real_num_tx_queues, skbs may be dequeued from the qdisc with a queue_mapping greater than real_num_tx_queues. This fixes a case in ixgbe where this was occurring with DCB and FCoE. Because the driver is using queue_mapping to map skbs to tx descriptor rings we can potentially map skbs to rings that no longer exist. Signed-off-by: John Fastabend <john.r.fastabend@intel.com> Tested-by: Ross Brattain <ross.b.brattain@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
4ef6acff83
commit
f0796d5c73
@ -4001,7 +4001,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
|
|||||||
|
|
||||||
done:
|
done:
|
||||||
/* Notify the stack of the (possibly) reduced Tx Queue count. */
|
/* Notify the stack of the (possibly) reduced Tx Queue count. */
|
||||||
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
|
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
|
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
|
||||||
|
@ -1656,6 +1656,9 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
|
|||||||
return (dev->num_tx_queues > 1);
|
return (dev->num_tx_queues > 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
extern void netif_set_real_num_tx_queues(struct net_device *dev,
|
||||||
|
unsigned int txq);
|
||||||
|
|
||||||
/* Use this variant when it is known for sure that it
|
/* Use this variant when it is known for sure that it
|
||||||
* is executing from hardware interrupt context or with hardware interrupts
|
* is executing from hardware interrupt context or with hardware interrupts
|
||||||
* disabled.
|
* disabled.
|
||||||
|
@ -313,13 +313,12 @@ extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
|
|||||||
extern void tcf_destroy(struct tcf_proto *tp);
|
extern void tcf_destroy(struct tcf_proto *tp);
|
||||||
extern void tcf_destroy_chain(struct tcf_proto **fl);
|
extern void tcf_destroy_chain(struct tcf_proto **fl);
|
||||||
|
|
||||||
/* Reset all TX qdiscs of a device. */
|
/* Reset all TX qdiscs greater than index of a device. */
|
||||||
static inline void qdisc_reset_all_tx(struct net_device *dev)
|
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
|
||||||
struct Qdisc *qdisc;
|
struct Qdisc *qdisc;
|
||||||
|
|
||||||
for (i = 0; i < dev->num_tx_queues; i++) {
|
for (; i < dev->num_tx_queues; i++) {
|
||||||
qdisc = netdev_get_tx_queue(dev, i)->qdisc;
|
qdisc = netdev_get_tx_queue(dev, i)->qdisc;
|
||||||
if (qdisc) {
|
if (qdisc) {
|
||||||
spin_lock_bh(qdisc_lock(qdisc));
|
spin_lock_bh(qdisc_lock(qdisc));
|
||||||
@ -329,6 +328,11 @@ static inline void qdisc_reset_all_tx(struct net_device *dev)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void qdisc_reset_all_tx(struct net_device *dev)
|
||||||
|
{
|
||||||
|
qdisc_reset_all_tx_gt(dev, 0);
|
||||||
|
}
|
||||||
|
|
||||||
/* Are all TX queues of the device empty? */
|
/* Are all TX queues of the device empty? */
|
||||||
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
|
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
|
||||||
{
|
{
|
||||||
|
@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
|
|||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
|
||||||
|
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
|
||||||
|
*/
|
||||||
|
void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
|
||||||
|
{
|
||||||
|
unsigned int real_num = dev->real_num_tx_queues;
|
||||||
|
|
||||||
|
if (unlikely(txq > dev->num_tx_queues))
|
||||||
|
;
|
||||||
|
else if (txq > real_num)
|
||||||
|
dev->real_num_tx_queues = txq;
|
||||||
|
else if (txq < real_num) {
|
||||||
|
dev->real_num_tx_queues = txq;
|
||||||
|
qdisc_reset_all_tx_gt(dev, txq);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
|
||||||
|
|
||||||
static inline void __netif_reschedule(struct Qdisc *q)
|
static inline void __netif_reschedule(struct Qdisc *q)
|
||||||
{
|
{
|
||||||
|
Loading…
Reference in New Issue
Block a user