net: forcedeth: add xmit_more support
This change adds support for xmit_more based on the igb commit 6f19e12f62
("igb: flush when in xmit_more mode and under descriptor pressure") and commit 6b16f9ee89
("net: move skb->xmit_more hint to softnet data") that were made to igb to support this feature. The function netif_xmit_stopped is called to check whether the transmit queue on the device is currently unable to send, to determine whether we must write the tail because we can add no further buffers. When normal packets and/or xmit_more packets fill up tx_desc, it is necessary to trigger the NIC tx reg. Following the advice from David Miller and Jakub Kicinski, after the xmit_more feature is added, the following scenario will occur. | xmit_more packets | DMA_MAPPING | DMA_MAPPING error check | xmit_more packets already in HW xmit queue | In the above scenario, if a DMA_MAPPING error occurs, the xmit_more packets already in the HW xmit queue will also be dropped. This is different from the behavior before the xmit_more feature. So it is necessary to trigger the NIC HW tx reg in the above scenario. For non-xmit_more packets, the above scenario will not occur. Tested: - pktgen (xmit_more packets) SMP x86_64 -> Test command: ./pktgen_sample03_burst_single_flow.sh ... -b 8 -n 1000000 Test results: Params: ... burst: 8 ... Result: OK: 12194004(c12188996+d5007) usec, 1000001 (1500byte,0frags) 82007pps 984Mb/sec (984084000bps) errors: 0 - iperf (normal packets) SMP x86_64 -> Test command: Server: iperf -s Client: iperf -c serverip Result: TCP window size: 85.0 KByte (default) ------------------------------------------------------------ [ ID] Interval Transfer Bandwidth [ 3] 0.0-10.0 sec 1.10 GBytes 942 Mbits/sec CC: Joe Jin <joe.jin@oracle.com> CC: JUNXIAO_BI <junxiao.bi@oracle.com> Reported-and-tested-by: Nan san <nan.1986san@gmail.com> Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com> Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
fb90ab6ba9
commit
5d8876e2c2
|
@ -2225,6 +2225,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
struct nv_skb_map *prev_tx_ctx;
|
||||
struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
|
||||
unsigned long flags;
|
||||
netdev_tx_t ret = NETDEV_TX_OK;
|
||||
|
||||
/* add fragments to entries count */
|
||||
for (i = 0; i < fragments; i++) {
|
||||
|
@ -2240,7 +2241,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
netif_stop_queue(dev);
|
||||
np->tx_stop = 1;
|
||||
spin_unlock_irqrestore(&np->lock, flags);
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
/* When normal packets and/or xmit_more packets fill up
|
||||
* tx_desc, it is necessary to trigger NIC tx reg.
|
||||
*/
|
||||
ret = NETDEV_TX_BUSY;
|
||||
goto txkick;
|
||||
}
|
||||
spin_unlock_irqrestore(&np->lock, flags);
|
||||
|
||||
|
@ -2259,7 +2265,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
ret = NETDEV_TX_OK;
|
||||
|
||||
goto dma_error;
|
||||
}
|
||||
np->put_tx_ctx->dma_len = bcnt;
|
||||
np->put_tx_ctx->dma_single = 1;
|
||||
|
@ -2305,7 +2314,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
ret = NETDEV_TX_OK;
|
||||
|
||||
goto dma_error;
|
||||
}
|
||||
|
||||
np->put_tx_ctx->dma_len = bcnt;
|
||||
|
@ -2357,8 +2369,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
spin_unlock_irqrestore(&np->lock, flags);
|
||||
|
||||
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
|
||||
return NETDEV_TX_OK;
|
||||
txkick:
|
||||
if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
|
||||
u32 txrxctl_kick;
|
||||
dma_error:
|
||||
txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
|
||||
writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
||||
|
@ -2381,6 +2400,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
struct nv_skb_map *start_tx_ctx = NULL;
|
||||
struct nv_skb_map *tmp_tx_ctx = NULL;
|
||||
unsigned long flags;
|
||||
netdev_tx_t ret = NETDEV_TX_OK;
|
||||
|
||||
/* add fragments to entries count */
|
||||
for (i = 0; i < fragments; i++) {
|
||||
|
@ -2396,7 +2416,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
netif_stop_queue(dev);
|
||||
np->tx_stop = 1;
|
||||
spin_unlock_irqrestore(&np->lock, flags);
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
/* When normal packets and/or xmit_more packets fill up
|
||||
* tx_desc, it is necessary to trigger NIC tx reg.
|
||||
*/
|
||||
ret = NETDEV_TX_BUSY;
|
||||
|
||||
goto txkick;
|
||||
}
|
||||
spin_unlock_irqrestore(&np->lock, flags);
|
||||
|
||||
|
@ -2416,7 +2442,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
ret = NETDEV_TX_OK;
|
||||
|
||||
goto dma_error;
|
||||
}
|
||||
np->put_tx_ctx->dma_len = bcnt;
|
||||
np->put_tx_ctx->dma_single = 1;
|
||||
|
@ -2463,7 +2492,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
ret = NETDEV_TX_OK;
|
||||
|
||||
goto dma_error;
|
||||
}
|
||||
np->put_tx_ctx->dma_len = bcnt;
|
||||
np->put_tx_ctx->dma_single = 0;
|
||||
|
@ -2542,8 +2574,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
|
||||
spin_unlock_irqrestore(&np->lock, flags);
|
||||
|
||||
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
|
||||
return NETDEV_TX_OK;
|
||||
txkick:
|
||||
if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
|
||||
u32 txrxctl_kick;
|
||||
dma_error:
|
||||
txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
|
||||
writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void nv_tx_flip_ownership(struct net_device *dev)
|
||||
|
|
Loading…
Reference in New Issue
Block a user