forked from luck/tmp_suning_uos_patched
ixgb and e1000: Use new function for copybreak tests
There appears to be an off-by-1 defect in the maximum packet size copied when copybreak is specified in these modules. The copybreak module params are specified as: "Maximum size of packet that is copied to a new buffer on receive" The tests are changed from "< copybreak" to "<= copybreak" and moved into new static functions for readability. Signed-off-by: Joe Perches <joe@perches.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
621b99b6f6
commit
57bf6eef2f
@ -3785,6 +3785,31 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
|
||||
return cleaned;
|
||||
}
|
||||
|
||||
/*
|
||||
* this should improve performance for small packets with large amounts
|
||||
* of reassembly being done in the stack
|
||||
*/
|
||||
static void e1000_check_copybreak(struct net_device *netdev,
|
||||
struct e1000_buffer *buffer_info,
|
||||
u32 length, struct sk_buff **skb)
|
||||
{
|
||||
struct sk_buff *new_skb;
|
||||
|
||||
if (length > copybreak)
|
||||
return;
|
||||
|
||||
new_skb = netdev_alloc_skb_ip_align(netdev, length);
|
||||
if (!new_skb)
|
||||
return;
|
||||
|
||||
skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
|
||||
(*skb)->data - NET_IP_ALIGN,
|
||||
length + NET_IP_ALIGN);
|
||||
/* save the skb in buffer_info as good */
|
||||
buffer_info->skb = *skb;
|
||||
*skb = new_skb;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_clean_rx_irq - Send received data up the network stack; legacy
|
||||
* @adapter: board private structure
|
||||
@ -3883,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
|
||||
total_rx_bytes += length;
|
||||
total_rx_packets++;
|
||||
|
||||
/* code added for copybreak, this should improve
|
||||
* performance for small packets with large amounts
|
||||
* of reassembly being done in the stack */
|
||||
if (length < copybreak) {
|
||||
struct sk_buff *new_skb =
|
||||
netdev_alloc_skb_ip_align(netdev, length);
|
||||
if (new_skb) {
|
||||
skb_copy_to_linear_data_offset(new_skb,
|
||||
-NET_IP_ALIGN,
|
||||
(skb->data -
|
||||
NET_IP_ALIGN),
|
||||
(length +
|
||||
NET_IP_ALIGN));
|
||||
/* save the skb in buffer_info as good */
|
||||
buffer_info->skb = skb;
|
||||
skb = new_skb;
|
||||
}
|
||||
/* else just continue with the old one */
|
||||
}
|
||||
/* end copybreak code */
|
||||
e1000_check_copybreak(netdev, buffer_info, length, &skb);
|
||||
|
||||
skb_put(skb, length);
|
||||
|
||||
/* Receive Checksum Offload */
|
||||
|
@ -1921,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* this should improve performance for small packets with large amounts
|
||||
* of reassembly being done in the stack
|
||||
*/
|
||||
static void ixgb_check_copybreak(struct net_device *netdev,
|
||||
struct ixgb_buffer *buffer_info,
|
||||
u32 length, struct sk_buff **skb)
|
||||
{
|
||||
struct sk_buff *new_skb;
|
||||
|
||||
if (length > copybreak)
|
||||
return;
|
||||
|
||||
new_skb = netdev_alloc_skb_ip_align(netdev, length);
|
||||
if (!new_skb)
|
||||
return;
|
||||
|
||||
skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
|
||||
(*skb)->data - NET_IP_ALIGN,
|
||||
length + NET_IP_ALIGN);
|
||||
/* save the skb in buffer_info as good */
|
||||
buffer_info->skb = *skb;
|
||||
*skb = new_skb;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgb_clean_rx_irq - Send received data up the network stack,
|
||||
* @adapter: board private structure
|
||||
@ -1957,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
|
||||
|
||||
prefetch(skb->data - NET_IP_ALIGN);
|
||||
|
||||
if (++i == rx_ring->count) i = 0;
|
||||
if (++i == rx_ring->count)
|
||||
i = 0;
|
||||
next_rxd = IXGB_RX_DESC(*rx_ring, i);
|
||||
prefetch(next_rxd);
|
||||
|
||||
if ((j = i + 1) == rx_ring->count) j = 0;
|
||||
j = i + 1;
|
||||
if (j == rx_ring->count)
|
||||
j = 0;
|
||||
next2_buffer = &rx_ring->buffer_info[j];
|
||||
prefetch(next2_buffer);
|
||||
|
||||
@ -1997,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
|
||||
goto rxdesc_done;
|
||||
}
|
||||
|
||||
/* code added for copybreak, this should improve
|
||||
* performance for small packets with large amounts
|
||||
* of reassembly being done in the stack */
|
||||
if (length < copybreak) {
|
||||
struct sk_buff *new_skb =
|
||||
netdev_alloc_skb_ip_align(netdev, length);
|
||||
if (new_skb) {
|
||||
skb_copy_to_linear_data_offset(new_skb,
|
||||
-NET_IP_ALIGN,
|
||||
(skb->data -
|
||||
NET_IP_ALIGN),
|
||||
(length +
|
||||
NET_IP_ALIGN));
|
||||
/* save the skb in buffer_info as good */
|
||||
buffer_info->skb = skb;
|
||||
skb = new_skb;
|
||||
}
|
||||
}
|
||||
/* end copybreak code */
|
||||
ixgb_check_copybreak(netdev, buffer_info, length, &skb);
|
||||
|
||||
/* Good Receive */
|
||||
skb_put(skb, length);
|
||||
|
Loading…
Reference in New Issue
Block a user