xen: netback: handle compound page fragments on transmit.

An SKB paged fragment can consist of a compound page with order > 0.
However, the netchannel protocol deals only in PAGE_SIZE frames.

Handle this in netbk_gop_frag_copy and xen_netbk_count_skb_slots by
iterating over the PAGE_SIZE frames that make up the page.
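
Both call sites now share the same frame-by-frame walk. The following is
a minimal standalone userspace sketch of that arithmetic, for
illustration only; the walk_frag() helper, the printf output and the
hard-coded 4k PAGE_SIZE are assumptions of the sketch, not kernel code:

/*
 * Standalone illustration of the frame walk: a fragment described by
 * (offset, size) inside a compound page is consumed one PAGE_SIZE
 * frame at a time, never crossing a frame boundary.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void walk_frag(unsigned long offset, unsigned long size)
{
	/* Skip the unused frames at the start of the compound page. */
	unsigned long frame = offset >> PAGE_SHIFT;

	offset &= ~PAGE_MASK;	/* offset within the current frame */

	while (size > 0) {
		/* Never let one copy cross a frame boundary. */
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		printf("frame %lu: copy %lu bytes at offset %lu\n",
		       frame, bytes, offset);

		offset += bytes;
		size -= bytes;

		/* Advance to the next frame of the compound page. */
		if (offset == PAGE_SIZE) {
			frame++;
			offset = 0;
		}
	}
}

int main(void)
{
	/* A fragment starting 100 bytes into the second frame and
	 * spanning roughly two and a half frames in total. */
	walk_frag(PAGE_SIZE + 100, 2 * PAGE_SIZE + PAGE_SIZE / 2);
	return 0;
}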

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Konrad Rzeszutek Wilk <konrad@kernel.org>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6a8ed462f1 (parent 6caab7b054)
Ian Campbell, 2012-10-10 03:48:42 +00:00; committed by David S. Miller

drivers/net/xen-netback/netback.c

@@ -335,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
 		unsigned long bytes;
+
+		offset &= ~PAGE_MASK;
+
 		while (size > 0) {
+			BUG_ON(offset >= PAGE_SIZE);
 			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
 
-			if (start_new_rx_buffer(copy_off, size, 0)) {
+			bytes = PAGE_SIZE - offset;
+
+			if (bytes > size)
+				bytes = size;
+
+			if (start_new_rx_buffer(copy_off, bytes, 0)) {
 				count++;
 				copy_off = 0;
 			}
 
-			bytes = size;
 			if (copy_off + bytes > MAX_BUFFER_OFFSET)
 				bytes = MAX_BUFFER_OFFSET - copy_off;
 
 			copy_off += bytes;
+
+			offset += bytes;
 			size -= bytes;
+
+			if (offset == PAGE_SIZE)
+				offset = 0;
 		}
 	}
 	return count;
@@ -403,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	unsigned long bytes;
 
 	/* Data must not cross a page boundary. */
-	BUG_ON(size + offset > PAGE_SIZE);
+	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
 
 	meta = npo->meta + npo->meta_prod - 1;
 
+	/* Skip unused frames from start of page */
+	page += offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
+
 	while (size > 0) {
+		BUG_ON(offset >= PAGE_SIZE);
 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
-		if (start_new_rx_buffer(npo->copy_off, size, *head)) {
+		bytes = PAGE_SIZE - offset;
+
+		if (bytes > size)
+			bytes = size;
+
+		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
 			/*
 			 * Netfront requires there to be some data in the head
 			 * buffer.
@@ -420,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 			meta = get_next_rx_buffer(vif, npo);
 		}
 
-		bytes = size;
 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
 
@@ -453,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		offset += bytes;
 		size -= bytes;
 
+		/* Next frame */
+		if (offset == PAGE_SIZE && size) {
+			BUG_ON(!PageCompound(page));
+			page++;
+			offset = 0;
+		}
+
 		/* Leave a gap for the GSO descriptor. */
 		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
 			vif->rx.req_cons++;