xen-netback: Functional follow-up patch for grant mapping series

Ian made some late comments about the grant mapping series; I incorporated the
functional outcomes into this patch:

- use callback_param macro to shorten access to pending_tx_info in
  xenvif_fill_frags() and xenvif_tx_submit()
- print an error message in xenvif_idx_unmap() before panic

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Zoltan Kiss 2014-03-24 23:59:51 +00:00 committed by David S. Miller
parent 0e59a4a553
commit 7aceb47a9d

View File

@ -99,6 +99,9 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
} }
#define callback_param(vif, pending_idx) \
(vif->pending_tx_info[pending_idx].callback_struct)
/* Find the containing VIF's structure from a pointer in pending_tx_info array /* Find the containing VIF's structure from a pointer in pending_tx_info array
*/ */
static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
@ -1020,12 +1023,12 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
/* If this is not the first frag, chain it to the previous*/ /* If this is not the first frag, chain it to the previous*/
if (unlikely(prev_pending_idx == INVALID_PENDING_IDX)) if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
skb_shinfo(skb)->destructor_arg = skb_shinfo(skb)->destructor_arg =
&vif->pending_tx_info[pending_idx].callback_struct; &callback_param(vif, pending_idx);
else if (likely(pending_idx != prev_pending_idx)) else if (likely(pending_idx != prev_pending_idx))
vif->pending_tx_info[prev_pending_idx].callback_struct.ctx = callback_param(vif, prev_pending_idx).ctx =
&(vif->pending_tx_info[pending_idx].callback_struct); &callback_param(vif, pending_idx);
vif->pending_tx_info[pending_idx].callback_struct.ctx = NULL; callback_param(vif, pending_idx).ctx = NULL;
prev_pending_idx = pending_idx; prev_pending_idx = pending_idx;
txp = &vif->pending_tx_info[pending_idx].req; txp = &vif->pending_tx_info[pending_idx].req;
@ -1395,13 +1398,13 @@ static int xenvif_tx_submit(struct xenvif *vif)
memcpy(skb->data, memcpy(skb->data,
(void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
data_len); data_len);
vif->pending_tx_info[pending_idx].callback_struct.ctx = NULL; callback_param(vif, pending_idx).ctx = NULL;
if (data_len < txp->size) { if (data_len < txp->size) {
/* Append the packet payload as a fragment. */ /* Append the packet payload as a fragment. */
txp->offset += data_len; txp->offset += data_len;
txp->size -= data_len; txp->size -= data_len;
skb_shinfo(skb)->destructor_arg = skb_shinfo(skb)->destructor_arg =
&vif->pending_tx_info[pending_idx].callback_struct; &callback_param(vif, pending_idx);
} else { } else {
/* Schedule a response immediately. */ /* Schedule a response immediately. */
xenvif_idx_unmap(vif, pending_idx); xenvif_idx_unmap(vif, pending_idx);
@ -1681,7 +1684,16 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
ret = gnttab_unmap_refs(&tx_unmap_op, NULL, ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
&vif->mmap_pages[pending_idx], 1); &vif->mmap_pages[pending_idx], 1);
BUG_ON(ret); if (ret) {
netdev_err(vif->dev,
"Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
ret,
pending_idx,
tx_unmap_op.host_addr,
tx_unmap_op.handle,
tx_unmap_op.status);
BUG();
}
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
} }