forked from luck/tmp_suning_uos_patched
xen-netback: stop vif thread spinning if frontend is unresponsive
The recent patch to improve guest receive side flow control (ca2f09f2) had a
slight flaw in the wait condition for the vif thread in that any remaining
skbs in the guest receive side netback internal queue would prevent the
thread from sleeping. An unresponsive frontend can lead to a permanently
non-empty internal queue and thus the thread will spin. In this case the
thread should really sleep until the frontend becomes responsive again.
This patch adds an extra flag to the vif which is set if the shared ring
is full and cleared when skbs are drained into the shared ring. Thus,
if the thread runs, finds the shared ring full and can make no progress the
flag remains set. If the flag remains set then the thread will sleep,
regardless of a non-empty queue, until the next event from the frontend.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
451cd14e08
commit
11b57f9025
@@ -143,6 +143,7 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
+	bool rx_queue_stopped;
 	/* Set when the RX interrupt is triggered by the frontend.
 	 * The worker thread may need to wake the queue.
 	 */
@@ -476,7 +476,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 	int ret;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
-	int need_to_notify = 0;
+	bool need_to_notify = false;
+	bool ring_full = false;

 	struct netrx_pending_operations npo = {
 		.copy = vif->grant_copy_op,
@@ -508,7 +509,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 		/* If the skb may not fit then bail out now */
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
-			need_to_notify = 1;
+			need_to_notify = true;
+			ring_full = true;
 			break;
 		}
@@ -521,6 +523,8 @@ static void xenvif_rx_action(struct xenvif *vif)

 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

+	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
+
 	if (!npo.copy_prod)
 		goto done;
@@ -592,8 +596,7 @@ static void xenvif_rx_action(struct xenvif *vif)

 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

-		if (ret)
-			need_to_notify = 1;
+		need_to_notify |= !!ret;

 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
@@ -1724,7 +1727,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,

 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
+	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
+		vif->rx_event;
 }

 static inline int tx_work_todo(struct xenvif *vif)
|
Loading…
Reference in New Issue
Block a user