sky2: backout NAPI reschedule
This is a backout of an earlier patch. The whole rescheduling hack was a bad idea. It doesn't really solve the problem and it makes the code more complicated for no good reason.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
commit d324031245
parent 6810b548b2
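For orientation, the change is easiest to see as a before/after of the completion path in sky2_poll(). The sketch below is assembled only from the removed and added lines in the hunks that follow (surrounding code elided and the two nested status checks collapsed into one condition for brevity, so it is not a compilable excerpt):

	/* Before the backout: IRQs stayed disabled across completion, and the
	 * poll routine looped back via goto restart_poll whenever the chip
	 * still reported pending events. */
	local_irq_disable();
	__netif_rx_complete(dev0);
	status = sky2_read32(hw, B0_Y2_SP_LISR);
	if (unlikely(status) && __netif_rx_schedule_prep(dev0)) {
		__netif_rx_reschedule(dev0, work_done);
		status = sky2_read32(hw, B0_Y2_SP_EISR);
		local_irq_enable();
		goto restart_poll;
	}
	local_irq_enable();
	return 0;

	/* After the backout: a plain NAPI completion. */
	netif_rx_complete(dev0);
	status = sky2_read32(hw, B0_Y2_SP_LISR);
	return 0;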
drivers/net/sky2.c

@@ -2105,7 +2105,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	int work_done = 0;
 	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
 
-restart_poll:
 	if (unlikely(status & ~Y2_IS_STAT_BMU)) {
 		if (status & Y2_IS_HW_ERR)
 			sky2_hw_intr(hw);
@@ -2136,7 +2135,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	}
 
 	if (status & Y2_IS_STAT_BMU) {
-		work_done += sky2_status_intr(hw, work_limit - work_done);
+		work_done = sky2_status_intr(hw, work_limit);
 		*budget -= work_done;
 		dev0->quota -= work_done;
 
@@ -2148,22 +2147,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 
 	mod_timer(&hw->idle_timer, jiffies + HZ);
 
-	local_irq_disable();
-	__netif_rx_complete(dev0);
+	netif_rx_complete(dev0);
 
 	status = sky2_read32(hw, B0_Y2_SP_LISR);
-
-	if (unlikely(status)) {
-		/* More work pending, try and keep going */
-		if (__netif_rx_schedule_prep(dev0)) {
-			__netif_rx_reschedule(dev0, work_done);
-			status = sky2_read32(hw, B0_Y2_SP_EISR);
-			local_irq_enable();
-			goto restart_poll;
-		}
-	}
-
-	local_irq_enable();
 	return 0;
 }
 
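The second hunk follows directly from removing the restart_poll loop: sky2_status_intr() now runs exactly once per poll call, so work_done no longer needs to accumulate across loop iterations and the limit no longer has to be reduced by work already done. Side by side, taken straight from the old and new sides of that hunk:

	/* old: accumulate across restart_poll iterations (removed) */
	work_done += sky2_status_intr(hw, work_limit - work_done);

	/* new: one pass per poll call */
	work_done = sky2_status_intr(hw, work_limit);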
@@ -2181,6 +2167,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
 	prefetch(&hw->st_le[hw->st_idx]);
 	if (likely(__netif_rx_schedule_prep(dev0)))
 		__netif_rx_schedule(dev0);
+	else
+		printk(KERN_DEBUG PFX "irq race detected\n");
 
 	return IRQ_HANDLED;
 }
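In the interrupt handler, the only addition is a debug message for the case where __netif_rx_schedule_prep() fails, i.e. the interrupt fired while the device was already scheduled for polling. After the patch the branch reads as below (lines taken from this hunk; the rest of sky2_intr() is unchanged):

	if (likely(__netif_rx_schedule_prep(dev0)))
		__netif_rx_schedule(dev0);
	else
		printk(KERN_DEBUG PFX "irq race detected\n");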
include/linux/netdevice.h

@@ -831,21 +831,19 @@ static inline void netif_rx_schedule(struct net_device *dev)
 		__netif_rx_schedule(dev);
 }
 
-static inline void __netif_rx_reschedule(struct net_device *dev, int undo)
-{
-	dev->quota += undo;
-	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-}
-
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
+ * Do not inline this?
+ */
 static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
 
+		dev->quota += undo;
+
 		local_irq_save(flags);
-		__netif_rx_reschedule(dev, undo);
+		list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
 		return 1;
 	}
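For reference, once the __netif_rx_reschedule() helper is removed, the restored netif_rx_reschedule() reads as follows. This is assembled from the context and added lines of the netdevice.h hunk above; the final return statement falls outside the hunk and is assumed here:

	static inline int netif_rx_reschedule(struct net_device *dev, int undo)
	{
		if (netif_rx_schedule_prep(dev)) {
			unsigned long flags;

			dev->quota += undo;

			local_irq_save(flags);
			list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
			local_irq_restore(flags);
			return 1;
		}
		return 0;	/* not shown in the hunk; assumed fall-through */
	}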