forked from luck/tmp_suning_uos_patched
2bbb6817c0
It is assumed that RCU won't be used once we switch to tickless mode
and until we restart the tick. However this is not always true, as in
x86-64 where we dereference the idle notifiers after the tick is stopped.

To prepare for fixing this, add two new APIs:
tick_nohz_idle_enter_norcu() and tick_nohz_idle_exit_norcu().

If no use of RCU is made in the idle loop between the
tick_nohz_idle_enter() and tick_nohz_idle_exit() calls, the arch must
instead call these new *_norcu() versions, in which case it doesn't need
to call rcu_idle_enter() and rcu_idle_exit().

Otherwise the arch must call tick_nohz_idle_enter() and
tick_nohz_idle_exit() and also explicitly call:

- rcu_idle_enter() after its last use of RCU before the CPU is put to sleep.
- rcu_idle_exit() before the first use of RCU after the CPU is woken up.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: David Miller <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
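For illustration, a minimal sketch (not taken from this patch) of an arch idle loop that does use RCU after the tick is stopped and therefore takes the second path; arch_idle_notify() and arch_cpu_sleep() are hypothetical placeholders for arch-specific work:

	while (1) {
		tick_nohz_idle_enter();		/* tick stopped, RCU still usable */

		while (!need_resched()) {
			arch_idle_notify();	/* hypothetical: last use of RCU */
			rcu_idle_enter();	/* RCU must not be used past here... */
			arch_cpu_sleep();	/* hypothetical low-power wait */
			rcu_idle_exit();	/* ...until the CPU announces it is back */
		}

		tick_nohz_idle_exit();
		schedule();
	}

An arch that makes no use of RCU between the two tick calls simply uses tick_nohz_idle_enter_norcu() and tick_nohz_idle_exit_norcu() instead, as the SuperH cpu_idle() below does.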
163 lines
3.3 KiB
C
/*
 * The idle loop for all SuperH platforms.
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>

void (*pm_idle)(void);

static int hlt_counter;

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter_norcu();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		tick_nohz_idle_exit_norcu();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void __init select_idle_routine(void)
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

static void do_nothing(void *unused)
{
}

void stop_this_cpu(void *unused)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);

	for (;;)
		cpu_sleep();
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
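
/*
 * Usage sketch (hypothetical caller, not part of this file): code that
 * installs its own idle handler at runtime sets pm_idle first and then
 * calls cpu_idle_wait() so that no CPU keeps executing the old handler:
 *
 *	pm_idle = my_platform_idle;	(my_platform_idle is a hypothetical handler)
 *	cpu_idle_wait();
 */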