Merge branch 'suspend-to-idle'

* suspend-to-idle:
  cpuidle / sleep: Use broadcast timer for states that stop local timer
  cpuidle: Clean up fallback handling in cpuidle_idle_call()
  cpuidle / sleep: Do sanity checks in cpuidle_enter_freeze() too
  idle / sleep: Avoid excessive disabling and enabling interrupts
Rafael J. Wysocki 2015-03-05 23:14:51 +01:00
commit eef16e4362
3 changed files with 75 additions and 59 deletions
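Taken together, these changes alter the contract between the scheduler's idle loop and the cpuidle core: the sanity checks that used to live in cpuidle_select() move into the new cpuidle_not_available() helper, and cpuidle_enter_freeze() now handles only states that provide ->enter_freeze, returning the entered state index or a negative value so the caller can fall back itself. The resulting control flow in cpuidle_idle_call(), condensed from the kernel/sched/idle.c hunks below (a sketch for orientation, not the verbatim kernel code; interrupt re-enabling and broadcast-timer handling are omitted):

	if (cpuidle_not_available(drv, dev))
		goto use_default;			/* no usable cpuidle device: arch_cpu_idle() */

	if (idle_should_freeze()) {
		/* Suspend-to-idle: deepest ->enter_freeze state, tick frozen. */
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0)
			goto exit_idle;

		/* No ->enter_freeze state: deepest regular state, no governor reflect. */
		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		reflect = true;
		next_state = cpuidle_select(drv, dev);	/* normal governor selection */
	}

	if (next_state < 0)
		goto use_default;			/* governor failed: arch_cpu_idle() */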

drivers/cpuidle/cpuidle.c

@@ -44,6 +44,12 @@ void disable_cpuidle(void)
 	off = 1;
 }
 
+bool cpuidle_not_available(struct cpuidle_driver *drv,
+			   struct cpuidle_device *dev)
+{
+	return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
 	return -ENODEV;
 }
 
-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-				      struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+			      struct cpuidle_device *dev, bool freeze)
 {
 	unsigned int latency_req = 0;
 	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 	return ret;
 }
 
+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+			       struct cpuidle_device *dev)
+{
+	return find_deepest_state(drv, dev, false);
+}
+
 static void enter_freeze_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
 
 /**
  * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  *
  * If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick. Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
  */
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int index;
 
 	/*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
 	 * that interrupts won't be enabled when it exits and allows the tick to
 	 * be frozen safely.
	 */
-	index = cpuidle_find_deepest_state(drv, dev, true);
-	if (index >= 0) {
-		enter_freeze_proper(drv, dev, index);
-		return;
-	}
-
-	/*
-	 * It is not safe to freeze the tick, find the deepest state available
-	 * at all and try to enter it normally.
-	 */
-	index = cpuidle_find_deepest_state(drv, dev, false);
+	index = find_deepest_state(drv, dev, true);
 	if (index >= 0)
-		cpuidle_enter(drv, dev, index);
-	else
-		arch_cpu_idle();
+		enter_freeze_proper(drv, dev, index);
 
-	/* Interrupts are enabled again here. */
-	local_irq_disable();
+	return index;
 }
 
 /**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-	if (off || !initialized)
-		return -ENODEV;
-
-	if (!drv || !dev || !dev->enabled)
-		return -EBUSY;
-
 	return cpuidle_curr_governor->select(drv, dev);
 }

include/linux/cpuidle.h

@@ -126,6 +126,8 @@ struct cpuidle_driver {
 
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
+extern bool cpuidle_not_available(struct cpuidle_driver *drv,
+				  struct cpuidle_device *dev);
 
 extern int cpuidle_select(struct cpuidle_driver *drv,
 			  struct cpuidle_device *dev);
@@ -150,11 +152,17 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern void cpuidle_enter_freeze(void);
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+				      struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
+static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+					 struct cpuidle_device *dev)
+{return true; }
 static inline int cpuidle_select(struct cpuidle_driver *drv,
 				 struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -183,7 +191,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_enter_freeze(void) { }
+static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+					     struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				       struct cpuidle_device *dev)
+{return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 			struct cpuidle_device *dev) {return NULL; }
 #endif
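Because the !CONFIG_CPU_IDLE stubs above report the framework as unusable (cpuidle_not_available() returns true, the other helpers return -ENODEV), a caller can use the new interface unconditionally and degrade to the architecture-default idle path when cpuidle is compiled out or the device is not enabled. A minimal sketch of that pattern (the caller here is hypothetical; the real in-tree user is cpuidle_idle_call() in the next file):

	/* Hypothetical caller: builds with or without CONFIG_CPU_IDLE. */
	if (cpuidle_not_available(drv, dev)) {
		arch_cpu_idle();			/* framework off, no driver, or device disabled */
	} else {
		int idx = cpuidle_find_deepest_state(drv, dev);

		if (idx >= 0)
			cpuidle_enter(drv, dev, idx);	/* enter the state through the cpuidle core */
	}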

kernel/sched/idle.c

@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 	unsigned int broadcast;
+	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
+	if (cpuidle_not_available(drv, dev))
+		goto use_default;
+
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
 	 * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
 	 * until a proper wakeup interrupt happens.
 	 */
 	if (idle_should_freeze()) {
-		cpuidle_enter_freeze();
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/*
-	 * Ask the cpuidle framework to choose a convenient idle state.
-	 * Fall back to the default arch idle method on errors.
-	 */
-	next_state = cpuidle_select(drv, dev);
-	if (next_state < 0) {
-use_default:
-		/*
-		 * We can't use the cpuidle framework, let's use the default
-		 * idle routine.
-		 */
-		if (current_clr_polling_and_test())
+		entered_state = cpuidle_enter_freeze(drv, dev);
+		if (entered_state >= 0) {
 			local_irq_enable();
-		else
-			arch_cpu_idle();
+			goto exit_idle;
+		}
 
-		goto exit_idle;
+		reflect = false;
+		next_state = cpuidle_find_deepest_state(drv, dev);
+	} else {
+		reflect = true;
+
+		/*
+		 * Ask the cpuidle framework to choose a convenient idle state.
+		 */
+		next_state = cpuidle_select(drv, dev);
 	}
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0)
+		goto use_default;
 
 	/*
 	 * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ static void cpuidle_idle_call(void)
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
 	 */
-	cpuidle_reflect(dev, entered_state);
+	if (reflect)
+		cpuidle_reflect(dev, entered_state);
 
 exit_idle:
 	__current_set_polling();
@@ -196,6 +195,19 @@ static void cpuidle_idle_call(void)
 
 	rcu_idle_exit();
 	start_critical_timings();
+	return;
+
+use_default:
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine.
+	 */
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+
+	goto exit_idle;
 }
 
 /*