PM / sleep: Asynchronous threads for resume_noirq

In analogy with commits 5af84b8270 and 97df8c1299, using
asynchronous threads can improve the overall resume_noirq time
significantly.

One typical case is:
In the resume_noirq phase, for PCI devices the function
pci_pm_resume_noirq() is called, which incurs at least one
d3_delay (10 ms) per device.

With asynchronous threads, the d3_delay waits for the individual
devices happen in parallel, so the total delay is paid roughly once
instead of once per device, which significantly shortens the overall
resume time.

Signed-off-by: Chuansheng Liu <chuansheng.liu@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
This commit is contained in:
Liu, Chuansheng 2014-02-18 10:28:45 +08:00 committed by Rafael J. Wysocki
parent 3d2699bc17
commit 76569faa62

View File

@ -469,7 +469,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
* The driver of @dev will not receive interrupts while this function is being * The driver of @dev will not receive interrupts while this function is being
* executed. * executed.
*/ */
static int device_resume_noirq(struct device *dev, pm_message_t state) static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{ {
pm_callback_t callback = NULL; pm_callback_t callback = NULL;
char *info = NULL; char *info = NULL;
@ -484,6 +484,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (!dev->power.is_noirq_suspended) if (!dev->power.is_noirq_suspended)
goto Out; goto Out;
dpm_wait(dev->parent, async);
if (dev->pm_domain) { if (dev->pm_domain) {
info = "noirq power domain "; info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state); callback = pm_noirq_op(&dev->pm_domain->ops, state);
@ -507,10 +509,29 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
dev->power.is_noirq_suspended = false; dev->power.is_noirq_suspended = false;
Out: Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error); TRACE_RESUME(error);
return error; return error;
} }
static bool is_async(struct device *dev)
{
return dev->power.async_suspend && pm_async_enabled
&& !pm_trace_is_enabled();
}
/*
 * async_resume_noirq - async_schedule() callback for the "noirq" resume
 * phase of one device.
 * @data: the struct device to resume (scheduled by dpm_resume_noirq()).
 * @cookie: async cookie (unused).
 *
 * Runs device_resume_noirq() with async semantics, logs any failure,
 * and drops the device reference taken when the work was scheduled.
 */
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;	/* no cast needed from void * in C */
	int ret = device_resume_noirq(dev, pm_transition, true);

	if (ret)
		pm_dev_err(dev, pm_transition, " async", ret);

	put_device(dev);
}
/** /**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out. * @state: PM transition of the system being carried out.
@ -520,29 +541,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
*/ */
static void dpm_resume_noirq(pm_message_t state) static void dpm_resume_noirq(pm_message_t state)
{ {
struct device *dev;
ktime_t starttime = ktime_get(); ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx); mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_noirq_list)) { pm_transition = state;
struct device *dev = to_device(dpm_noirq_list.next);
int error;
/*
* Advanced the async threads upfront,
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
async_schedule(async_resume_noirq, dev);
}
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
get_device(dev); get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list); list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
error = device_resume_noirq(dev, state); if (!is_async(dev)) {
if (error) { int error;
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); error = device_resume_noirq(dev, state, false);
dpm_save_failed_dev(dev_name(dev)); if (error) {
pm_dev_err(dev, state, " noirq", error); suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " noirq", error);
}
} }
mutex_lock(&dpm_list_mtx); mutex_lock(&dpm_list_mtx);
put_device(dev); put_device(dev);
} }
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, "noirq"); dpm_show_time(starttime, state, "noirq");
resume_device_irqs(); resume_device_irqs();
cpuidle_resume(); cpuidle_resume();
@ -742,12 +782,6 @@ static void async_resume(void *data, async_cookie_t cookie)
put_device(dev); put_device(dev);
} }
static bool is_async(struct device *dev)
{
return dev->power.async_suspend && pm_async_enabled
&& !pm_trace_is_enabled();
}
/** /**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices. * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out. * @state: PM transition of the system being carried out.