Skip to content

Commit

Permalink
PM / sleep: Asynchronous threads for resume_early
Browse files Browse the repository at this point in the history
In analogy with commits 5af84b8 and 97df8c1, using
asynchronous threads can improve the overall resume_early
time significantly.

This patch is for the resume_early phase.

Signed-off-by: Chuansheng Liu <chuansheng.liu@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  • Loading branch information
Liu, Chuansheng authored and Rafael J. Wysocki committed Feb 20, 2014
1 parent 76569fa commit 9e5e791
Showing 1 changed file with 44 additions and 11 deletions.
55 changes: 44 additions & 11 deletions drivers/base/power/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -595,7 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int device_resume_early(struct device *dev, pm_message_t state)
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
Expand All @@ -610,6 +610,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
if (!dev->power.is_late_suspended)
goto Out;

dpm_wait(dev->parent, async);

if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
Expand All @@ -636,38 +638,69 @@ static int device_resume_early(struct device *dev, pm_message_t state)
TRACE_RESUME(error);

pm_runtime_enable(dev);
complete_all(&dev->power.completion);
return error;
}

/*
 * async_resume_early - Asynchronous entry point for the early resume phase.
 * @data: the struct device to resume, passed as void * by the async core.
 * @cookie: async cookie (unused here).
 *
 * Invokes device_resume_early() in async mode, reports any failure, and
 * drops the device reference taken when this work item was scheduled.
 */
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int ret = device_resume_early(dev, pm_transition, true);

	if (ret)
		pm_dev_err(dev, pm_transition, " async", ret);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Schedules async-capable devices on asynchronous threads up front, then
 * walks dpm_late_early_list resuming the non-async devices synchronously,
 * and finally waits for all async work before reporting elapsed time.
 *
 * NOTE(review): this span is a unified-diff rendering; the pre-patch lines
 * marked below appear alongside the post-patch code and are not part of the
 * final function — confirm against the applied tree.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	/* NOTE(review): the next three lines are the removed (pre-patch) loop
	 * header shown by the diff; the current loop begins further down. */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		/* Reset the completion that device_resume_early() will signal. */
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			/* Reference dropped by async_resume_early(). */
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Drop the list lock while the device callback runs. */
		mutex_unlock(&dpm_list_mtx);

		/* NOTE(review): the next seven lines are the removed (pre-patch)
		 * unconditional call shown by the diff; the post-patch code is
		 * the is_async() guard that follows. */
		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}
		/* Async devices are already being handled; resume only the
		 * synchronous ones here. */
		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for every async_resume_early() thread to finish before
	 * declaring the early resume phase complete. */
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

Expand Down

0 comments on commit 9e5e791

Please sign in to comment.