[CPUFREQ] Eliminate the recent lockdep warnings in cpufreq
Commit b14893a, although it was very much needed to properly clean up the
ondemand timer, opened up a can of worms related to locking dependencies
in cpufreq.

This patch documents what dbs_mutex is needed for and cleans up its usage in
the ondemand governor. It also resolves the lockdep warnings reported here:

http://lkml.indiana.edu/hypermail/linux/kernel/0906.1/01925.html
http://lkml.indiana.edu/hypermail/linux/kernel/0907.0/00820.html

and a few others.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
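
The ordering rule the patch enforces is easier to see outside the kernel. Below is a hypothetical userspace sketch (pthreads; dbs_mutex, sampling_worker and the timings are stand-ins, not kernel code): the periodic worker and the teardown path share one mutex, so teardown stops the worker before taking that mutex, mirroring dbs_timer_exit() running before mutex_lock(&dbs_mutex) in CPUFREQ_GOV_STOP.

/*
 * Hypothetical userspace analogue, not part of the patch: stop the worker
 * first, then take the mutex the worker uses.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int enabled = 1;

/* stand-in for do_dbs_timer(): periodically samples under dbs_mutex */
static void *sampling_worker(void *unused)
{
        while (atomic_load(&enabled)) {
                pthread_mutex_lock(&dbs_mutex);
                /* ... read tuners, pick a target frequency ... */
                pthread_mutex_unlock(&dbs_mutex);
                usleep(10 * 1000);      /* stand-in for the sampling interval */
        }
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, sampling_worker, NULL);
        usleep(50 * 1000);

        /* Teardown, patched ordering: stop the worker first ... */
        atomic_store(&enabled, 0);
        pthread_join(worker, NULL);     /* analogue of cancel_delayed_work_sync() */

        /* ... and only then take the mutex for the remaining bookkeeping.
         * Holding the mutex across the join could wait forever on a worker
         * that is itself blocked on the same mutex. */
        pthread_mutex_lock(&dbs_mutex);
        /* ... dbs_enable--, sysfs teardown ... */
        pthread_mutex_unlock(&dbs_mutex);

        puts("teardown finished without deadlock");
        return 0;
}

Built with -pthread this runs to completion; taking the mutex before the join instead would reproduce the hang that the raceless workqueue teardown is meant to avoid.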
venkatesh.pallipadi@intel.com authored and Dave Jones committed Jul 7, 2009
1 parent faf80d6 commit 7d26e2d
Showing 3 changed files with 24 additions and 34 deletions.
4 changes: 2 additions & 2 deletions drivers/cpufreq/cpufreq.c
@@ -1070,8 +1070,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	unlock_policy_rwsem_write(cpu);
-
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
@@ -1088,6 +1086,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
 
+	unlock_policy_rwsem_write(cpu);
+
 	free_cpumask_var(data->related_cpus);
 	free_cpumask_var(data->cpus);
 	kfree(data);
27 changes: 11 additions & 16 deletions drivers/cpufreq/cpufreq_conservative.c
@@ -70,15 +70,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -488,18 +483,17 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
 	dbs_check_cpu(dbs_info);
 
 	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -590,15 +584,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
 
+		dbs_timer_init(this_dbs_info);
+
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 
27 changes: 11 additions & 16 deletions drivers/cpufreq/cpufreq_ondemand.c
@@ -78,15 +78,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -494,11 +489,10 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
@@ -517,7 +511,7 @@ static void do_dbs_timer(struct work_struct *work)
 					dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -598,14 +592,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
+
+		dbs_timer_init(this_dbs_info);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
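
The rewritten comment gives dbs_mutex a narrower job: protect dbs_tuners_ins and dbs_enable, and keep do_dbs_timer from running while the governor or its limits are being changed. A hypothetical userspace sketch of that serialization (pthreads; store_up_threshold and sample_once are stand-ins for a sysfs store handler and one sampling pass, not kernel functions):

/*
 * Hypothetical userspace sketch, not taken from the kernel: tuner updates
 * and the sampling routine take the same mutex, so the sampler never sees
 * a half-updated dbs_tuners_ins.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for dbs_tuners_ins */
static struct {
        unsigned int sampling_rate;
        unsigned int up_threshold;
} dbs_tuners_ins = { .sampling_rate = 10000, .up_threshold = 80 };

/* stand-in for a sysfs store handler changing a tuner */
static void store_up_threshold(unsigned int val)
{
        pthread_mutex_lock(&dbs_mutex);
        dbs_tuners_ins.up_threshold = val;
        pthread_mutex_unlock(&dbs_mutex);
}

/* stand-in for one sampling pass reading the tuners */
static void sample_once(void)
{
        pthread_mutex_lock(&dbs_mutex);
        printf("sampling every %u us, up_threshold %u%%\n",
               dbs_tuners_ins.sampling_rate, dbs_tuners_ins.up_threshold);
        pthread_mutex_unlock(&dbs_mutex);
}

int main(void)
{
        sample_once();
        store_up_threshold(95);
        sample_once();
        return 0;
}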
