Commit 9f3c77d
---
r: 28855
b: refs/heads/master
c: 4ec223d
h: refs/heads/master
i:
  28853: 8ef8eba
  28851: 012f3e7
  28847: 54cf36f
v: v3
Venkatesh Pallipadi authored and Dave Jones committed Jun 21, 2006
1 parent: b6eb331 · commit: 9f3c77d
Showing 3 changed files with 25 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9ed059e1551bf36092215b965838502ac21f42e4
+refs/heads/master: 4ec223d02f4d5f5a3129edc0e3d22550d6ac8a32
12 changes: 12 additions & 0 deletions trunk/drivers/cpufreq/cpufreq_conservative.c
@@ -72,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
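The DEADLOCK ALERT comment above states an ABBA lock-ordering rule: any path that may end up taking the cpu_hotplug lock while dbs_mutex is held (for example through __cpufreq_driver_target()) must take the cpu_hotplug lock first, so it can never wait on a path that already holds the cpu_hotplug lock and then needs dbs_mutex. The sketch below is an editorial illustration, not part of the patch: it models the two locks with user-space pthread mutexes (hotplug_lock made recursive to mirror the "recursive for the same process" property), and every name in it is invented for the example.

/* Editorial illustration only -- not kernel code.  Two pthread mutexes stand
 * in for the cpu_hotplug lock and dbs_mutex; all names here are invented.
 * Build with: cc -pthread lock_order.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock;                         /* ~ cpu_hotplug lock */
static pthread_mutex_t dbs_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ dbs_mutex        */

/* Stand-in for __cpufreq_driver_target(), which may take the hotplug lock. */
static void driver_target(void)
{
        pthread_mutex_lock(&hotplug_lock);
        /* ... program the new frequency ... */
        pthread_mutex_unlock(&hotplug_lock);
}

/* The pattern the patch introduces: take the hotplug lock before dbs_lock,
 * so the nested acquisition inside driver_target() cannot invert the order
 * against a path that already holds hotplug_lock and then wants dbs_lock. */
static void governor_work(void)
{
        pthread_mutex_lock(&hotplug_lock);
        pthread_mutex_lock(&dbs_lock);
        driver_target();        /* safe only because hotplug_lock is recursive */
        pthread_mutex_unlock(&dbs_lock);
        pthread_mutex_unlock(&hotplug_lock);
}

int main(void)
{
        /* Mirror the "recursive for the same process" property noted above. */
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&hotplug_lock, &attr);

        governor_work();
        puts("locks taken in hotplug -> dbs order");
        return 0;
}

With a non-recursive lock, the nested acquisition in driver_target() would self-deadlock, which is why the comment points out that the cpu_hotplug lock is recursive for the same process; taking it before dbs_mutex in do_dbs_timer() and in the CPUFREQ_GOV_LIMITS path (below) is what removes the ordering inversion.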
@@ -414,12 +422,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -514,6 +524,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -524,6 +535,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
12 changes: 12 additions & 0 deletions trunk/drivers/cpufreq/cpufreq_ondemand.c
@@ -71,6 +71,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
@@ -363,12 +371,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	queue_delayed_work(dbs_workq, &dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -469,6 +479,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -479,6 +490,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
