Merge back cpufreq material for v5.2.
Rafael J. Wysocki committed Apr 8, 2019
Commit e75135e (2 parents: 4ab5264 + 108ec36)
Showing 7 changed files with 148 additions and 42 deletions.
2 changes: 1 addition & 1 deletion drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
acpi_processor_ppc_ost(pr->handle, 0);
}
if (ret >= 0)
- cpufreq_update_policy(pr->id);
+ cpufreq_update_limits(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
2 changes: 1 addition & 1 deletion drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);

if (!pcidev) {
- if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
}

84 changes: 65 additions & 19 deletions drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@

static LIST_HEAD(cpufreq_policy_list);

- static inline bool policy_is_inactive(struct cpufreq_policy *policy)
- {
- return cpumask_empty(policy->cpus);
- }
-
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

+ /**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+ void cpufreq_cpu_release(struct cpufreq_policy *policy)
+ {
+ if (WARN_ON(!policy))
+ return;
+
+ lockdep_assert_held(&policy->rwsem);
+
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+ }
+
+ /**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+ struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+ {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return NULL;
+
+ down_write(&policy->rwsem);
+
+ if (policy_is_inactive(policy)) {
+ cpufreq_cpu_release(policy);
+ return NULL;
+ }
+
+ return policy;
+ }
+
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
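For illustration, a minimal usage sketch of the two new helpers (the caller below is hypothetical and not part of this commit):

#include <linux/cpufreq.h>

/* Hypothetical caller: modify a CPU's policy while holding its rwsem. */
static void example_adjust_policy(unsigned int cpu)
{
        /*
         * Takes a reference on the policy and its rwsem for writing;
         * returns NULL if no policy exists or the policy is inactive
         * (all of its CPUs are offline).
         */
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

        if (!policy)
                return;

        /* ... inspect or modify the policy here ... */

        /* Drops the rwsem and the reference taken above. */
        cpufreq_cpu_release(policy);
}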
@@ -669,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
return ret;
}

- static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
-
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
@@ -2229,8 +2266,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
*
* The cpuinfo part of @policy is not updated by this function.
*/
- static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+ int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
@@ -2337,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpufreq_policy new_policy;

if (!policy)
return;

- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy))
- goto unlock;
-
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
cpufreq_set_policy(policy, &new_policy);

unlock:
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

+ /**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+ void cpufreq_update_limits(unsigned int cpu)
+ {
+ if (cpufreq_driver->update_limits)
+ cpufreq_driver->update_limits(cpu);
+ else
+ cpufreq_update_policy(cpu);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
/*********************************************************************
* BOOST *
*********************************************************************/
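As a usage sketch of the new entry point (the handler below is hypothetical; the real caller added by this series is acpi_processor_ppc_has_changed() in drivers/acpi/processor_perflib.c above):

#include <linux/cpufreq.h>

/*
 * Hypothetical firmware-notification handler: when the platform changes
 * the allowed performance range for a CPU, ask cpufreq to refresh that
 * CPU's policy limits.  cpufreq_update_limits() uses the driver's
 * ->update_limits() callback if one is set and otherwise falls back to
 * a full cpufreq_update_policy() cycle.
 */
static void example_platform_limits_notify(unsigned int cpu)
{
        cpufreq_update_limits(cpu);
}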
65 changes: 56 additions & 9 deletions drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data
struct global_params {
bool no_turbo;
bool turbo_disabled;
+ bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
u64 epb;
int ret;

- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;

ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
s16 epp;

- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* When hwp_req_data is 0, means that caller didn't read
* MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
u64 epb;
int ret;

- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;

ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
if (epp < 0)
return epp;

- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (epp == HWP_EPP_PERFORMANCE)
return 1;
if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
return 3;
else
return 4;
- } else if (static_cpu_has(X86_FEATURE_EPB)) {
+ } else if (boot_cpu_has(X86_FEATURE_EPB)) {
/*
* Range:
* 0x00-0x03 : Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,

mutex_lock(&intel_pstate_limits_lock);

- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
u64 value;

ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
epp = cpu_data->epp_powersave;
}
update_epp:
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
} else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
value |= HWP_MIN_PERF(min_perf);

/* Set EPP/EPB to min */
- if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
else
intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}

+ static void intel_pstate_update_max_freq(unsigned int cpu)
+ {
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy new_policy;
+ struct cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = all_cpu_data[cpu];
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+ new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+ cpufreq_set_policy(policy, &new_policy);
+
+ cpufreq_cpu_release(policy);
+ }
+
+ static void intel_pstate_update_limits(unsigned int cpu)
+ {
+ mutex_lock(&intel_pstate_driver_lock);
+
+ update_turbo_state();
+ /*
+ * If turbo has been turned on or off globally, policy limits for
+ * all CPUs need to be updated to reflect that.
+ */
+ if (global.turbo_disabled_mf != global.turbo_disabled) {
+ global.turbo_disabled_mf = global.turbo_disabled;
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(cpu);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
+
+ mutex_unlock(&intel_pstate_driver_lock);
+ }

/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
- if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
update_turbo_state();
+ global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};

@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
.init = intel_cpufreq_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_cpufreq_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};

2 changes: 1 addition & 1 deletion drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
unsigned int i, supported_cpus = 0;
int ret;

- if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+ if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
__request_acpi_cpufreq();
return -ENODEV;
}
14 changes: 14 additions & 0 deletions include/linux/cpufreq.h
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif

+ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+ {
+ return cpumask_empty(policy->cpus);
+ }

static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);

+ struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+ void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+ int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
void cpufreq_update_policy(unsigned int cpu);
+ void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -322,6 +333,9 @@ struct cpufreq_driver {
/* should be defined, if possible */
unsigned int (*get)(unsigned int cpu);

+ /* Called to update policy limits on firmware notifications. */
+ void (*update_limits)(unsigned int cpu);

/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);

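A rough sketch of how a driver would wire up the new callback (driver and helper names are illustrative; in this commit intel_pstate sets .update_limits to intel_pstate_update_limits()):

#include <linux/cpufreq.h>

/*
 * Hypothetical callback: re-read firmware-imposed limits for @cpu and
 * refresh the corresponding policy without a full policy update cycle.
 */
static void example_update_limits(unsigned int cpu)
{
        /* e.g. re-read a limit register and adjust the policy's max */
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name           = "example",
        .update_limits  = example_update_limits,
        /* .init, .verify, .target_index, etc. as for any cpufreq driver */
};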
