Revert "cpufreq: intel_pstate: Process HWP Guaranteed change notifica…
Browse files Browse the repository at this point in the history
…tion"

Revert commit d0e936a ("cpufreq: intel_pstate: Process HWP
Guaranteed change notification"), because it causes a NULL pointer
dereference to occur on Lenovo X1 gen9 laptops due to an HWP
guaranteed performance change interrupt arriving prematurely.

This feature will be revisited in the next cycle.

Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Rafael J. Wysocki committed Sep 7, 2021
1 parent 4bf8e58 commit dd7c46d
Showing 1 changed file with 0 additions and 39 deletions.
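For context on the failure mode named above, here is a minimal annotated sketch of the suspected path, inferred from the reverted code in the diff below. It assumes the guaranteed performance change interrupt can fire before intel_pstate has populated the per-CPU data that the handler dereferences; it is an illustration only, not part of this commit.

/*
 * Illustrative sketch only, not part of this commit.  It assumes the HWP
 * guaranteed performance change interrupt can arrive before intel_pstate
 * has set up the per-CPU data used below.
 */
void notify_hwp_interrupt(void)
{
        unsigned int this_cpu = smp_processor_id();
        struct cpudata *cpudata;

        /* feature and MSR_HWP_STATUS checks elided */

        /* all_cpu_data (or its entry for this CPU) may still be NULL here */
        cpudata = all_cpu_data[this_cpu];

        /* scheduling work through the uninitialized cpudata then faults */
        schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work,
                                 msecs_to_jiffies(10));
}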
drivers/cpufreq/intel_pstate.c: 0 additions, 39 deletions
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,7 +32,6 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
-#include "../drivers/thermal/intel/thermal_interrupt.h"
 
 #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
 
@@ -220,7 +219,6 @@ struct global_params {
  * @sched_flags: Store scheduler flags for possible cross CPU update
  * @hwp_boost_min: Last HWP boosted min performance
  * @suspended: Whether or not the driver has been suspended.
- * @hwp_notify_work: workqueue for HWP notifications.
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -259,7 +257,6 @@ struct cpudata {
         unsigned int sched_flags;
         u32 hwp_boost_min;
         bool suspended;
-        struct delayed_work hwp_notify_work;
 };
 
 static struct cpudata **all_cpu_data;
@@ -1628,40 +1625,6 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
 
 /************************** sysfs end ************************/
 
-static void intel_pstate_notify_work(struct work_struct *work)
-{
-        mutex_lock(&intel_pstate_driver_lock);
-        cpufreq_update_policy(smp_processor_id());
-        wrmsrl(MSR_HWP_STATUS, 0);
-        mutex_unlock(&intel_pstate_driver_lock);
-}
-
-void notify_hwp_interrupt(void)
-{
-        unsigned int this_cpu = smp_processor_id();
-        struct cpudata *cpudata;
-        u64 value;
-
-        if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
-                return;
-
-        rdmsrl(MSR_HWP_STATUS, value);
-        if (!(value & 0x01))
-                return;
-
-        cpudata = all_cpu_data[this_cpu];
-        schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10));
-}
-
-static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
-{
-        /* Enable HWP notification interrupt for guaranteed performance change */
-        if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
-                INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
-                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
-        }
-}
-
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
         /* First disable HWP notification interrupt as we don't process them */
@@ -1671,8 +1634,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
         wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
         if (cpudata->epp_default == -EINVAL)
                 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
-
-        intel_pstate_enable_hwp_interrupt(cpudata);
 }
 
 static int atom_get_min_pstate(void)
