perf/x86: Remove swap_task_ctx()
The PMU specific data is saved in task_struct now, so it no longer needs
to be swapped between contexts at context switch.

Remove swap_task_ctx() support.
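
For illustration, a minimal sketch of the two schemes (not the exact
kernel code; task_struct::perf_ctx_data and its ->data payload are
assumed here from earlier patches in this series):

	/*
	 * Sketch only. struct perf_ctx_data and task_struct::perf_ctx_data
	 * come from earlier patches in this series, not from this diff.
	 */

	/*
	 * Old scheme: per-task state hung off the pmu context, so the
	 * outgoing and incoming contexts had to trade pointers at every
	 * context switch.
	 */
	static void old_context_switch(struct perf_event_pmu_context *prev_epc,
				       struct perf_event_pmu_context *next_epc)
	{
		swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
	}

	/*
	 * New scheme: the state is reached through the task itself, so a
	 * context switch is a plain lookup and nothing needs to be swapped.
	 */
	static void new_sched_task(struct task_struct *task, bool sched_in)
	{
		struct perf_ctx_data *ctx_data;

		rcu_read_lock();
		ctx_data = rcu_dereference(task->perf_ctx_data);
		if (ctx_data) {
			if (sched_in)
				__intel_pmu_lbr_restore(ctx_data->data);
			else
				__intel_pmu_lbr_save(ctx_data->data);
		}
		rcu_read_unlock();
	}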

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250314172700.438923-6-kan.liang@linux.intel.com
Kan Liang authored and Peter Zijlstra committed Mar 17, 2025
1 parent 3cec9fd commit 1fbc6c8
Showing 4 changed files with 0 additions and 50 deletions.
9 changes: 0 additions & 9 deletions arch/x86/events/core.c
@@ -87,7 +87,6 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
 DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling,  *x86_pmu.stop_scheduling);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task,    *x86_pmu.sched_task);
-DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
@@ -2039,7 +2038,6 @@ static void x86_pmu_static_call_update(void)
 	static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
 
 	static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
-	static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
 
 	static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
 	static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
@@ -2644,12 +2642,6 @@ static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
 	static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
 }
 
-static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				  struct perf_event_pmu_context *next_epc)
-{
-	static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
-}
-
 void perf_check_microcode(void)
 {
 	if (x86_pmu.check_microcode)
@@ -2714,7 +2706,6 @@ static struct pmu pmu = {
 
 	.event_idx	= x86_pmu_event_idx,
 	.sched_task	= x86_pmu_sched_task,
-	.swap_task_ctx	= x86_pmu_swap_task_ctx,
 	.check_period	= x86_pmu_check_period,
 
 	.aux_output_match	= x86_pmu_aux_output_match,
7 changes: 0 additions & 7 deletions arch/x86/events/intel/core.c
@@ -5300,12 +5300,6 @@ static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
 	intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
 }
 
-static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				    struct perf_event_pmu_context *next_epc)
-{
-	intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
-}
-
 static int intel_pmu_check_period(struct perf_event *event, u64 value)
 {
 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -5474,7 +5468,6 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
-	.swap_task_ctx		= intel_pmu_swap_task_ctx,
 
 	.check_period		= intel_pmu_check_period,
 
23 changes: 0 additions & 23 deletions arch/x86/events/intel/lbr.c
@@ -522,29 +522,6 @@ static void __intel_pmu_lbr_save(void *ctx)
 	cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
 }
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				 struct perf_event_pmu_context *next_epc)
-{
-	void *prev_ctx_data, *next_ctx_data;
-
-	swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
-
-	/*
-	 * Architecture specific synchronization makes sense in case
-	 * both prev_epc->task_ctx_data and next_epc->task_ctx_data
-	 * pointers are allocated.
-	 */
-
-	prev_ctx_data = next_epc->task_ctx_data;
-	next_ctx_data = prev_epc->task_ctx_data;
-
-	if (!prev_ctx_data || !next_ctx_data)
-		return;
-
-	swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
-	     task_context_opt(next_ctx_data)->lbr_callstack_users);
-}
-
 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
 			      struct task_struct *task, bool sched_in)
 {
11 changes: 0 additions & 11 deletions arch/x86/events/perf_event.h
@@ -958,14 +958,6 @@ struct x86_pmu {
 	 */
 	int num_topdown_events;
 
-	/*
-	 * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
-	 * switch helper to bridge calls from perf/core to perf/x86.
-	 * See struct pmu::swap_task_ctx() usage for examples;
-	 */
-	void (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
-			      struct perf_event_pmu_context *next_epc);
-
 	/*
 	 * AMD bits
 	 */
@@ -1671,9 +1663,6 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
 				struct cpu_hw_events *cpuc,
 				struct perf_event *event);
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				 struct perf_event_pmu_context *next_epc);
-
 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
 			      struct task_struct *task, bool sched_in);
 
