Skip to content

Commit

Permalink
perf core: Factor out __perf_sw_event_sched
Browse files Browse the repository at this point in the history
In some cases, we need to check more than whether the software event
is enabled.  So split the condition check from the actual event
handling.  This is preparation for the next change.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210210083327.22726-1-namhyung@kernel.org
  • Loading branch information
Namhyung Kim authored and Peter Zijlstra committed Apr 16, 2021
1 parent 46ade47 commit 7c8056b
Showing 1 changed file with 12 additions and 21 deletions.
33 changes: 12 additions & 21 deletions include/linux/perf_event.h
Original file line number Diff line number Diff line change
Expand Up @@ -1178,30 +1178,24 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
* which is guaranteed by us not actually scheduling inside other swevents
* because those disable preemption.
*/
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
if (static_key_false(&perf_swevent_enabled[event_id])) {
struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

perf_fetch_caller_regs(regs);
___perf_sw_event(event_id, nr, regs, addr);
}
perf_fetch_caller_regs(regs);
___perf_sw_event(event_id, nr, regs, addr);
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
static __always_inline bool __perf_sw_enabled(int swevt)
{
if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
return true;
return false;
return static_key_false(&perf_swevent_enabled[swevt]);
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
if (perf_sw_migrate_enabled())
if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
task->sched_migrated = 1;
}

Expand All @@ -1211,19 +1205,18 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);

if (perf_sw_migrate_enabled() && task->sched_migrated) {
struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

perf_fetch_caller_regs(regs);
___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
task->sched_migrated) {
__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
task->sched_migrated = 0;
}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
Expand Down Expand Up @@ -1480,8 +1473,6 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
Expand Down

0 comments on commit 7c8056b

Please sign in to comment.