Merge tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc perf fixes from Ingo Molnar:

 - Require group events for branch counter groups and
   PEBS counter snapshotting groups to be x86 events (see
   the hypothetical userspace sketch after this list).

 - Fix the handling of counter-snapshotting of non-precise
   events, where counter values may move backwards a bit,
   temporarily, confusing the code.

 - Restrict perf/KVM PEBS to guest-owned events.
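
For the first fix, a hypothetical userspace sketch (not part of this merge; perf_open and the chosen event configs are illustrative) of how a group whose leader is not an x86 event can arise: a software event leads the group and a hardware event joins it.

	/* sw_leader_group.c — hypothetical sketch, not from this commit. */
	#include <linux/perf_event.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int perf_open(struct perf_event_attr *attr, int group_fd)
	{
		/* pid = 0 (this task), cpu = -1 (any), flags = 0 */
		return syscall(SYS_perf_event_open, attr, 0, -1, group_fd, 0);
	}

	int main(void)
	{
		struct perf_event_attr leader = { 0 }, member = { 0 };

		leader.size   = sizeof(leader);
		leader.type   = PERF_TYPE_SOFTWARE;	/* leader is NOT an x86 hw event */
		leader.config = PERF_COUNT_SW_CPU_CLOCK;

		member.size   = sizeof(member);
		member.type   = PERF_TYPE_HARDWARE;	/* member IS an x86 hw event */
		member.config = PERF_COUNT_HW_INSTRUCTIONS;

		int leader_fd = perf_open(&leader, -1);
		int member_fd = perf_open(&member, leader_fd);

		if (leader_fd < 0 || member_fd < 0)
			perror("perf_event_open");

		/* For such a group, reading the software leader's hw.flags in
		 * the kernel is not meaningful; the fixed helpers below now
		 * return false for any non-x86 leader. */
		if (member_fd >= 0) close(member_fd);
		if (leader_fd >= 0) close(leader_fd);
		return 0;
	}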

* tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.
  perf/x86/intel/ds: Fix counter backwards of non-precise events counters-snapshotting
  perf/x86/intel: Check the X86 leader for pebs_counter_event_group
  perf/x86/intel: Only check the group flag for X86 leader
Linus Torvalds committed May 4, 2025
2 parents 5aac99c + 58f6217 commit 3f3041b
Showing 4 changed files with 30 additions and 6 deletions.
2 changes: 1 addition & 1 deletion arch/x86/events/core.c
@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
 	}
 }
 
-static inline int is_x86_event(struct perf_event *event)
+int is_x86_event(struct perf_event *event)
 {
 	int i;
 
2 changes: 1 addition & 1 deletion arch/x86/events/intel/core.c
@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
 	arr[pebs_enable] = (struct perf_guest_switch_msr){
 		.msr = MSR_IA32_PEBS_ENABLE,
 		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
-		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
+		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
 	};
 
 	if (arr[pebs_enable].host) {
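
A toy sketch of the masking above, with made-up mask values (none read from real hardware): the added & kvm_pmu->pebs_enable clamps the guest's PEBS_ENABLE to bits the vCPU's virtual PMU actually enabled, so only guest-owned counters survive.

	/* pebs_mask_demo.c — toy values only, nothing from a real machine. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pebs_mask            = 0x0f; /* counters with PEBS enabled      */
		uint64_t intel_ctrl_host_mask = 0x0c; /* counters owned by the host      */
		uint64_t kvm_pmu_pebs_enable  = 0x01; /* bits the vCPU's virtual PMU set */

		/* Before the fix: host-owned bits were cleared, but a bit the
		 * guest never enabled (bit 1 here) could still leak in. */
		uint64_t before = pebs_mask & ~intel_ctrl_host_mask;	/* 0x03 */

		/* After the fix: additionally clamp to the vCPU's PEBS_ENABLE. */
		uint64_t after = before & kvm_pmu_pebs_enable;		/* 0x01 */

		printf("guest PEBS_ENABLE: before %#llx, after %#llx\n",
		       (unsigned long long)before, (unsigned long long)after);
		return 0;
	}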
21 changes: 19 additions & 2 deletions arch/x86/events/intel/ds.c
@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
 			 */
 			intel_pmu_save_and_restart_reload(event, count);
 		}
-	} else
-		intel_pmu_save_and_restart(event);
+	} else {
+		/*
+		 * For a non-precise event, it's possible the
+		 * counters-snapshotting records a positive value for the
+		 * overflowed event. Then the HW auto-reload mechanism
+		 * reset the counter to 0 immediately, because the
+		 * pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD
+		 * is not set. The counter backwards may be observed in a
+		 * PMI handler.
+		 *
+		 * Since the event value has been updated when processing the
+		 * counters-snapshotting record, only needs to set the new
+		 * period for the counter.
+		 */
+		if (is_pebs_counter_event_group(event))
+			static_call(x86_pmu_set_period)(event);
+		else
+			intel_pmu_save_and_restart(event);
+	}
 }
 
 static __always_inline void
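
A toy illustration of the symptom the new comment describes, with invented numbers: the snapshot record already advanced the event's count, the hardware then reset the counter to 0, and a naive delta computed in the PMI handler goes negative.

	/* backwards_demo.c — invented numbers, not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Processing the counters-snapshotting record (value 980)
		 * already folded that count into the event. */
		uint64_t prev_count = 980;

		/* HW auto-reload then reset the counter; the PMI reads 0. */
		uint64_t pmi_read = 0;

		/* A save-and-restart style update would compute a negative
		 * delta: the counter appears to have moved backwards. */
		int64_t delta = (int64_t)(pmi_read - prev_count);
		printf("delta observed in PMI: %lld\n", (long long)delta); /* -980 */

		/* Hence the fix: for PEBS counter groups, only re-arm the
		 * period via x86_pmu_set_period() instead of recomputing. */
		return 0;
	}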
11 changes: 9 additions & 2 deletions arch/x86/events/perf_event.h
@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
 	return is_metric_event(event) || is_slots_event(event);
 }
 
+int is_x86_event(struct perf_event *event);
+
+static inline bool check_leader_group(struct perf_event *leader, int flags)
+{
+	return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
+}
+
 static inline bool is_branch_counters_group(struct perf_event *event)
 {
-	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+	return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
 }
 
 static inline bool is_pebs_counter_event_group(struct perf_event *event)
 {
-	return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
+	return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
 }
 
 struct amd_nb {
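
A standalone mock of the new helper's behavior (trimmed, hypothetical types; the real is_x86_event() is the one un-inlined in the core.c hunk): a non-x86 leader never matches, even if its hw.flags field happens to carry the bit.

	/* check_leader_mock.c — mocked types, not the kernel's. */
	#include <stdbool.h>
	#include <stdio.h>

	#define PERF_X86_EVENT_BRANCH_COUNTERS 0x1	/* illustrative bit only */

	struct perf_event {
		bool is_x86;			/* stands in for is_x86_event() */
		struct { int flags; } hw;
	};

	static bool check_leader_group(struct perf_event *leader, int flags)
	{
		/* Mirrors the diff: trust hw.flags only for x86 leaders. */
		return leader->is_x86 ? !!(leader->hw.flags & flags) : false;
	}

	int main(void)
	{
		/* A software leader whose hw.flags happens to have the bit
		 * set; for non-x86 events the field has a different meaning. */
		struct perf_event sw  = { .is_x86 = false, .hw = { .flags = 0x1 } };
		struct perf_event x86 = { .is_x86 = true,  .hw = { .flags = 0x1 } };

		printf("software leader matches: %d\n",
		       check_leader_group(&sw, PERF_X86_EVENT_BRANCH_COUNTERS));  /* 0 */
		printf("x86 leader matches:      %d\n",
		       check_leader_group(&x86, PERF_X86_EVENT_BRANCH_COUNTERS)); /* 1 */
		return 0;
	}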
