perf/amd/ibs: Prevent leaking sensitive data to userspace
Although IBS "swfilt" can prevent leaking samples with a kernel RIP to
userspace, there are a few subtle cases where a 'data' address and/or a
'branch target' address can fall in the kernel address range even though
the RIP is from userspace. Prevent leaking kernel 'data' addresses by
discarding such samples when {exclude_kernel=1,swfilt=1}.
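
A minimal, hypothetical restatement of the new discard rule in C (names and
structure are illustrative only, not the driver code; the real checks also
depend on the requested sample_type and apply only to the IBS op PMU):

static bool ibs_swfilt_would_discard(bool exclude_kernel, bool rip_is_kernel,
                                     bool data_addr_is_kernel,
                                     bool br_target_is_kernel)
{
        /* A kernel RIP with exclude_kernel=1 was already filtered out. */
        if (exclude_kernel && rip_is_kernel)
                return true;

        if (!exclude_kernel)
                return false;

        /*
         * New: also drop samples whose data address or branch target is a
         * kernel address, even though the RIP itself came from userspace.
         */
        return data_addr_is_kernel || br_target_is_kernel;
}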

IBS can now be invoked by an unprivileged user with the introduction of
"swfilt". However, this creates a loophole in the interface where an
unprivileged user can obtain the physical addresses of userspace virtual
addresses through the IBS register raw dump (PERF_SAMPLE_RAW). Prevent
this as well.
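
As a rough sketch of the event setup this guards against (assumptions: the
ibs_op PMU type must be read from
/sys/bus/event_source/devices/ibs_op/type at runtime, and "swfilt" is taken
to be bit 0 of config2 as exported by that PMU's sysfs format directory):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: open an IBS op event as an unprivileged user. */
static int open_ibs_op_swfilt(int ibs_op_pmu_type)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = ibs_op_pmu_type;
        attr.config2 = 1;               /* swfilt=1 (assumed config2 bit 0) */
        attr.exclude_kernel = 1;        /* sample userspace only */
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
                           PERF_SAMPLE_RAW; /* raw dump carries the IBS MSRs */

        /* pid = 0 (self), cpu = -1, group_fd = -1, flags = 0 */
        return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}

With this patch, the physical-address registers in the resulting
PERF_SAMPLE_RAW register dump read as zero unless the opener passes the
perf_allow_kernel() check.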

This upstream commit fixed the most obvious leak:

  65a9926 perf/x86: Check data address for IBS software filter

Follow that up with a more complete fix.

Fixes: d29e744 ("perf/x86: Relax privilege filter restriction on AMD IBS")
Suggested-by: Matteo Rizzo <matteorizzo@google.com>
Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250321161251.1033-1-ravi.bangoria@amd.com
Namhyung Kim authored and Ingo Molnar committed Mar 22, 2025
1 parent 5c7474b commit 50a53b6
Showing 1 changed file with 78 additions and 6 deletions.
arch/x86/events/amd/ibs.c
@@ -941,6 +941,8 @@ static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
                 data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
 }

+/* Be careful. Works only for contiguous MSRs. */
+#define ibs_fetch_msr_idx(msr) (msr - MSR_AMD64_IBSFETCHCTL)
 #define ibs_op_msr_idx(msr) (msr - MSR_AMD64_IBSOPCTL)

 static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
@@ -1036,6 +1038,67 @@ static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
         return 1;
 }

+static bool perf_ibs_is_kernel_data_addr(struct perf_event *event,
+                                         struct perf_ibs_data *ibs_data)
+{
+        u64 sample_type_mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_RAW;
+        union ibs_op_data3 op_data3;
+        u64 dc_lin_addr;
+
+        op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
+        dc_lin_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
+
+        return unlikely((event->attr.sample_type & sample_type_mask) &&
+                        op_data3.dc_lin_addr_valid && kernel_ip(dc_lin_addr));
+}
+
+static bool perf_ibs_is_kernel_br_target(struct perf_event *event,
+                                         struct perf_ibs_data *ibs_data,
+                                         int br_target_idx)
+{
+        union ibs_op_data op_data;
+        u64 br_target;
+
+        op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];
+        br_target = ibs_data->regs[br_target_idx];
+
+        return unlikely((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+                        op_data.op_brn_ret && kernel_ip(br_target));
+}
+
+static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
+                                    struct pt_regs *regs, struct perf_ibs_data *ibs_data,
+                                    int br_target_idx)
+{
+        if (perf_exclude_event(event, regs))
+                return true;
+
+        if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
+                return false;
+
+        if (perf_ibs_is_kernel_data_addr(event, ibs_data))
+                return true;
+
+        if (br_target_idx != -1 &&
+            perf_ibs_is_kernel_br_target(event, ibs_data, br_target_idx))
+                return true;
+
+        return false;
+}
+
+static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
+                                   struct perf_ibs_data *ibs_data)
+{
+        if (perf_ibs == &perf_ibs_op) {
+                ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)] &= ~(1ULL << 18);
+                ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)] = 0;
+                return;
+        }
+
+        ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHCTL)] &= ~(1ULL << 52);
+        ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHPHYSAD)] = 0;
+}
+
 static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 {
         struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
Expand All @@ -1048,6 +1111,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
u64 *buf, *config, period, new_config = 0;
int br_target_idx = -1;

if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
@@ -1102,6 +1166,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
         if (perf_ibs == &perf_ibs_op) {
                 if (ibs_caps & IBS_CAPS_BRNTRGT) {
                         rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+                        br_target_idx = size;
                         size++;
                 }
                 if (ibs_caps & IBS_CAPS_OPDATA4) {
@@ -1128,16 +1193,20 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
                 regs.flags |= PERF_EFLAGS_EXACT;
         }

-        if (perf_ibs == &perf_ibs_op)
-                perf_ibs_parse_ld_st_data(event->attr.sample_type, &ibs_data, &data);
-
         if ((event->attr.config2 & IBS_SW_FILTER_MASK) &&
-            (perf_exclude_event(event, &regs) ||
-             ((data.sample_flags & PERF_SAMPLE_ADDR) &&
-              event->attr.exclude_kernel && kernel_ip(data.addr)))) {
+            perf_ibs_swfilt_discard(perf_ibs, event, &regs, &ibs_data, br_target_idx)) {
                 throttle = perf_event_account_interrupt(event);
                 goto out;
         }
+        /*
+         * Prevent leaking physical addresses to unprivileged users. Skip
+         * PERF_SAMPLE_PHYS_ADDR check since generic code prevents it for
+         * unprivileged users.
+         */
+        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+            perf_allow_kernel(&event->attr)) {
+                perf_ibs_phyaddr_clear(perf_ibs, &ibs_data);
+        }

         if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                 raw = (struct perf_raw_record){
@@ -1149,6 +1218,9 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
                 perf_sample_save_raw_data(&data, event, &raw);
         }

+        if (perf_ibs == &perf_ibs_op)
+                perf_ibs_parse_ld_st_data(event->attr.sample_type, &ibs_data, &data);
+
         /*
          * rip recorded by IbsOpRip will not be consistent with rsp and rbp
          * recorded as part of interrupt regs. Thus we need to use rip from
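
A note on the two raw bit masks cleared by perf_ibs_phyaddr_clear(): per the
AMD IBS register layout, bit 18 of IBSOPDATA3 and bit 52 of IBSFETCHCTL
appear to be the respective physical-address-valid flags. A hedged sketch
with assumed constant names (the patch itself uses the raw shifts):

/*
 * Assumed names for the bits cleared by perf_ibs_phyaddr_clear(); not part
 * of the patch.
 *   IBSOPDATA3[18]  - data-cache physical address valid
 *   IBSFETCHCTL[52] - fetch physical address valid
 */
#define IBS_OP_DATA3_DC_PHY_ADDR_VALID  (1ULL << 18)
#define IBS_FETCH_CTL_PHY_ADDR_VALID    (1ULL << 52)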
