perf_counter: Rename various fields
A few renames:

  s/irq_period/sample_period/
  s/irq_freq/sample_freq/
  s/PERF_RECORD_/PERF_SAMPLE_/
  s/record_type/sample_type/

And change both the new sample_type and read_format to u64.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Jun 2, 2009
1 parent 8e5799b commit b23f332
Showing 4 changed files with 78 additions and 78 deletions.
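
For illustration, a minimal userspace sketch of opening a sampling counter with the renamed fields follows. Only the struct perf_counter_hw_event field names and the PERF_SAMPLE_* bits are taken from the include/linux/perf_counter.h hunk below; the event id, the syscall number, and the argument order of the open call are assumptions, not part of this commit.

/*
 * Hypothetical sketch: open a cycle counter that samples every
 * 100000 events and records IP and TID with each sample.
 * Assumes <linux/perf_counter.h> from this patch series and a
 * sys_perf_counter_open-style syscall; both are assumptions here.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
	struct perf_counter_hw_event hw_event;
	long fd;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.config        = PERF_COUNT_CPU_CYCLES;             /* assumed generic event id */
	hw_event.sample_period = 100000;                             /* was .irq_period */
	hw_event.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID;  /* was .record_type, now __u64 */
	hw_event.disabled      = 1;                                  /* start disabled, enable later */

	/* assumed signature: (hw_event, pid, cpu, group_fd, flags) */
	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}
	close(fd);
	return 0;
}

The widening of sample_type and read_format to __u64 presumably leaves room for more than 32 format bits.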
12 changes: 6 additions & 6 deletions arch/powerpc/kernel/perf_counter.c
@@ -535,7 +535,7 @@ void hw_perf_enable(void)
 			continue;
 		}
 		val = 0;
-		if (counter->hw.irq_period) {
+		if (counter->hw.sample_period) {
 			left = atomic64_read(&counter->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
@@ -749,12 +749,12 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	s64 val, left;
 	unsigned long flags;
 
-	if (!counter->hw.idx || !counter->hw.irq_period)
+	if (!counter->hw.idx || !counter->hw.sample_period)
 		return;
 	local_irq_save(flags);
 	perf_disable();
 	power_pmu_read(counter);
-	left = counter->hw.irq_period;
+	left = counter->hw.sample_period;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -789,7 +789,7 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 	if (counter->hw_event.exclude_user
 	    || counter->hw_event.exclude_kernel
 	    || counter->hw_event.exclude_hv
-	    || counter->hw_event.irq_period)
+	    || counter->hw_event.sample_period)
 		return 0;
 
 	if (ppmu->limited_pmc_event(ev))
@@ -925,7 +925,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -958,7 +958,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs, int nmi)
 {
-	u64 period = counter->hw.irq_period;
+	u64 period = counter->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
 	u64 addr, mmcra, sdsync;
8 changes: 4 additions & 4 deletions arch/x86/kernel/cpu/perf_counter.c
@@ -290,11 +290,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	hwc->nmi	= 1;
 	hw_event->nmi	= 1;
 
-	if (!hwc->irq_period)
-		hwc->irq_period = x86_pmu.max_period;
+	if (!hwc->sample_period)
+		hwc->sample_period = x86_pmu.max_period;
 
 	atomic64_set(&hwc->period_left,
-			min(x86_pmu.max_period, hwc->irq_period));
+			min(x86_pmu.max_period, hwc->sample_period));
 
 	/*
 	 * Raw event type provide the config in the event structure
@@ -462,7 +462,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = min(x86_pmu.max_period, hwc->irq_period);
+	s64 period = min(x86_pmu.max_period, hwc->sample_period);
 	int err;
 
 	/*
32 changes: 16 additions & 16 deletions include/linux/perf_counter.h
@@ -94,18 +94,18 @@ enum sw_event_ids {
 #define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)
 
 /*
- * Bits that can be set in hw_event.record_type to request information
+ * Bits that can be set in hw_event.sample_type to request information
  * in the overflow packets.
  */
-enum perf_counter_record_format {
-	PERF_RECORD_IP		= 1U << 0,
-	PERF_RECORD_TID		= 1U << 1,
-	PERF_RECORD_TIME	= 1U << 2,
-	PERF_RECORD_ADDR	= 1U << 3,
-	PERF_RECORD_GROUP	= 1U << 4,
-	PERF_RECORD_CALLCHAIN	= 1U << 5,
-	PERF_RECORD_CONFIG	= 1U << 6,
-	PERF_RECORD_CPU		= 1U << 7,
+enum perf_counter_sample_format {
+	PERF_SAMPLE_IP		= 1U << 0,
+	PERF_SAMPLE_TID		= 1U << 1,
+	PERF_SAMPLE_TIME	= 1U << 2,
+	PERF_SAMPLE_ADDR	= 1U << 3,
+	PERF_SAMPLE_GROUP	= 1U << 4,
+	PERF_SAMPLE_CALLCHAIN	= 1U << 5,
+	PERF_SAMPLE_CONFIG	= 1U << 6,
+	PERF_SAMPLE_CPU		= 1U << 7,
 };
 
 /*
@@ -132,12 +132,12 @@ struct perf_counter_hw_event {
 	__u64			config;
 
 	union {
-		__u64		irq_period;
-		__u64		irq_freq;
+		__u64		sample_period;
+		__u64		sample_freq;
 	};
 
-	__u32			record_type;
-	__u32			read_format;
+	__u64			sample_type;
+	__u64			read_format;
 
 	__u64			disabled	:  1, /* off by default */
				nmi		:  1, /* NMI sampling   */
@@ -262,7 +262,7 @@ enum perf_event_type {
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
-	 *	u64				irq_period;
+	 *	u64				sample_period;
	 * };
	 */
	PERF_EVENT_PERIOD		= 4,
@@ -363,7 +363,7 @@ struct hw_perf_counter {
		};
	};
	atomic64_t			prev_count;
-	u64				irq_period;
+	u64				sample_period;
	atomic64_t			period_left;
	u64				interrupts;
 #endif
[diff for the remaining changed file not shown]
