Commit f3eec38
---
r: 147139
b: refs/heads/master
c: 9f66a38
h: refs/heads/master
i:
  147137: 7d72c0f
  147135: 6320a8d
v: v3
Ingo Molnar committed Dec 11, 2008
1 parent f30011a commit f3eec38
Showing 5 changed files with 107 additions and 72 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: dfa7c899b401d7dc5d85aca416aee64ac82812f2
refs/heads/master: 9f66a3810fe0d4100972db84290f3ae4a4d77025
29 changes: 14 additions & 15 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -58,8 +58,8 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
*/
int hw_perf_counter_init(struct perf_counter *counter)
{
struct perf_counter_hw_event *hw_event = &counter->hw_event;
struct hw_perf_counter *hwc = &counter->hw;
u32 hw_event_type = counter->event.hw_event_type;

if (unlikely(!perf_counters_initialized))
return -EINVAL;
@@ -77,14 +77,14 @@ int hw_perf_counter_init(struct perf_counter *counter)
hwc->nmi = 0;
if (capable(CAP_SYS_ADMIN)) {
hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
if (hw_event_type & PERF_COUNT_NMI)
if (hw_event->nmi)
hwc->nmi = 1;
}

hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;

hwc->irq_period = counter->event.hw_event_period;
hwc->irq_period = hw_event->irq_period;
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
@@ -93,21 +93,20 @@ int hw_perf_counter_init(struct perf_counter *counter)
if (!hwc->irq_period)
hwc->irq_period = 0x7FFFFFFF;

hwc->next_count = -((s32) hwc->irq_period);
hwc->next_count = -(s32)hwc->irq_period;

/*
* Raw event type provide the config in the event structure
*/
hw_event_type &= ~PERF_COUNT_NMI;
if (hw_event_type == PERF_COUNT_RAW) {
hwc->config |= counter->event.hw_raw_ctrl;
if (hw_event->raw) {
hwc->config |= hw_event->type;
} else {
if (hw_event_type >= max_intel_perfmon_events)
if (hw_event->type >= max_intel_perfmon_events)
return -EINVAL;
/*
* The generic map:
*/
hwc->config |= intel_perfmon_event_map[hw_event_type];
hwc->config |= intel_perfmon_event_map[hw_event->type];
}
counter->wakeup_pending = 0;

@@ -354,7 +353,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
int bit;

list_for_each_entry(counter, &ctx->counters, list) {
if (counter->record_type != PERF_RECORD_SIMPLE ||
if (counter->hw_event.record_type != PERF_RECORD_SIMPLE ||
counter == leader)
continue;

@@ -372,7 +371,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
perf_save_and_restart(counter);
}
}
perf_store_irq_data(leader, counter->event.hw_event_type);
perf_store_irq_data(leader, counter->hw_event.type);
perf_store_irq_data(leader, atomic64_counter_read(counter));
}
}
@@ -410,15 +409,15 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)

perf_save_and_restart(counter);

switch (counter->record_type) {
switch (counter->hw_event.record_type) {
case PERF_RECORD_SIMPLE:
continue;
case PERF_RECORD_IRQ:
perf_store_irq_data(counter, instruction_pointer(regs));
break;
case PERF_RECORD_GROUP:
perf_store_irq_data(counter,
counter->event.hw_event_type);
counter->hw_event.type);
perf_store_irq_data(counter,
atomic64_counter_read(counter));
perf_handle_group(counter, &status, &ack);
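
The hunks above rework hw_perf_counter_init() to take its configuration from the new struct perf_counter_hw_event: a raw event feeds hw_event->type straight into the config word, while a generalized event is translated through intel_perfmon_event_map[]. Below is a minimal standalone sketch of that mapping, not the kernel code itself; the table holds placeholder values rather than real MSR encodings.

    /*
     * Standalone sketch of the raw vs. generalized event mapping done by
     * hw_perf_counter_init() after this patch. Placeholder encodings only.
     */
    #include <stdint.h>

    #define MAX_GENERIC_EVENTS 6	/* the six generalized PERF_COUNT_* hw events */

    static const uint64_t perfmon_event_map[MAX_GENERIC_EVENTS] = {
    	0x01, 0x02, 0x03, 0x04, 0x05, 0x06,	/* placeholder encodings */
    };

    struct hw_event_sketch {
    	uint64_t type;		/* PERF_COUNT_* index, or raw config bits */
    	unsigned int raw;	/* mirrors hw_event->raw */
    };

    /* Returns the bits to OR into hwc->config, or -1 for an unknown event. */
    static int64_t map_event_config(const struct hw_event_sketch *hw_event)
    {
    	if (hw_event->raw)
    		return (int64_t)hw_event->type;	/* raw: user supplies the config */
    	if (hw_event->type >= MAX_GENERIC_EVENTS)
    		return -1;			/* mirrors the -EINVAL path */
    	return (int64_t)perfmon_event_map[hw_event->type];
    }
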
98 changes: 62 additions & 36 deletions trunk/include/linux/perf_counter.h
@@ -24,65 +24,93 @@
struct task_struct;

/*
* Generalized hardware event types, used by the hw_event_type parameter
* of the sys_perf_counter_open() syscall:
* User-space ABI bits:
*/

/*
* Generalized performance counter event types, used by the hw_event.type
* parameter of the sys_perf_counter_open() syscall:
*/
enum hw_event_types {
PERF_COUNT_CYCLES,
PERF_COUNT_INSTRUCTIONS,
PERF_COUNT_CACHE_REFERENCES,
PERF_COUNT_CACHE_MISSES,
PERF_COUNT_BRANCH_INSTRUCTIONS,
PERF_COUNT_BRANCH_MISSES,
/*
* If this bit is set in the type, then trigger NMI sampling:
* Common hardware events, generalized by the kernel:
*/
PERF_COUNT_NMI = (1 << 30),
PERF_COUNT_RAW = (1 << 31),
PERF_COUNT_CYCLES = 0,
PERF_COUNT_INSTRUCTIONS = 1,
PERF_COUNT_CACHE_REFERENCES = 2,
PERF_COUNT_CACHE_MISSES = 3,
PERF_COUNT_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_BRANCH_MISSES = 5,

/*
* Special "software" counters provided by the kernel, even if
* the hardware does not support performance counters. These
* counters measure various physical and sw events of the
* kernel (and allow the profiling of them as well):
*/
PERF_COUNT_CPU_CLOCK = -1,
PERF_COUNT_TASK_CLOCK = -2,
PERF_COUNT_PAGE_FAULTS = -3,
PERF_COUNT_CONTEXT_SWITCHES = -4,
};

/*
* IRQ-notification data record type:
*/
enum perf_record_type {
PERF_RECORD_SIMPLE,
PERF_RECORD_IRQ,
PERF_RECORD_GROUP,
enum perf_counter_record_type {
PERF_RECORD_SIMPLE = 0,
PERF_RECORD_IRQ = 1,
PERF_RECORD_GROUP = 2,
};

struct perf_counter_event {
u32 hw_event_type;
u32 hw_event_period;
u64 hw_raw_ctrl;
/*
* Hardware event to monitor via a performance monitoring counter:
*/
struct perf_counter_hw_event {
u64 type;

u64 irq_period;
u32 record_type;

u32 disabled : 1, /* off by default */
nmi : 1, /* NMI sampling */
raw : 1, /* raw event type */
__reserved_1 : 29;

u64 __reserved_2;
};

/*
* Kernel-internal data types:
*/

/**
* struct hw_perf_counter - performance counter hardware details
* struct hw_perf_counter - performance counter hardware details:
*/
struct hw_perf_counter {
u64 config;
unsigned long config_base;
unsigned long counter_base;
int nmi;
unsigned int idx;
u64 prev_count;
s32 next_count;
u64 irq_period;
u64 config;
unsigned long config_base;
unsigned long counter_base;
int nmi;
unsigned int idx;
u64 prev_count;
u64 irq_period;
s32 next_count;
};

/*
* Hardcoded buffer length limit for now, for IRQ-fed events:
*/
#define PERF_DATA_BUFLEN 2048
#define PERF_DATA_BUFLEN 2048

/**
* struct perf_data - performance counter IRQ data sampling ...
*/
struct perf_data {
int len;
int rd_idx;
int overrun;
u8 data[PERF_DATA_BUFLEN];
int len;
int rd_idx;
int overrun;
u8 data[PERF_DATA_BUFLEN];
};

/**
@@ -96,7 +124,7 @@ struct perf_counter {
#else
atomic_t count32[2];
#endif
struct perf_counter_event event;
struct perf_counter_hw_event hw_event;
struct hw_perf_counter hw;

struct perf_counter_context *ctx;
@@ -110,8 +138,6 @@ struct perf_counter {
int oncpu;
int cpu;

enum perf_record_type record_type;

/* read() / irq related data */
wait_queue_head_t waitq;
/* optional: for NMIs */
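
The header changes above define the new user-space ABI: hw_event.type selects a generalized or raw event, irq_period sets the sampling period, record_type picks the IRQ data record format, and the bitfield flags request disabled start, NMI sampling, or raw mode. A hedged sketch of how user space might fill in such a structure follows; it assumes only the field names and enum values shown in this hunk, and the 100000 sample period is an arbitrary illustration.

    #include <stdint.h>
    #include <string.h>

    /* Local mirror of the ABI struct shown in the hunk above (illustrative). */
    struct perf_counter_hw_event {
    	uint64_t	type;

    	uint64_t	irq_period;
    	uint32_t	record_type;

    	uint32_t	disabled     :  1,	/* off by default */
    			nmi          :  1,	/* NMI sampling   */
    			raw          :  1,	/* raw event type */
    			__reserved_1 : 29;

    	uint64_t	__reserved_2;
    };

    /*
     * Describe a sampling cycle counter: type 0 is PERF_COUNT_CYCLES and
     * record_type 1 is PERF_RECORD_IRQ, per the enums in this header.
     */
    static void setup_cycle_counter(struct perf_counter_hw_event *hw_event)
    {
    	memset(hw_event, 0, sizeof(*hw_event));

    	hw_event->type        = 0;	/* PERF_COUNT_CYCLES */
    	hw_event->irq_period  = 100000;	/* arbitrary sample period */
    	hw_event->record_type = 1;	/* PERF_RECORD_IRQ */
    	hw_event->nmi         = 1;	/* request NMI-based sampling */
    }
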
12 changes: 8 additions & 4 deletions trunk/include/linux/syscalls.h
@@ -54,7 +54,7 @@ struct compat_stat;
struct compat_timeval;
struct robust_list_head;
struct getcpu_cache;
struct perf_counter_event;
struct perf_counter_hw_event;

#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -625,7 +625,11 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);

int kernel_execve(const char *filename, char *const argv[], char *const envp[]);

asmlinkage int
sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type,
pid_t pid, int cpu, int masterfd);

asmlinkage int sys_perf_counter_open(

struct perf_counter_hw_event *hw_event_uptr __user,
pid_t pid,
int cpu,
int group_fd);
#endif
38 changes: 22 additions & 16 deletions trunk/kernel/perf_counter.c
@@ -669,7 +669,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_counter *counter = file->private_data;

switch (counter->record_type) {
switch (counter->hw_event.record_type) {
case PERF_RECORD_SIMPLE:
return perf_read_hw(counter, buf, count);

@@ -707,7 +707,7 @@ static const struct file_operations perf_fops = {
* Allocate and initialize a counter structure
*/
static struct perf_counter *
perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type)
perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu)
{
struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);

@@ -718,39 +718,45 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, int cpu)
INIT_LIST_HEAD(&counter->list);
init_waitqueue_head(&counter->waitq);

counter->irqdata = &counter->data[0];
counter->usrdata = &counter->data[1];
counter->cpu = cpu;
counter->record_type = record_type;
counter->event = *event;
counter->wakeup_pending = 0;
counter->irqdata = &counter->data[0];
counter->usrdata = &counter->data[1];
counter->cpu = cpu;
counter->hw_event = *hw_event;
counter->wakeup_pending = 0;

return counter;
}

/**
* sys_perf_task_open - open a performance counter associate it to a task
* @hw_event_type: event type for monitoring/sampling...
* sys_perf_task_open - open a performance counter, associate it to a task/cpu
*
* @hw_event_uptr: event type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader counter fd
*/
asmlinkage int
sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type,
pid_t pid, int cpu, int masterfd)
asmlinkage int sys_perf_counter_open(

struct perf_counter_hw_event *hw_event_uptr __user,
pid_t pid,
int cpu,
int group_fd)

{
struct perf_counter_context *ctx;
struct perf_counter_event event;
struct perf_counter_hw_event hw_event;
struct perf_counter *counter;
int ret;

if (copy_from_user(&event, uevent, sizeof(event)) != 0)
if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
return -EFAULT;

ctx = find_get_context(pid, cpu);
if (IS_ERR(ctx))
return PTR_ERR(ctx);

ret = -ENOMEM;
counter = perf_counter_alloc(&event, cpu, record_type);
counter = perf_counter_alloc(&hw_event, cpu);
if (!counter)
goto err_put_context;

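
The kernel-doc above describes the reworked argument order of sys_perf_counter_open(): a pointer to the hw_event attributes, then pid, cpu and group_fd. Below is a hedged sketch of a user-space wrapper for the new signature; the syscall number is not part of this diff and varies by architecture, so __NR_perf_counter_open is a placeholder, and the struct layout is the one shown in the include/linux/perf_counter.h hunk above.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>

    /* Full layout is in the include/linux/perf_counter.h hunk above. */
    struct perf_counter_hw_event;

    /* Placeholder: the real syscall number is not defined in this diff. */
    #ifndef __NR_perf_counter_open
    #define __NR_perf_counter_open 333
    #endif

    static int perf_counter_open(struct perf_counter_hw_event *hw_event,
    			     pid_t pid, int cpu, int group_fd)
    {
    	int fd = syscall(__NR_perf_counter_open, hw_event, pid, cpu, group_fd);

    	if (fd < 0)
    		perror("perf_counter_open");
    	return fd;	/* a counter file descriptor on success */
    }
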
