Commit db59d35

---
r: 223891
b: refs/heads/master
c: c980d10
h: refs/heads/master
i:
  223889: 49c514d
  223887: b2ffd7d
v: v3
Arnaldo Carvalho de Melo committed Dec 5, 2010
1 parent c4ab788 commit db59d35
Showing 3 changed files with 103 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6844c09d849aeb00e8ddfe9525e8567a531c22d0
+refs/heads/master: c980d1091810df13f21aabbce545fd98f545bbf7
12 changes: 11 additions & 1 deletion trunk/include/linux/perf_event.h
@@ -215,8 +215,9 @@ struct perf_event_attr {
				 */
				precise_ip : 2, /* skid constraint */
				mmap_data : 1, /* non-exec mmap data */
+				sample_id_all : 1, /* sample_type all events */

-				__reserved_1 : 46;
+				__reserved_1 : 45;

	union {
		__u32 wakeup_events; /* wakeup every n events */
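
For context, here is a minimal userspace sketch (not part of this commit) of how the new attribute bit would be requested; the particular counter, sample period and the raw-syscall wrapper are illustrative assumptions only:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

/* Open a cycles counter whose records should all carry identity fields. */
static int open_counter_with_ids(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;			/* arbitrary example period */
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.sample_id_all = 1;	/* new bit: stash TID/TIME on every record type */
	attr.mmap = 1;		/* so PERF_RECORD_MMAP records carry the tail too */
	attr.comm = 1;

	/* group_fd = -1, flags = 0 */
	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}
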
@@ -327,6 +328,15 @@ struct perf_event_header {
 enum perf_event_type {

	/*
+	 * If perf_event_attr.sample_id_all is set then all event types will
+	 * have the sample_type selected fields related to where/when
+	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
+	 * described in PERF_RECORD_SAMPLE below; they will be stashed just
+	 * after the perf_event_header and the fields already present for the
+	 * existing record type, i.e. at the end of the payload. That way a
+	 * newer perf.data file will be supported by older perf tools, with
+	 * these new optional fields being ignored.
+	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
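
The layout described in the comment above can be read back by walking from the end of a record. Below is a consumer sketch written for this note (struct and function names are made up), assuming the field order written by __perf_event__output_id_sample() in the kernel/perf_event.c hunks below and one 64-bit slot per selected field:

#include <linux/perf_event.h>
#include <stdint.h>

/* Identity fields a tool may recover from any record when sample_id_all
 * was set; this is an illustrative struct, not a kernel ABI definition. */
struct id_sample {
	uint32_t pid, tid;	/* PERF_SAMPLE_TID */
	uint64_t time;		/* PERF_SAMPLE_TIME */
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
	uint32_t cpu;		/* PERF_SAMPLE_CPU */
};

/* The kernel appends the fields in TID, TIME, ID, STREAM_ID, CPU order,
 * so they can be parsed backwards starting from header->size. */
static void parse_id_sample(const struct perf_event_header *hdr,
			    uint64_t sample_type, struct id_sample *out)
{
	const uint64_t *p = (const uint64_t *)((const char *)hdr + hdr->size);

	if (sample_type & PERF_SAMPLE_CPU) {
		const uint32_t *u = (const uint32_t *)--p;
		out->cpu = u[0];		/* u[1] is reserved padding */
	}
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		out->stream_id = *--p;
	if (sample_type & PERF_SAMPLE_ID)
		out->id = *--p;
	if (sample_type & PERF_SAMPLE_TIME)
		out->time = *--p;
	if (sample_type & PERF_SAMPLE_TID) {
		const uint32_t *u = (const uint32_t *)--p;
		out->pid = u[0];
		out->tid = u[1];
	}
}
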
108 changes: 91 additions & 17 deletions trunk/kernel/perf_event.c
@@ -3388,9 +3388,9 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
	} while (len);
 }

-static void perf_event_header__init_id(struct perf_event_header *header,
-				       struct perf_sample_data *data,
-				       struct perf_event *event)
+static void __perf_event_header__init_id(struct perf_event_header *header,
+					 struct perf_sample_data *data,
+					 struct perf_event *event)
 {
	u64 sample_type = event->attr.sample_type;

@@ -3418,13 +3418,51 @@ static void perf_event_header__init_id(struct perf_event_header *header,
	}
 }

+static void perf_event_header__init_id(struct perf_event_header *header,
+				       struct perf_sample_data *data,
+				       struct perf_event *event)
+{
+	if (event->attr.sample_id_all)
+		__perf_event_header__init_id(header, data, event);
+}
+
+static void __perf_event__output_id_sample(struct perf_output_handle *handle,
+					   struct perf_sample_data *data)
+{
+	u64 sample_type = data->type;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		perf_output_put(handle, data->tid_entry);
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		perf_output_put(handle, data->time);
+
+	if (sample_type & PERF_SAMPLE_ID)
+		perf_output_put(handle, data->id);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		perf_output_put(handle, data->stream_id);
+
+	if (sample_type & PERF_SAMPLE_CPU)
+		perf_output_put(handle, data->cpu_entry);
+}
+
+static void perf_event__output_id_sample(struct perf_event *event,
+					 struct perf_output_handle *handle,
+					 struct perf_sample_data *sample)
+{
+	if (event->attr.sample_id_all)
+		__perf_event__output_id_sample(handle, sample);
+}
+
 int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
 {
	struct perf_buffer *buffer;
	unsigned long tail, offset, head;
	int have_lost;
+	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64 id;
@@ -3451,8 +3489,12 @@ int perf_output_begin(struct perf_output_handle *handle,
		goto out;

	have_lost = local_read(&buffer->lost);
-	if (have_lost)
-		size += sizeof(lost_event);
+	if (have_lost) {
+		lost_event.header.size = sizeof(lost_event);
+		perf_event_header__init_id(&lost_event.header, &sample_data,
+					   event);
+		size += lost_event.header.size;
+	}

	perf_output_get_handle(handle);

@@ -3483,11 +3525,11 @@ int perf_output_begin(struct perf_output_handle *handle,
	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
-		lost_event.header.size = sizeof(lost_event);
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&buffer->lost, 0);

		perf_output_put(handle, lost_event);
+		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;
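
As a concrete illustration of the effect (an assumption about one particular configuration, not a new ABI struct): with sample_id_all set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME, the lost-event record written above now occupies the ring buffer as if it were:

#include <linux/perf_event.h>
#include <stdint.h>

/* Hypothetical flattened view of one PERF_RECORD_LOST record; the kernel
 * never declares such a struct, it emits the pieces separately. */
struct lost_record_with_ids {
	struct perf_event_header header;	/* .type = PERF_RECORD_LOST,
						 * .size covers the whole record */
	uint64_t id;				/* event->id */
	uint64_t lost;				/* dropped record count */
	/* tail appended by perf_event__output_id_sample(): */
	uint32_t pid, tid;			/* PERF_SAMPLE_TID */
	uint64_t time;				/* PERF_SAMPLE_TIME */
};

The space reserved for it fits because the size added above is lost_event.header.size after perf_event_header__init_id() has grown it, rather than a bare sizeof(lost_event).
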
@@ -3700,7 +3742,7 @@ void perf_prepare_sample(struct perf_event_header *header,
	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

-	perf_event_header__init_id(header, data, event);
+	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);
@@ -3768,6 +3810,7 @@ perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
 {
	struct perf_output_handle handle;
+	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
@@ -3779,12 +3822,14 @@ perf_event_read_event(struct perf_event *event,
	};
	int ret;

+	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
+	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
 }
@@ -3814,14 +3859,16 @@ static void perf_event_task_output(struct perf_event *event,
				   struct perf_task_event *task_event)
 {
	struct perf_output_handle handle;
+	struct perf_sample_data sample;
	struct task_struct *task = task_event->task;
-	int size, ret;
+	int ret, size = task_event->event_id.header.size;

-	size = task_event->event_id.header.size;
-	ret = perf_output_begin(&handle, event, size, 0, 0);
+	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

+	ret = perf_output_begin(&handle, event,
+				task_event->event_id.header.size, 0, 0);
	if (ret)
-		return;
+		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3831,7 +3878,11 @@ static void perf_event_task_output(struct perf_event *event,

	perf_output_put(&handle, task_event->event_id);

+	perf_event__output_id_sample(event, &handle, &sample);
+
	perf_output_end(&handle);
+out:
+	task_event->event_id.header.size = size;
 }

 static int perf_event_task_match(struct perf_event *event)
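
The goto out / header.size restore seen above (and repeated below for the COMM and MMAP records) exists because the same task_event->event_id is reused for every matching perf_event, while perf_event_header__init_id() grows header.size by however many identity fields that particular event's attr requests; the base size therefore has to be put back before the next event is visited. A condensed sketch of the pattern, with a made-up record type standing in for the real task/comm/mmap structures:

/* 'struct shared_record' is hypothetical; it stands in for perf_task_event,
 * perf_comm_event or perf_mmap_event, whose event_id is shared by every
 * event the record is delivered to. */
struct shared_record {
	struct {
		struct perf_event_header header;
		u32 pid, tid;
	} event_id;
};

static void output_to_one_event(struct perf_event *event,
				struct shared_record *rec)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = rec->event_id.header.size;	/* remember the base size */

	/* grows header.size by this event's identity fields, if any */
	perf_event_header__init_id(&rec->event_id.header, &sample, event);

	if (perf_output_begin(&handle, event,
			      rec->event_id.header.size, 0, 0))
		goto out;

	perf_output_put(&handle, rec->event_id);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
out:
	rec->event_id.header.size = size;	/* undo the per-event growth */
}
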
@@ -3944,19 +3995,29 @@ static void perf_event_comm_output(struct perf_event *event,
				  struct perf_comm_event *comm_event)
 {
	struct perf_output_handle handle;
+	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
-	int ret = perf_output_begin(&handle, event, size, 0, 0);
+	int ret;
+
+	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
+	ret = perf_output_begin(&handle, event,
+				comm_event->event_id.header.size, 0, 0);

	if (ret)
-		return;
+		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
			 comm_event->comm_size);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
	perf_output_end(&handle);
+out:
+	comm_event->event_id.header.size = size;
 }

 static int perf_event_comm_match(struct perf_event *event)
@@ -4001,7 +4062,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-
	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
@@ -4080,19 +4140,28 @@ static void perf_event_mmap_output(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
 {
	struct perf_output_handle handle;
+	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
-	int ret = perf_output_begin(&handle, event, size, 0, 0);
+	int ret;

+	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
+	ret = perf_output_begin(&handle, event,
+				mmap_event->event_id.header.size, 0, 0);
	if (ret)
-		return;
+		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
			 mmap_event->file_size);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
	perf_output_end(&handle);
+out:
+	mmap_event->event_id.header.size = size;
 }

 static int perf_event_mmap_match(struct perf_event *event,
@@ -4245,6 +4314,7 @@ void perf_event_mmap(struct vm_area_struct *vma)
 static void perf_log_throttle(struct perf_event *event, int enable)
 {
	struct perf_output_handle handle;
+	struct perf_sample_data sample;
	int ret;

	struct {
@@ -4266,11 +4336,15 @@ static void perf_log_throttle(struct perf_event *event, int enable)
	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

-	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
+	perf_event_header__init_id(&throttle_event.header, &sample, event);
+
+	ret = perf_output_begin(&handle, event,
+				throttle_event.header.size, 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
+	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
 }

