Commit b1e07c6

---
r: 191242
b: refs/heads/master
c: c61e52e
h: refs/heads/master
v: v3
Frederic Weisbecker committed Apr 24, 2010
1 parent fbe9425 commit b1e07c6
Showing 4 changed files with 211 additions and 177 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5710fcad7c367adefe5634dc998f1f88780a8457
+refs/heads/master: c61e52ee705f938596d307625dce00cc4345aaf0
197 changes: 22 additions & 175 deletions trunk/tools/perf/builtin-lock.c
@@ -316,8 +316,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
 
 static char const *input_name = "perf.data";
 
-static int profile_cpu = -1;
-
 struct raw_event_sample {
 	u32 size;
 	char data[0];
@@ -697,8 +695,7 @@ process_lock_release_event(void *data,
 }
 
 static void
-process_raw_event(void *data, int cpu __used,
-		  u64 timestamp __used, struct thread *thread __used)
+process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
 {
 	struct event *event;
 	int type;
@@ -716,176 +713,6 @@ process_raw_event(void *data, int cpu __used,
 		process_lock_release_event(data, event, cpu, timestamp, thread);
 }
 
-struct raw_event_queue {
-	u64			timestamp;
-	int			cpu;
-	void			*data;
-	struct thread		*thread;
-	struct list_head	list;
-};
-
-static LIST_HEAD(raw_event_head);
-
-#define FLUSH_PERIOD	(5 * NSEC_PER_SEC)
-
-static u64 flush_limit = ULLONG_MAX;
-static u64 last_flush = 0;
-struct raw_event_queue *last_inserted;
-
-static void flush_raw_event_queue(u64 limit)
-{
-	struct raw_event_queue *tmp, *iter;
-
-	list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
-		if (iter->timestamp > limit)
-			return;
-
-		if (iter == last_inserted)
-			last_inserted = NULL;
-
-		process_raw_event(iter->data, iter->cpu, iter->timestamp,
-				  iter->thread);
-
-		last_flush = iter->timestamp;
-		list_del(&iter->list);
-		free(iter->data);
-		free(iter);
-	}
-}
-
-static void __queue_raw_event_end(struct raw_event_queue *new)
-{
-	struct raw_event_queue *iter;
-
-	list_for_each_entry_reverse(iter, &raw_event_head, list) {
-		if (iter->timestamp < new->timestamp) {
-			list_add(&new->list, &iter->list);
-			return;
-		}
-	}
-
-	list_add(&new->list, &raw_event_head);
-}
-
-static void __queue_raw_event_before(struct raw_event_queue *new,
-				     struct raw_event_queue *iter)
-{
-	list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
-		if (iter->timestamp < new->timestamp) {
-			list_add(&new->list, &iter->list);
-			return;
-		}
-	}
-
-	list_add(&new->list, &raw_event_head);
-}
-
-static void __queue_raw_event_after(struct raw_event_queue *new,
-				    struct raw_event_queue *iter)
-{
-	list_for_each_entry_continue(iter, &raw_event_head, list) {
-		if (iter->timestamp > new->timestamp) {
-			list_add_tail(&new->list, &iter->list);
-			return;
-		}
-	}
-	list_add_tail(&new->list, &raw_event_head);
-}
-
-/* The queue is ordered by time */
-static void __queue_raw_event(struct raw_event_queue *new)
-{
-	if (!last_inserted) {
-		__queue_raw_event_end(new);
-		return;
-	}
-
-	/*
-	 * Most of the time the current event has a timestamp very
-	 * close to the last event inserted, unless we just switched
-	 * to another event buffer. Searching from the last inserted
-	 * event, which is close to the current one, is probably more
-	 * efficient than an rbtree-based sort.
-	 */
-	if (last_inserted->timestamp >= new->timestamp)
-		__queue_raw_event_before(new, last_inserted);
-	else
-		__queue_raw_event_after(new, last_inserted);
-}
-
-static void queue_raw_event(void *data, int raw_size, int cpu,
-			    u64 timestamp, struct thread *thread)
-{
-	struct raw_event_queue *new;
-
-	if (flush_limit == ULLONG_MAX)
-		flush_limit = timestamp + FLUSH_PERIOD;
-
-	if (timestamp < last_flush) {
-		printf("Warning: Timestamp below last timeslice flush\n");
-		return;
-	}
-
-	new = malloc(sizeof(*new));
-	if (!new)
-		die("Not enough memory\n");
-
-	new->timestamp = timestamp;
-	new->cpu = cpu;
-	new->thread = thread;
-
-	new->data = malloc(raw_size);
-	if (!new->data)
-		die("Not enough memory\n");
-
-	memcpy(new->data, data, raw_size);
-
-	__queue_raw_event(new);
-	last_inserted = new;
-
-	/*
-	 * We want a slice of events covering 2 * FLUSH_PERIOD. If
-	 * FLUSH_PERIOD is big enough, it ensures every event that
-	 * occurred in the first half of the timeslice has been
-	 * buffered and none remain (we need that because of the
-	 * weakly ordered event recording we have). Once we reach the
-	 * 2 * FLUSH_PERIOD timeslice, we flush the first half to be
-	 * gentle with the memory (the second half can still get new
-	 * events in the middle, so wait another period to flush it).
-	 */
-	if (new->timestamp > flush_limit &&
-	    new->timestamp - flush_limit > FLUSH_PERIOD) {
-		flush_limit += FLUSH_PERIOD;
-		flush_raw_event_queue(flush_limit);
-	}
-}
-
-static int process_sample_event(event_t *event, struct perf_session *s)
-{
-	struct thread *thread;
-	struct sample_data data;
-
-	bzero(&data, sizeof(struct sample_data));
-	event__parse_sample(event, s->sample_type, &data);
-	/* CAUTION: using tid as thread.pid */
-	thread = perf_session__findnew(s, data.tid);
-
-	if (thread == NULL) {
-		pr_debug("problem processing %d event, skipping it.\n",
-			 event->header.type);
-		return -1;
-	}
-
-	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
-	if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
-		return 0;
-
-	queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);
-
-	return 0;
-}
-
 /* TODO: various ways to print, coloring, nano or milli sec */
 static void print_result(void)
 {
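The comment in the deleted __queue_raw_event() above explains the trick: consecutive samples usually carry nearby timestamps, so a linear search anchored at the last inserted node is expected to beat an rbtree. Here is a minimal standalone sketch of that insertion strategy, with invented names (ev_node, ev_head, ev_last, ev_insert) in place of the kernel's list_head helpers; it is an illustration, not the perf code itself.

#include <stdint.h>
#include <stdlib.h>

struct ev_node {
	uint64_t	timestamp;
	struct ev_node	*prev, *next;
};

static struct ev_node *ev_head;	/* oldest queued event */
static struct ev_node *ev_last;	/* most recently inserted event */

/*
 * Insert 'new' keeping the list sorted by timestamp, searching
 * outward from ev_last rather than from either end of the list.
 */
static void ev_insert(struct ev_node *new)
{
	struct ev_node *pos = ev_last;

	new->prev = new->next = NULL;
	if (!pos) {			/* empty queue */
		ev_head = ev_last = new;
		return;
	}

	/* Hop backward past nodes newer than the new event ... */
	while (pos && pos->timestamp > new->timestamp)
		pos = pos->prev;
	/* ... then forward past nodes that are not newer. */
	while (pos && pos->next && pos->next->timestamp <= new->timestamp)
		pos = pos->next;

	if (!pos) {			/* new event is the oldest */
		new->next = ev_head;
		ev_head->prev = new;
		ev_head = new;
	} else {			/* insert right after pos */
		new->next = pos->next;
		new->prev = pos;
		if (pos->next)
			pos->next->prev = new;
		pos->next = new;
	}
	ev_last = new;
}

In-order flushing is then just a walk from ev_head, which is what the deleted flush_raw_event_queue() did with list_for_each_entry_safe().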
@@ -963,9 +790,30 @@ static void dump_map(void)
 	}
 }
 
+static int process_sample_event(event_t *self, struct perf_session *s)
+{
+	struct sample_data data;
+	struct thread *thread;
+
+	bzero(&data, sizeof(data));
+	event__parse_sample(self, s->sample_type, &data);
+
+	thread = perf_session__findnew(s, data.tid);
+	if (thread == NULL) {
+		pr_debug("problem processing %d event, skipping it.\n",
+			 self->header.type);
+		return -1;
+	}
+
+	process_raw_event(data.raw_data, data.cpu, data.time, thread);
+
+	return 0;
+}
+
 static struct perf_event_ops eops = {
 	.sample			= process_sample_event,
 	.comm			= event__process_comm,
+	.ordered_samples	= true,
 };
 
 static int read_events(void)
@@ -994,7 +842,6 @@ static void __cmd_report(void)
 	setup_pager();
 	select_key();
 	read_events();
-	flush_raw_event_queue(ULLONG_MAX);
 	sort_result();
 	print_result();
 }
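For reference, the buffering policy that the deleted queue_raw_event() implemented, and that this hunk stops flushing manually: hold a window of events spanning 2 * FLUSH_PERIOD, and once the newest timestamp runs more than one period past flush_limit, process the oldest period, which by then can no longer receive late events. After this commit that reordering moves behind the session layer via .ordered_samples = true. A standalone sketch of just the window arithmetic, with invented ev_* helpers:

#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define FLUSH_PERIOD	(5 * NSEC_PER_SEC)

static uint64_t flush_limit = UINT64_MAX;	/* not armed yet */

/* Assumed provided elsewhere in this sketch. */
void ev_insert_sorted(uint64_t timestamp);
void ev_process_older_than(uint64_t limit);

static void ev_queue(uint64_t timestamp)
{
	if (flush_limit == UINT64_MAX)		/* first event seen */
		flush_limit = timestamp + FLUSH_PERIOD;

	ev_insert_sorted(timestamp);

	/*
	 * When the newest event is more than one FLUSH_PERIOD past
	 * flush_limit, the buffer spans two periods: flush the older
	 * one and slide the window forward by one period.
	 */
	if (timestamp > flush_limit &&
	    timestamp - flush_limit > FLUSH_PERIOD) {
		flush_limit += FLUSH_PERIOD;
		ev_process_older_than(flush_limit);
	}
}

A final flush with the maximum possible timestamp (the role of the deleted flush_raw_event_queue(ULLONG_MAX) call) drains whatever the window still holds at end of file.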