Commit

---
---
r: 211827
b: refs/heads/master
c: 97dee4f
h: refs/heads/master
i:
  211825: 6a6b2cb
  211823: 8e6edec
v: v3
Peter Zijlstra authored and Ingo Molnar committed Sep 9, 2010
1 parent 50b2616 commit fe3bc82
Showing 2 changed files with 101 additions and 101 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 108b02cfce04ee90b0a07ee0b104baffd39f5934
+refs/heads/master: 97dee4f3206622f31396dede2b5ddb8670458f56
200 changes: 100 additions & 100 deletions trunk/kernel/perf_event.c
@@ -5556,106 +5556,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
-/*
- * inherit an event from parent task to child task:
- */
-static struct perf_event *
-inherit_event(struct perf_event *parent_event,
-	      struct task_struct *parent,
-	      struct perf_event_context *parent_ctx,
-	      struct task_struct *child,
-	      struct perf_event *group_leader,
-	      struct perf_event_context *child_ctx)
-{
-	struct perf_event *child_event;
-
-	/*
-	 * Instead of creating recursive hierarchies of events,
-	 * we link inherited events back to the original parent,
-	 * which has a filp for sure, which we use as the reference
-	 * count:
-	 */
-	if (parent_event->parent)
-		parent_event = parent_event->parent;
-
-	child_event = perf_event_alloc(&parent_event->attr,
-				       parent_event->cpu,
-				       group_leader, parent_event,
-				       NULL);
-	if (IS_ERR(child_event))
-		return child_event;
-	get_ctx(child_ctx);
-
-	/*
-	 * Make the child state follow the state of the parent event,
-	 * not its attr.disabled bit. We hold the parent's mutex,
-	 * so we won't race with perf_event_{en, dis}able_family.
-	 */
-	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-		child_event->state = PERF_EVENT_STATE_INACTIVE;
-	else
-		child_event->state = PERF_EVENT_STATE_OFF;
-
-	if (parent_event->attr.freq) {
-		u64 sample_period = parent_event->hw.sample_period;
-		struct hw_perf_event *hwc = &child_event->hw;
-
-		hwc->sample_period = sample_period;
-		hwc->last_period = sample_period;
-
-		local64_set(&hwc->period_left, sample_period);
-	}
-
-	child_event->ctx = child_ctx;
-	child_event->overflow_handler = parent_event->overflow_handler;
-
-	/*
-	 * Link it up in the child's context:
-	 */
-	add_event_to_ctx(child_event, child_ctx);
-
-	/*
-	 * Get a reference to the parent filp - we will fput it
-	 * when the child event exits. This is safe to do because
-	 * we are in the parent and we know that the filp still
-	 * exists and has a nonzero count:
-	 */
-	atomic_long_inc(&parent_event->filp->f_count);
-
-	/*
-	 * Link this into the parent event's child list
-	 */
-	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-	mutex_lock(&parent_event->child_mutex);
-	list_add_tail(&child_event->child_list, &parent_event->child_list);
-	mutex_unlock(&parent_event->child_mutex);
-
-	return child_event;
-}
-
-static int inherit_group(struct perf_event *parent_event,
-	      struct task_struct *parent,
-	      struct perf_event_context *parent_ctx,
-	      struct task_struct *child,
-	      struct perf_event_context *child_ctx)
-{
-	struct perf_event *leader;
-	struct perf_event *sub;
-	struct perf_event *child_ctr;
-
-	leader = inherit_event(parent_event, parent, parent_ctx,
-			       child, NULL, child_ctx);
-	if (IS_ERR(leader))
-		return PTR_ERR(leader);
-	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-		child_ctr = inherit_event(sub, parent, parent_ctx,
-					  child, leader, child_ctx);
-		if (IS_ERR(child_ctr))
-			return PTR_ERR(child_ctr);
-	}
-	return 0;
-}
-
 static void sync_child_event(struct perf_event *child_event,
 			     struct task_struct *child)
 {
@@ -5844,6 +5744,106 @@ void perf_event_free_task(struct task_struct *task)
 	put_ctx(ctx);
 }
 
+/*
+ * inherit an event from parent task to child task:
+ */
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
+	      struct task_struct *parent,
+	      struct perf_event_context *parent_ctx,
+	      struct task_struct *child,
+	      struct perf_event *group_leader,
+	      struct perf_event_context *child_ctx)
+{
+	struct perf_event *child_event;
+
+	/*
+	 * Instead of creating recursive hierarchies of events,
+	 * we link inherited events back to the original parent,
+	 * which has a filp for sure, which we use as the reference
+	 * count:
+	 */
+	if (parent_event->parent)
+		parent_event = parent_event->parent;
+
+	child_event = perf_event_alloc(&parent_event->attr,
+				       parent_event->cpu,
+				       group_leader, parent_event,
+				       NULL);
+	if (IS_ERR(child_event))
+		return child_event;
+	get_ctx(child_ctx);
+
+	/*
+	 * Make the child state follow the state of the parent event,
+	 * not its attr.disabled bit. We hold the parent's mutex,
+	 * so we won't race with perf_event_{en, dis}able_family.
+	 */
+	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+		child_event->state = PERF_EVENT_STATE_INACTIVE;
+	else
+		child_event->state = PERF_EVENT_STATE_OFF;
+
+	if (parent_event->attr.freq) {
+		u64 sample_period = parent_event->hw.sample_period;
+		struct hw_perf_event *hwc = &child_event->hw;
+
+		hwc->sample_period = sample_period;
+		hwc->last_period = sample_period;
+
+		local64_set(&hwc->period_left, sample_period);
+	}
+
+	child_event->ctx = child_ctx;
+	child_event->overflow_handler = parent_event->overflow_handler;
+
+	/*
+	 * Link it up in the child's context:
+	 */
+	add_event_to_ctx(child_event, child_ctx);
+
+	/*
+	 * Get a reference to the parent filp - we will fput it
+	 * when the child event exits. This is safe to do because
+	 * we are in the parent and we know that the filp still
+	 * exists and has a nonzero count:
+	 */
+	atomic_long_inc(&parent_event->filp->f_count);
+
+	/*
+	 * Link this into the parent event's child list
+	 */
+	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+	mutex_lock(&parent_event->child_mutex);
+	list_add_tail(&child_event->child_list, &parent_event->child_list);
+	mutex_unlock(&parent_event->child_mutex);
+
+	return child_event;
+}
+
+static int inherit_group(struct perf_event *parent_event,
+	      struct task_struct *parent,
+	      struct perf_event_context *parent_ctx,
+	      struct task_struct *child,
+	      struct perf_event_context *child_ctx)
+{
+	struct perf_event *leader;
+	struct perf_event *sub;
+	struct perf_event *child_ctr;
+
+	leader = inherit_event(parent_event, parent, parent_ctx,
+			       child, NULL, child_ctx);
+	if (IS_ERR(leader))
+		return PTR_ERR(leader);
+	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+		child_ctr = inherit_event(sub, parent, parent_ctx,
+					  child, leader, child_ctx);
+		if (IS_ERR(child_ctr))
+			return PTR_ERR(child_ctr);
+	}
+	return 0;
+}
+
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
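
A note on the error-handling pattern above: inherit_event() returns either a valid perf_event pointer or an errno encoded into the pointer value, which inherit_group() unpacks with IS_ERR() and PTR_ERR(). In the kernel these helpers come from <linux/err.h>; the self-contained userspace sketch below re-implements a simplified version of that convention just to show the shape of the propagation. alloc_event() and its failure flag are invented for illustration, not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace rendition of the <linux/err.h> convention:
 * errno values are encoded at the very top of the pointer range, so
 * one return value can carry either a valid pointer or -errno. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in allocator in the style of inherit_event(): returns either
 * a valid object or ERR_PTR(-ENOMEM). (Hypothetical helper.) */
static void *alloc_event(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *ev = alloc_event(1);

	/* Same propagation shape as inherit_group(): test with
	 * IS_ERR(), extract the errno with PTR_ERR(). */
	if (IS_ERR(ev)) {
		printf("alloc failed: %ld\n", PTR_ERR(ev));
		return (int)-PTR_ERR(ev);
	}
	free(ev);
	return 0;
}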
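The atomic_long_inc(&parent_event->filp->f_count) call is ordinary atomic reference counting: the parent task still holds a live reference to the file, so taking another reference is race-free, and, as the comment says, the child side later drops it with fput(). A minimal sketch of that get/put discipline using C11 atomics; struct obj, obj_get(), and obj_put() are hypothetical stand-ins, not perf or VFS APIs.

#include <stdatomic.h>
#include <stdlib.h>

/* Toy object whose lifetime is governed by an atomic reference count,
 * in the spirit of the filp->f_count increment above. */
struct obj {
	atomic_long refcount;
};

static struct obj *obj_get(struct obj *o)
{
	/* Safe only while the caller already holds a reference --
	 * just as inherit_event() runs in the parent and knows the
	 * filp still has a nonzero count. */
	atomic_fetch_add(&o->refcount, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* Dropping the last reference frees the object. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* creator's reference */
	obj_get(o);			/* e.g. the inherited child */
	obj_put(o);			/* child exits: fput() analogue */
	obj_put(o);			/* creator drops its reference */
	return 0;
}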

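Similarly, the child_list manipulation follows the standard kernel idiom: a circular doubly-linked list node embedded in the object, modified only while holding the owner's mutex (child_mutex here). The sketch below rebuilds just enough of struct list_head and list_add_tail() to mirror the lock/link/unlock sequence in inherit_event(); toy_event and toy_child are hypothetical types invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* Minimal circular doubly-linked list in the style of the kernel's
 * struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Insert "new" just before "head", i.e. at the tail of the list. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Toy "parent event": the child list plus the mutex that serializes
 * additions, as perf_event's child_mutex does. */
struct toy_event {
	pthread_mutex_t child_mutex;
	struct list_head child_list;
};

struct toy_child {
	struct list_head child_list;	/* link into the parent's list */
	int id;
};

int main(void)
{
	struct toy_event parent = {
		.child_mutex = PTHREAD_MUTEX_INITIALIZER,
		.child_list  = LIST_HEAD_INIT(parent.child_list),
	};
	struct toy_child c = { LIST_HEAD_INIT(c.child_list), 1 };

	/* Same shape as inherit_event(): take the parent's mutex,
	 * link the child in, drop the mutex. */
	pthread_mutex_lock(&parent.child_mutex);
	list_add_tail(&c.child_list, &parent.child_list);
	pthread_mutex_unlock(&parent.child_mutex);

	printf("child %d linked\n", c.id);
	return 0;
}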