perf_counter: rework ioctl()s
Corey noticed that ioctl()s on grouped counters didn't work on
the whole group. This extends the ioctl() interface to take a
second argument that is interpreted as a flags field. We then
provide PERF_IOC_FLAG_GROUP to toggle the behaviour.

Having this flag gives the greatest flexibility, allowing you
to individually enable/disable/reset counters in a group, or
all together.

[ Impact: fix group counter enable/disable semantics ]

Reported-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20090508170028.837558214@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed May 8, 2009
1 parent 7fc23a5 commit 3df5eda
Showing 2 changed files with 65 additions and 49 deletions.
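
Before the diff, a brief user-space sketch of the reworked calling convention (not part of the commit): restart_group() and its group_fd argument are illustrative assumptions, while the request names and PERF_IOC_FLAG_GROUP come from the patch below. Without the flag an operation applies to the target counter (and its inherited children); with the flag it covers the whole group.

/*
 * Usage sketch, assuming "group_fd" is an already-open perf counter file
 * descriptor for a group leader (how it was opened is outside this patch).
 */
#include <sys/ioctl.h>
#include <linux/perf_counter.h>	/* PERF_COUNTER_IOC_*, PERF_IOC_FLAG_GROUP */

static void restart_group(int group_fd)
{
	/* Flags argument 0: act on this counter (and its inherited children) only. */
	ioctl(group_fd, PERF_COUNTER_IOC_DISABLE, 0);

	/* PERF_IOC_FLAG_GROUP: act on the group leader and all of its siblings. */
	ioctl(group_fd, PERF_COUNTER_IOC_RESET, PERF_IOC_FLAG_GROUP);
	ioctl(group_fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}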
10 changes: 7 additions & 3 deletions include/linux/perf_counter.h
@@ -157,10 +157,14 @@ struct perf_counter_hw_event {
 /*
  * Ioctls that can be done on a perf counter fd:
  */
-#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
+#define PERF_COUNTER_IOC_ENABLE		_IOW('$', 0, u32)
+#define PERF_COUNTER_IOC_DISABLE	_IOW('$', 1, u32)
 #define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)
-#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
+#define PERF_COUNTER_IOC_RESET		_IOW('$', 3, u32)
+
+enum perf_counter_ioc_flags {
+	PERF_IOC_FLAG_GROUP		= 1U << 0,
+};
 
 /*
  * Structure of the page that can be mapped via mmap
104 changes: 58 additions & 46 deletions kernel/perf_counter.c
@@ -82,7 +82,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	 * add it straight to the context's counter list, or to the group
 	 * leader's sibling list:
 	 */
-	if (counter->group_leader == counter)
+	if (group_leader == counter)
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 	else {
 		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
@@ -385,24 +385,6 @@ static void perf_counter_disable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
-/*
- * Disable a counter and all its children.
- */
-static void perf_counter_disable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_disable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_disable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 static int
 counter_sched_in(struct perf_counter *counter,
 		 struct perf_cpu_context *cpuctx,
@@ -753,24 +735,6 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 	return 0;
 }
 
-/*
- * Enable a counter and all its children.
- */
-static void perf_counter_enable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_enable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_enable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
@@ -1307,31 +1271,79 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void perf_counter_reset(struct perf_counter *counter)
 {
+	(void)perf_counter_read(counter);
 	atomic_set(&counter->count, 0);
+	perf_counter_update_userpage(counter);
 }
 
+static void perf_counter_for_each_sibling(struct perf_counter *counter,
+					  void (*func)(struct perf_counter *))
+{
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
+
+	spin_lock_irq(&ctx->lock);
+	counter = counter->group_leader;
+
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		func(sibling);
+	spin_unlock_irq(&ctx->lock);
+}
+
+static void perf_counter_for_each_child(struct perf_counter *counter,
+					void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	func(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		func(child);
+	mutex_unlock(&counter->mutex);
+}
+
+static void perf_counter_for_each(struct perf_counter *counter,
+				  void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	perf_counter_for_each_sibling(counter, func);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		perf_counter_for_each_sibling(child, func);
+	mutex_unlock(&counter->mutex);
+}
+
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct perf_counter *counter = file->private_data;
-	int err = 0;
+	void (*func)(struct perf_counter *);
+	u32 flags = arg;
 
 	switch (cmd) {
 	case PERF_COUNTER_IOC_ENABLE:
-		perf_counter_enable_family(counter);
+		func = perf_counter_enable;
 		break;
 	case PERF_COUNTER_IOC_DISABLE:
-		perf_counter_disable_family(counter);
-		break;
-	case PERF_COUNTER_IOC_REFRESH:
-		err = perf_counter_refresh(counter, arg);
+		func = perf_counter_disable;
 		break;
 	case PERF_COUNTER_IOC_RESET:
-		perf_counter_reset(counter);
+		func = perf_counter_reset;
 		break;
+
+	case PERF_COUNTER_IOC_REFRESH:
+		return perf_counter_refresh(counter, arg);
 	default:
-		err = -ENOTTY;
+		return -ENOTTY;
 	}
-	return err;
+
+	if (flags & PERF_IOC_FLAG_GROUP)
+		perf_counter_for_each(counter, func);
+	else
+		perf_counter_for_each_child(counter, func);
+
+	return 0;
 }
 
 /*
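One note on the usage sketch above, visible in the perf_ioctl() rework: PERF_COUNTER_IOC_REFRESH returns straight from perf_counter_refresh(counter, arg) before the flags dispatch is reached, so its argument is still the refresh value expected by that function rather than a PERF_IOC_FLAG_* word. A hedged illustration (rearm_counter() and its parameter name are assumptions for the example):

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/*
 * The third argument is passed through to perf_counter_refresh(), not
 * interpreted as PERF_IOC_FLAG_GROUP; group semantics do not apply here.
 */
static int rearm_counter(int counter_fd, unsigned int refresh)
{
	return ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, refresh);
}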