perf: Reduce perf_disable() usage
Since the current perf_disable() usage is only an optimization,
remove it for now. This eases the removal of the __weak
hw_perf_enable() interface.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Sep 9, 2010
1 parent 9ed6060 commit 24cd7f5
Showing 8 changed files with 48 additions and 59 deletions.
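
Across the architecture back-ends below the change follows one pattern: the perf_disable()/perf_enable() pair moves out of the core and into each PMU's own enable/disable and transaction callbacks, and the error paths are restructured so perf_enable() runs on every exit. A minimal sketch of the resulting shape follows, assuming the perf API of this kernel version; example_pmu_enable() and its counter-allocation step are hypothetical and not taken from this commit:

/*
 * Hypothetical sketch (not from this commit): a PMU ->enable()
 * that brackets its own work with perf_disable()/perf_enable()
 * instead of relying on the caller to have disabled the PMU.
 */
static int example_pmu_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	perf_disable();		/* previously done by the core caller */

	/* allocate a counter for the event; details are hypothetical */
	if (hwc->idx < 0) {
		ret = -EAGAIN;
		goto out;
	}

	perf_event_update_userpage(event);
out:
	perf_enable();		/* re-enable on every exit path */
	return ret;
}
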
3 changes: 3 additions & 0 deletions arch/arm/kernel/perf_event.c
@@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event)
int idx;
int err = 0;

perf_disable();

/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(cpuc, hwc);
if (idx < 0) {
@@ -303,6 +305,7 @@ armpmu_enable(struct perf_event *event)
perf_event_update_userpage(event);

out:
perf_enable();
return err;
}

3 changes: 3 additions & 0 deletions arch/powerpc/kernel/perf_event.c
@@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

perf_disable();
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_enable();
}

/*
@@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
cpuhw->event[i]->hw.config = cpuhw->events[i];

cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_enable();
return 0;
}

8 changes: 6 additions & 2 deletions arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}

/* perf must be disabled, context locked on entry */
/* context locked on entry */
static int fsl_emb_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
@@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
u64 val;
int i;

perf_disable();
cpuhw = &get_cpu_var(cpu_hw_events);

if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
ret = 0;
out:
put_cpu_var(cpu_hw_events);
perf_enable();
return ret;
}

/* perf must be disabled, context locked on entry */
/* context locked on entry */
static void fsl_emb_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;

perf_disable();
if (i < 0)
goto out;

@@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
cpuhw->n_events--;

out:
perf_enable();
put_cpu_var(cpu_hw_events);
}

11 changes: 8 additions & 3 deletions arch/sh/kernel/perf_event.c
@@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret = -EAGAIN;

perf_disable();

if (test_and_set_bit(idx, cpuc->used_mask)) {
idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
if (idx == sh_pmu->num_events)
return -EAGAIN;
goto out;

set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
@@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event)
sh_pmu->enable(hwc, idx);

perf_event_update_userpage(event);

return 0;
ret = 0;
out:
perf_enable();
return ret;
}

static void sh_pmu_read(struct perf_event *event)
3 changes: 3 additions & 0 deletions arch/sparc/kernel/perf_event.c
@@ -1113,6 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

perf_disable();
cpuhw->group_flag |= PERF_EVENT_TXN;
}

@@ -1126,6 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_enable();
}

/*
@@ -1149,6 +1151,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
return -EAGAIN;

cpuc->group_flag &= ~PERF_EVENT_TXN;
perf_enable();
return 0;
}

22 changes: 14 additions & 8 deletions arch/x86/kernel/cpu/perf_event.c
@@ -969,34 +969,38 @@ static int x86_pmu_enable(struct perf_event *event)

hwc = &event->hw;

perf_disable();
n0 = cpuc->n_events;
n = collect_events(cpuc, event, false);
if (n < 0)
return n;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
goto out;

/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be peformed
* at commit time(->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto out;
goto done_collect;

ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
goto out;
/*
* copy new assignment, now we know it is possible
* will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));

out:
done_collect:
cpuc->n_events = n;
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;

return 0;
ret = 0;
out:
perf_enable();
return ret;
}

static int x86_pmu_start(struct perf_event *event)
@@ -1432,6 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

perf_disable();
cpuc->group_flag |= PERF_EVENT_TXN;
cpuc->n_txn = 0;
}
@@ -1451,6 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
*/
cpuc->n_added -= cpuc->n_txn;
cpuc->n_events -= cpuc->n_txn;
perf_enable();
}

/*
@@ -1480,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
memcpy(cpuc->assign, assign, n*sizeof(int));

cpuc->group_flag &= ~PERF_EVENT_TXN;

perf_enable();
return 0;
}

20 changes: 10 additions & 10 deletions include/linux/perf_event.h
@@ -564,26 +564,26 @@ struct pmu {
struct list_head entry;

/*
* Should return -ENOENT when the @event doesn't match this pmu
* Should return -ENOENT when the @event doesn't match this PMU.
*/
int (*event_init) (struct perf_event *event);

int (*enable) (struct perf_event *event);
int (*enable) (struct perf_event *event);
void (*disable) (struct perf_event *event);
int (*start) (struct perf_event *event);
int (*start) (struct perf_event *event);
void (*stop) (struct perf_event *event);
void (*read) (struct perf_event *event);
void (*unthrottle) (struct perf_event *event);

/*
* Group events scheduling is treated as a transaction, add group
* events as a whole and perform one schedulability test. If the test
* fails, roll back the whole group
* Group events scheduling is treated as a transaction, add
* group events as a whole and perform one schedulability test.
* If the test fails, roll back the whole group
*/

/*
* Start the transaction, after this ->enable() doesn't need
* to do schedulability tests.
* Start the transaction, after this ->enable() doesn't need to
* do schedulability tests.
*/
void (*start_txn) (struct pmu *pmu);
/*
@@ -594,8 +594,8 @@ struct pmu {
*/
int (*commit_txn) (struct pmu *pmu);
/*
* Will cancel the transaction, assumes ->disable() is called for
* each successfull ->enable() during the transaction.
* Will cancel the transaction, assumes ->disable() is called
* for each successfull ->enable() during the transaction.
*/
void (*cancel_txn) (struct pmu *pmu);
};
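
Taken together, the callbacks above form the group-scheduling transaction that this commit leaves in place while moving the PMU disable/enable into start_txn()/cancel_txn()/commit_txn() themselves. A rough sketch of how a caller is expected to drive them; sketch_group_sched_in(), the sibling_list walk, and the omitted per-event ->disable() rollback are illustrative assumptions, not the actual scheduling path in kernel/perf_event.c:

/*
 * Illustrative caller of the transaction API above (an assumption,
 * not the real core code): add a whole group with a single
 * schedulability test at commit time.
 */
static int sketch_group_sched_in(struct pmu *pmu, struct perf_event *leader)
{
	struct perf_event *event;

	pmu->start_txn(pmu);		/* now also does perf_disable() */

	if (pmu->enable(leader))
		goto cancel;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (pmu->enable(event))
			goto cancel;
	}

	if (!pmu->commit_txn(pmu))	/* one test for the whole group */
		return 0;

cancel:
	/*
	 * ->cancel_txn() assumes ->disable() was called for each
	 * successful ->enable(); that rollback is omitted here.
	 */
	pmu->cancel_txn(pmu);		/* now also does perf_enable() */
	return -EAGAIN;
}
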
