Commit 2b6eef5

---
r: 147722
b: refs/heads/master
c: df58ab2
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jun 11, 2009
1 parent c327b6c commit 2b6eef5
Showing 4 changed files with 24 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0764771dab80d7b84b9a271bee7f1b21a04a3f0c
+refs/heads/master: df58ab24bf26b166874bfb18b3b5a2e0a8e63179
2 changes: 1 addition & 1 deletion trunk/include/linux/perf_counter.h
@@ -650,7 +650,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_paranoid;
 extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_limit;
+extern int sysctl_perf_counter_sample_rate;
 
 extern void perf_counter_init(void);
 
27 changes: 19 additions & 8 deletions trunk/kernel/perf_counter.c
@@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 /*
- * 0 - not paranoid
- * 1 - disallow cpu counters to unpriv
- * 2 - disallow kernel profiling to unpriv
+ * perf counter paranoia level:
+ *  0 - not paranoid
+ *  1 - disallow cpu counters to unpriv
+ *  2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_paranoid __read_mostly;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
-int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_counter_id;
 
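The bodies of perf_paranoid_cpu() and perf_paranoid_kernel() are collapsed out of this diff; presumably they compare sysctl_perf_counter_paranoid against the levels listed in the new comment above. A self-contained sketch of that presumed mapping (the thresholds are an assumption, not shown in this commit):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel sysctl; 0 is the default in this diff. */
static int sysctl_perf_counter_paranoid;

/* Presumed shape of the collapsed helpers: each paranoia level in the
 * comment gates one more capability for unprivileged users. */
static bool perf_paranoid_cpu(void)    { return sysctl_perf_counter_paranoid > 0; }
static bool perf_paranoid_kernel(void) { return sysctl_perf_counter_paranoid > 1; }

int main(void)
{
        for (sysctl_perf_counter_paranoid = 0;
             sysctl_perf_counter_paranoid <= 2;
             sysctl_perf_counter_paranoid++)
                printf("level %d: cpu counters %s, kernel profiling %s for unpriv\n",
                       sysctl_perf_counter_paranoid,
                       perf_paranoid_cpu() ? "disallowed" : "allowed",
                       perf_paranoid_kernel() ? "disallowed" : "allowed");
        return 0;
}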
@@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
                 if (interrupts == MAX_INTERRUPTS) {
                         perf_log_throttle(counter, 1);
                         counter->pmu->unthrottle(counter);
-                        interrupts = 2*sysctl_perf_counter_limit/HZ;
+                        interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
                 }
 
                 if (!counter->attr.freq || !counter->attr.sample_freq)
@@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
         spin_lock_irq(&ctx->lock);
         if (counter->attr.freq) {
-                if (value > sysctl_perf_counter_limit) {
+                if (value > sysctl_perf_counter_sample_rate) {
                         ret = -EINVAL;
                         goto unlock;
                 }
@@ -2979,7 +2984,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
         } else {
                 if (hwc->interrupts != MAX_INTERRUPTS) {
                         hwc->interrupts++;
-                        if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+                        if (HZ * hwc->interrupts >
+                                        (u64)sysctl_perf_counter_sample_rate) {
                                 hwc->interrupts = MAX_INTERRUPTS;
                                 perf_log_throttle(counter, 0);
                                 ret = 1;
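The overflow path above throttles a counter once HZ * hwc->interrupts exceeds sysctl_perf_counter_sample_rate, i.e. once it has fired more than sample_rate/HZ times within a single tick, and perf_ctx_adjust_freq() re-seeds the count at twice that per-tick budget when it unthrottles. A minimal standalone sketch of the arithmetic, assuming HZ=1000 and the 100000 default from this diff (illustrative only, not kernel code):

#include <stdio.h>

/* Illustrative values: HZ is a kernel build-time constant (1000 assumed
 * here) and 100000 is the default sysctl_perf_counter_sample_rate above. */
#define HZ          1000
#define SAMPLE_RATE 100000

int main(void)
{
        /* Overflow path: throttle once interrupts in one tick exceed this. */
        unsigned long per_tick_budget = SAMPLE_RATE / HZ;     /* 100 */
        /* perf_ctx_adjust_freq(): value re-seeded on unthrottle. */
        unsigned long unthrottle_seed = 2 * SAMPLE_RATE / HZ; /* 200 */

        printf("throttle after %lu interrupts per tick\n", per_tick_budget);
        printf("unthrottle re-seed value: %lu\n", unthrottle_seed);
        return 0;
}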
@@ -3639,6 +3645,11 @@ SYSCALL_DEFINE5(perf_counter_open,
                 return -EACCES;
         }
 
+        if (attr.freq) {
+                if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+                        return -EINVAL;
+        }
+
         /*
          * Get the target context (task or percpu):
          */
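With this hunk, a sampling counter whose requested frequency exceeds the sysctl cap is refused with -EINVAL at open time instead of only later via perf_counter_period(). A hypothetical userspace probe of that behaviour; it assumes a tree of this era where __NR_perf_counter_open is defined and the kernel's <linux/perf_counter.h> can be included directly (as tools/perf did), and the syscall argument order is an assumption based on the usual (attr, pid, cpu, group_fd, flags) convention — only the freq and sample_freq fields come from the diff above:

/* Hypothetical userspace probe, not part of this commit. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
        struct perf_counter_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.freq = 1;                       /* treat sample_freq as a frequency */
        attr.sample_freq = 10 * 1000 * 1000; /* far above the 100000 default cap */

        /* Assumed order: attr, pid (0 = current task), cpu (-1 = any),
         * group_fd (-1 = none), flags (0). */
        fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
        if (fd < 0 && errno == EINVAL)
                printf("rejected: sample_freq exceeds the sample-rate sysctl\n");
        else if (fd < 0)
                perror("perf_counter_open");
        else
                printf("unexpectedly opened counter fd %ld\n", fd);
        return 0;
}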
6 changes: 3 additions & 3 deletions trunk/kernel/sysctl.c
@@ -932,9 +932,9 @@ static struct ctl_table kern_table[] = {
         },
         {
                 .ctl_name = CTL_UNNUMBERED,
-                .procname = "perf_counter_int_limit",
-                .data = &sysctl_perf_counter_limit,
-                .maxlen = sizeof(sysctl_perf_counter_limit),
+                .procname = "perf_counter_max_sample_rate",
+                .data = &sysctl_perf_counter_sample_rate,
+                .maxlen = sizeof(sysctl_perf_counter_sample_rate),
                 .mode = 0644,
                 .proc_handler = &proc_dointvec,
         },
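Since the entry keeps proc_dointvec as its handler, the renamed knob should surface as a plain integer under /proc/sys/kernel/perf_counter_max_sample_rate (path inferred from its placement in kern_table, not spelled out in this diff). A small sketch that reads the current cap:

#include <stdio.h>

int main(void)
{
        /* Path inferred from the "perf_counter_max_sample_rate" procname in
         * kern_table above; proc_dointvec exposes a plain decimal integer. */
        const char *path = "/proc/sys/kernel/perf_counter_max_sample_rate";
        FILE *f = fopen(path, "r");
        int rate;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%d", &rate) == 1)
                printf("perf counter max sample rate: %d Hz\n", rate);
        fclose(f);
        return 0;
}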
