Commit 0ad699f

---
r: 205232
b: refs/heads/master
c: 761844b
h: refs/heads/master
v: v3
Stephane Eranian authored and Arnaldo Carvalho de Melo committed Jun 5, 2010
1 parent e7f952a commit 0ad699f
Showing 15 changed files with 110 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6113e45f831616de98c54a005260223b21bcb6b9
refs/heads/master: 761844b9c68b3c67b085265f92ac0675706cc3b3
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/oprofile/op_model_cell.c
@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
index = ENTRIES-1;

/* make sure index is valid */
if ((index >= ENTRIES) || (index < 0))
if ((index > ENTRIES) || (index < 0))
index = ENTRIES-1;

return initial_lfsr[index];
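
Aside, for readers comparing the two bounds checks in this hunk: with a lookup table of ENTRIES elements, valid indices run from 0 to ENTRIES-1, so `index >= ENTRIES` rejects every out-of-range value while `index > ENTRIES` lets `index == ENTRIES` slip through. A self-contained C sketch of the clamp pattern (table size and values are invented for illustration, this is not the kernel's initial_lfsr table):

    #include <stdio.h>

    #define ENTRIES 4
    static const int table[ENTRIES] = { 10, 20, 30, 40 };

    /* Clamp out-of-range indices to the last valid slot. */
    static int lookup(int index)
    {
        if ((index >= ENTRIES) || (index < 0))  /* ">=" also rejects index == ENTRIES */
            index = ENTRIES - 1;
        return table[index];
    }

    int main(void)
    {
        printf("%d %d %d\n", lookup(2), lookup(ENTRIES), lookup(-5));
        return 0;
    }
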
5 changes: 1 addition & 4 deletions trunk/kernel/perf_event.c
@@ -1507,9 +1507,6 @@ do { \
divisor = nsec * frequency;
}

if (!divisor)
return dividend;

return div64_u64(dividend, divisor);
}

@@ -1532,7 +1529,7 @@ static int perf_event_start(struct perf_event *event)
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
u64 period, sample_period;
s64 delta;

period = perf_calculate_period(event, nsec, count);
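
For context on the hunk above, which drops a zero-divisor guard in front of div64_u64(): dividing by zero is undefined, so callers either guard the divisor or must otherwise guarantee it is non-zero. A small userspace sketch of that guard pattern follows; the helper names are plain C stand-ins, not the kernel's perf_calculate_period():

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Userspace stand-in for the kernel's div64_u64(); on 64-bit this is plain division. */
    static u64 div64_u64(u64 dividend, u64 divisor)
    {
        return dividend / divisor;
    }

    /* Guarded division: fall back to the dividend when the divisor is zero,
     * mirroring the "if (!divisor) return dividend;" lines shown in the diff. */
    static u64 calc_period(u64 dividend, u64 divisor)
    {
        if (!divisor)
            return dividend;
        return div64_u64(dividend, divisor);
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)calc_period(1000000, 0));
        printf("%llu\n", (unsigned long long)calc_period(1000000, 4));
        return 0;
    }
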
6 changes: 3 additions & 3 deletions trunk/kernel/sched.c
@@ -3730,7 +3730,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched notrace preempt_schedule(void)
asmlinkage void __sched preempt_schedule(void)
{
struct thread_info *ti = current_thread_info();

@@ -3742,9 +3742,9 @@ asmlinkage void __sched notrace preempt_schedule(void)
return;

do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
add_preempt_count(PREEMPT_ACTIVE);
schedule();
sub_preempt_count_notrace(PREEMPT_ACTIVE);
sub_preempt_count(PREEMPT_ACTIVE);

/*
* Check again in case we missed a preemption opportunity
5 changes: 3 additions & 2 deletions trunk/kernel/trace/ftrace.c
@@ -1883,6 +1883,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
struct hlist_head *hhd;
struct hlist_node *n;
unsigned long key;
int resched;

key = hash_long(ip, FTRACE_HASH_BITS);

@@ -1896,12 +1897,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
* period. This syncs the hash iteration and freeing of items
* on the hash. rcu_read_lock is too dangerous here.
*/
preempt_disable_notrace();
resched = ftrace_preempt_disable();
hlist_for_each_entry_rcu(entry, n, hhd, node) {
if (entry->ip == ip)
entry->ops->func(ip, parent_ip, &entry->data);
}
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
38 changes: 30 additions & 8 deletions trunk/kernel/trace/ring_buffer.c
@@ -2242,6 +2242,8 @@ static void trace_recursive_unlock(void)

#endif

static DEFINE_PER_CPU(int, rb_need_resched);

/**
* ring_buffer_lock_reserve - reserve a part of the buffer
* @buffer: the ring buffer to reserve from
@@ -2262,13 +2264,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int cpu;
int cpu, resched;

if (ring_buffer_flags != RB_BUFFERS_ON)
return NULL;

/* If we are tracing schedule, we don't want to recurse */
preempt_disable_notrace();
resched = ftrace_preempt_disable();

if (atomic_read(&buffer->record_disabled))
goto out_nocheck;
@@ -2293,13 +2295,21 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
if (!event)
goto out;

/*
* Need to store resched state on this cpu.
* Only the first needs to.
*/

if (preempt_count() == 1)
per_cpu(rb_need_resched, cpu) = resched;

return event;

out:
trace_recursive_unlock();

out_nocheck:
preempt_enable_notrace();
ftrace_preempt_enable(resched);
return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
@@ -2345,7 +2355,13 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,

trace_recursive_unlock();

preempt_enable_notrace();
/*
* Only the last preempt count needs to restore preemption.
*/
if (preempt_count() == 1)
ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
else
preempt_enable_no_resched_notrace();

return 0;
}
@@ -2453,7 +2469,13 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,

trace_recursive_unlock();

preempt_enable_notrace();
/*
* Only the last preempt count needs to restore preemption.
*/
if (preempt_count() == 1)
ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
else
preempt_enable_no_resched_notrace();

}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
@@ -2479,12 +2501,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
struct ring_buffer_event *event;
void *body;
int ret = -EBUSY;
int cpu;
int cpu, resched;

if (ring_buffer_flags != RB_BUFFERS_ON)
return -EBUSY;

preempt_disable_notrace();
resched = ftrace_preempt_disable();

if (atomic_read(&buffer->record_disabled))
goto out;
@@ -2514,7 +2536,7 @@ int ring_buffer_write(struct ring_buffer *buffer,

ret = 0;
out:
preempt_enable_notrace();
ftrace_preempt_enable(resched);

return ret;
}
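
The rb_need_resched bookkeeping in the ring_buffer.c hunks above stores the saved resched state only when preempt_count() == 1, i.e. at the outermost nesting level, and only that level restores it on commit; nested commits use preempt_enable_no_resched_notrace() so they cannot schedule from inside the ring buffer. A rough userspace sketch of that idea, with trivial stand-ins for the preemption primitives (nothing here is the real kernel API):

    #include <stdio.h>

    /* Toy stand-ins: a nesting counter and a saved "need resched" flag, playing
     * the roles of preempt_count() and per_cpu(rb_need_resched, cpu). */
    static int preempt_count;
    static int saved_resched;

    static void reserve(int need_resched_now)
    {
        preempt_count++;                /* ftrace_preempt_disable() side */
        if (preempt_count == 1)         /* only the outermost level records state */
            saved_resched = need_resched_now;
    }

    static void commit(void)
    {
        if (preempt_count == 1)         /* only the outermost level may reschedule */
            printf("outer commit: restore preemption, resched=%d\n", saved_resched);
        else
            printf("nested commit: enable, no resched\n");
        preempt_count--;
    }

    int main(void)
    {
        reserve(1);     /* outer event, resched was pending */
        reserve(0);     /*   nested event (e.g. from an interrupt) */
        commit();       /*   nested commit must not schedule */
        commit();       /* outer commit restores the pending resched */
        return 0;
    }
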
5 changes: 3 additions & 2 deletions trunk/kernel/trace/trace.c
@@ -1404,6 +1404,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
struct bprint_entry *entry;
unsigned long flags;
int disable;
int resched;
int cpu, len = 0, size, pc;

if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1413,7 +1414,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
pause_graph_tracing();

pc = preempt_count();
preempt_disable_notrace();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
data = tr->data[cpu];

@@ -1451,7 +1452,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)

out:
atomic_dec_return(&data->disabled);
preempt_enable_notrace();
ftrace_preempt_enable(resched);
unpause_graph_tracing();

return len;
48 changes: 48 additions & 0 deletions trunk/kernel/trace/trace.h
@@ -628,6 +628,54 @@ enum trace_iterator_flags {

extern struct tracer nop_trace;

/**
* ftrace_preempt_disable - disable preemption scheduler safe
*
* When tracing can happen inside the scheduler, there exists
* cases that the tracing might happen before the need_resched
* flag is checked. If this happens and the tracer calls
* preempt_enable (after a disable), a schedule might take place
* causing an infinite recursion.
*
* To prevent this, we read the need_resched flag before
* disabling preemption. When we want to enable preemption we
* check the flag, if it is set, then we call preempt_enable_no_resched.
* Otherwise, we call preempt_enable.
*
* The rationale for doing the above is that if need_resched is set
* and we have yet to reschedule, we are either in an atomic location
* (where we do not need to check for scheduling) or we are inside
* the scheduler and do not want to resched.
*/
static inline int ftrace_preempt_disable(void)
{
int resched;

resched = need_resched();
preempt_disable_notrace();

return resched;
}

/**
* ftrace_preempt_enable - enable preemption scheduler safe
* @resched: the return value from ftrace_preempt_disable
*
* This is a scheduler safe way to enable preemption and not miss
* any preemption checks. The disable call saved the state of preemption.
* If resched is set, then we are either inside an atomic or
* are inside the scheduler (we would have already scheduled
* otherwise). In this case, we do not want to call normal
* preempt_enable, but preempt_enable_no_resched instead.
*/
static inline void ftrace_preempt_enable(int resched)
{
if (resched)
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
}

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
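
The kernel-doc above describes how callers are expected to pair these helpers: sample need_resched() before disabling preemption, and pass the saved value back when re-enabling. Below is a compilable userspace sketch of that calling pattern; the preemption and need_resched primitives are mocked, so it only illustrates the save/restore discipline, not real scheduler behaviour:

    #include <stdio.h>

    /* Mocked primitives -- in the kernel these are need_resched(),
     * preempt_disable_notrace(), preempt_enable_notrace() and
     * preempt_enable_no_resched_notrace(). */
    static int mock_need_resched = 1;

    static int need_resched(void) { return mock_need_resched; }
    static void preempt_disable_notrace(void) { printf("preempt off\n"); }
    static void preempt_enable_notrace(void) { printf("preempt on (may resched)\n"); }
    static void preempt_enable_no_resched_notrace(void) { printf("preempt on (no resched)\n"); }

    /* Same shape as the helpers added to trace.h above. */
    static int ftrace_preempt_disable(void)
    {
        int resched = need_resched();

        preempt_disable_notrace();
        return resched;
    }

    static void ftrace_preempt_enable(int resched)
    {
        if (resched)
            preempt_enable_no_resched_notrace();
        else
            preempt_enable_notrace();
    }

    /* Typical caller, e.g. a tracer callback. */
    int main(void)
    {
        int resched = ftrace_preempt_disable();

        printf("do the traced work with preemption off\n");
        ftrace_preempt_enable(resched);
        return 0;
    }
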
5 changes: 3 additions & 2 deletions trunk/kernel/trace/trace_clock.c
@@ -32,15 +32,16 @@
u64 notrace trace_clock_local(void)
{
u64 clock;
int resched;

/*
* sched_clock() is an architecture implemented, fast, scalable,
* lockless clock. It is not guaranteed to be coherent across
* CPUs, nor across CPU idle events.
*/
preempt_disable_notrace();
resched = ftrace_preempt_disable();
clock = sched_clock();
preempt_enable_notrace();
ftrace_preempt_enable(resched);

return clock;
}
5 changes: 3 additions & 2 deletions trunk/kernel/trace/trace_events.c
@@ -1524,11 +1524,12 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
struct ftrace_entry *entry;
unsigned long flags;
long disabled;
int resched;
int cpu;
int pc;

pc = preempt_count();
preempt_disable_notrace();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

@@ -1550,7 +1551,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)

out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
6 changes: 3 additions & 3 deletions trunk/kernel/trace/trace_functions.c
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int cpu, resched;
int pc;

if (unlikely(!ftrace_function_enabled))
return;

pc = preempt_count();
preempt_disable_notrace();
resched = ftrace_preempt_disable();
local_save_flags(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
trace_function(tr, ip, parent_ip, flags, pc);

atomic_dec(&data->disabled);
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}

static void
5 changes: 3 additions & 2 deletions trunk/kernel/trace/trace_sched_wakeup.c
@@ -46,14 +46,15 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int resched;
int cpu;
int pc;

if (likely(!wakeup_task))
return;

pc = preempt_count();
preempt_disable_notrace();
resched = ftrace_preempt_disable();

cpu = raw_smp_processor_id();
if (cpu != wakeup_current_cpu)
@@ -73,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
out:
atomic_dec(&data->disabled);
out_enable:
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
6 changes: 3 additions & 3 deletions trunk/kernel/trace/trace_stack.c
@@ -110,12 +110,12 @@ static inline void check_stack(void)
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
int cpu;
int cpu, resched;

if (unlikely(!ftrace_enabled || stack_trace_disabled))
return;

preempt_disable_notrace();
resched = ftrace_preempt_disable();

cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
out:
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
5 changes: 3 additions & 2 deletions trunk/tools/perf/builtin-report.c
@@ -157,8 +157,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)

event__parse_sample(event, session->sample_type, &data);

dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
data.pid, data.tid, data.ip, data.period);
dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
event->header.misc, data.pid, data.tid, data.ip,
data.period, data.cpu);

if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
unsigned int i;
3 changes: 2 additions & 1 deletion trunk/tools/perf/util/event.c
@@ -765,7 +765,8 @@ int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
u32 *p = (u32 *)array;
data->cpu = *p;
array++;
}
} else
data->cpu = -1;

if (type & PERF_SAMPLE_PERIOD) {
data->period = *array;
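
The event.c hunk above makes the CPU field default to -1 when PERF_SAMPLE_CPU was not requested, so consumers such as builtin-report.c can print data.cpu unconditionally. A simplified sketch of that optional-field walk (the flag values and struct are invented for illustration and are not perf's real sample layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented flag bits standing in for PERF_SAMPLE_* (illustration only). */
    #define SAMPLE_IP   (1u << 0)
    #define SAMPLE_CPU  (1u << 1)

    struct sample {
        uint64_t ip;
        int cpu;
    };

    /* Walk a packed u64 array in flag order; fields that were not recorded
     * get an explicit default instead of being left uninitialised. */
    static void parse_sample(const uint64_t *array, unsigned int type, struct sample *s)
    {
        if (type & SAMPLE_IP)
            s->ip = *array++;

        if (type & SAMPLE_CPU) {
            s->cpu = (int)(uint32_t)*array;
            array++;
        } else {
            s->cpu = -1;    /* mirrors "data->cpu = -1;" in the diff */
        }
    }

    int main(void)
    {
        uint64_t raw[] = { 0xdeadbeef };
        struct sample s;

        parse_sample(raw, SAMPLE_IP, &s);   /* no CPU recorded */
        printf("ip=%#llx cpu=%d\n", (unsigned long long)s.ip, s.cpu);
        return 0;
    }
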
