---
r: 156575
b: refs/heads/master
c: 17d42c1
h: refs/heads/master
i:
  156573: 60d1db6
  156571: e1da0c4
  156567: 57351d8
  156559: 10caf61
  156543: d6d4f46
v: v3
Stanislaw Gruszka authored and Ingo Molnar committed Aug 8, 2009
1 parent 5095ea9 commit 91a47f0
Showing 20 changed files with 71 additions and 238 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 95d0ad049cd6937634c0a75f9518f5166daabfce
+refs/heads/master: 17d42c1c497aa54952b9e58c1502a46f0df40315
6 changes: 3 additions & 3 deletions trunk/Documentation/lockdep-design.txt
@@ -30,9 +30,9 @@ State
The validator tracks lock-class usage history into 4n + 1 separate state bits:

- 'ever held in STATE context'
-- 'ever held as readlock in STATE context'
-- 'ever held with STATE enabled'
-- 'ever held as readlock with STATE enabled'
+- 'ever head as readlock in STATE context'
+- 'ever head with STATE enabled'
+- 'ever head as readlock with STATE enabled'

Where STATE can be either one of (kernel/lockdep_states.h)
- hardirq
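For context, the "4n + 1" accounting described above means four bits per tracked state plus one global usage bit. A minimal sketch for n = 2 (hardirq, softirq); the names mirror lockdep's scheme but are illustrative, not guaranteed to match the kernel's exact lock_usage_bit enum:

enum lock_usage_bit_sketch {
        LOCK_USED_IN_HARDIRQ,           /* ever held in hardirq context */
        LOCK_USED_IN_HARDIRQ_READ,      /* ever held as readlock in hardirq context */
        LOCK_ENABLED_HARDIRQ,           /* ever held with hardirqs enabled */
        LOCK_ENABLED_HARDIRQ_READ,      /* ever held as readlock with hardirqs enabled */
        LOCK_USED_IN_SOFTIRQ,
        LOCK_USED_IN_SOFTIRQ_READ,
        LOCK_ENABLED_SOFTIRQ,
        LOCK_ENABLED_SOFTIRQ_READ,
        LOCK_USED,                      /* the "+ 1": lock was ever used at all */
};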
8 changes: 3 additions & 5 deletions trunk/arch/x86/kernel/apic/x2apic_cluster.c
@@ -17,13 +17,11 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return x2apic_enabled();
}

-/*
-* need to use more than cpu 0, because we need more vectors when
-* MSI-X are used.
-*/
+/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
+
static const struct cpumask *x2apic_target_cpus(void)
{
-return cpu_online_mask;
+return cpumask_of(0);
}

/*
8 changes: 3 additions & 5 deletions trunk/arch/x86/kernel/apic/x2apic_phys.c
@@ -27,13 +27,11 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}

-/*
-* need to use more than cpu 0, because we need more vectors when
-* MSI-X are used.
-*/
+/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
+
static const struct cpumask *x2apic_target_cpus(void)
{
-return cpu_online_mask;
+return cpumask_of(0);
}

static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
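The two x2apic hunks above are identical in effect. A minimal sketch of what the restored hook returns, assuming only the standard <linux/cpumask.h> helpers (the function name here is made up):

/*
 * cpumask_of(0) is a constant mask with only bit 0 set, so
 * apic->target_cpus() steers newly allocated IRQ vectors to the boot
 * CPU; cpu_online_mask let vectors spread over every online CPU, the
 * MSI-X headroom argument the deleted comment made.
 */
static const struct cpumask *boot_cpu_only(void)
{
        return cpumask_of(0);
}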
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/efi.c
@@ -354,7 +354,7 @@ void __init efi_init(void)
*/
c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
if (c16) {
-for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
+for (i = 0; i < sizeof(vendor) && *c16; ++i)
vendor[i] = *c16++;
vendor[i] = '\0';
} else
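The reverted "- 1" in the loop bound above is what kept the trailing NUL store in bounds: with i < sizeof(vendor), i can reach sizeof(vendor), and vendor[i] = '\0' then writes one byte past the array. A standalone userspace demonstration of the safe variant (the buffer size and stand-in UTF-16 string are made up; EFI's fw_vendor really is 16-bit characters):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t wide[] = { 'A', 'c', 'm', 'e', 0 };  /* stand-in for fw_vendor */
        char vendor[4];                               /* deliberately tiny */
        const uint16_t *c16 = wide;
        size_t i;

        /* "- 1" reserves the last slot: i stops at sizeof(vendor) - 1,
         * so the terminator store below cannot overflow. */
        for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
                vendor[i] = *c16++;
        vendor[i] = '\0';

        printf("%s\n", vendor);                       /* prints "Acm" */
        return 0;
}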
10 changes: 1 addition & 9 deletions trunk/arch/x86/kernel/reboot.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(machine_real_restart);
#endif /* CONFIG_X86_32 */

/*
-* Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
+* Apple MacBook5,2 (2009 MacBook) needs reboot=p
*/
static int __init set_pci_reboot(const struct dmi_system_id *d)
{
@@ -426,14 +426,6 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
},
},
-{ /* Handle problems with rebooting on Apple MacBookPro5,1 */
-.callback = set_pci_reboot,
-.ident = "Apple MacBookPro5,1",
-.matches = {
-DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
-},
-},
{ }
};

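For reference, a quirk table like pci_reboot_dmi_table is consumed by dmi_check_system(), which invokes .callback on the first entry whose .matches strings all appear in the machine's DMI data. A sketch of the registration; the initcall name is an assumption, not taken from this diff:

static int __init pci_reboot_init(void)
{
        dmi_check_system(pci_reboot_dmi_table);  /* runs set_pci_reboot() on a match */
        return 0;
}
core_initcall(pci_reboot_init);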
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/vmi_32.c
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
ap.ds = __USER_DS;
ap.es = __USER_DS;
ap.fs = __KERNEL_PERCPU;
-ap.gs = __KERNEL_STACK_CANARY;
+ap.gs = 0;

ap.eflags = 0;

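Background for the ap.gs change: with stack protector enabled on 32-bit x86, compiled code reads the canary through the %gs segment at offset 20, so the segment a secondary CPU boots with has to cover that slot; __KERNEL_STACK_CANARY came from the stackprotector work being reverted here. Roughly what generated code does (illustrative helper, GNU C inline asm):

static unsigned long read_stack_canary(void)
{
        unsigned long canary;

        /* the i386 stack-protector canary lives at %gs:20 */
        asm volatile("movl %%gs:20, %0" : "=r" (canary));
        return canary;
}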
4 changes: 1 addition & 3 deletions trunk/include/linux/ftrace_event.h
@@ -89,9 +89,7 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};

-void tracing_generic_entry_update(struct trace_entry *entry,
-unsigned long flags,
-int pc);
+
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
unsigned long flags, int pc);
9 changes: 1 addition & 8 deletions trunk/include/linux/perf_counter.h
@@ -121,9 +121,8 @@ enum perf_counter_sample_format {
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
-PERF_SAMPLE_TP_RECORD = 1U << 10,

-PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
+PERF_SAMPLE_MAX = 1U << 10, /* non-ABI */
};

/*
@@ -414,11 +413,6 @@ struct perf_callchain_entry {
__u64 ip[PERF_MAX_STACK_DEPTH];
};

-struct perf_tracepoint_record {
-int size;
-char *record;
-};
-
struct task_struct;

/**
@@ -687,7 +681,6 @@ struct perf_sample_data {
struct pt_regs *regs;
u64 addr;
u64 period;
-void *private;
};

extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
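The PERF_SAMPLE_* values above are bit flags that user space ORs into the attribute's sample_type; PERF_SAMPLE_MAX is explicitly non-ABI and only marks the first free bit, which is why dropping TP_RECORD moves it from 1U << 11 back to 1U << 10. A sketch against the 2009 perf_counter ABI (struct and flags as declared in linux/perf_counter.h):

struct perf_counter_attr attr = {
        /* request instruction pointer, task id and period per sample */
        .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD,
};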
172 changes: 25 additions & 147 deletions trunk/include/trace/ftrace.h
@@ -144,9 +144,6 @@
#undef TP_fast_assign
#define TP_fast_assign(args...) args

-#undef TP_perf_assign
-#define TP_perf_assign(args...)
-
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
@@ -348,56 +345,6 @@ static inline int ftrace_get_offsets_##call( \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

-#ifdef CONFIG_EVENT_PROFILE
-
-/*
-* Generate the functions needed for tracepoint perf_counter support.
-*
-* NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
-*
-* static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
-* {
-* int ret = 0;
-*
-* if (!atomic_inc_return(&event_call->profile_count))
-* ret = register_trace_<call>(ftrace_profile_<call>);
-*
-* return ret;
-* }
-*
-* static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
-* {
-* if (atomic_add_negative(-1, &event->call->profile_count))
-* unregister_trace_<call>(ftrace_profile_<call>);
-* }
-*
-*/
-
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-\
-static void ftrace_profile_##call(proto); \
-\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{ \
-int ret = 0; \
-\
-if (!atomic_inc_return(&event_call->profile_count)) \
-ret = register_trace_##call(ftrace_profile_##call); \
-\
-return ret; \
-} \
-\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{ \
-if (atomic_add_negative(-1, &event_call->profile_count)) \
-unregister_trace_##call(ftrace_profile_##call); \
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif
-
/*
* Stage 4 of the trace events.
*
@@ -500,13 +447,36 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
#define TP_FMT(fmt, args...) fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args) \
+static void ftrace_profile_##call(proto) \
+{ \
+extern void perf_tpcounter_event(int); \
+perf_tpcounter_event(event_##call.id); \
+} \
+\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{ \
+int ret = 0; \
+\
+if (!atomic_inc_return(&event_call->profile_count)) \
+ret = register_trace_##call(ftrace_profile_##call); \
+\
+return ret; \
+} \
+\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{ \
+if (atomic_add_negative(-1, &event_call->profile_count)) \
+unregister_trace_##call(ftrace_profile_##call); \
+}

#define _TRACE_PROFILE_INIT(call) \
.profile_count = ATOMIC_INIT(-1), \
.profile_enable = ftrace_profile_enable_##call, \
.profile_disable = ftrace_profile_disable_##call,

#else
+#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif

@@ -532,6 +502,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
\
static struct ftrace_event_call event_##call; \
\
@@ -615,99 +586,6 @@ __attribute__((section("_ftrace_events"))) event_##call = { \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

-/*
-* Define the insertion callback to profile events
-*
-* The job is very similar to ftrace_raw_event_<call> except that we don't
-* insert in the ring buffer but in a perf counter.
-*
-* static void ftrace_profile_<call>(proto)
-* {
-* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
-* struct ftrace_event_call *event_call = &event_<call>;
-* extern void perf_tpcounter_event(int, u64, u64, void *, int);
-* struct ftrace_raw_##call *entry;
-* u64 __addr = 0, __count = 1;
-* unsigned long irq_flags;
-* int __entry_size;
-* int __data_size;
-* int pc;
-*
-* local_save_flags(irq_flags);
-* pc = preempt_count();
-*
-* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
-* __entry_size = __data_size + sizeof(*entry);
-*
-* do {
-* char raw_data[__entry_size]; <- allocate our sample in the stack
-* struct trace_entry *ent;
-*
-* entry = (struct ftrace_raw_<call> *)raw_data;
-* ent = &entry->ent;
-* tracing_generic_entry_update(ent, irq_flags, pc);
-* ent->type = event_call->id;
-*
-* <tstruct> <- do some jobs with dynamic arrays
-*
-* <assign> <- affect our values
-*
-* perf_tpcounter_event(event_call->id, __addr, __count, entry,
-* __entry_size); <- submit them to perf counter
-* } while (0);
-*
-* }
-*/
-
-#ifdef CONFIG_EVENT_PROFILE
-
-#undef __perf_addr
-#define __perf_addr(a) __addr = (a)
-
-#undef __perf_count
-#define __perf_count(c) __count = (c)
-
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-static void ftrace_profile_##call(proto) \
-{ \
-struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-struct ftrace_event_call *event_call = &event_##call; \
-extern void perf_tpcounter_event(int, u64, u64, void *, int); \
-struct ftrace_raw_##call *entry; \
-u64 __addr = 0, __count = 1; \
-unsigned long irq_flags; \
-int __entry_size; \
-int __data_size; \
-int pc; \
-\
-local_save_flags(irq_flags); \
-pc = preempt_count(); \
-\
-__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
-__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
-\
-do { \
-char raw_data[__entry_size]; \
-struct trace_entry *ent; \
-\
-entry = (struct ftrace_raw_##call *)raw_data; \
-ent = &entry->ent; \
-tracing_generic_entry_update(ent, irq_flags, pc); \
-ent->type = event_call->id; \
-\
-tstruct \
-\
-{ assign; } \
-\
-perf_tpcounter_event(event_call->id, __addr, __count, entry,\
-__entry_size); \
-} while (0); \
-\
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT
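For illustration, hand-expanding the restored _TRACE_PROFILE macro for a made-up event "foo" (proto stands for the event's argument list) gives roughly:

static void ftrace_profile_foo(proto)
{
        extern void perf_tpcounter_event(int);
        perf_tpcounter_event(event_foo.id);     /* count one hit of this event */
}

static int ftrace_profile_enable_foo(struct ftrace_event_call *event_call)
{
        int ret = 0;

        /* profile_count starts at -1 (_TRACE_PROFILE_INIT), so only the
         * first enabler sees 0 here and registers the probe */
        if (!atomic_inc_return(&event_call->profile_count))
                ret = register_trace_foo(ftrace_profile_foo);

        return ret;
}

static void ftrace_profile_disable_foo(struct ftrace_event_call *event_call)
{
        /* the last disabler takes the count negative again and unregisters */
        if (atomic_add_negative(-1, &event_call->profile_count))
                unregister_trace_foo(ftrace_profile_foo);
}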

3 changes: 1 addition & 2 deletions trunk/kernel/lockdep_proc.c
@@ -758,8 +758,7 @@ static int __init lockdep_proc_init(void)
&proc_lockdep_stats_operations);

#ifdef CONFIG_LOCK_STAT
proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
&proc_lock_stat_operations);
proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
#endif

return 0;
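For reference, the mode bits in play above: S_IRUSR is 0400 (owner read) and S_IWUSR is 0200 (owner write), so the restored one-liner creates /proc/lock_stat as 0400 instead of 0600, dropping the owner write permission. A trivial userspace check:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        /* prints "600 400": read-write for owner vs read-only for owner */
        printf("%o %o\n", S_IRUSR | S_IWUSR, S_IRUSR);
        return 0;
}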