From 23e5a4ca295e8e4fcb8d9c5d937f7e458951d0ff Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Wed, 14 Oct 2009 12:43:44 +0200 Subject: [PATCH] --- yaml --- r: 167475 b: refs/heads/master c: 7874b1b66a53c4d9c8dcb37884cbb758aa2d712c h: refs/heads/master i: 167473: 0680e26e3d04618aeaef1081d4fddf4479c418db 167471: 169c0b0f931d3e4c0d45ef3efb14e46203003880 v: v3 --- [refs] | 2 +- .../feature-removal-schedule.txt | 30 --------------- trunk/MAINTAINERS | 7 ---- trunk/arch/s390/hypfs/hypfs_diag.c | 2 +- trunk/arch/sh/kernel/entry-common.S | 2 +- trunk/arch/sh/kernel/ftrace.c | 37 +++++-------------- trunk/arch/sh/kernel/setup.c | 2 - trunk/arch/sh/kernel/signal_32.c | 9 +++-- trunk/arch/sh/kernel/smp.c | 2 - trunk/arch/sh/kernel/traps_32.c | 7 ++-- trunk/arch/sh/mm/cache.c | 2 +- trunk/arch/x86/Kconfig | 11 +----- trunk/arch/x86/include/asm/paravirt.h | 28 ++++++++++++-- trunk/arch/x86/include/asm/paravirt_types.h | 10 ++--- trunk/arch/x86/kernel/irq.c | 2 + trunk/arch/x86/kernel/smp.c | 1 + trunk/arch/x86/kernel/time.c | 3 +- trunk/arch/x86/kernel/trampoline.c | 12 +----- trunk/arch/x86/kernel/trampoline_64.S | 4 -- trunk/arch/x86/kernel/vmi_32.c | 2 +- trunk/drivers/oprofile/event_buffer.c | 35 +++++------------- trunk/kernel/lockdep.c | 20 ++++------ trunk/kernel/sched.c | 10 +---- trunk/kernel/trace/trace.c | 2 +- trunk/kernel/trace/trace_events_filter.c | 3 +- trunk/tools/perf/Makefile | 21 ++++------- trunk/tools/perf/builtin-sched.c | 4 +- trunk/tools/perf/util/parse-events.c | 5 +-- trunk/tools/perf/util/trace-event-parse.c | 8 ++-- 29 files changed, 93 insertions(+), 190 deletions(-) diff --git a/[refs] b/[refs] index fe8ba45cdf67..9a8a45c20655 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: d470c05bedc27dbd2df9d0bb6fd82336e4ff43db +refs/heads/master: 7874b1b66a53c4d9c8dcb37884cbb758aa2d712c diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index 04e6c819b28a..89a47b5aff07 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -451,33 +451,3 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR will also allow making ALSA OSS emulation independent of sound_core. The dependency will be broken then too. Who: Tejun Heo - ---------------------------- - -What: Support for VMware's guest paravirtualization technique [VMI] will be - dropped. -When: 2.6.37 or earlier. -Why: With the recent innovations in CPU hardware acceleration technologies - from Intel and AMD, VMware ran a few experiments to compare these - techniques to guest paravirtualization technique on VMware's platform. - These hardware assisted virtualization techniques have outperformed the - performance benefits provided by VMI in most of the workloads. VMware - expects that these hardware features will be ubiquitous in a couple of - years, as a result, VMware has started a phased retirement of this - feature from the hypervisor. We will be removing this feature from the - Kernel too. Right now we are targeting 2.6.37 but can retire earlier if - technical reasons (read opportunity to remove major chunk of pvops) - arise. - - Please note that VMI has always been an optimization and non-VMI kernels - still work fine on VMware's platform. - Latest versions of VMware's product which support VMI are, - Workstation 7.0 and VSphere 4.0 on ESX side, future maintenance - releases for these products will continue supporting VMI. 
- - For more details about VMI retirement take a look at this, - http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html - -Who: Alok N Kataria - ---------------------------- diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 6b057778477d..ff968842ce56 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -4076,13 +4076,6 @@ M: Peter Zijlstra M: Paul Mackerras M: Ingo Molnar S: Supported -F: kernel/perf_event.c -F: include/linux/perf_event.h -F: arch/*/*/kernel/perf_event.c -F: arch/*/include/asm/perf_event.h -F: arch/*/lib/perf_event.c -F: arch/*/kernel/perf_callchain.c -F: tools/perf/ PERSONALITY HANDLING M: Christoph Hellwig diff --git a/trunk/arch/s390/hypfs/hypfs_diag.c b/trunk/arch/s390/hypfs/hypfs_diag.c index 704dd396257b..77df726180ba 100644 --- a/trunk/arch/s390/hypfs/hypfs_diag.c +++ b/trunk/arch/s390/hypfs/hypfs_diag.c @@ -438,7 +438,7 @@ static int diag204_probe(void) } if (diag204((unsigned long)SUBC_STIB6 | (unsigned long)INFO_EXT, pages, buf) >= 0) { - diag204_store_sc = SUBC_STIB7; + diag204_store_sc = SUBC_STIB6; diag204_info_type = INFO_EXT; goto out; } diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S index 3eb84931d2aa..68d9223b145e 100644 --- a/trunk/arch/sh/kernel/entry-common.S +++ b/trunk/arch/sh/kernel/entry-common.S @@ -121,7 +121,7 @@ noresched: ENTRY(resume_userspace) ! r8: current_thread_info cli - TRACE_IRQS_OFF + TRACE_IRQS_OFF mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all diff --git a/trunk/arch/sh/kernel/ftrace.c b/trunk/arch/sh/kernel/ftrace.c index 2c48e267256e..a3dcc6d5d253 100644 --- a/trunk/arch/sh/kernel/ftrace.c +++ b/trunk/arch/sh/kernel/ftrace.c @@ -291,48 +291,31 @@ struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -int syscall_name_to_nr(char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - for (i = 0; i < NR_syscalls; i++) - if (syscalls_metadata[i]) - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) -{ - syscalls_metadata[num]->exit_id = id; -} - -static int __init arch_init_ftrace_syscalls(void) +void arch_init_ftrace_syscalls(void) { int i; struct syscall_metadata *meta; unsigned long **psys_syscall_table = &sys_call_table; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto end; syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * FTRACE_SYSCALL_MAX, GFP_KERNEL); if (!syscalls_metadata) { WARN_ON(1); - return -ENOMEM; + return; } for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { meta = find_syscall_meta(psys_syscall_table[i]); syscalls_metadata[i] = meta; } + return; - return 0; + /* Paranoid: avoid overflow */ +end: + atomic_dec(&refs); } -arch_initcall(arch_init_ftrace_syscalls); #endif /* CONFIG_FTRACE_SYSCALLS */ diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index 99b4fb553bf1..f9d44f8e0df6 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -549,8 +549,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu == 0) seq_printf(m, "machine\t\t: %s\n", get_system_type()); - else - seq_printf(m, "\n"); seq_printf(m, "processor\t: %d\n", cpu); seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine); diff --git a/trunk/arch/sh/kernel/signal_32.c b/trunk/arch/sh/kernel/signal_32.c index 3db37425210d..6729703547a1 100644 --- 
a/trunk/arch/sh/kernel/signal_32.c +++ b/trunk/arch/sh/kernel/signal_32.c @@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) { struct task_struct *tsk = current; - if (!(boot_cpu_data.flags & CPU_HAS_FPU)) + if (!(current_cpu_data.flags & CPU_HAS_FPU)) return 0; set_used_math(); @@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc, { struct task_struct *tsk = current; - if (!(boot_cpu_data.flags & CPU_HAS_FPU)) + if (!(current_cpu_data.flags & CPU_HAS_FPU)) return 0; if (!used_math()) { @@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p #undef COPY #ifdef CONFIG_SH_FPU - if (boot_cpu_data.flags & CPU_HAS_FPU) { + if (current_cpu_data.flags & CPU_HAS_FPU) { int owned_fp; struct task_struct *tsk = current; @@ -472,7 +472,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, err |= __put_user(OR_R0_R0, &frame->retcode[6]); err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); regs->pr = (unsigned long) frame->retcode; - flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); } if (err) @@ -498,6 +497,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); + flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); + return 0; give_sigsegv: diff --git a/trunk/arch/sh/kernel/smp.c b/trunk/arch/sh/kernel/smp.c index 160db1003cfb..442d8d47a41e 100644 --- a/trunk/arch/sh/kernel/smp.c +++ b/trunk/arch/sh/kernel/smp.c @@ -35,8 +35,6 @@ static inline void __init smp_store_cpu_info(unsigned int cpu) { struct sh_cpuinfo *c = cpu_data + cpu; - memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); - c->loops_per_jiffy = loops_per_jiffy; } diff --git a/trunk/arch/sh/kernel/traps_32.c b/trunk/arch/sh/kernel/traps_32.c index 7a2ee3a6b8e7..e0b5e4b5accd 100644 --- a/trunk/arch/sh/kernel/traps_32.c +++ b/trunk/arch/sh/kernel/traps_32.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -160,12 +159,12 @@ void die(const char * str, struct pt_regs * regs, long err) oops_enter(); - spin_lock_irq(&die_lock); console_verbose(); + spin_lock_irq(&die_lock); bust_spinlocks(1); printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); - sysfs_printk_last_file(); + print_modules(); show_regs(regs); @@ -181,7 +180,6 @@ void die(const char * str, struct pt_regs * regs, long err) bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); - oops_exit(); if (kexec_should_crash(current)) crash_kexec(regs); @@ -192,6 +190,7 @@ void die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); + oops_exit(); do_exit(SIGSEGV); } diff --git a/trunk/arch/sh/mm/cache.c b/trunk/arch/sh/mm/cache.c index 5e1091be9dc4..35c37b7f717a 100644 --- a/trunk/arch/sh/mm/cache.c +++ b/trunk/arch/sh/mm/cache.c @@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma, return; page = pfn_to_page(pfn); - if (pfn_valid(pfn)) { + if (pfn_valid(pfn) && page_mapping(page)) { int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); if (dirty) { unsigned long addr = (unsigned long)page_address(page); diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 07e01149e3bf..c876bace8fdc 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -491,7 +491,7 @@ if PARAVIRT_GUEST source "arch/x86/xen/Kconfig" config VMI - 
bool "VMI Guest support (DEPRECATED)" + bool "VMI Guest support" select PARAVIRT depends on X86_32 ---help--- @@ -500,15 +500,6 @@ config VMI at the moment), by linking the kernel to a GPL-ed ROM module provided by the hypervisor. - As of September 2009, VMware has started a phased retirement - of this feature from VMware's products. Please see - feature-removal-schedule.txt for details. If you are - planning to enable this option, please note that you cannot - live migrate a VMI enabled VM to a future VMware product, - which doesn't support VMI. So if you expect your kernel to - seamlessly migrate to newer VMware products, keep this - disabled. - config KVM_CLOCK bool "KVM paravirtualized clock" select PARAVIRT diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h index efb38994859c..8aebcc41041d 100644 --- a/trunk/arch/x86/include/asm/paravirt.h +++ b/trunk/arch/x86/include/asm/paravirt.h @@ -840,22 +840,42 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) static inline unsigned long __raw_local_save_flags(void) { - return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); + unsigned long f; + + asm volatile(paravirt_alt(PARAVIRT_CALL) + : "=a"(f) + : paravirt_type(pv_irq_ops.save_fl), + paravirt_clobber(CLBR_EAX) + : "memory", "cc"); + return f; } static inline void raw_local_irq_restore(unsigned long f) { - PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); + asm volatile(paravirt_alt(PARAVIRT_CALL) + : "=a"(f) + : PV_FLAGS_ARG(f), + paravirt_type(pv_irq_ops.restore_fl), + paravirt_clobber(CLBR_EAX) + : "memory", "cc"); } static inline void raw_local_irq_disable(void) { - PVOP_VCALLEE0(pv_irq_ops.irq_disable); + asm volatile(paravirt_alt(PARAVIRT_CALL) + : + : paravirt_type(pv_irq_ops.irq_disable), + paravirt_clobber(CLBR_EAX) + : "memory", "eax", "cc"); } static inline void raw_local_irq_enable(void) { - PVOP_VCALLEE0(pv_irq_ops.irq_enable); + asm volatile(paravirt_alt(PARAVIRT_CALL) + : + : paravirt_type(pv_irq_ops.irq_enable), + paravirt_clobber(CLBR_EAX) + : "memory", "eax", "cc"); } static inline unsigned long __raw_local_irq_save(void) diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h index 9357473c8da0..dd0f5b32489d 100644 --- a/trunk/arch/x86/include/asm/paravirt_types.h +++ b/trunk/arch/x86/include/asm/paravirt_types.h @@ -494,11 +494,10 @@ int paravirt_disable_iospace(void); #define EXTRA_CLOBBERS #define VEXTRA_CLOBBERS #else /* CONFIG_X86_64 */ -/* [re]ax isn't an arg, but the return val */ #define PVOP_VCALL_ARGS \ unsigned long __edi = __edi, __esi = __esi, \ - __edx = __edx, __ecx = __ecx, __eax = __eax -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS + __edx = __edx, __ecx = __ecx +#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) @@ -510,7 +509,6 @@ int paravirt_disable_iospace(void); "=c" (__ecx) #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) -/* void functions are still allowed [re]ax for scratch */ #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS @@ -585,8 +583,8 @@ int paravirt_disable_iospace(void); VEXTRA_CLOBBERS, \ pre, post, ##__VA_ARGS__) -#define __PVOP_VCALLEESAVE(op, pre, post, ...) \ - ____PVOP_VCALL(op.func, CLBR_RET_REG, \ +#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) 
\ + ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ PVOP_VCALLEE_CLOBBERS, , \ pre, post, ##__VA_ARGS__) diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 74656d1d4e30..391206199515 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) __func__, smp_processor_id(), vector, irq); } + run_local_timers(); irq_exit(); set_irq_regs(old_regs); @@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs) if (generic_interrupt_extension) generic_interrupt_extension(); + run_local_timers(); irq_exit(); set_irq_regs(old_regs); diff --git a/trunk/arch/x86/kernel/smp.c b/trunk/arch/x86/kernel/smp.c index ec1de97600e7..d915d956e66d 100644 --- a/trunk/arch/x86/kernel/smp.c +++ b/trunk/arch/x86/kernel/smp.c @@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); + run_local_timers(); /* * KVM uses this interrupt to force a cpu out of guest mode */ diff --git a/trunk/arch/x86/kernel/time.c b/trunk/arch/x86/kernel/time.c index be2573448ed9..dcb00d278512 100644 --- a/trunk/arch/x86/kernel/time.c +++ b/trunk/arch/x86/kernel/time.c @@ -38,8 +38,7 @@ unsigned long profile_pc(struct pt_regs *regs) #ifdef CONFIG_FRAME_POINTER return *(unsigned long *)(regs->bp + sizeof(long)); #else - unsigned long *sp = - (unsigned long *)kernel_stack_pointer(regs); + unsigned long *sp = (unsigned long *)regs->sp; /* * Return address is either directly at stack pointer * or above a saved flags. Eflags has bits 22-31 zero, diff --git a/trunk/arch/x86/kernel/trampoline.c b/trunk/arch/x86/kernel/trampoline.c index cd022121cab6..699f7eeb896a 100644 --- a/trunk/arch/x86/kernel/trampoline.c +++ b/trunk/arch/x86/kernel/trampoline.c @@ -3,16 +3,8 @@ #include #include -#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) -#define __trampinit -#define __trampinitdata -#else -#define __trampinit __cpuinit -#define __trampinitdata __cpuinitdata -#endif - /* ready for x86_64 and x86 */ -unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE); +unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE); void __init reserve_trampoline_memory(void) { @@ -34,7 +26,7 @@ void __init reserve_trampoline_memory(void) * bootstrap into the page concerned. The caller * has made sure it's suitably aligned. */ -unsigned long __trampinit setup_trampoline(void) +unsigned long __cpuinit setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); diff --git a/trunk/arch/x86/kernel/trampoline_64.S b/trunk/arch/x86/kernel/trampoline_64.S index 3af2dff58b21..596d54c660a5 100644 --- a/trunk/arch/x86/kernel/trampoline_64.S +++ b/trunk/arch/x86/kernel/trampoline_64.S @@ -32,12 +32,8 @@ #include #include -#ifdef CONFIG_ACPI_SLEEP -.section .rodata, "a", @progbits -#else /* We can free up the trampoline after bootup if cpu hotplug is not supported. 
*/ __CPUINITRODATA -#endif .code16 ENTRY(trampoline_data) diff --git a/trunk/arch/x86/kernel/vmi_32.c b/trunk/arch/x86/kernel/vmi_32.c index d430e4c30193..31e6f6cfe53e 100644 --- a/trunk/arch/x86/kernel/vmi_32.c +++ b/trunk/arch/x86/kernel/vmi_32.c @@ -648,7 +648,7 @@ static inline int __init activate_vmi(void) pv_info.paravirt_enabled = 1; pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; - pv_info.name = "vmi [deprecated]"; + pv_info.name = "vmi"; pv_init_ops.patch = vmi_patch; diff --git a/trunk/drivers/oprofile/event_buffer.c b/trunk/drivers/oprofile/event_buffer.c index 5df60a6b6776..2b7ae366ceb1 100644 --- a/trunk/drivers/oprofile/event_buffer.c +++ b/trunk/drivers/oprofile/event_buffer.c @@ -35,23 +35,12 @@ static size_t buffer_pos; /* atomic_t because wait_event checks it outside of buffer_mutex */ static atomic_t buffer_ready = ATOMIC_INIT(0); -/* - * Add an entry to the event buffer. When we get near to the end we - * wake up the process sleeping on the read() of the file. To protect - * the event_buffer this function may only be called when buffer_mutex - * is set. +/* Add an entry to the event buffer. When we + * get near to the end we wake up the process + * sleeping on the read() of the file. */ void add_event_entry(unsigned long value) { - /* - * This shouldn't happen since all workqueues or handlers are - * canceled or flushed before the event buffer is freed. - */ - if (!event_buffer) { - WARN_ON_ONCE(1); - return; - } - if (buffer_pos == buffer_size) { atomic_inc(&oprofile_stats.event_lost_overflow); return; @@ -80,6 +69,7 @@ void wake_up_buffer_waiter(void) int alloc_event_buffer(void) { + int err = -ENOMEM; unsigned long flags; spin_lock_irqsave(&oprofilefs_lock, flags); @@ -90,22 +80,21 @@ int alloc_event_buffer(void) if (buffer_watershed >= buffer_size) return -EINVAL; - buffer_pos = 0; event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); if (!event_buffer) - return -ENOMEM; + goto out; - return 0; + err = 0; +out: + return err; } void free_event_buffer(void) { - mutex_lock(&buffer_mutex); vfree(event_buffer); - buffer_pos = 0; + event_buffer = NULL; - mutex_unlock(&buffer_mutex); } @@ -178,12 +167,6 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf, mutex_lock(&buffer_mutex); - /* May happen if the buffer is freed during pending reads. 
*/ - if (!event_buffer) { - retval = -EINTR; - goto out; - } - atomic_set(&buffer_ready, 0); retval = -EFAULT; diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 9af56723c096..3815ac1d58b2 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -142,11 +142,6 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); -static inline u64 lockstat_clock(void) -{ - return cpu_clock(smp_processor_id()); -} - static int lock_point(unsigned long points[], unsigned long ip) { int i; @@ -163,7 +158,7 @@ static int lock_point(unsigned long points[], unsigned long ip) return i; } -static void lock_time_inc(struct lock_time *lt, u64 time) +static void lock_time_inc(struct lock_time *lt, s64 time) { if (time > lt->max) lt->max = time; @@ -239,12 +234,12 @@ static void put_lock_stats(struct lock_class_stats *stats) static void lock_release_holdtime(struct held_lock *hlock) { struct lock_class_stats *stats; - u64 holdtime; + s64 holdtime; if (!lock_stat) return; - holdtime = lockstat_clock() - hlock->holdtime_stamp; + holdtime = sched_clock() - hlock->holdtime_stamp; stats = get_lock_stats(hlock_class(hlock)); if (hlock->read) @@ -2797,7 +2792,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->references = references; #ifdef CONFIG_LOCK_STAT hlock->waittime_stamp = 0; - hlock->holdtime_stamp = lockstat_clock(); + hlock->holdtime_stamp = sched_clock(); #endif if (check == 2 && !mark_irqflags(curr, hlock)) @@ -3327,7 +3322,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) if (hlock->instance != lock) return; - hlock->waittime_stamp = lockstat_clock(); + hlock->waittime_stamp = sched_clock(); contention_point = lock_point(hlock_class(hlock)->contention_point, ip); contending_point = lock_point(hlock_class(hlock)->contending_point, @@ -3350,7 +3345,8 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; - u64 now, waittime = 0; + u64 now; + s64 waittime = 0; int i, cpu; depth = curr->lockdep_depth; @@ -3378,7 +3374,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) cpu = smp_processor_id(); if (hlock->waittime_stamp) { - now = lockstat_clock(); + now = sched_clock(); waittime = now - hlock->waittime_stamp; hlock->holdtime_stamp = now; } diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index e88689522e66..8d25be06db62 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -676,7 +676,6 @@ inline void update_rq_clock(struct rq *rq) /** * runqueue_is_locked - * @cpu: the processor in question. * * Returns true if the current cpu runqueue is locked. 
* This interface allows printk to be called with the runqueue lock @@ -2312,7 +2311,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; - struct rq *rq, *orig_rq; + struct rq *rq; if (!sched_feat(SYNC_WAKEUPS)) wake_flags &= ~WF_SYNC; @@ -2320,7 +2319,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, this_cpu = get_cpu(); smp_wmb(); - rq = orig_rq = task_rq_lock(p, &flags); + rq = task_rq_lock(p, &flags); update_rq_clock(rq); if (!(p->state & state)) goto out; @@ -2351,10 +2350,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, set_task_cpu(p, cpu); rq = task_rq_lock(p, &flags); - - if (rq != orig_rq) - update_rq_clock(rq); - WARN_ON(p->state != TASK_WAKING); cpu = task_cpu(p); @@ -3661,7 +3656,6 @@ static void update_group_power(struct sched_domain *sd, int cpu) /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. - * @sd: The sched_domain whose statistics are to be updated. * @group: sched_group whose statistics are to be updated. * @this_cpu: Cpu for which load balance is currently performed. * @idle: Idle status of this_cpu diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index c820b0310a12..45068269ebb1 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { - return trace_array_vprintk(&global_trace, ip, fmt, args); + return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); diff --git a/trunk/kernel/trace/trace_events_filter.c b/trunk/kernel/trace/trace_events_filter.c index 98a6cc5c64ed..23245785927f 100644 --- a/trunk/kernel/trace/trace_events_filter.c +++ b/trunk/kernel/trace/trace_events_filter.c @@ -933,9 +933,8 @@ static void postfix_clear(struct filter_parse_state *ps) while (!list_empty(&ps->postfix)) { elt = list_first_entry(&ps->postfix, struct postfix_elt, list); - list_del(&elt->list); kfree(elt->operand); - kfree(elt); + list_del(&elt->list); } } diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 742a32eee8fc..5881943f0c34 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -157,18 +157,11 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') -# -# Add -m32 for cross-builds: -# -ifdef NO_64BIT - MBITS := -m32 -else - # - # If we're on a 64-bit kernel, use -m64: - # - ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) - MBITS := -m64 - endif +# If we're on a 64-bit kernel, use -m64 ifndef NO_64BIT + ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) + M64 := -m64 + endif endif # CFLAGS and LDFLAGS are for the users to override from the command line. 
@@ -201,7 +194,7 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement -CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) +CFLAGS = $(M64) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) LDFLAGS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) @@ -423,7 +416,7 @@ ifeq ($(uname_S),Darwin) endif ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); + msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif ifdef NO_DEMANGLE diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c index ce2d5be4f30e..ea9c15c0cdfe 100644 --- a/trunk/tools/perf/builtin-sched.c +++ b/trunk/tools/perf/builtin-sched.c @@ -1287,7 +1287,7 @@ static struct sort_dimension *available_sorts[] = { static LIST_HEAD(sort_list); -static int sort_dimension__add(const char *tok, struct list_head *list) +static int sort_dimension__add(char *tok, struct list_head *list) { int i; @@ -1917,7 +1917,7 @@ static void setup_sorting(void) free(str); - sort_dimension__add("pid", &cmp_pid); + sort_dimension__add((char *)"pid", &cmp_pid); } static const char *record_args[] = { diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 8cfb48cbbea0..87c424de79ee 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -691,10 +691,7 @@ static void store_event_type(const char *orgname) FILE *file; int id; - sprintf(filename, "%s/", debugfs_path); - strncat(filename, orgname, strlen(orgname)); - strcat(filename, "/id"); - + sprintf(filename, "/sys/kernel/debug/tracing/events/%s/id", orgname); c = strchr(filename, ':'); if (c) *c = '/'; diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c index 55c9659a56e2..55b41b9e3834 100644 --- a/trunk/tools/perf/util/trace-event-parse.c +++ b/trunk/tools/perf/util/trace-event-parse.c @@ -618,7 +618,7 @@ static int test_type(enum event_type type, enum event_type expect) } static int test_type_token(enum event_type type, char *token, - enum event_type expect, const char *expect_tok) + enum event_type expect, char *expect_tok) { if (type != expect) { die("Error: expected type %d but read %d", @@ -650,7 +650,7 @@ static int read_expect_type(enum event_type expect, char **tok) return __read_expect_type(expect, tok, 1); } -static int __read_expected(enum event_type expect, const char *str, int newline_ok) +static int __read_expected(enum event_type expect, char *str, int newline_ok) { enum event_type type; char *token; @@ -668,12 +668,12 @@ static int __read_expected(enum event_type expect, const char *str, int newline_ return 0; } -static int read_expected(enum event_type expect, const char *str) +static int read_expected(enum event_type expect, char *str) { return __read_expected(expect, str, 1); } -static int read_expected_item(enum event_type expect, const char *str) +static int read_expected_item(enum event_type expect, char 
*str) { return __read_expected(expect, str, 0); }
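The restored arch_init_ftrace_syscalls() in the sh ftrace.c hunk above trades an arch_initcall() for caller-driven, once-only setup: the first caller moves a static atomic counter from 0 to 1 and allocates the metadata table, while every later caller undoes its own increment and returns, which is what the "/* Paranoid: avoid overflow */" label guards against. Below is a minimal userspace sketch of that pattern, using C11 stdatomic in place of the kernel's atomic_t; init_once() and refs are illustrative names, not identifiers from the patch.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refs;

static void init_once(void)
{
	/* First caller sees the old value 0 and performs the setup. */
	if (atomic_fetch_add(&refs, 1) != 0)
		goto end;

	puts("one-time initialization runs here");
	return;

	/* Later callers drop their increment again, so the counter
	 * parks at 1 instead of creeping toward overflow. */
end:
	atomic_fetch_sub(&refs, 1);
}

int main(void)
{
	init_once();	/* performs the setup */
	init_once();	/* returns without re-initializing */
	return 0;
}

As in the kernel version, this only guarantees that the setup starts once; a second caller may return before the first caller has finished initializing, which callers must tolerate by checking for a missing table, as the removed syscall_name_to_nr() did with its "if (!syscalls_metadata)" bail-out.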