From 9134d5332074d7cf6e2114bd4cf42b30cc08e1f8 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Wed, 14 Oct 2009 08:59:49 -0700
Subject: [PATCH]

--- yaml ---
r: 167499
b: refs/heads/master
c: c8e33141911bf8fe87dc6c92793b9a59b2be0130
h: refs/heads/master
i:
  167497: e7a04acf65fdb07c804d34cf4174dd72ec44792f
  167495: bab201ee63527088ce2fa57c4cbc3d624e27bac9
v: v3
---
 [refs]                                        |  2 +-
 .../Documentation/debugging-via-ohci1394.txt  |  8 ++--
 .../feature-removal-schedule.txt              | 30 --------------
 trunk/MAINTAINERS                             |  8 ----
 trunk/arch/s390/hypfs/hypfs_diag.c            |  2 +-
 trunk/arch/s390/kernel/processor.c            |  6 +--
 trunk/arch/sh/kernel/entry-common.S           |  2 +-
 trunk/arch/sh/kernel/ftrace.c                 | 37 +++++-------------
 trunk/arch/sh/kernel/setup.c                  |  2 -
 trunk/arch/sh/kernel/signal_32.c              |  9 +++--
 trunk/arch/sh/kernel/smp.c                    |  2 -
 trunk/arch/sh/kernel/traps_32.c               |  7 ++--
 trunk/arch/sh/mm/cache.c                      |  2 +-
 trunk/arch/sparc/kernel/ldc.c                 |  4 +-
 trunk/arch/sparc/kernel/perf_event.c          |  2 +-
 trunk/arch/sparc/mm/init_64.c                 |  2 +-
 trunk/arch/x86/Kconfig                        | 11 +-----
 trunk/arch/x86/include/asm/paravirt.h         | 28 +++++++++++--
 trunk/arch/x86/include/asm/paravirt_types.h   | 10 ++---
 .../x86/kernel/acpi/realmode/wakeup.lds.S     |  4 +-
 trunk/arch/x86/kernel/irq.c                   |  2 +
 trunk/arch/x86/kernel/smp.c                   |  1 +
 trunk/arch/x86/kernel/time.c                  |  3 +-
 trunk/arch/x86/kernel/trampoline.c            | 12 +-----
 trunk/arch/x86/kernel/trampoline_64.S         |  4 --
 trunk/arch/x86/kernel/vmi_32.c                |  2 +-
 trunk/arch/x86/kernel/vmlinux.lds.S           | 17 ++++----
 trunk/drivers/char/tty_buffer.c               |  2 +-
 trunk/drivers/firewire/sbp2.c                 | 39 ++++++++++---------
 trunk/drivers/oprofile/event_buffer.c         | 35 +++++------------
 trunk/drivers/s390/block/dasd.c               | 13 +------
 trunk/drivers/s390/block/dasd_eckd.c          |  8 ++--
 trunk/drivers/s390/char/sclp_async.c          |  4 +-
 trunk/drivers/s390/char/sclp_vt220.c          | 30 +++++++-------
 trunk/drivers/s390/char/tape_block.c          |  3 +-
 trunk/drivers/s390/cio/device.c               |  9 +++--
 trunk/drivers/watchdog/riowd.c                |  2 +-
 trunk/include/linux/workqueue.h               |  1 -
 trunk/kernel/lockdep.c                        | 20 ++++------
 trunk/kernel/sched.c                          | 10 +----
 trunk/kernel/trace/trace.c                    |  2 +-
 trunk/kernel/trace/trace_events_filter.c      |  3 +-
 trunk/kernel/workqueue.c                      | 18 --------
 trunk/tools/perf/Makefile                     | 21 ++++------
 trunk/tools/perf/builtin-sched.c              |  4 +-
 trunk/tools/perf/util/parse-events.c          |  5 +--
 trunk/tools/perf/util/trace-event-parse.c     |  8 ++--
 47 files changed, 165 insertions(+), 291 deletions(-)

diff --git a/[refs] b/[refs]
index 853e875ba82b..e7d411fa5e8e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a3ccf63ee643ef243cbf8918da8b3f9238f10029
+refs/heads/master: c8e33141911bf8fe87dc6c92793b9a59b2be0130
diff --git a/trunk/Documentation/debugging-via-ohci1394.txt b/trunk/Documentation/debugging-via-ohci1394.txt
index 611f5a5499b1..59a91e5c6909 100644
--- a/trunk/Documentation/debugging-via-ohci1394.txt
+++ b/trunk/Documentation/debugging-via-ohci1394.txt
@@ -64,14 +64,14 @@ be used to view the printk buffer of a remote machine, even with live update.
Bernhard Kaindl enhanced firescope to support accessing 64-bit machines from 32-bit firescope and vice versa: -- http://halobates.de/firewire/firescope-0.2.2.tar.bz2 +- ftp://ftp.suse.de/private/bk/firewire/tools/firescope-0.2.2.tar.bz2 and he implemented fast system dump (alpha version - read README.txt): -- http://halobates.de/firewire/firedump-0.1.tar.bz2 +- ftp://ftp.suse.de/private/bk/firewire/tools/firedump-0.1.tar.bz2 There is also a gdb proxy for firewire which allows to use gdb to access data which can be referenced from symbols found by gdb in vmlinux: -- http://halobates.de/firewire/fireproxy-0.33.tar.bz2 +- ftp://ftp.suse.de/private/bk/firewire/tools/fireproxy-0.33.tar.bz2 The latest version of this gdb proxy (fireproxy-0.34) can communicate (not yet stable) with kgdb over an memory-based communication module (kgdbom). @@ -178,7 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization: Notes ----- -Documentation and specifications: http://halobates.de/firewire/ +Documentation and specifications: ftp://ftp.suse.de/private/bk/firewire/docs FireWire is a trademark of Apple Inc. - for more information please refer to: http://en.wikipedia.org/wiki/FireWire diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index 04e6c819b28a..89a47b5aff07 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -451,33 +451,3 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR will also allow making ALSA OSS emulation independent of sound_core. The dependency will be broken then too. Who: Tejun Heo - ----------------------------- - -What: Support for VMware's guest paravirtuliazation technique [VMI] will be - dropped. -When: 2.6.37 or earlier. -Why: With the recent innovations in CPU hardware acceleration technologies - from Intel and AMD, VMware ran a few experiments to compare these - techniques to guest paravirtualization technique on VMware's platform. - These hardware assisted virtualization techniques have outperformed the - performance benefits provided by VMI in most of the workloads. VMware - expects that these hardware features will be ubiquitous in a couple of - years, as a result, VMware has started a phased retirement of this - feature from the hypervisor. We will be removing this feature from the - Kernel too. Right now we are targeting 2.6.37 but can retire earlier if - technical reasons (read opportunity to remove major chunk of pvops) - arise. - - Please note that VMI has always been an optimization and non-VMI kernels - still work fine on VMware's platform. - Latest versions of VMware's product which support VMI are, - Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence - releases for these products will continue supporting VMI. 
- - For more details about VMI retirement take a look at this, - http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html - -Who: Alok N Kataria - ----------------------------- diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index d5eb8c13ef05..ff968842ce56 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2615,7 +2615,6 @@ L: linux1394-devel@lists.sourceforge.net W: http://www.linux1394.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git S: Maintained -F: Documentation/debugging-via-ohci1394.txt F: drivers/ieee1394/ IEEE 1394 RAW I/O DRIVER @@ -4077,13 +4076,6 @@ M: Peter Zijlstra M: Paul Mackerras M: Ingo Molnar S: Supported -F: kernel/perf_event.c -F: include/linux/perf_event.h -F: arch/*/*/kernel/perf_event.c -F: arch/*/include/asm/perf_event.h -F: arch/*/lib/perf_event.c -F: arch/*/kernel/perf_callchain.c -F: tools/perf/ PERSONALITY HANDLING M: Christoph Hellwig diff --git a/trunk/arch/s390/hypfs/hypfs_diag.c b/trunk/arch/s390/hypfs/hypfs_diag.c index 77df726180ba..704dd396257b 100644 --- a/trunk/arch/s390/hypfs/hypfs_diag.c +++ b/trunk/arch/s390/hypfs/hypfs_diag.c @@ -438,7 +438,7 @@ static int diag204_probe(void) } if (diag204((unsigned long)SUBC_STIB6 | (unsigned long)INFO_EXT, pages, buf) >= 0) { - diag204_store_sc = SUBC_STIB6; + diag204_store_sc = SUBC_STIB7; diag204_info_type = INFO_EXT; goto out; } diff --git a/trunk/arch/s390/kernel/processor.c b/trunk/arch/s390/kernel/processor.c index 0729f36c2fe3..802c8ab247f3 100644 --- a/trunk/arch/s390/kernel/processor.c +++ b/trunk/arch/s390/kernel/processor.c @@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void) static int show_cpuinfo(struct seq_file *m, void *v) { - static const char *hwcap_str[10] = { + static const char *hwcap_str[9] = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat", "etf3eh", "highgprs" + "edat", "etf3eh" }; struct _lowcore *lc; unsigned long n = (unsigned long) v - 1; @@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) num_online_cpus(), loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ))%100); seq_puts(m, "features\t: "); - for (i = 0; i < 10; i++) + for (i = 0; i < 9; i++) if (hwcap_str[i] && (elf_hwcap & (1UL << i))) seq_printf(m, "%s ", hwcap_str[i]); seq_puts(m, "\n"); diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S index 3eb84931d2aa..68d9223b145e 100644 --- a/trunk/arch/sh/kernel/entry-common.S +++ b/trunk/arch/sh/kernel/entry-common.S @@ -121,7 +121,7 @@ noresched: ENTRY(resume_userspace) ! r8: current_thread_info cli - TRACE_IRQS_OFF + TRACE_IRQS_OfF mov.l @(TI_FLAGS,r8), r0 ! 
current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all diff --git a/trunk/arch/sh/kernel/ftrace.c b/trunk/arch/sh/kernel/ftrace.c index 2c48e267256e..a3dcc6d5d253 100644 --- a/trunk/arch/sh/kernel/ftrace.c +++ b/trunk/arch/sh/kernel/ftrace.c @@ -291,48 +291,31 @@ struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -int syscall_name_to_nr(char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - for (i = 0; i < NR_syscalls; i++) - if (syscalls_metadata[i]) - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) -{ - syscalls_metadata[num]->exit_id = id; -} - -static int __init arch_init_ftrace_syscalls(void) +void arch_init_ftrace_syscalls(void) { int i; struct syscall_metadata *meta; unsigned long **psys_syscall_table = &sys_call_table; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto end; syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * FTRACE_SYSCALL_MAX, GFP_KERNEL); if (!syscalls_metadata) { WARN_ON(1); - return -ENOMEM; + return; } for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { meta = find_syscall_meta(psys_syscall_table[i]); syscalls_metadata[i] = meta; } + return; - return 0; + /* Paranoid: avoid overflow */ +end: + atomic_dec(&refs); } -arch_initcall(arch_init_ftrace_syscalls); #endif /* CONFIG_FTRACE_SYSCALLS */ diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index 99b4fb553bf1..f9d44f8e0df6 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -549,8 +549,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu == 0) seq_printf(m, "machine\t\t: %s\n", get_system_type()); - else - seq_printf(m, "\n"); seq_printf(m, "processor\t: %d\n", cpu); seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine); diff --git a/trunk/arch/sh/kernel/signal_32.c b/trunk/arch/sh/kernel/signal_32.c index 3db37425210d..6729703547a1 100644 --- a/trunk/arch/sh/kernel/signal_32.c +++ b/trunk/arch/sh/kernel/signal_32.c @@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) { struct task_struct *tsk = current; - if (!(boot_cpu_data.flags & CPU_HAS_FPU)) + if (!(current_cpu_data.flags & CPU_HAS_FPU)) return 0; set_used_math(); @@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc, { struct task_struct *tsk = current; - if (!(boot_cpu_data.flags & CPU_HAS_FPU)) + if (!(current_cpu_data.flags & CPU_HAS_FPU)) return 0; if (!used_math()) { @@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p #undef COPY #ifdef CONFIG_SH_FPU - if (boot_cpu_data.flags & CPU_HAS_FPU) { + if (current_cpu_data.flags & CPU_HAS_FPU) { int owned_fp; struct task_struct *tsk = current; @@ -472,7 +472,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, err |= __put_user(OR_R0_R0, &frame->retcode[6]); err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); regs->pr = (unsigned long) frame->retcode; - flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); } if (err) @@ -498,6 +497,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); + flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); + return 0; give_sigsegv: diff 
--git a/trunk/arch/sh/kernel/smp.c b/trunk/arch/sh/kernel/smp.c index 160db1003cfb..442d8d47a41e 100644 --- a/trunk/arch/sh/kernel/smp.c +++ b/trunk/arch/sh/kernel/smp.c @@ -35,8 +35,6 @@ static inline void __init smp_store_cpu_info(unsigned int cpu) { struct sh_cpuinfo *c = cpu_data + cpu; - memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); - c->loops_per_jiffy = loops_per_jiffy; } diff --git a/trunk/arch/sh/kernel/traps_32.c b/trunk/arch/sh/kernel/traps_32.c index 7a2ee3a6b8e7..e0b5e4b5accd 100644 --- a/trunk/arch/sh/kernel/traps_32.c +++ b/trunk/arch/sh/kernel/traps_32.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -160,12 +159,12 @@ void die(const char * str, struct pt_regs * regs, long err) oops_enter(); - spin_lock_irq(&die_lock); console_verbose(); + spin_lock_irq(&die_lock); bust_spinlocks(1); printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); - sysfs_printk_last_file(); + print_modules(); show_regs(regs); @@ -181,7 +180,6 @@ void die(const char * str, struct pt_regs * regs, long err) bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); - oops_exit(); if (kexec_should_crash(current)) crash_kexec(regs); @@ -192,6 +190,7 @@ void die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); + oops_exit(); do_exit(SIGSEGV); } diff --git a/trunk/arch/sh/mm/cache.c b/trunk/arch/sh/mm/cache.c index 5e1091be9dc4..35c37b7f717a 100644 --- a/trunk/arch/sh/mm/cache.c +++ b/trunk/arch/sh/mm/cache.c @@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma, return; page = pfn_to_page(pfn); - if (pfn_valid(pfn)) { + if (pfn_valid(pfn) && page_mapping(page)) { int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); if (dirty) { unsigned long addr = (unsigned long)page_address(page); diff --git a/trunk/arch/sparc/kernel/ldc.c b/trunk/arch/sparc/kernel/ldc.c index cb3c72c45aab..adf5f273868a 100644 --- a/trunk/arch/sparc/kernel/ldc.c +++ b/trunk/arch/sparc/kernel/ldc.c @@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name) snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); err = request_irq(lp->cfg.rx_irq, ldc_rx, - IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_SHARED, lp->rx_irq_name, lp); if (err) return err; err = request_irq(lp->cfg.tx_irq, ldc_tx, - IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_SHARED, lp->tx_irq_name, lp); if (err) { free_irq(lp->cfg.rx_irq, lp); diff --git a/trunk/arch/sparc/kernel/perf_event.c b/trunk/arch/sparc/kernel/perf_event.c index fa5936e1c3b9..04db92743896 100644 --- a/trunk/arch/sparc/kernel/perf_event.c +++ b/trunk/arch/sparc/kernel/perf_event.c @@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = { .lower_shift = 6, .event_mask = 0xfff, .hv_bit = 0x8, - .irq_bit = 0x30, + .irq_bit = 0x03, .upper_nop = 0x220, .lower_nop = 0x220, }; diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c index 1886d37d411b..a70a5e1904d9 100644 --- a/trunk/arch/sparc/mm/init_64.c +++ b/trunk/arch/sparc/mm/init_64.c @@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn) struct page *page; page = pfn_to_page(pfn); - if (page) { + if (page && page_mapping(page)) { unsigned long pg_flags; pg_flags = page->flags; diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 07e01149e3bf..c876bace8fdc 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -491,7 +491,7 @@ if PARAVIRT_GUEST source 
"arch/x86/xen/Kconfig" config VMI - bool "VMI Guest support (DEPRECATED)" + bool "VMI Guest support" select PARAVIRT depends on X86_32 ---help--- @@ -500,15 +500,6 @@ config VMI at the moment), by linking the kernel to a GPL-ed ROM module provided by the hypervisor. - As of September 2009, VMware has started a phased retirement - of this feature from VMware's products. Please see - feature-removal-schedule.txt for details. If you are - planning to enable this option, please note that you cannot - live migrate a VMI enabled VM to a future VMware product, - which doesn't support VMI. So if you expect your kernel to - seamlessly migrate to newer VMware products, keep this - disabled. - config KVM_CLOCK bool "KVM paravirtualized clock" select PARAVIRT diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h index efb38994859c..8aebcc41041d 100644 --- a/trunk/arch/x86/include/asm/paravirt.h +++ b/trunk/arch/x86/include/asm/paravirt.h @@ -840,22 +840,42 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) static inline unsigned long __raw_local_save_flags(void) { - return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); + unsigned long f; + + asm volatile(paravirt_alt(PARAVIRT_CALL) + : "=a"(f) + : paravirt_type(pv_irq_ops.save_fl), + paravirt_clobber(CLBR_EAX) + : "memory", "cc"); + return f; } static inline void raw_local_irq_restore(unsigned long f) { - PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); + asm volatile(paravirt_alt(PARAVIRT_CALL) + : "=a"(f) + : PV_FLAGS_ARG(f), + paravirt_type(pv_irq_ops.restore_fl), + paravirt_clobber(CLBR_EAX) + : "memory", "cc"); } static inline void raw_local_irq_disable(void) { - PVOP_VCALLEE0(pv_irq_ops.irq_disable); + asm volatile(paravirt_alt(PARAVIRT_CALL) + : + : paravirt_type(pv_irq_ops.irq_disable), + paravirt_clobber(CLBR_EAX) + : "memory", "eax", "cc"); } static inline void raw_local_irq_enable(void) { - PVOP_VCALLEE0(pv_irq_ops.irq_enable); + asm volatile(paravirt_alt(PARAVIRT_CALL) + : + : paravirt_type(pv_irq_ops.irq_enable), + paravirt_clobber(CLBR_EAX) + : "memory", "eax", "cc"); } static inline unsigned long __raw_local_irq_save(void) diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h index 9357473c8da0..dd0f5b32489d 100644 --- a/trunk/arch/x86/include/asm/paravirt_types.h +++ b/trunk/arch/x86/include/asm/paravirt_types.h @@ -494,11 +494,10 @@ int paravirt_disable_iospace(void); #define EXTRA_CLOBBERS #define VEXTRA_CLOBBERS #else /* CONFIG_X86_64 */ -/* [re]ax isn't an arg, but the return val */ #define PVOP_VCALL_ARGS \ unsigned long __edi = __edi, __esi = __esi, \ - __edx = __edx, __ecx = __ecx, __eax = __eax -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS + __edx = __edx, __ecx = __ecx +#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) @@ -510,7 +509,6 @@ int paravirt_disable_iospace(void); "=c" (__ecx) #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) -/* void functions are still allowed [re]ax for scratch */ #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS @@ -585,8 +583,8 @@ int paravirt_disable_iospace(void); VEXTRA_CLOBBERS, \ pre, post, ##__VA_ARGS__) -#define __PVOP_VCALLEESAVE(op, pre, post, ...) \ - ____PVOP_VCALL(op.func, CLBR_RET_REG, \ +#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) 
\ + ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ PVOP_VCALLEE_CLOBBERS, , \ pre, post, ##__VA_ARGS__) diff --git a/trunk/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/trunk/arch/x86/kernel/acpi/realmode/wakeup.lds.S index 0e50e1e5c573..7da00b799cda 100644 --- a/trunk/arch/x86/kernel/acpi/realmode/wakeup.lds.S +++ b/trunk/arch/x86/kernel/acpi/realmode/wakeup.lds.S @@ -56,6 +56,6 @@ SECTIONS /DISCARD/ : { *(.note*) } -} -ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); + . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); +} diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 74656d1d4e30..391206199515 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) __func__, smp_processor_id(), vector, irq); } + run_local_timers(); irq_exit(); set_irq_regs(old_regs); @@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs) if (generic_interrupt_extension) generic_interrupt_extension(); + run_local_timers(); irq_exit(); set_irq_regs(old_regs); diff --git a/trunk/arch/x86/kernel/smp.c b/trunk/arch/x86/kernel/smp.c index ec1de97600e7..d915d956e66d 100644 --- a/trunk/arch/x86/kernel/smp.c +++ b/trunk/arch/x86/kernel/smp.c @@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); + run_local_timers(); /* * KVM uses this interrupt to force a cpu out of guest mode */ diff --git a/trunk/arch/x86/kernel/time.c b/trunk/arch/x86/kernel/time.c index be2573448ed9..dcb00d278512 100644 --- a/trunk/arch/x86/kernel/time.c +++ b/trunk/arch/x86/kernel/time.c @@ -38,8 +38,7 @@ unsigned long profile_pc(struct pt_regs *regs) #ifdef CONFIG_FRAME_POINTER return *(unsigned long *)(regs->bp + sizeof(long)); #else - unsigned long *sp = - (unsigned long *)kernel_stack_pointer(regs); + unsigned long *sp = (unsigned long *)regs->sp; /* * Return address is either directly at stack pointer * or above a saved flags. Eflags has bits 22-31 zero, diff --git a/trunk/arch/x86/kernel/trampoline.c b/trunk/arch/x86/kernel/trampoline.c index cd022121cab6..699f7eeb896a 100644 --- a/trunk/arch/x86/kernel/trampoline.c +++ b/trunk/arch/x86/kernel/trampoline.c @@ -3,16 +3,8 @@ #include #include -#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) -#define __trampinit -#define __trampinitdata -#else -#define __trampinit __cpuinit -#define __trampinitdata __cpuinitdata -#endif - /* ready for x86_64 and x86 */ -unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE); +unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE); void __init reserve_trampoline_memory(void) { @@ -34,7 +26,7 @@ void __init reserve_trampoline_memory(void) * bootstrap into the page concerned. The caller * has made sure it's suitably aligned. */ -unsigned long __trampinit setup_trampoline(void) +unsigned long __cpuinit setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); diff --git a/trunk/arch/x86/kernel/trampoline_64.S b/trunk/arch/x86/kernel/trampoline_64.S index 3af2dff58b21..596d54c660a5 100644 --- a/trunk/arch/x86/kernel/trampoline_64.S +++ b/trunk/arch/x86/kernel/trampoline_64.S @@ -32,12 +32,8 @@ #include #include -#ifdef CONFIG_ACPI_SLEEP -.section .rodata, "a", @progbits -#else /* We can free up the trampoline after bootup if cpu hotplug is not supported. 
*/ __CPUINITRODATA -#endif .code16 ENTRY(trampoline_data) diff --git a/trunk/arch/x86/kernel/vmi_32.c b/trunk/arch/x86/kernel/vmi_32.c index d430e4c30193..31e6f6cfe53e 100644 --- a/trunk/arch/x86/kernel/vmi_32.c +++ b/trunk/arch/x86/kernel/vmi_32.c @@ -648,7 +648,7 @@ static inline int __init activate_vmi(void) pv_info.paravirt_enabled = 1; pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; - pv_info.name = "vmi [deprecated]"; + pv_info.name = "vmi"; pv_init_ops.patch = vmi_patch; diff --git a/trunk/arch/x86/kernel/vmlinux.lds.S b/trunk/arch/x86/kernel/vmlinux.lds.S index 8d6001ad8d8d..92929fb3f9fa 100644 --- a/trunk/arch/x86/kernel/vmlinux.lds.S +++ b/trunk/arch/x86/kernel/vmlinux.lds.S @@ -305,8 +305,8 @@ SECTIONS #ifdef CONFIG_X86_32 -ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE"); +. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); #else /* * Per-cpu symbols which need to be offset from __per_cpu_load @@ -319,12 +319,12 @@ INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ -ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE"); +. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP -ASSERT((per_cpu__irq_stack_union == 0), - "irq_stack_union is not at start of per-cpu area"); +. = ASSERT((per_cpu__irq_stack_union == 0), + "irq_stack_union is not at start of per-cpu area"); #endif #endif /* CONFIG_X86_32 */ @@ -332,6 +332,7 @@ ASSERT((per_cpu__irq_stack_union == 0), #ifdef CONFIG_KEXEC #include -ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, - "kexec control code size is too big"); +. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, + "kexec control code size is too big"); #endif + diff --git a/trunk/drivers/char/tty_buffer.c b/trunk/drivers/char/tty_buffer.c index 66fa4e10d76b..0296612cc7df 100644 --- a/trunk/drivers/char/tty_buffer.c +++ b/trunk/drivers/char/tty_buffer.c @@ -468,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work) */ void tty_flush_to_ldisc(struct tty_struct *tty) { - flush_delayed_work(&tty->buf.work); + flush_to_ldisc(&tty->buf.work.work); } /** diff --git a/trunk/drivers/firewire/sbp2.c b/trunk/drivers/firewire/sbp2.c index 98dbbda3ad41..50f0176de615 100644 --- a/trunk/drivers/firewire/sbp2.c +++ b/trunk/drivers/firewire/sbp2.c @@ -188,7 +188,14 @@ static struct fw_device *target_device(struct sbp2_target *tgt) /* Impossible login_id, to detect logout attempt before successful login */ #define INVALID_LOGIN_ID 0x10000 -#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ +/* + * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be + * provided in the config rom. Most devices do provide a value, which + * we'll use for login management orbs, but with some sane limits. 
+ */ +#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ +#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ +#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ #define SBP2_ORB_NULL 0x80000000 #define SBP2_RETRY_LIMIT 0xf /* 15 retries */ #define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ @@ -1027,6 +1034,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, { struct fw_csr_iterator ci; int key, value; + unsigned int timeout; fw_csr_iterator_init(&ci, directory); while (fw_csr_iterator_next(&ci, &key, &value)) { @@ -1051,7 +1059,17 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, case SBP2_CSR_UNIT_CHARACTERISTICS: /* the timeout value is stored in 500ms units */ - tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500; + timeout = ((unsigned int) value >> 8 & 0xff) * 500; + timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT); + tgt->mgt_orb_timeout = + min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT); + + if (timeout > tgt->mgt_orb_timeout) + fw_notify("%s: config rom contains %ds " + "management ORB timeout, limiting " + "to %ds\n", tgt->bus_id, + timeout / 1000, + tgt->mgt_orb_timeout / 1000); break; case SBP2_CSR_LOGICAL_UNIT_NUMBER: @@ -1069,22 +1087,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, return 0; } -/* - * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be - * provided in the config rom. Most devices do provide a value, which - * we'll use for login management orbs, but with some sane limits. - */ -static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) -{ - unsigned int timeout = tgt->mgt_orb_timeout; - - if (timeout > 40000) - fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n", - tgt->bus_id, timeout / 1000); - - tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); -} - static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, u32 firmware_revision) { @@ -1169,7 +1171,6 @@ static int sbp2_probe(struct device *dev) &firmware_revision) < 0) goto fail_tgt_put; - sbp2_clamp_management_orb_timeout(tgt); sbp2_init_workarounds(tgt, model, firmware_revision); /* diff --git a/trunk/drivers/oprofile/event_buffer.c b/trunk/drivers/oprofile/event_buffer.c index 5df60a6b6776..2b7ae366ceb1 100644 --- a/trunk/drivers/oprofile/event_buffer.c +++ b/trunk/drivers/oprofile/event_buffer.c @@ -35,23 +35,12 @@ static size_t buffer_pos; /* atomic_t because wait_event checks it outside of buffer_mutex */ static atomic_t buffer_ready = ATOMIC_INIT(0); -/* - * Add an entry to the event buffer. When we get near to the end we - * wake up the process sleeping on the read() of the file. To protect - * the event_buffer this function may only be called when buffer_mutex - * is set. +/* Add an entry to the event buffer. When we + * get near to the end we wake up the process + * sleeping on the read() of the file. */ void add_event_entry(unsigned long value) { - /* - * This shouldn't happen since all workqueues or handlers are - * canceled or flushed before the event buffer is freed. 
- */ - if (!event_buffer) { - WARN_ON_ONCE(1); - return; - } - if (buffer_pos == buffer_size) { atomic_inc(&oprofile_stats.event_lost_overflow); return; @@ -80,6 +69,7 @@ void wake_up_buffer_waiter(void) int alloc_event_buffer(void) { + int err = -ENOMEM; unsigned long flags; spin_lock_irqsave(&oprofilefs_lock, flags); @@ -90,22 +80,21 @@ int alloc_event_buffer(void) if (buffer_watershed >= buffer_size) return -EINVAL; - buffer_pos = 0; event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); if (!event_buffer) - return -ENOMEM; + goto out; - return 0; + err = 0; +out: + return err; } void free_event_buffer(void) { - mutex_lock(&buffer_mutex); vfree(event_buffer); - buffer_pos = 0; + event_buffer = NULL; - mutex_unlock(&buffer_mutex); } @@ -178,12 +167,6 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf, mutex_lock(&buffer_mutex); - /* May happen if the buffer is freed during pending reads. */ - if (!event_buffer) { - retval = -EINTR; - goto out; - } - atomic_set(&buffer_ready, 0); retval = -EFAULT; diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c index aaccc8ecfa8f..53b8c255360a 100644 --- a/trunk/drivers/s390/block/dasd.c +++ b/trunk/drivers/s390/block/dasd.c @@ -2533,7 +2533,6 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, { struct dasd_ccw_req *cqr; struct ccw1 *ccw; - unsigned long *idaw; cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); @@ -2547,17 +2546,9 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, ccw = cqr->cpaddr; ccw->cmd_code = CCW_CMD_RDC; - if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { - idaw = (unsigned long *) (cqr->data); - ccw->cda = (__u32)(addr_t) idaw; - ccw->flags = CCW_FLAG_IDA; - idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); - } else { - ccw->cda = (__u32)(addr_t) rdc_buffer; - ccw->flags = 0; - } - + ccw->cda = (__u32)(addr_t)rdc_buffer; ccw->count = rdc_buffer_size; + cqr->startdev = device; cqr->memdev = device; cqr->expires = 10*HZ; diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index 417b97cd3f94..0be7c15f45c5 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -3216,7 +3216,6 @@ int dasd_eckd_restore_device(struct dasd_device *device) struct dasd_eckd_characteristics temp_rdc_data; int is_known, rc; struct dasd_uid temp_uid; - unsigned long flags; private = (struct dasd_eckd_private *) device->private; @@ -3229,8 +3228,7 @@ int dasd_eckd_restore_device(struct dasd_device *device) rc = dasd_eckd_generate_uid(device, &private->uid); dasd_get_uid(device->cdev, &temp_uid); if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) - dev_err(&device->cdev->dev, "The UID of the DASD has " - "changed\n"); + dev_err(&device->cdev->dev, "The UID of the DASD has changed\n"); if (rc) goto out_err; dasd_set_uid(device->cdev, &private->uid); @@ -3258,9 +3256,9 @@ int dasd_eckd_restore_device(struct dasd_device *device) "device: %s", rc, dev_name(&device->cdev->dev)); goto out_err; } - spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + spin_lock(get_ccwdev_lock(device->cdev)); memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); - spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + spin_unlock(get_ccwdev_lock(device->cdev)); /* add device to alias management */ dasd_alias_add_device(device); diff --git a/trunk/drivers/s390/char/sclp_async.c b/trunk/drivers/s390/char/sclp_async.c 
index a4f68e5b9c96..daaec185ed36 100644 --- a/trunk/drivers/s390/char/sclp_async.c +++ b/trunk/drivers/s390/char/sclp_async.c @@ -62,7 +62,7 @@ static struct notifier_block call_home_panic_nb = { .priority = INT_MAX, }; -static int proc_handler_callhome(struct ctl_table *ctl, int write, +static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp, void __user *buffer, size_t *count, loff_t *ppos) { @@ -100,7 +100,7 @@ static struct ctl_table callhome_table[] = { { .procname = "callhome", .mode = 0644, - .proc_handler = proc_handler_callhome, + .proc_handler = &proc_handler_callhome, }, { .ctl_name = 0 } }; diff --git a/trunk/drivers/s390/char/sclp_vt220.c b/trunk/drivers/s390/char/sclp_vt220.c index b9d2a007e93b..178724f2a4c3 100644 --- a/trunk/drivers/s390/char/sclp_vt220.c +++ b/trunk/drivers/s390/char/sclp_vt220.c @@ -705,6 +705,21 @@ static int __init sclp_vt220_tty_init(void) } __initcall(sclp_vt220_tty_init); +#ifdef CONFIG_SCLP_VT220_CONSOLE + +static void +sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) +{ + __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0); +} + +static struct tty_driver * +sclp_vt220_con_device(struct console *c, int *index) +{ + *index = 0; + return sclp_vt220_driver; +} + static void __sclp_vt220_flush_buffer(void) { unsigned long flags; @@ -761,21 +776,6 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg, } } -#ifdef CONFIG_SCLP_VT220_CONSOLE - -static void -sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) -{ - __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0); -} - -static struct tty_driver * -sclp_vt220_con_device(struct console *c, int *index) -{ - *index = 0; - return sclp_vt220_driver; -} - static int sclp_vt220_notify(struct notifier_block *self, unsigned long event, void *data) diff --git a/trunk/drivers/s390/char/tape_block.c b/trunk/drivers/s390/char/tape_block.c index 0c0705b91c28..64f57ef2763c 100644 --- a/trunk/drivers/s390/char/tape_block.c +++ b/trunk/drivers/s390/char/tape_block.c @@ -162,10 +162,9 @@ tapeblock_requeue(struct work_struct *work) { spin_lock_irq(&device->blk_data.request_queue_lock); while ( !blk_queue_plugged(queue) && - blk_peek_request(queue) && + (req = blk_fetch_request(queue)) && nr_queued < TAPEBLOCK_MIN_REQUEUE ) { - req = blk_fetch_request(queue); if (rq_data_dir(req) == WRITE) { DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); spin_unlock_irq(&device->blk_data.request_queue_lock); diff --git a/trunk/drivers/s390/cio/device.c b/trunk/drivers/s390/cio/device.c index 2490b741e16a..2ee093ec86e4 100644 --- a/trunk/drivers/s390/cio/device.c +++ b/trunk/drivers/s390/cio/device.c @@ -1250,7 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch) unsigned long flags; struct ccw_dev_id dev_id; - if (cio_is_console(sch->schid)) { + cdev = sch_get_cdev(sch); + if (cdev) { rc = sysfs_create_group(&sch->dev.kobj, &io_subchannel_attr_group); if (rc) @@ -1259,13 +1260,13 @@ static int io_subchannel_probe(struct subchannel *sch) "0.%x.%04x (rc=%d)\n", sch->schid.ssid, sch->schid.sch_no, rc); /* - * The console subchannel already has an associated ccw_device. + * This subchannel already has an associated ccw_device. * Throw the delayed uevent for the subchannel, register - * the ccw_device and exit. + * the ccw_device and exit. This happens for all early + * devices, e.g. the console. 
*/ dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - cdev = sch_get_cdev(sch); cdev->dev.groups = ccwdev_attr_groups; device_initialize(&cdev->dev); ccw_device_register(cdev); diff --git a/trunk/drivers/watchdog/riowd.c b/trunk/drivers/watchdog/riowd.c index d3c824dc2358..1e8f02f440e6 100644 --- a/trunk/drivers/watchdog/riowd.c +++ b/trunk/drivers/watchdog/riowd.c @@ -206,7 +206,7 @@ static int __devinit riowd_probe(struct of_device *op, dev_set_drvdata(&op->dev, p); riowd_device = p; - return 0; + err = 0; out_iounmap: of_iounmap(&op->resource[0], p->regs, 2); diff --git a/trunk/include/linux/workqueue.h b/trunk/include/linux/workqueue.h index cf24c20de9e4..7ef0c7b94f31 100644 --- a/trunk/include/linux/workqueue.h +++ b/trunk/include/linux/workqueue.h @@ -207,7 +207,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); -extern void flush_delayed_work(struct delayed_work *work); extern int schedule_work(struct work_struct *work); extern int schedule_work_on(int cpu, struct work_struct *work); diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 9af56723c096..3815ac1d58b2 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -142,11 +142,6 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); -static inline u64 lockstat_clock(void) -{ - return cpu_clock(smp_processor_id()); -} - static int lock_point(unsigned long points[], unsigned long ip) { int i; @@ -163,7 +158,7 @@ static int lock_point(unsigned long points[], unsigned long ip) return i; } -static void lock_time_inc(struct lock_time *lt, u64 time) +static void lock_time_inc(struct lock_time *lt, s64 time) { if (time > lt->max) lt->max = time; @@ -239,12 +234,12 @@ static void put_lock_stats(struct lock_class_stats *stats) static void lock_release_holdtime(struct held_lock *hlock) { struct lock_class_stats *stats; - u64 holdtime; + s64 holdtime; if (!lock_stat) return; - holdtime = lockstat_clock() - hlock->holdtime_stamp; + holdtime = sched_clock() - hlock->holdtime_stamp; stats = get_lock_stats(hlock_class(hlock)); if (hlock->read) @@ -2797,7 +2792,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->references = references; #ifdef CONFIG_LOCK_STAT hlock->waittime_stamp = 0; - hlock->holdtime_stamp = lockstat_clock(); + hlock->holdtime_stamp = sched_clock(); #endif if (check == 2 && !mark_irqflags(curr, hlock)) @@ -3327,7 +3322,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) if (hlock->instance != lock) return; - hlock->waittime_stamp = lockstat_clock(); + hlock->waittime_stamp = sched_clock(); contention_point = lock_point(hlock_class(hlock)->contention_point, ip); contending_point = lock_point(hlock_class(hlock)->contending_point, @@ -3350,7 +3345,8 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; - u64 now, waittime = 0; + u64 now; + s64 waittime = 0; int i, cpu; depth = curr->lockdep_depth; @@ -3378,7 +3374,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) cpu = smp_processor_id(); if (hlock->waittime_stamp) { - now = lockstat_clock(); + now = sched_clock(); waittime = now - hlock->waittime_stamp; hlock->holdtime_stamp = now; } diff --git a/trunk/kernel/sched.c 
b/trunk/kernel/sched.c index e88689522e66..8d25be06db62 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -676,7 +676,6 @@ inline void update_rq_clock(struct rq *rq) /** * runqueue_is_locked - * @cpu: the processor in question. * * Returns true if the current cpu runqueue is locked. * This interface allows printk to be called with the runqueue lock @@ -2312,7 +2311,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; - struct rq *rq, *orig_rq; + struct rq *rq; if (!sched_feat(SYNC_WAKEUPS)) wake_flags &= ~WF_SYNC; @@ -2320,7 +2319,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, this_cpu = get_cpu(); smp_wmb(); - rq = orig_rq = task_rq_lock(p, &flags); + rq = task_rq_lock(p, &flags); update_rq_clock(rq); if (!(p->state & state)) goto out; @@ -2351,10 +2350,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, set_task_cpu(p, cpu); rq = task_rq_lock(p, &flags); - - if (rq != orig_rq) - update_rq_clock(rq); - WARN_ON(p->state != TASK_WAKING); cpu = task_cpu(p); @@ -3661,7 +3656,6 @@ static void update_group_power(struct sched_domain *sd, int cpu) /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. - * @sd: The sched_domain whose statistics are to be updated. * @group: sched_group whose statistics are to be updated. * @this_cpu: Cpu for which load balance is currently performed. * @idle: Idle status of this_cpu diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index c820b0310a12..45068269ebb1 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { - return trace_array_vprintk(&global_trace, ip, fmt, args); + return trace_array_printk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); diff --git a/trunk/kernel/trace/trace_events_filter.c b/trunk/kernel/trace/trace_events_filter.c index 98a6cc5c64ed..23245785927f 100644 --- a/trunk/kernel/trace/trace_events_filter.c +++ b/trunk/kernel/trace/trace_events_filter.c @@ -933,9 +933,8 @@ static void postfix_clear(struct filter_parse_state *ps) while (!list_empty(&ps->postfix)) { elt = list_first_entry(&ps->postfix, struct postfix_elt, list); - list_del(&elt->list); kfree(elt->operand); - kfree(elt); + list_del(&elt->list); } } diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index ccefe574dcf7..addfe2df93b1 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -639,24 +639,6 @@ int schedule_delayed_work(struct delayed_work *dwork, } EXPORT_SYMBOL(schedule_delayed_work); -/** - * flush_delayed_work - block until a dwork_struct's callback has terminated - * @dwork: the delayed work which is to be flushed - * - * Any timeout is cancelled, and any pending work is run immediately. 
- */ -void flush_delayed_work(struct delayed_work *dwork) -{ - if (del_timer(&dwork->timer)) { - struct cpu_workqueue_struct *cwq; - cwq = wq_per_cpu(keventd_wq, get_cpu()); - __queue_work(cwq, &dwork->work); - put_cpu(); - } - flush_work(&dwork->work); -} -EXPORT_SYMBOL(flush_delayed_work); - /** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay * @cpu: cpu to use diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 742a32eee8fc..5881943f0c34 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -157,18 +157,11 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') -# -# Add -m32 for cross-builds: -# -ifdef NO_64BIT - MBITS := -m32 -else - # - # If we're on a 64-bit kernel, use -m64: - # - ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) - MBITS := -m64 - endif +# If we're on a 64-bit kernel, use -m64 +ifndef NO_64BIT + ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) + M64 := -m64 + endif endif # CFLAGS and LDFLAGS are for the users to override from the command line. @@ -201,7 +194,7 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement -CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) +CFLAGS = $(M64) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) LDFLAGS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) @@ -423,7 +416,7 @@ ifeq ($(uname_S),Darwin) endif ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); + msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif ifdef NO_DEMANGLE diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c index ce2d5be4f30e..ea9c15c0cdfe 100644 --- a/trunk/tools/perf/builtin-sched.c +++ b/trunk/tools/perf/builtin-sched.c @@ -1287,7 +1287,7 @@ static struct sort_dimension *available_sorts[] = { static LIST_HEAD(sort_list); -static int sort_dimension__add(const char *tok, struct list_head *list) +static int sort_dimension__add(char *tok, struct list_head *list) { int i; @@ -1917,7 +1917,7 @@ static void setup_sorting(void) free(str); - sort_dimension__add("pid", &cmp_pid); + sort_dimension__add((char *)"pid", &cmp_pid); } static const char *record_args[] = { diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 8cfb48cbbea0..87c424de79ee 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -691,10 +691,7 @@ static void store_event_type(const char *orgname) FILE *file; int id; - sprintf(filename, "%s/", debugfs_path); - strncat(filename, orgname, strlen(orgname)); - strcat(filename, "/id"); - + sprintf(filename, "/sys/kernel/debug/tracing/events/%s/id", orgname); c = strchr(filename, ':'); if (c) *c = '/'; diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c index 
55c9659a56e2..55b41b9e3834 100644 --- a/trunk/tools/perf/util/trace-event-parse.c +++ b/trunk/tools/perf/util/trace-event-parse.c @@ -618,7 +618,7 @@ static int test_type(enum event_type type, enum event_type expect) } static int test_type_token(enum event_type type, char *token, - enum event_type expect, const char *expect_tok) + enum event_type expect, char *expect_tok) { if (type != expect) { die("Error: expected type %d but read %d", @@ -650,7 +650,7 @@ static int read_expect_type(enum event_type expect, char **tok) return __read_expect_type(expect, tok, 1); } -static int __read_expected(enum event_type expect, const char *str, int newline_ok) +static int __read_expected(enum event_type expect, char *str, int newline_ok) { enum event_type type; char *token; @@ -668,12 +668,12 @@ static int __read_expected(enum event_type expect, const char *str, int newline_ return 0; } -static int read_expected(enum event_type expect, const char *str) +static int read_expected(enum event_type expect, char *str) { return __read_expected(expect, str, 1); } -static int read_expected_item(enum event_type expect, const char *str) +static int read_expected_item(enum event_type expect, char *str) { return __read_expected(expect, str, 0); }