Commit 973f633

---
r: 145113
b: refs/heads/master
c: 3633832
h: refs/heads/master
i:
  145111: 62494fd
v: v3
Linus Torvalds committed May 18, 2009
1 parent 347acb2 commit 973f633
Showing 26 changed files with 108 additions and 47 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 0e337b42d620ca7c45fe64e64dd71957c56216c9
refs/heads/master: 363383277081ce831642b72df40932ee05ce40a2
13 changes: 13 additions & 0 deletions trunk/arch/x86/Kconfig
@@ -498,6 +498,19 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.

config PARAVIRT_SPINLOCKS
bool "Paravirtualization layer for spinlocks"
depends on PARAVIRT && SMP && EXPERIMENTAL
---help---
Paravirtualized spinlocks allow a pvops backend to replace the
spinlock implementation with something virtualization-friendly
(for example, block the virtual CPU rather than spinning).

Unfortunately the downside is an up to 5% performance hit on
native kernels, with various workloads.

If you are unsure how to answer this question, answer N.

config PARAVIRT_CLOCK
bool
default n
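The new option routes the spinlock entry points through the pv_lock_ops table so a hypervisor backend (for example the Xen spinlock.o added further down) can swap in a blocking implementation. A minimal user-space sketch of that indirection, with invented names rather than the kernel's real structures:

#include <stdio.h>

/* sketch_lock_ops stands in for the kernel's pv_lock_ops table */
struct sketch_lock_ops {
    void (*spin_lock)(int *lock);
    void (*spin_unlock)(int *lock);
};

static void native_spin_lock(int *lock)
{
    while (__sync_lock_test_and_set(lock, 1))
        ;   /* spin: cheap on bare metal */
}

static void native_spin_unlock(int *lock)
{
    __sync_lock_release(lock);
}

/* A paravirt backend would overwrite these pointers at boot with
 * something that blocks the virtual CPU instead of spinning. */
static struct sketch_lock_ops lock_ops = {
    .spin_lock   = native_spin_lock,
    .spin_unlock = native_spin_unlock,
};

int main(void)
{
    int lock = 0;

    lock_ops.spin_lock(&lock);
    printf("locked: %d\n", lock);
    lock_ops.spin_unlock(&lock);
    printf("unlocked: %d\n", lock);
    return 0;
}

Callers never change; only the table contents do, which is why the Kconfig help can talk about replacing the implementation without touching the spinlock API.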
2 changes: 1 addition & 1 deletion trunk/arch/x86/include/asm/paravirt.h
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);

#define paravirt_nop ((void *)_paravirt_nop)

#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
10 changes: 5 additions & 5 deletions trunk/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do { \
case 1: \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
: "qi" ((T__)(val))); \
break; \
case 2: \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
: "ri" ((T__)(val))); \
break; \
case 4: \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
: "ri" ((T__)(val))); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
: "re" ((T__)val)); \
: "re" ((T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
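The 1-byte cases (here, and the read side in the next hunk) switch to the "q" register class because on 32-bit x86 only %eax/%ebx/%ecx/%edx have 8-bit sub-registers; a plain "r" lets the compiler pick %esi or %edi, which cannot be addressed as bytes there, and the "b"-suffixed instruction then fails to assemble. A standalone GCC/x86 illustration with a hypothetical helper, not kernel code:

#include <stdio.h>

static unsigned char counter;

static void add_byte(unsigned char val)
{
    /* "q" restricts val to a register that has an 8-bit form
     * (%al..%dl on 32-bit x86); "i" still allows an immediate. */
    asm("addb %1,%0" : "+m" (counter) : "qi" (val));
}

int main(void)
{
    add_byte(3);
    add_byte(4);
    printf("%u\n", counter);    /* 7 */
    return 0;
}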
@@ -109,7 +109,7 @@ do { \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "=q" (ret__) \
: "m" (var)); \
break; \
case 2: \
7 changes: 4 additions & 3 deletions trunk/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)

/*
* X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
* when it traps. So regs will be the current sp.
* when it traps. The previous stack will be directly underneath the saved
* registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
*
* This is valid only for kernel mode traps.
*/
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (unsigned long)regs;
return (unsigned long)(&regs->sp);
#else
return regs->sp;
#endif
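The renamed helper encodes the comment's point: a same-privilege 32-bit trap does not push sp/ss, so the interrupted stack begins exactly where those two slots would have been saved, i.e. at &regs->sp. A mock layout (not the kernel's real struct pt_regs) showing the address arithmetic:

#include <stdio.h>
#include <stddef.h>

struct mock_pt_regs {
    unsigned long bx, cx, dx, si, di, bp, ax;
    unsigned long ip, cs, flags;
    unsigned long sp, ss;   /* pushed only when the privilege level changes */
};

int main(void)
{
    struct mock_pt_regs regs;

    /* For a kernel-mode trap the previous stack starts at the slot
     * where sp would have been saved. */
    printf("frame base %p\n", (void *)&regs);
    printf("&regs->sp  %p (offset %zu)\n",
           (void *)&regs.sp, offsetof(struct mock_pt_regs, sp));
    return 0;
}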
4 changes: 2 additions & 2 deletions trunk/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

#ifndef CONFIG_PARAVIRT
#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
__raw_spin_lock(lock);
}

#endif
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
3 changes: 2 additions & 1 deletion trunk/arch/x86/kernel/Makefile
@@ -89,7 +89,8 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
obj-$(CONFIG_KVM_GUEST) += kvm.o
obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o

obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
8 changes: 4 additions & 4 deletions trunk/arch/x86/kernel/apic/es7000_32.c
@@ -254,7 +254,7 @@ static int parse_unisys_oem(char *oemptr)
}

#ifdef CONFIG_ACPI
static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_header *header = NULL;
struct es7000_oem_table *table;
@@ -285,7 +285,7 @@ static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
return 0;
}

static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
{
if (!oem_addr)
return;
@@ -306,7 +306,7 @@ static int es7000_check_dsdt(void)
static int es7000_acpi_ret;

/* Hook from generic ACPI tables.c */
static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
unsigned long oem_addr = 0;
int check_dsdt;
@@ -717,7 +717,7 @@ struct apic apic_es7000_cluster = {
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};

struct apic apic_es7000 = {
struct apic __refdata apic_es7000 = {

.name = "es7000",
.probe = probe_es7000,
6 changes: 5 additions & 1 deletion trunk/arch/x86/kernel/cpu/mtrr/generic.c
@@ -275,7 +275,11 @@ static void __init print_mtrr_state(void)
}
printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
mtrr_state.enabled & 2 ? "en" : "dis");
high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
if (size_or_mask & 0xffffffffUL)
high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
else
high_width = ffs(size_or_mask>>32) + 32 - 1;
high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",
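The replacement splits the bit scan because the kernel's ffs() operates on a 32-bit value: a 64-bit size_or_mask whose low word is all zero has to be scanned in its high word, with 32 added to the result. A user-space sketch of the same split scan, using ffs() from <strings.h> as a stand-in:

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* ffs() only sees 32 bits, so a 64-bit mask with an empty low word is
 * scanned in its high word instead, as in the hunk above. */
static int ffs64(unsigned long long v)
{
    if (v & 0xffffffffULL)
        return ffs((int)(v & 0xffffffffULL));
    return ffs((int)(v >> 32)) + 32;
}

int main(void)
{
    printf("%d\n", ffs64(0x0000000000001000ULL)); /* bit 12 set -> 13 */
    printf("%d\n", ffs64(0x0000001000000000ULL)); /* bit 36 set -> 37 */
    return 0;
}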
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/ftrace.c
@@ -442,7 +442,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b)

: [old] "=r" (old), [faulted] "=r" (faulted)
: [old] "=&r" (old), [faulted] "=r" (faulted)
: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);
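The added "&" marks [old] as an earlyclobber output: it is written before all of the inputs have been consumed, so GCC must not place it in the same register as [parent] or [return_hooker]. A toy GCC/x86 example of the same constraint, unrelated to ftrace:

#include <stdio.h>

static long diff(long a, long b)
{
    long out;

    /* 'out' is written before 'b' is read, so "=&r" keeps it out of
     * b's register; a plain "=r" would allow them to alias. */
    asm("mov %1, %0\n\t"
        "sub %2, %0"
        : "=&r" (out)
        : "r" (a), "r" (b));
    return out;
}

int main(void)
{
    printf("%ld\n", diff(10, 3));   /* 7 */
    return 0;
}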
2 changes: 2 additions & 0 deletions trunk/arch/x86/kernel/paravirt.c
@@ -134,7 +134,9 @@ static void *get_call_destination(u8 type)
.pv_irq_ops = pv_irq_ops,
.pv_apic_ops = pv_apic_ops,
.pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
.pv_lock_ops = pv_lock_ops,
#endif
};
return *((void **)&tmpl + type);
}
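get_call_destination() treats the template as a flat array of pointer-sized slots and indexes it by 'type'; the new #ifdef simply keeps the pv_lock_ops slots out of that table when the option is off. A simplified standalone sketch of the indexing (each slot is a single pointer here, whereas the kernel packs whole ops structures):

#include <stdio.h>

/* mock_template stands in for paravirt_patch_template: all members are
 * pointer-sized, so the struct can be walked as a void * array. */
struct mock_template {
    void *init_op;
    void *time_op;
    void *lock_op;
};

static void *slot(struct mock_template *tmpl, unsigned int type)
{
    return *((void **)tmpl + type);
}

int main(void)
{
    struct mock_template tmpl = {
        .init_op = (void *)0x100,
        .time_op = (void *)0x200,
        .lock_op = (void *)0x300,
    };
    unsigned int type;

    for (type = 0; type < 3; type++)
        printf("slot %u -> %p\n", type, slot(&tmpl, type));
    return 0;
}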
2 changes: 1 addition & 1 deletion trunk/arch/x86/oprofile/backtrace.c
@@ -76,9 +76,9 @@ void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
struct frame_head *head = (struct frame_head *)frame_pointer(regs);
unsigned long stack = kernel_trap_sp(regs);

if (!user_mode_vm(regs)) {
unsigned long stack = kernel_stack_pointer(regs);
if (depth)
dump_trace(NULL, regs, (unsigned long *)stack, 0,
&backtrace_ops, &depth);
5 changes: 3 additions & 2 deletions trunk/arch/x86/xen/Makefile
@@ -9,5 +9,6 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o

obj-$(CONFIG_SMP) += smp.o spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
1 change: 1 addition & 0 deletions trunk/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
19 changes: 15 additions & 4 deletions trunk/arch/x86/xen/xen-ops.h
@@ -62,15 +62,26 @@ void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
void xen_smp_init(void);

void __init xen_init_spinlocks(void);
__cpuinit void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);

extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init xen_init_spinlocks(void);
__cpuinit void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);
#else
static inline void xen_init_spinlocks(void)
{
}
static inline void xen_init_lock_cpu(int cpu)
{
}
static inline void xen_uninit_lock_cpu(int cpu)
{
}
#endif

/* Declare an asm function, along with symbols needed to make it
inlineable */
3 changes: 2 additions & 1 deletion trunk/crypto/api.c
@@ -221,7 +221,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)

request_module(name);

if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
CRYPTO_ALG_NEED_FALLBACK) &&
snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
request_module(tmp);

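With the extra "& CRYPTO_ALG_NEED_FALLBACK", the test now depends only on the NEED_FALLBACK bit of 'type' and 'mask'; previously any other bit covered by 'mask' could flip the result and suppress the "-all" module request. A purely mechanical before/after comparison, with an invented flag value (the real one lives in <linux/crypto.h>):

#include <stdio.h>

#define NEED_FALLBACK 0x100u    /* invented value, for illustration only */

int main(void)
{
    /* 'type' has NEED_FALLBACK plus one unrelated masked bit set. */
    unsigned int type = NEED_FALLBACK | 0x1u;
    unsigned int mask = NEED_FALLBACK | 0xfu;

    int old_test = !((type ^ NEED_FALLBACK) & mask);
    int new_test = !((type ^ NEED_FALLBACK) & mask & NEED_FALLBACK);

    /* The unrelated 0x1 bit flips the old test; the new test looks
     * only at the NEED_FALLBACK bit. */
    printf("old: %d  new: %d\n", old_test, new_test);  /* old: 0  new: 1 */
    return 0;
}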
3 changes: 2 additions & 1 deletion trunk/crypto/eseqiv.c
@@ -153,7 +153,8 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
if (err)
goto out;

eseqiv_complete2(req);
if (giv != req->giv)
eseqiv_complete2(req);

out:
return err;
33 changes: 32 additions & 1 deletion trunk/drivers/crypto/ixp4xx_crypto.c
@@ -415,6 +415,7 @@ static void crypto_done_action(unsigned long arg)
static int init_ixp_crypto(void)
{
int ret = -ENODEV;
u32 msg[2] = { 0, 0 };

if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
@@ -426,9 +427,35 @@ static int init_ixp_crypto(void)
return ret;

if (!npe_running(npe_c)) {
npe_load_firmware(npe_c, npe_name(npe_c), dev);
ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
if (ret) {
return ret;
}
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
} else {
if (npe_send_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;

if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
}

switch ((msg[1]>>16) & 0xff) {
case 3:
printk(KERN_WARNING "Firmware of %s lacks AES support\n",
npe_name(npe_c));
support_aes = 0;
break;
case 4:
case 5:
support_aes = 1;
break;
default:
printk(KERN_ERR "Firmware of %s lacks crypto support\n",
npe_name(npe_c));
return -ENODEV;
}
/* buffer_pool will also be used to sometimes store the hmac,
* so assure it is large enough
*/
@@ -459,6 +486,10 @@ static int init_ixp_crypto(void)

qmgr_enable_irq(RECV_QID);
return 0;

npe_error:
printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
2 changes: 1 addition & 1 deletion trunk/drivers/crypto/padlock-aes.c
@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes-all");
MODULE_ALIAS("aes");
2 changes: 1 addition & 1 deletion trunk/drivers/mtd/devices/mtd_dataflash.c
@@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Calculate flash page address; use block erase (for speed) if
* we're at a block boundary and need to erase the whole block.
*/
pageaddr = div_u64(instr->len, priv->page_size);
pageaddr = div_u64(instr->addr, priv->page_size);
do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
pageaddr = pageaddr << priv->page_offset;

9 changes: 1 addition & 8 deletions trunk/drivers/usb/serial/ftdi_sio.c
@@ -1487,14 +1487,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)

remove_sysfs_attrs(port);

/* all open ports are closed at this point
* (by usbserial.c:__serial_close, which calls ftdi_close)
*/

if (priv) {
usb_set_serial_port_data(port, NULL);
kref_put(&priv->kref, ftdi_sio_priv_release);
}
kref_put(&priv->kref, ftdi_sio_priv_release);

return 0;
}
2 changes: 1 addition & 1 deletion trunk/include/asm-generic/local.h
@@ -42,7 +42,7 @@ typedef struct

#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)

/* Non-atomic variants, ie. preemption disabled and won't be touched
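The parameter is renamed because the macro body also contains the member access "(&(l)->a)": with a parameter literally called "a", the member token after "->" gets substituted by the macro argument and the expansion breaks. A minimal reproduction with mock, non-atomic types (not the kernel's local_t):

#include <stdio.h>

struct mock_local { long a; };

/* Old shape of the macro: the parameter shadows the member name, so
 * mock_add_unless_old(&v, 2, 0) would expand to ((&v)->2 != (0) ? ...)
 * and not compile. Defined here only to show the shape; never used. */
#define mock_add_unless_old(l, a, u) \
    ((l)->a != (u) ? ((l)->a += (a), 1) : 0)
/* Fixed shape, as in the hunk above: */
#define mock_add_unless_new(l, _a, u) \
    ((l)->a != (u) ? ((l)->a += (_a), 1) : 0)

int main(void)
{
    struct mock_local v = { .a = 1 };

    /* adds 4 unless the counter already equals 0 */
    mock_add_unless_new(&v, 4, 0);
    printf("%ld\n", v.a);   /* 5 */
    return 0;
}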
4 changes: 2 additions & 2 deletions trunk/kernel/lockdep_internals.h
@@ -54,9 +54,9 @@ enum {
* table (if it's not there yet), and we check it for lock order
* conflicts and deadlocks.
*/
#define MAX_LOCKDEP_ENTRIES 8192UL
#define MAX_LOCKDEP_ENTRIES 16384UL

#define MAX_LOCKDEP_CHAINS_BITS 14
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
3 changes: 2 additions & 1 deletion trunk/kernel/sched_clock.c
@@ -38,7 +38,8 @@
*/
unsigned long long __attribute__((weak)) sched_clock(void)
{
return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
return (unsigned long long)(jiffies - INITIAL_JIFFIES)
* (NSEC_PER_SEC / HZ);
}

static __read_mostly int sched_clock_running;
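Subtracting INITIAL_JIFFIES makes the fallback sched_clock() start near zero at boot: jiffies itself is deliberately initialised a few minutes short of the 32-bit wrap so that wrap bugs surface early, which made the raw product start near the top of the range. A standalone arithmetic check; HZ and the INITIAL_JIFFIES definition are assumed to match <linux/jiffies.h> of that era:

#include <stdio.h>

#define HZ              100
#define NSEC_PER_SEC    1000000000ULL
/* assumed kernel definition: 5 minutes before the u32 wrap */
#define INITIAL_JIFFIES ((unsigned long)(unsigned int)(-300 * HZ))

int main(void)
{
    unsigned long jiffies = INITIAL_JIFFIES + 5;    /* 50 ms after boot */

    unsigned long long raw   = (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
    unsigned long long fixed = (unsigned long long)(jiffies - INITIAL_JIFFIES)
                               * (NSEC_PER_SEC / HZ);

    printf("raw:   %llu ns\n", raw);    /* huge, not time since boot */
    printf("fixed: %llu ns\n", fixed);  /* 50000000 */
    return 0;
}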
2 changes: 1 addition & 1 deletion trunk/kernel/trace/trace.c
@@ -2380,7 +2380,7 @@ static const char readme_msg[] =
"# echo print-parent > /debug/tracing/trace_options\n"
"# echo 1 > /debug/tracing/tracing_enabled\n"
"# cat /debug/tracing/trace > /tmp/trace.txt\n"
"echo 0 > /debug/tracing/tracing_enabled\n"
"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t