diff --git a/[refs] b/[refs]
index 106713f62ab4..7d88416efe7e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: aa165971c2923d05988f920c978e438dbc7b0de6
+refs/heads/master: 4996c02306a25def1d352ec8e8f48895bbc7dea9
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 951143dd0684..b1c921c27519 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -310,6 +310,15 @@ Who: Ravikiran Thirumalai
 
 ---------------------------
 
+What:	CONFIG_THERMAL_HWMON
+When:	January 2009
+Why:	This option was introduced just to allow older lm-sensors userspace
+	to keep working over the upgrade to 2.6.26. At the scheduled time of
+	removal fixed lm-sensors (2.x or 3.x) should be readily available.
+Who:	Rene Herman
+
+---------------------------
+
 What:	Code that is now under CONFIG_WIRELESS_EXT_SYSFS
 	(in net/core/net-sysfs.c)
 When:	After the only user (hal) has seen a release with the patches
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index aa47be71df4c..37713bcf7778 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -163,6 +163,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 			See also Documentation/power/pm.txt, pci=noacpi
 
+	acpi_rsdp=	[ACPI,EFI,KEXEC]
+			Pass the RSDP address to the kernel, mostly used
+			on machines running EFI runtime service to boot the
+			second kernel for kdump.
+
 	acpi_apic_instance=	[ACPI, IOAPIC]
 			Format: <int>
 			2: use 2nd APIC table, if available
diff --git a/trunk/Documentation/x86/boot.txt b/trunk/Documentation/x86/boot.txt
index 7c3a8801b7ce..9b7221a86df2 100644
--- a/trunk/Documentation/x86/boot.txt
+++ b/trunk/Documentation/x86/boot.txt
@@ -674,7 +674,7 @@ Protocol: 2.10+
 
 Field name:	init_size
 Type:		read
-Offset/size:	0x260/4
+Offset/size:	0x25c/4
 
 This field indicates the amount of linear contiguous memory starting
 at the kernel runtime start address that the kernel needs before it
diff --git a/trunk/Makefile b/trunk/Makefile
index 6a5bdad524af..60d91f76c2fd 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc7
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
diff --git a/trunk/arch/arm/mach-davinci/board-dm365-evm.c b/trunk/arch/arm/mach-davinci/board-dm365-evm.c
index 09a87e61ffcf..c67f684ee3e5 100644
--- a/trunk/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/trunk/arch/arm/mach-davinci/board-dm365-evm.c
@@ -520,7 +520,7 @@ static void __init evm_init_cpld(void)
 	 */
 	if (have_imager()) {
 		label = "HD imager";
-		mux |= 2;
+		mux |= 1;
 
 		/* externally mux MMC1/ENET/AIC33 to imager */
 		mux |= BIT(6) | BIT(5) | BIT(3);
@@ -540,7 +540,7 @@ static void __init evm_init_cpld(void)
 	resets &= ~BIT(1);
 
 	if (have_tvp7002()) {
-		mux |= 1;
+		mux |= 2;
 		resets &= ~BIT(2);
 		label = "tvp7002 HD";
 	} else {
diff --git a/trunk/arch/arm/mach-davinci/gpio.c b/trunk/arch/arm/mach-davinci/gpio.c
index cafbe13a82a5..e7221398e5af 100644
--- a/trunk/arch/arm/mach-davinci/gpio.c
+++ b/trunk/arch/arm/mach-davinci/gpio.c
@@ -254,10 +254,8 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask = 0xffff;
-	struct davinci_gpio_controller *d;
 
-	d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
-	g = (struct davinci_gpio_regs __iomem *)d->regs;
+	g = (__force struct davinci_gpio_regs __iomem *)
irq_desc_get_handler_data(desc); /* we only care about one bank */ if (irq & 1) @@ -276,14 +274,11 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) if (!status) break; __raw_writel(status, &g->intstat); - - /* now demux them to the right lowlevel handler */ - n = d->irq_base; - if (irq & 1) { - n += 16; + if (irq & 1) status >>= 16; - } + /* now demux them to the right lowlevel handler */ + n = (int)irq_get_handler_data(irq); while (status) { res = ffs(status); n += res; @@ -429,13 +424,7 @@ static int __init davinci_gpio_irq_setup(void) /* set up all irqs in this bank */ irq_set_chained_handler(bank_irq, gpio_irq_handler); - - /* - * Each chip handles 32 gpios, and each irq bank consists of 16 - * gpio irqs. Pass the irq bank's corresponding controller to - * the chained irq handler. - */ - irq_set_handler_data(bank_irq, &chips[gpio / 32]); + irq_set_handler_data(bank_irq, (__force void *)g); for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { irq_set_chip(irq, &gpio_irqchip); diff --git a/trunk/arch/arm/mach-davinci/irq.c b/trunk/arch/arm/mach-davinci/irq.c index 952dc126c390..d8c1af025931 100644 --- a/trunk/arch/arm/mach-davinci/irq.c +++ b/trunk/arch/arm/mach-davinci/irq.c @@ -52,12 +52,6 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) struct irq_chip_type *ct; gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); - if (!gc) { - pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", - __func__, irq_start); - return; - } - ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; diff --git a/trunk/arch/arm/mach-s3c64xx/dma.c b/trunk/arch/arm/mach-s3c64xx/dma.c index 204bfafe4bfc..b197171e7d03 100644 --- a/trunk/arch/arm/mach-s3c64xx/dma.c +++ b/trunk/arch/arm/mach-s3c64xx/dma.c @@ -113,7 +113,7 @@ static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel) return chan; } -int s3c2410_dma_config(enum dma_ch channel, int xferunit) +int s3c2410_dma_config(unsigned int channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan) return 0; } -int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) +int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); * */ -int s3c2410_dma_enqueue(enum dma_ch channel, void *id, +int s3c2410_dma_enqueue(unsigned int channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -415,7 +415,7 @@ int s3c2410_dma_enqueue(enum dma_ch channel, void *id, EXPORT_SYMBOL(s3c2410_dma_enqueue); -int s3c2410_dma_devconfig(enum dma_ch channel, +int s3c2410_dma_devconfig(unsigned int channel, enum s3c2410_dmasrc source, unsigned long devaddr) { @@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel, EXPORT_SYMBOL(s3c2410_dma_devconfig); -int s3c2410_dma_getposition(enum dma_ch channel, +int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition); * get control of an dma channel */ -int s3c2410_dma_request(enum dma_ch channel, +int s3c2410_dma_request(unsigned int channel, struct s3c2410_dma_client *client, void *dev) { @@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); * allowed 
to go through. */ -int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) +int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; diff --git a/trunk/arch/arm/plat-s3c24xx/dma.c b/trunk/arch/arm/plat-s3c24xx/dma.c index 539bd0e3defd..a79a8ccd25f6 100644 --- a/trunk/arch/arm/plat-s3c24xx/dma.c +++ b/trunk/arch/arm/plat-s3c24xx/dma.c @@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel); * get control of an dma channel */ -int s3c2410_dma_request(enum dma_ch channel, +int s3c2410_dma_request(unsigned int channel, struct s3c2410_dma_client *client, void *dev) { @@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); * allowed to go through. */ -int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) +int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; @@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan) } int -s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) +s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1021,7 +1021,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); * xfersize: size of unit in bytes (1,2,4) */ -int s3c2410_dma_config(enum dma_ch channel, +int s3c2410_dma_config(unsigned int channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config); * devaddr: physical address of the source */ -int s3c2410_dma_devconfig(enum dma_ch channel, +int s3c2410_dma_devconfig(unsigned int channel, enum s3c2410_dmasrc source, unsigned long devaddr) { @@ -1173,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig); * returns the current transfer points for the dma source and destination */ -int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) +int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/trunk/arch/arm/plat-samsung/dma.c b/trunk/arch/arm/plat-samsung/dma.c index 6143aa147688..cb459dd95459 100644 --- a/trunk/arch/arm/plat-samsung/dma.c +++ b/trunk/arch/arm/plat-samsung/dma.c @@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel) * irq? 
*/ -int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn) +int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn) } EXPORT_SYMBOL(s3c2410_dma_set_opfn); -int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn) +int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn) } EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); -int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags) +int s3c2410_dma_setflags(unsigned int channel, unsigned int flags) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/trunk/arch/arm/plat-samsung/include/plat/dma.h b/trunk/arch/arm/plat-samsung/include/plat/dma.h index 8c273b7a6f56..2e8f8c6560d7 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/dma.h +++ b/trunk/arch/arm/plat-samsung/include/plat/dma.h @@ -42,7 +42,6 @@ struct s3c2410_dma_client { }; struct s3c2410_dma_chan; -enum dma_ch; /* s3c2410_dma_cbfn_t * @@ -63,7 +62,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *, * request a dma channel exclusivley */ -extern int s3c2410_dma_request(enum dma_ch channel, +extern int s3c2410_dma_request(unsigned int channel, struct s3c2410_dma_client *, void *dev); @@ -72,14 +71,14 @@ extern int s3c2410_dma_request(enum dma_ch channel, * change the state of the dma channel */ -extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op); +extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op); /* s3c2410_dma_setflags * * set the channel's flags to a given state */ -extern int s3c2410_dma_setflags(enum dma_ch channel, +extern int s3c2410_dma_setflags(unsigned int channel, unsigned int flags); /* s3c2410_dma_free @@ -87,7 +86,7 @@ extern int s3c2410_dma_setflags(enum dma_ch channel, * free the dma channel (will also abort any outstanding operations) */ -extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *); +extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); /* s3c2410_dma_enqueue * @@ -96,7 +95,7 @@ extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *); * drained before the buffer is given to the DMA system. 
*/ -extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id, +extern int s3c2410_dma_enqueue(unsigned int channel, void *id, dma_addr_t data, int size); /* s3c2410_dma_config @@ -104,14 +103,14 @@ extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id, * configure the dma channel */ -extern int s3c2410_dma_config(enum dma_ch channel, int xferunit); +extern int s3c2410_dma_config(unsigned int channel, int xferunit); /* s3c2410_dma_devconfig * * configure the device we're talking to */ -extern int s3c2410_dma_devconfig(enum dma_ch channel, +extern int s3c2410_dma_devconfig(unsigned int channel, enum s3c2410_dmasrc source, unsigned long devaddr); /* s3c2410_dma_getposition @@ -119,10 +118,10 @@ extern int s3c2410_dma_devconfig(enum dma_ch channel, * get the position that the dma transfer is currently at */ -extern int s3c2410_dma_getposition(enum dma_ch channel, +extern int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dest); -extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn); -extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); +extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn); +extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn); diff --git a/trunk/arch/arm/plat-samsung/irq-uart.c b/trunk/arch/arm/plat-samsung/irq-uart.c index 657405c481d0..0e46588d847b 100644 --- a/trunk/arch/arm/plat-samsung/irq-uart.c +++ b/trunk/arch/arm/plat-samsung/irq-uart.c @@ -54,13 +54,6 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, handle_level_irq); - - if (!gc) { - pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", - __func__, uirq->base_irq); - return; - } - ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_set_bit; diff --git a/trunk/arch/arm/plat-samsung/irq-vic-timer.c b/trunk/arch/arm/plat-samsung/irq-vic-timer.c index f714d060370d..a607546ddbd0 100644 --- a/trunk/arch/arm/plat-samsung/irq-vic-timer.c +++ b/trunk/arch/arm/plat-samsung/irq-vic-timer.c @@ -54,13 +54,6 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq) s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, S3C64XX_TINT_CSTAT, handle_level_irq); - - if (!s3c_tgc) { - pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n", - __func__, timer_irq); - return; - } - ct = s3c_tgc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; diff --git a/trunk/arch/mips/kernel/i8259.c b/trunk/arch/mips/kernel/i8259.c index 5c74eb797f08..c018696765d4 100644 --- a/trunk/arch/mips/kernel/i8259.c +++ b/trunk/arch/mips/kernel/i8259.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -215,13 +215,14 @@ static void mask_and_ack_8259A(struct irq_data *d) } } -static void i8259A_resume(void) +static int i8259A_resume(struct sys_device *dev) { if (i8259A_auto_eoi >= 0) init_8259A(i8259A_auto_eoi); + return 0; } -static void i8259A_shutdown(void) +static int i8259A_shutdown(struct sys_device *dev) { /* Put the i8259A into a quiescent state that * the kernel initialization code can get it @@ -231,17 +232,26 @@ static void i8259A_shutdown(void) outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ } + return 0; } -static struct syscore_ops i8259_syscore_ops = { +static struct sysdev_class i8259_sysdev_class = { + .name = "i8259", .resume = 
i8259A_resume, .shutdown = i8259A_shutdown, }; +static struct sys_device device_i8259A = { + .id = 0, + .cls = &i8259_sysdev_class, +}; + static int __init i8259A_init_sysfs(void) { - register_syscore_ops(&i8259_syscore_ops); - return 0; + int error = sysdev_class_register(&i8259_sysdev_class); + if (!error) + error = sysdev_register(&device_i8259A); + return error; } device_initcall(i8259A_init_sysfs); diff --git a/trunk/arch/sparc/include/asm/irqflags_32.h b/trunk/arch/sparc/include/asm/irqflags_32.h index 14848909e0de..d4d0711de0f9 100644 --- a/trunk/arch/sparc/include/asm/irqflags_32.h +++ b/trunk/arch/sparc/include/asm/irqflags_32.h @@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long); extern unsigned long arch_local_irq_save(void); extern void arch_local_irq_enable(void); -static inline notrace unsigned long arch_local_save_flags(void) +static inline unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,17 +26,17 @@ static inline notrace unsigned long arch_local_save_flags(void) return flags; } -static inline notrace void arch_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { arch_local_irq_save(); } -static inline notrace bool arch_irqs_disabled_flags(unsigned long flags) +static inline bool arch_irqs_disabled_flags(unsigned long flags) { return (flags & PSR_PIL) != 0; } -static inline notrace bool arch_irqs_disabled(void) +static inline bool arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } diff --git a/trunk/arch/sparc/include/asm/irqflags_64.h b/trunk/arch/sparc/include/asm/irqflags_64.h index 23cd27f6beb4..aab969c82c2b 100644 --- a/trunk/arch/sparc/include/asm/irqflags_64.h +++ b/trunk/arch/sparc/include/asm/irqflags_64.h @@ -14,7 +14,7 @@ #ifndef __ASSEMBLY__ -static inline notrace unsigned long arch_local_save_flags(void) +static inline unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,7 +26,7 @@ static inline notrace unsigned long arch_local_save_flags(void) return flags; } -static inline notrace void arch_local_irq_restore(unsigned long flags) +static inline void arch_local_irq_restore(unsigned long flags) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -36,7 +36,7 @@ static inline notrace void arch_local_irq_restore(unsigned long flags) ); } -static inline notrace void arch_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -46,7 +46,7 @@ static inline notrace void arch_local_irq_disable(void) ); } -static inline notrace void arch_local_irq_enable(void) +static inline void arch_local_irq_enable(void) { __asm__ __volatile__( "wrpr 0, %%pil" @@ -56,17 +56,17 @@ static inline notrace void arch_local_irq_enable(void) ); } -static inline notrace int arch_irqs_disabled_flags(unsigned long flags) +static inline int arch_irqs_disabled_flags(unsigned long flags) { return (flags > 0); } -static inline notrace int arch_irqs_disabled(void) +static inline int arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } -static inline notrace unsigned long arch_local_irq_save(void) +static inline unsigned long arch_local_irq_save(void) { unsigned long flags, tmp; diff --git a/trunk/arch/sparc/include/asm/ptrace.h b/trunk/arch/sparc/include/asm/ptrace.h index b928b31424b1..c7ad3fe2b252 100644 --- a/trunk/arch/sparc/include/asm/ptrace.h +++ b/trunk/arch/sparc/include/asm/ptrace.h @@ -205,7 +205,6 @@ do { current_thread_info()->syscall_noerror = 1; \ } while (0) #define user_mode(regs) 
(!((regs)->tstate & TSTATE_PRIV)) #define instruction_pointer(regs) ((regs)->tpc) -#define instruction_pointer_set(regs, val) ((regs)->tpc = (val)) #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) #define regs_return_value(regs) ((regs)->u_regs[UREG_I0]) #ifdef CONFIG_SMP diff --git a/trunk/arch/sparc/kernel/entry.S b/trunk/arch/sparc/kernel/entry.S index f445e98463e6..9fe08a1ea6c6 100644 --- a/trunk/arch/sparc/kernel/entry.S +++ b/trunk/arch/sparc/kernel/entry.S @@ -293,7 +293,7 @@ maybe_smp4m_msg: WRITE_PAUSE wr %l4, PSR_ET, %psr WRITE_PAUSE - srl %o3, 28, %o2 ! shift for simpler checks below + sll %o3, 28, %o2 ! shift for simpler checks below maybe_smp4m_msg_check_single: andcc %o2, 0x1, %g0 beq,a maybe_smp4m_msg_check_mask diff --git a/trunk/arch/sparc/mm/leon_mm.c b/trunk/arch/sparc/mm/leon_mm.c index e485a6804998..c0e01297e64e 100644 --- a/trunk/arch/sparc/mm/leon_mm.c +++ b/trunk/arch/sparc/mm/leon_mm.c @@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs) * Leon2 and Leon3 differ in their way of telling cache information * */ -int __init leon_flush_needed(void) +int leon_flush_needed(void) { int flush_needed = -1; unsigned int ssize, sets; diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 37357a599dca..da349723d411 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" config AMD_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" - depends on X86_64 && NUMA && PCI + depends on NUMA && PCI ---help--- Enable AMD NUMA node topology detection. You should say Y here if you have a multi processor AMD system. This uses an old method to diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c index 9242436e9937..4f0d46fefa7f 100644 --- a/trunk/arch/x86/kernel/reboot.c +++ b/trunk/arch/x86/kernel/reboot.c @@ -419,30 +419,6 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), }, }, - { /* Handle problems with rebooting on the Latitude E6320. */ - .callback = set_pci_reboot, - .ident = "Dell Latitude E6320", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), - }, - }, - { /* Handle problems with rebooting on the Latitude E5420. */ - .callback = set_pci_reboot, - .ident = "Dell Latitude E5420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), - }, - }, - { /* Handle problems with rebooting on the Latitude E6420. 
*/ - .callback = set_pci_reboot, - .ident = "Dell Latitude E6420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), - }, - }, { } }; diff --git a/trunk/drivers/acpi/apei/hest.c b/trunk/drivers/acpi/apei/hest.c index 181bc2f7bb74..abda3786a5d7 100644 --- a/trunk/drivers/acpi/apei/hest.c +++ b/trunk/drivers/acpi/apei/hest.c @@ -139,23 +139,13 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) { struct platform_device *ghes_dev; struct ghes_arr *ghes_arr = data; - int rc, i; + int rc; if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) return 0; if (!((struct acpi_hest_generic *)hest_hdr)->enabled) return 0; - for (i = 0; i < ghes_arr->count; i++) { - struct acpi_hest_header *hdr; - ghes_dev = ghes_arr->ghes_devs[i]; - hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data; - if (hdr->source_id == hest_hdr->source_id) { - pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n", - hdr->source_id); - return -EIO; - } - } ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); if (!ghes_dev) return -ENOMEM; diff --git a/trunk/drivers/acpi/osl.c b/trunk/drivers/acpi/osl.c index 0e4785703d45..27cd1408f6d8 100644 --- a/trunk/drivers/acpi/osl.c +++ b/trunk/drivers/acpi/osl.c @@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args) #endif } +#ifdef CONFIG_KEXEC +static unsigned long acpi_rsdp; +static int __init setup_acpi_rsdp(char *arg) +{ + acpi_rsdp = simple_strtoul(arg, NULL, 16); + return 0; +} +early_param("acpi_rsdp", setup_acpi_rsdp); +#endif + acpi_physical_address __init acpi_os_get_root_pointer(void) { +#ifdef CONFIG_KEXEC + if (acpi_rsdp) + return acpi_rsdp; +#endif + if (efi_enabled) { if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) return efi.acpi20; @@ -1083,13 +1098,7 @@ struct osi_setup_entry { bool enable; }; -static struct osi_setup_entry __initdata - osi_setup_entries[OSI_STRING_ENTRIES_MAX] = { - {"Module Device", true}, - {"Processor Device", true}, - {"3.0 _SCP Extensions", true}, - {"Processor Aggregator Device", true}, -}; +static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; void __init acpi_osi_setup(char *str) { @@ -1338,6 +1347,23 @@ int acpi_resources_are_enforced(void) } EXPORT_SYMBOL(acpi_resources_are_enforced); +/* + * Create and initialize a spinlock. + */ +acpi_status +acpi_os_create_lock(acpi_spinlock *out_handle) +{ + spinlock_t *lock; + + lock = ACPI_ALLOCATE(sizeof(spinlock_t)); + if (!lock) + return AE_NO_MEMORY; + spin_lock_init(lock); + *out_handle = lock; + + return AE_OK; +} + /* * Deallocate the memory for a spinlock. 
*/ diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index ce7914c4c044..f245c588ae95 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -262,7 +262,6 @@ enum intel_pch { }; #define QUIRK_PIPEA_FORCE (1<<0) -#define QUIRK_LVDS_SSC_DISABLE (1<<1) struct intel_fbdev; @@ -1195,9 +1194,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_release(struct drm_device *dev, struct drm_file *file); uint32_t -i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, - uint32_t size, - int tiling_mode); +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index a087e1bf0c2f..5c0d1247f453 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -1374,24 +1374,25 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) } static uint32_t -i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) +i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) { - uint32_t gtt_size; + struct drm_device *dev = obj->base.dev; + uint32_t size; if (INTEL_INFO(dev)->gen >= 4 || - tiling_mode == I915_TILING_NONE) - return size; + obj->tiling_mode == I915_TILING_NONE) + return obj->base.size; /* Previous chips need a power-of-two fence region when tiling */ if (INTEL_INFO(dev)->gen == 3) - gtt_size = 1024*1024; + size = 1024*1024; else - gtt_size = 512*1024; + size = 512*1024; - while (gtt_size < size) - gtt_size <<= 1; + while (size < obj->base.size) + size <<= 1; - return gtt_size; + return size; } /** @@ -1402,52 +1403,59 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) * potential fence register mapping. */ static uint32_t -i915_gem_get_gtt_alignment(struct drm_device *dev, - uint32_t size, - int tiling_mode) +i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) { + struct drm_device *dev = obj->base.dev; + /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ if (INTEL_INFO(dev)->gen >= 4 || - tiling_mode == I915_TILING_NONE) + obj->tiling_mode == I915_TILING_NONE) return 4096; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ - return i915_gem_get_gtt_size(dev, size, tiling_mode); + return i915_gem_get_gtt_size(obj); } /** * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an * unfenced object - * @dev: the device - * @size: size of the object - * @tiling_mode: tiling mode of the object + * @obj: object to check * * Return the required GTT alignment for an object, only taking into account * unfenced tiled surface requirements. */ uint32_t -i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, - uint32_t size, - int tiling_mode) +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) { + struct drm_device *dev = obj->base.dev; + int tile_height; + /* * Minimum alignment is 4k (GTT page size) for sane hw. */ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || - tiling_mode == I915_TILING_NONE) + obj->tiling_mode == I915_TILING_NONE) return 4096; - /* Previous hardware however needs to be aligned to a power-of-two - * tile height. The simplest method for determining this is to reuse - * the power-of-tile object size. 
+ /* + * Older chips need unfenced tiled buffers to be aligned to the left + * edge of an even tile row (where tile rows are counted as if the bo is + * placed in a fenced gtt region). */ - return i915_gem_get_gtt_size(dev, size, tiling_mode); + if (IS_GEN2(dev)) + tile_height = 16; + else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + tile_height = 32; + else + tile_height = 8; + + return tile_height * obj->stride * 2; } int @@ -2736,16 +2744,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, return -EINVAL; } - fence_size = i915_gem_get_gtt_size(dev, - obj->base.size, - obj->tiling_mode); - fence_alignment = i915_gem_get_gtt_alignment(dev, - obj->base.size, - obj->tiling_mode); - unfenced_alignment = - i915_gem_get_unfenced_gtt_alignment(dev, - obj->base.size, - obj->tiling_mode); + fence_size = i915_gem_get_gtt_size(obj); + fence_alignment = i915_gem_get_gtt_alignment(obj); + unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); if (alignment == 0) alignment = map_and_fenceable ? fence_alignment : diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c index 99c4faa59d8f..82d70fd9e933 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -348,9 +348,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, /* Rebind if we need a change of alignment */ if (!obj->map_and_fenceable) { u32 unfenced_alignment = - i915_gem_get_unfenced_gtt_alignment(dev, - obj->base.size, - args->tiling_mode); + i915_gem_get_unfenced_gtt_alignment(obj); if (obj->gtt_offset & (unfenced_alignment - 1)) ret = i915_gem_object_unbind(obj); } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 0f1c799afea1..21b6f93fe919 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -4305,8 +4305,7 @@ static void intel_update_watermarks(struct drm_device *dev) static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { - return dev_priv->lvds_use_ssc && i915_panel_use_ssc - && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); + return dev_priv->lvds_use_ssc && i915_panel_use_ssc; } static int i9xx_crtc_mode_set(struct drm_crtc *crtc, @@ -7811,15 +7810,6 @@ static void quirk_pipea_force (struct drm_device *dev) DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); } -/* - * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason - */ -static void quirk_ssc_force_disable(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; -} - struct intel_quirk { int device; int subsystem_vendor; @@ -7848,9 +7838,6 @@ struct intel_quirk intel_quirks[] = { /* 855 & before need to leave pipe A & dpll A up */ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, - - /* Lenovo U160 cannot use SSC on LVDS */ - { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, }; static void intel_init_quirks(struct drm_device *dev) diff --git a/trunk/drivers/hwmon/asus_atk0110.c b/trunk/drivers/hwmon/asus_atk0110.c index 00e98517f94c..dcb78a7a8047 100644 --- a/trunk/drivers/hwmon/asus_atk0110.c +++ b/trunk/drivers/hwmon/asus_atk0110.c @@ -674,7 +674,6 @@ static int atk_debugfs_gitm_get(void *p, u64 *val) else err = -EIO; - ACPI_FREE(ret); return err; } diff --git a/trunk/drivers/hwmon/it87.c b/trunk/drivers/hwmon/it87.c index 
5f5247750430..bb6405b92007 100644 --- a/trunk/drivers/hwmon/it87.c +++ b/trunk/drivers/hwmon/it87.c @@ -1538,7 +1538,7 @@ static struct attribute *it87_attributes_label[] = { }; static const struct attribute_group it87_group_label = { - .attrs = it87_attributes_label, + .attrs = it87_attributes_vid, }; /* SuperIO detection - will change isa_address if a chip is found */ diff --git a/trunk/drivers/hwmon/max1111.c b/trunk/drivers/hwmon/max1111.c index 14335bbc9bdc..12a54aa29776 100644 --- a/trunk/drivers/hwmon/max1111.c +++ b/trunk/drivers/hwmon/max1111.c @@ -40,8 +40,6 @@ struct max1111_data { struct spi_transfer xfer[2]; uint8_t *tx_buf; uint8_t *rx_buf; - struct mutex drvdata_lock; - /* protect msg, xfer and buffers from multiple access */ }; static int max1111_read(struct device *dev, int channel) @@ -50,9 +48,6 @@ static int max1111_read(struct device *dev, int channel) uint8_t v1, v2; int err; - /* writing to drvdata struct is not thread safe, wait on mutex */ - mutex_lock(&data->drvdata_lock); - data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) | MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 | MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR; @@ -60,15 +55,12 @@ static int max1111_read(struct device *dev, int channel) err = spi_sync(data->spi, &data->msg); if (err < 0) { dev_err(dev, "spi_sync failed with %d\n", err); - mutex_unlock(&data->drvdata_lock); return err; } v1 = data->rx_buf[0]; v2 = data->rx_buf[1]; - mutex_unlock(&data->drvdata_lock); - if ((v1 & 0xc0) || (v2 & 0x3f)) return -EINVAL; @@ -184,8 +176,6 @@ static int __devinit max1111_probe(struct spi_device *spi) if (err) goto err_free_data; - mutex_init(&data->drvdata_lock); - data->spi = spi; spi_set_drvdata(spi, data); @@ -223,7 +213,6 @@ static int __devexit max1111_remove(struct spi_device *spi) hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); - mutex_destroy(&data->drvdata_lock); kfree(data->rx_buf); kfree(data->tx_buf); kfree(data); diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c index 5b6b451d4694..98278041d75f 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -1988,14 +1988,6 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) { if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0) goto err0; - - /* If we took control of the bus, we need to force - reinitialization. This is because many ts_bus_ctrl() - functions strobe the RESET pin on the demod, and if the - frontend thread already exists then the dvb_init() routine - won't get called (which is what usually does initial - register configuration). */ - fepriv->reinitialise = 1; } if ((ret = dvb_generic_open (inode, file)) < 0) diff --git a/trunk/drivers/media/radio/Kconfig b/trunk/drivers/media/radio/Kconfig index 52798a111e16..e4c97fd6f05a 100644 --- a/trunk/drivers/media/radio/Kconfig +++ b/trunk/drivers/media/radio/Kconfig @@ -168,7 +168,7 @@ config RADIO_MAXIRADIO config RADIO_MIROPCM20 tristate "miroSOUND PCM20 radio" - depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND + depends on ISA && VIDEO_V4L2 && SND select SND_ISA select SND_MIRO ---help--- @@ -201,7 +201,7 @@ config RADIO_SF16FMI config RADIO_SF16FMR2 tristate "SF16FMR2 Radio" - depends on ISA && VIDEO_V4L2 && SND + depends on ISA && VIDEO_V4L2 ---help--- Choose Y here if you have one of these FM radio cards. 
diff --git a/trunk/drivers/media/radio/si4713-i2c.c b/trunk/drivers/media/radio/si4713-i2c.c index c9f4a8e65dc4..deca2e06ff22 100644 --- a/trunk/drivers/media/radio/si4713-i2c.c +++ b/trunk/drivers/media/radio/si4713-i2c.c @@ -1033,7 +1033,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev, char ps_name[MAX_RDS_PS_NAME + 1]; len = control->size - 1; - if (len < 0 || len > MAX_RDS_PS_NAME) { + if (len > MAX_RDS_PS_NAME) { rval = -ERANGE; goto exit; } @@ -1057,7 +1057,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev, char radio_text[MAX_RDS_RADIO_TEXT + 1]; len = control->size - 1; - if (len < 0 || len > MAX_RDS_RADIO_TEXT) { + if (len > MAX_RDS_RADIO_TEXT) { rval = -ERANGE; goto exit; } diff --git a/trunk/drivers/media/rc/mceusb.c b/trunk/drivers/media/rc/mceusb.c index ec972dc25790..06dfe0957b5e 100644 --- a/trunk/drivers/media/rc/mceusb.c +++ b/trunk/drivers/media/rc/mceusb.c @@ -558,10 +558,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, inout, data1); break; case MCE_CMD_S_TIMEOUT: - /* value is in units of 50us, so x*50/1000 ms */ + /* value is in units of 50us, so x*50/100 or x/2 ms */ dev_info(dev, "%s receive timeout of %d ms\n", - inout, - ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000); + inout, ((data1 << 8) | data2) / 2); break; case MCE_CMD_G_TIMEOUT: dev_info(dev, "Get receive timeout\n"); @@ -848,7 +847,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) switch (ir->buf_in[index]) { /* 2-byte return value commands */ case MCE_CMD_S_TIMEOUT: - ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT); + ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2); break; /* 1-byte return value commands */ @@ -1079,7 +1078,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir) rc->priv = ir; rc->driver_type = RC_DRIVER_IR_RAW; rc->allowed_protos = RC_TYPE_ALL; - rc->timeout = MS_TO_NS(100); + rc->timeout = US_TO_NS(1000); if (!ir->flags.no_tx) { rc->s_tx_mask = mceusb_set_tx_mask; rc->s_tx_carrier = mceusb_set_tx_carrier; diff --git a/trunk/drivers/media/rc/nuvoton-cir.c b/trunk/drivers/media/rc/nuvoton-cir.c index ce595f9ab4c7..565f24c20d77 100644 --- a/trunk/drivers/media/rc/nuvoton-cir.c +++ b/trunk/drivers/media/rc/nuvoton-cir.c @@ -1110,7 +1110,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) rdev->dev.parent = &pdev->dev; rdev->driver_name = NVT_DRIVER_NAME; rdev->map_name = RC_MAP_RC6_MCE; - rdev->timeout = MS_TO_NS(100); + rdev->timeout = US_TO_NS(1000); /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD); #if 0 diff --git a/trunk/drivers/media/video/cx23885/cx23885-core.c b/trunk/drivers/media/video/cx23885/cx23885-core.c index 419777a832ee..64d9b2136ff6 100644 --- a/trunk/drivers/media/video/cx23885/cx23885-core.c +++ b/trunk/drivers/media/video/cx23885/cx23885-core.c @@ -2060,8 +2060,12 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev, goto fail_irq; } - err = request_irq(pci_dev->irq, cx23885_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + if (!pci_enable_msi(pci_dev)) + err = request_irq(pci_dev->irq, cx23885_irq, + IRQF_DISABLED, dev->name, dev); + else + err = request_irq(pci_dev->irq, cx23885_irq, + IRQF_SHARED | IRQF_DISABLED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name, pci_dev->irq); @@ -2110,6 +2114,7 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev) /* unregister stuff */ 
free_irq(pci_dev->irq, dev); + pci_disable_msi(pci_dev); cx23885_dev_unregister(dev); v4l2_device_unregister(v4l2_dev); diff --git a/trunk/drivers/media/video/tuner-core.c b/trunk/drivers/media/video/tuner-core.c index a03945ab9f08..cfa9f7efe93d 100644 --- a/trunk/drivers/media/video/tuner-core.c +++ b/trunk/drivers/media/video/tuner-core.c @@ -714,19 +714,10 @@ static int tuner_remove(struct i2c_client *client) * returns 0. * This function is needed for boards that have a separate tuner for * radio (like devices with tea5767). - * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to - * select a TV frequency. So, t_mode = T_ANALOG_TV could actually - * be used to represent a Digital TV too. */ static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode) { - int t_mode; - if (mode == V4L2_TUNER_RADIO) - t_mode = T_RADIO; - else - t_mode = T_ANALOG_TV; - - if ((t_mode & t->mode_mask) == 0) + if ((1 << mode & t->mode_mask) == 0) return -EINVAL; return 0; @@ -993,7 +984,7 @@ static void tuner_status(struct dvb_frontend *fe) case V4L2_TUNER_RADIO: p = "radio"; break; - case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */ + case V4L2_TUNER_DIGITAL_TV: p = "digital TV"; break; case V4L2_TUNER_ANALOG_TV: @@ -1144,8 +1135,9 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) return 0; if (vt->type == t->mode && analog_ops->get_afc) vt->afc = analog_ops->get_afc(&t->fe); - if (t->mode != V4L2_TUNER_RADIO) { + if (vt->type == V4L2_TUNER_ANALOG_TV) vt->capability |= V4L2_TUNER_CAP_NORM; + if (vt->type != V4L2_TUNER_RADIO) { vt->rangelow = tv_range[0] * 16; vt->rangehigh = tv_range[1] * 16; return 0; diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 63c22b0bb5ad..eafe44a528ac 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -1428,9 +1428,9 @@ static u32 bond_fix_features(struct net_device *dev, u32 features) return features; } -#define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ - NETIF_F_HIGHDMA | NETIF_F_LRO) +#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \ + NETIF_F_SOFT_FEATURES | \ + NETIF_F_LRO) static void bond_compute_features(struct bonding *bond) { diff --git a/trunk/drivers/net/gianfar.c b/trunk/drivers/net/gianfar.c index dfa55f94ba7f..2dfcc8047847 100644 --- a/trunk/drivers/net/gianfar.c +++ b/trunk/drivers/net/gianfar.c @@ -2289,23 +2289,6 @@ static int gfar_set_mac_address(struct net_device *dev) return 0; } -/* Check if rx parser should be activated */ -void gfar_check_rx_parser_mode(struct gfar_private *priv) -{ - struct gfar __iomem *regs; - u32 tempval; - - regs = priv->gfargrp[0].regs; - - tempval = gfar_read(®s->rctrl); - /* If parse is no longer required, then disable parser */ - if (tempval & RCTRL_REQ_PARSER) - tempval |= RCTRL_PRSDEP_INIT; - else - tempval &= ~RCTRL_PRSDEP_INIT; - gfar_write(®s->rctrl, tempval); -} - /* Enables and disables VLAN insertion/extraction */ static void gfar_vlan_rx_register(struct net_device *dev, @@ -2342,9 +2325,12 @@ static void gfar_vlan_rx_register(struct net_device *dev, /* Disable VLAN tag extraction */ tempval = gfar_read(®s->rctrl); tempval &= ~RCTRL_VLEX; + /* If parse is no longer required, then disable parser */ + if (tempval & RCTRL_REQ_PARSER) + tempval |= RCTRL_PRSDEP_INIT; + else + tempval &= ~RCTRL_PRSDEP_INIT; gfar_write(®s->rctrl, tempval); - - gfar_check_rx_parser_mode(priv); } gfar_change_mtu(dev, dev->mtu); diff --git 
a/trunk/drivers/net/gianfar.h b/trunk/drivers/net/gianfar.h index 440e69d8beff..ba36dc7a3435 100644 --- a/trunk/drivers/net/gianfar.h +++ b/trunk/drivers/net/gianfar.h @@ -274,7 +274,7 @@ extern const char gfar_driver_version[]; #define RCTRL_PROM 0x00000008 #define RCTRL_EMEN 0x00000002 #define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \ - RCTRL_TUCSEN | RCTRL_FILREN) + RCTRL_TUCSEN) #define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \ RCTRL_PRSDEP_INIT) #define RCTRL_EXTHASH (RCTRL_GHTX) @@ -1156,7 +1156,6 @@ extern void gfar_configure_coalescing(struct gfar_private *priv, unsigned long tx_mask, unsigned long rx_mask); void gfar_init_sysfs(struct net_device *dev); int gfar_set_features(struct net_device *dev, u32 features); -extern void gfar_check_rx_parser_mode(struct gfar_private *priv); extern const struct ethtool_ops gfar_ethtool_ops; diff --git a/trunk/drivers/net/pppoe.c b/trunk/drivers/net/pppoe.c index bc9a4bb31980..718879b35b7d 100644 --- a/trunk/drivers/net/pppoe.c +++ b/trunk/drivers/net/pppoe.c @@ -348,9 +348,8 @@ static int pppoe_device_event(struct notifier_block *this, /* Only look at sockets that are using this specific device. */ switch (event) { - case NETDEV_CHANGEADDR: case NETDEV_CHANGEMTU: - /* A change in mtu or address is a bad thing, requiring + /* A change in mtu is a bad thing, requiring * LCP re-negotiation. */ diff --git a/trunk/drivers/net/r6040.c b/trunk/drivers/net/r6040.c index 0ffec4608441..200a363c3bf5 100644 --- a/trunk/drivers/net/r6040.c +++ b/trunk/drivers/net/r6040.c @@ -677,11 +677,9 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) if (status & RX_FIFO_FULL) dev->stats.rx_fifo_errors++; - if (likely(napi_schedule_prep(&lp->napi))) { - /* Mask off RX interrupt */ - misr &= ~RX_INTS; - __napi_schedule(&lp->napi); - } + /* Mask off RX interrupt */ + misr &= ~RX_INTS; + napi_schedule(&lp->napi); } /* TX interrupt request */ diff --git a/trunk/drivers/net/tulip/dmfe.c b/trunk/drivers/net/tulip/dmfe.c index 9a21ca3873fc..468512731966 100644 --- a/trunk/drivers/net/tulip/dmfe.c +++ b/trunk/drivers/net/tulip/dmfe.c @@ -879,6 +879,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) txptr = db->tx_remove_ptr; while(db->tx_packet_cnt) { tdes0 = le32_to_cpu(txptr->tdes0); + pr_debug("tdes0=%x\n", tdes0); if (tdes0 & 0x80000000) break; @@ -888,6 +889,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) /* Transmit statistic counter */ if ( tdes0 != 0x7fffffff ) { + pr_debug("tdes0=%x\n", tdes0); dev->stats.collisions += (tdes0 >> 3) & 0xf; dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; if (tdes0 & TDES0_ERR_MASK) { @@ -984,6 +986,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) /* error summary bit check */ if (rdes0 & 0x8000) { /* This is a error packet */ + pr_debug("rdes0: %x\n", rdes0); dev->stats.rx_errors++; if (rdes0 & 1) dev->stats.rx_fifo_errors++; @@ -1635,6 +1638,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db) else /* DM9102/DM9102A */ phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000; + pr_debug("Phy_mode %x\n", phy_mode); switch (phy_mode) { case 0x1000: db->op_mode = DMFE_10MHF; break; case 0x2000: db->op_mode = DMFE_10MFD; break; diff --git a/trunk/drivers/thermal/Kconfig b/trunk/drivers/thermal/Kconfig index f7f71b2d3101..bf7c687519ef 100644 --- a/trunk/drivers/thermal/Kconfig +++ b/trunk/drivers/thermal/Kconfig @@ -14,7 +14,11 @@ menuconfig THERMAL If you want this support, you should 
say Y or M here. config THERMAL_HWMON - bool + bool "Hardware monitoring support" depends on THERMAL depends on HWMON=y || HWMON=THERMAL - default y + help + The generic thermal sysfs driver's hardware monitoring support + requires a 2.10.7/3.0.2 or later lm-sensors userspace. + + Say Y if your user-space is new enough. diff --git a/trunk/drivers/thermal/thermal_sys.c b/trunk/drivers/thermal/thermal_sys.c index 708f8e92771a..0b1c82ad6805 100644 --- a/trunk/drivers/thermal/thermal_sys.c +++ b/trunk/drivers/thermal/thermal_sys.c @@ -420,29 +420,6 @@ thermal_cooling_device_trip_point_show(struct device *dev, /* hwmon sys I/F */ #include - -/* thermal zone devices with the same type share one hwmon device */ -struct thermal_hwmon_device { - char type[THERMAL_NAME_LENGTH]; - struct device *device; - int count; - struct list_head tz_list; - struct list_head node; -}; - -struct thermal_hwmon_attr { - struct device_attribute attr; - char name[16]; -}; - -/* one temperature input for each thermal zone */ -struct thermal_hwmon_temp { - struct list_head hwmon_node; - struct thermal_zone_device *tz; - struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ - struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ -}; - static LIST_HEAD(thermal_hwmon_list); static ssize_t @@ -460,10 +437,9 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) int ret; struct thermal_hwmon_attr *hwmon_attr = container_of(attr, struct thermal_hwmon_attr, attr); - struct thermal_hwmon_temp *temp - = container_of(hwmon_attr, struct thermal_hwmon_temp, + struct thermal_zone_device *tz + = container_of(hwmon_attr, struct thermal_zone_device, temp_input); - struct thermal_zone_device *tz = temp->tz; ret = tz->ops->get_temp(tz, &temperature); @@ -479,10 +455,9 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, { struct thermal_hwmon_attr *hwmon_attr = container_of(attr, struct thermal_hwmon_attr, attr); - struct thermal_hwmon_temp *temp - = container_of(hwmon_attr, struct thermal_hwmon_temp, + struct thermal_zone_device *tz + = container_of(hwmon_attr, struct thermal_zone_device, temp_crit); - struct thermal_zone_device *tz = temp->tz; long temperature; int ret; @@ -494,54 +469,22 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, } -static struct thermal_hwmon_device * -thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz) +static int +thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; + int new_hwmon_device = 1; + int result; mutex_lock(&thermal_list_lock); list_for_each_entry(hwmon, &thermal_hwmon_list, node) if (!strcmp(hwmon->type, tz->type)) { + new_hwmon_device = 0; mutex_unlock(&thermal_list_lock); - return hwmon; - } - mutex_unlock(&thermal_list_lock); - - return NULL; -} - -/* Find the temperature input matching a given thermal zone */ -static struct thermal_hwmon_temp * -thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, - const struct thermal_zone_device *tz) -{ - struct thermal_hwmon_temp *temp; - - mutex_lock(&thermal_list_lock); - list_for_each_entry(temp, &hwmon->tz_list, hwmon_node) - if (temp->tz == tz) { - mutex_unlock(&thermal_list_lock); - return temp; + goto register_sys_interface; } mutex_unlock(&thermal_list_lock); - return NULL; -} - -static int -thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) -{ - struct thermal_hwmon_device *hwmon; - struct thermal_hwmon_temp *temp; - int new_hwmon_device = 1; - int result; - - hwmon = thermal_hwmon_lookup_by_type(tz); - 
if (hwmon) { - new_hwmon_device = 0; - goto register_sys_interface; - } - hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); if (!hwmon) return -ENOMEM; @@ -559,36 +502,30 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) goto free_mem; register_sys_interface: - temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL); - if (!temp) { - result = -ENOMEM; - goto unregister_name; - } - - temp->tz = tz; + tz->hwmon = hwmon; hwmon->count++; - snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH, + snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH, "temp%d_input", hwmon->count); - temp->temp_input.attr.attr.name = temp->temp_input.name; - temp->temp_input.attr.attr.mode = 0444; - temp->temp_input.attr.show = temp_input_show; - sysfs_attr_init(&temp->temp_input.attr.attr); - result = device_create_file(hwmon->device, &temp->temp_input.attr); + tz->temp_input.attr.attr.name = tz->temp_input.name; + tz->temp_input.attr.attr.mode = 0444; + tz->temp_input.attr.show = temp_input_show; + sysfs_attr_init(&tz->temp_input.attr.attr); + result = device_create_file(hwmon->device, &tz->temp_input.attr); if (result) - goto free_temp_mem; + goto unregister_name; if (tz->ops->get_crit_temp) { unsigned long temperature; if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH, + snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH, "temp%d_crit", hwmon->count); - temp->temp_crit.attr.attr.name = temp->temp_crit.name; - temp->temp_crit.attr.attr.mode = 0444; - temp->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&temp->temp_crit.attr.attr); + tz->temp_crit.attr.attr.name = tz->temp_crit.name; + tz->temp_crit.attr.attr.mode = 0444; + tz->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&tz->temp_crit.attr.attr); result = device_create_file(hwmon->device, - &temp->temp_crit.attr); + &tz->temp_crit.attr); if (result) goto unregister_input; } @@ -597,15 +534,13 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) mutex_lock(&thermal_list_lock); if (new_hwmon_device) list_add_tail(&hwmon->node, &thermal_hwmon_list); - list_add_tail(&temp->hwmon_node, &hwmon->tz_list); + list_add_tail(&tz->hwmon_node, &hwmon->tz_list); mutex_unlock(&thermal_list_lock); return 0; unregister_input: - device_remove_file(hwmon->device, &temp->temp_input.attr); - free_temp_mem: - kfree(temp); + device_remove_file(hwmon->device, &tz->temp_input.attr); unregister_name: if (new_hwmon_device) { device_remove_file(hwmon->device, &dev_attr_name); @@ -621,30 +556,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) static void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { - struct thermal_hwmon_device *hwmon; - struct thermal_hwmon_temp *temp; - - hwmon = thermal_hwmon_lookup_by_type(tz); - if (unlikely(!hwmon)) { - /* Should never happen... */ - dev_dbg(&tz->device, "hwmon device lookup failed!\n"); - return; - } - - temp = thermal_hwmon_lookup_temp(hwmon, tz); - if (unlikely(!temp)) { - /* Should never happen... 
*/ - dev_dbg(&tz->device, "temperature input lookup failed!\n"); - return; - } + struct thermal_hwmon_device *hwmon = tz->hwmon; - device_remove_file(hwmon->device, &temp->temp_input.attr); + tz->hwmon = NULL; + device_remove_file(hwmon->device, &tz->temp_input.attr); if (tz->ops->get_crit_temp) - device_remove_file(hwmon->device, &temp->temp_crit.attr); + device_remove_file(hwmon->device, &tz->temp_crit.attr); mutex_lock(&thermal_list_lock); - list_del(&temp->hwmon_node); - kfree(temp); + list_del(&tz->hwmon_node); if (!list_empty(&hwmon->tz_list)) { mutex_unlock(&thermal_list_lock); return; diff --git a/trunk/drivers/watchdog/Kconfig b/trunk/drivers/watchdog/Kconfig index 21d816e9dfa5..9536d386bb38 100644 --- a/trunk/drivers/watchdog/Kconfig +++ b/trunk/drivers/watchdog/Kconfig @@ -599,7 +599,8 @@ config IT87_WDT config HP_WATCHDOG tristate "HP ProLiant iLO2+ Hardware Watchdog Timer" - depends on X86 && PCI + depends on X86 + default m help A software monitoring watchdog and NMI sourcing driver. This driver will detect lockups and provide a stack trace. This is a driver that diff --git a/trunk/fs/ceph/mds_client.c b/trunk/fs/ceph/mds_client.c index 0c1d91756528..79743d146be6 100644 --- a/trunk/fs/ceph/mds_client.c +++ b/trunk/fs/ceph/mds_client.c @@ -1438,15 +1438,12 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, struct dentry *temp; char *path; int len, pos; - unsigned seq; if (dentry == NULL) return ERR_PTR(-EINVAL); retry: len = 0; - seq = read_seqbegin(&rename_lock); - rcu_read_lock(); for (temp = dentry; !IS_ROOT(temp);) { struct inode *inode = temp->d_inode; if (inode && ceph_snap(inode) == CEPH_SNAPDIR) @@ -1458,12 +1455,10 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, len += 1 + temp->d_name.len; temp = temp->d_parent; if (temp == NULL) { - rcu_read_unlock(); pr_err("build_path corrupt dentry %p\n", dentry); return ERR_PTR(-EINVAL); } } - rcu_read_unlock(); if (len) len--; /* no leading '/' */ @@ -1472,12 +1467,9 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, return ERR_PTR(-ENOMEM); pos = len; path[pos] = 0; /* trailing null */ - rcu_read_lock(); for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { - struct inode *inode; + struct inode *inode = temp->d_inode; - spin_lock(&temp->d_lock); - inode = temp->d_inode; if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { dout("build_path path+%d: %p SNAPDIR\n", pos, temp); @@ -1486,26 +1478,21 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, break; } else { pos -= temp->d_name.len; - if (pos < 0) { - spin_unlock(&temp->d_lock); + if (pos < 0) break; - } strncpy(path + pos, temp->d_name.name, temp->d_name.len); } - spin_unlock(&temp->d_lock); if (pos) path[--pos] = '/'; temp = temp->d_parent; if (temp == NULL) { - rcu_read_unlock(); pr_err("build_path corrupt dentry\n"); kfree(path); return ERR_PTR(-EINVAL); } } - rcu_read_unlock(); - if (pos != 0 || read_seqretry(&rename_lock, seq)) { + if (pos != 0) { pr_err("build_path did not end path lookup where " "expected, namelen is %d, pos is %d\n", len, pos); /* presumably this is only possible if racing with a diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c index bc4b12ca537b..3e2989976297 100644 --- a/trunk/fs/cifs/cifsfs.c +++ b/trunk/fs/cifs/cifsfs.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include "cifsfs.h" #include "cifspdu.h" @@ -543,12 +542,14 @@ static const struct super_operations cifs_super_ops = { static struct dentry * cifs_get_root(struct 
smb_vol *vol, struct super_block *sb) { - struct dentry *dentry; + int xid, rc; + struct inode *inode; + struct qstr name; + struct dentry *dparent = NULL, *dchild = NULL, *alias; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - char *full_path = NULL; - char *s, *p; + unsigned int i, full_len, len; + char *full_path = NULL, *pstart; char sep; - int xid; full_path = cifs_build_path_to_root(vol, cifs_sb, cifs_sb_master_tcon(cifs_sb)); @@ -559,32 +560,73 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) xid = GetXid(); sep = CIFS_DIR_SEP(cifs_sb); - dentry = dget(sb->s_root); - p = s = full_path; - - do { - struct inode *dir = dentry->d_inode; - struct dentry *child; - - /* skip separators */ - while (*s == sep) - s++; - if (!*s) - break; - p = s++; - /* next separator */ - while (*s && *s != sep) - s++; - - mutex_lock(&dir->i_mutex); - child = lookup_one_len(p, dentry, s - p); - mutex_unlock(&dir->i_mutex); - dput(dentry); - dentry = child; - } while (!IS_ERR(dentry)); + dparent = dget(sb->s_root); + full_len = strlen(full_path); + full_path[full_len] = sep; + pstart = full_path + 1; + + for (i = 1, len = 0; i <= full_len; i++) { + if (full_path[i] != sep || !len) { + len++; + continue; + } + + full_path[i] = 0; + cFYI(1, "get dentry for %s", pstart); + + name.name = pstart; + name.len = len; + name.hash = full_name_hash(pstart, len); + dchild = d_lookup(dparent, &name); + if (dchild == NULL) { + cFYI(1, "not exists"); + dchild = d_alloc(dparent, &name); + if (dchild == NULL) { + dput(dparent); + dparent = ERR_PTR(-ENOMEM); + goto out; + } + } + + cFYI(1, "get inode"); + if (dchild->d_inode == NULL) { + cFYI(1, "not exists"); + inode = NULL; + if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) + rc = cifs_get_inode_info_unix(&inode, full_path, + sb, xid); + else + rc = cifs_get_inode_info(&inode, full_path, + NULL, sb, xid, NULL); + if (rc) { + dput(dchild); + dput(dparent); + dparent = ERR_PTR(rc); + goto out; + } + alias = d_materialise_unique(dchild, inode); + if (alias != NULL) { + dput(dchild); + if (IS_ERR(alias)) { + dput(dparent); + dparent = ERR_PTR(-EINVAL); /* XXX */ + goto out; + } + dchild = alias; + } + } + cFYI(1, "parent %p, child %p", dparent, dchild); + + dput(dparent); + dparent = dchild; + len = 0; + pstart = full_path + i + 1; + full_path[i] = sep; + } +out: _FreeXid(xid); kfree(full_path); - return dentry; + return dparent; } static int cifs_set_super(struct super_block *sb, void *data) diff --git a/trunk/fs/cifs/cifsfs.h b/trunk/fs/cifs/cifsfs.h index 036ca83e5f46..0900e1658c96 100644 --- a/trunk/fs/cifs/cifsfs.h +++ b/trunk/fs/cifs/cifsfs.h @@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "1.74" +#define CIFS_VERSION "1.73" #endif /* _CIFSFS_H */ diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index ccc1afa0bf3b..dbd669cc5bc7 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -3485,7 +3485,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) goto out; } - snprintf(username, sizeof(username), "krb50x%x", fsuid); + snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid); vol_info->username = username; vol_info->local_nls = cifs_sb->local_nls; vol_info->linux_uid = fsuid; diff --git a/trunk/fs/cifs/dir.c b/trunk/fs/cifs/dir.c index fa8c21d913bc..81914df47ef1 100644 --- a/trunk/fs/cifs/dir.c +++ b/trunk/fs/cifs/dir.c @@ -55,7 +55,6 @@ 
build_path_from_dentry(struct dentry *direntry) char dirsep; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); - unsigned seq; if (direntry == NULL) return NULL; /* not much we can do if dentry is freed and @@ -69,29 +68,22 @@ build_path_from_dentry(struct dentry *direntry) dfsplen = 0; cifs_bp_rename_retry: namelen = dfsplen; - seq = read_seqbegin(&rename_lock); - rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { namelen += (1 + temp->d_name.len); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); - rcu_read_unlock(); return NULL; } } - rcu_read_unlock(); full_path = kmalloc(namelen+1, GFP_KERNEL); if (full_path == NULL) return full_path; full_path[namelen] = 0; /* trailing null */ - rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { - spin_lock(&temp->d_lock); namelen -= 1 + temp->d_name.len; if (namelen < 0) { - spin_unlock(&temp->d_lock); break; } else { full_path[namelen] = dirsep; @@ -99,17 +91,14 @@ build_path_from_dentry(struct dentry *direntry) temp->d_name.len); cFYI(0, "name: %s", full_path + namelen); } - spin_unlock(&temp->d_lock); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); - rcu_read_unlock(); kfree(full_path); return NULL; } } - rcu_read_unlock(); - if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { + if (namelen != dfsplen) { cERROR(1, "did not end path lookup where expected namelen is %d", namelen); /* presumably this is only possible if racing with a rename diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c index a9b4a24f2a16..bb71471a4d9d 100644 --- a/trunk/fs/cifs/file.c +++ b/trunk/fs/cifs/file.c @@ -1737,7 +1737,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, io_parms.pid = pid; io_parms.tcon = pTcon; io_parms.offset = *poffset; - io_parms.length = cur_len; + io_parms.length = len; rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &read_data, &buf_type); pSMBr = (struct smb_com_read_rsp *)read_data; diff --git a/trunk/fs/cifs/sess.c b/trunk/fs/cifs/sess.c index d3e619692ee0..3892ab817a36 100644 --- a/trunk/fs/cifs/sess.c +++ b/trunk/fs/cifs/sess.c @@ -428,7 +428,8 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { flags |= NTLMSSP_NEGOTIATE_SIGN; if (!ses->server->session_estab) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH; + flags |= NTLMSSP_NEGOTIATE_KEY_XCH | + NTLMSSP_NEGOTIATE_EXTENDED_SEC; } sec_blob->NegotiateFlags = cpu_to_le32(flags); @@ -464,11 +465,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) flags |= NTLMSSP_NEGOTIATE_SIGN; - if (!ses->server->session_estab) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH; - } + if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) + flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); diff --git a/trunk/fs/cramfs/inode.c b/trunk/fs/cramfs/inode.c index 739fb59bcdc2..e141939080f0 100644 --- a/trunk/fs/cramfs/inode.c +++ b/trunk/fs/cramfs/inode.c @@ -37,7 +37,7 @@ static DEFINE_MUTEX(read_mutex); /* These macros may change in future, to provide better st_ino semantics. 
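
The build_path_from_dentry() hunk above (and the earlier ceph_mdsc_build_path() one) both use the same two-pass idiom: walk the d_parent chain once to size the buffer, then walk it again filling name components backwards from the end. A minimal user-space sketch of that idiom, using a hypothetical node type instead of real dentries and returning NULL where the kernel code retries after a rename race:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {                            /* stand-in for a dentry */
	struct node *parent;             /* NULL at the root */
	const char *name;
};

/* Two passes over the parent chain: size the buffer, then fill it
 * backwards so each component lands in front of its children. */
static char *build_path(const struct node *leaf)
{
	const struct node *n;
	size_t len = 0, pos;
	char *path;

	if (!leaf->parent) {                     /* the root itself */
		path = malloc(2);
		if (path)
			memcpy(path, "/", 2);
		return path;
	}

	for (n = leaf; n->parent; n = n->parent)
		len += 1 + strlen(n->name);      /* '/' plus the component */

	path = malloc(len + 1);
	if (!path)
		return NULL;
	path[len] = '\0';

	pos = len;
	for (n = leaf; n->parent; n = n->parent) {
		size_t l = strlen(n->name);

		pos -= l;
		memcpy(path + pos, n->name, l);
		path[--pos] = '/';
	}
	if (pos != 0) {                          /* would mean the chain changed */
		free(path);
		return NULL;
	}
	return path;
}

int main(void)
{
	struct node root = { NULL, "" };
	struct node a = { &root, "a" }, b = { &a, "b" }, c = { &b, "c" };
	char *p = build_path(&c);

	printf("%s\n", p ? p : "(error)");       /* prints /a/b/c */
	free(p);
	return 0;
}

The seqlock/rcu lines being removed in the hunk exist precisely because the parent chain can change between the two passes; the sketch only shows the length/fill mechanics.
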
*/ #define OFFSET(x) ((x)->i_ino) -static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset) +static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset) { if (!cino->offset) return offset + 1; @@ -61,7 +61,7 @@ static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offse } static struct inode *get_cramfs_inode(struct super_block *sb, - const struct cramfs_inode *cramfs_inode, unsigned int offset) + struct cramfs_inode *cramfs_inode, unsigned int offset) { struct inode *inode; static struct timespec zerotime; @@ -317,7 +317,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) /* Set it all up.. */ sb->s_op = &cramfs_ops; root = get_cramfs_inode(sb, &super.root, 0); - if (IS_ERR(root)) + if (!root) goto out; sb->s_root = d_alloc_root(root); if (!sb->s_root) { @@ -423,7 +423,6 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { unsigned int offset = 0; - struct inode *inode = NULL; int sorted; mutex_lock(&read_mutex); @@ -450,8 +449,8 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s for (;;) { if (!namelen) { - inode = ERR_PTR(-EIO); - goto out; + mutex_unlock(&read_mutex); + return ERR_PTR(-EIO); } if (name[namelen-1]) break; @@ -463,18 +462,17 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s if (retval > 0) continue; if (!retval) { - inode = get_cramfs_inode(dir->i_sb, de, dir_off); - break; + struct cramfs_inode entry = *de; + mutex_unlock(&read_mutex); + d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off)); + return NULL; } /* else (retval < 0) */ if (sorted) break; } -out: mutex_unlock(&read_mutex); - if (IS_ERR(inode)) - return ERR_CAST(inode); - d_add(dentry, inode); + d_add(dentry, NULL); return NULL; } diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index fbdcbca40725..6e4ea6d87774 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -1813,6 +1813,8 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, tname = dentry->d_name.name; i = dentry->d_inode; prefetch(tname); + if (i) + prefetch(i); /* * This seqcount check is required to ensure name and * len are loaded atomically, so as not to walk off the diff --git a/trunk/fs/exofs/super.c b/trunk/fs/exofs/super.c index c57beddcc217..06065bd37fc3 100644 --- a/trunk/fs/exofs/super.c +++ b/trunk/fs/exofs/super.c @@ -913,7 +913,7 @@ struct dentry *exofs_get_parent(struct dentry *child) unsigned long ino = exofs_parent_ino(child); if (!ino) - return ERR_PTR(-ESTALE); + return NULL; return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino)); } diff --git a/trunk/fs/fscache/page.c b/trunk/fs/fscache/page.c index 3f7a59bfa7ad..2f343b4d7a7d 100644 --- a/trunk/fs/fscache/page.c +++ b/trunk/fs/fscache/page.c @@ -976,12 +976,16 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, pagevec_init(&pvec, 0); next = 0; - do { - if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) - break; + while (next <= (loff_t)-1 && + pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE) + ) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; - next = page->index; + pgoff_t page_index = page->index; + + ASSERTCMP(page_index, >=, next); + next = page_index + 1; + if (PageFsCache(page)) { __fscache_wait_on_page_write(cookie, page); __fscache_uncache_page(cookie, page); @@ -989,7 +993,7 @@ void 
__fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, } pagevec_release(&pvec); cond_resched(); - } while (++next); + } _leave(""); } diff --git a/trunk/fs/hppfs/hppfs.c b/trunk/fs/hppfs/hppfs.c index 85c098a499f3..87ed48e0343d 100644 --- a/trunk/fs/hppfs/hppfs.c +++ b/trunk/fs/hppfs/hppfs.c @@ -139,8 +139,7 @@ static int file_removed(struct dentry *dentry, const char *file) static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, struct nameidata *nd) { - struct dentry *proc_dentry, *parent; - struct qstr *name = &dentry->d_name; + struct dentry *proc_dentry, *new, *parent; struct inode *inode; int err, deleted; @@ -150,9 +149,23 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, else if (deleted) return ERR_PTR(-ENOENT); + err = -ENOMEM; parent = HPPFS_I(ino)->proc_dentry; mutex_lock(&parent->d_inode->i_mutex); - proc_dentry = lookup_one_len(name->name, parent, name->len); + proc_dentry = d_lookup(parent, &dentry->d_name); + if (proc_dentry == NULL) { + proc_dentry = d_alloc(parent, &dentry->d_name); + if (proc_dentry == NULL) { + mutex_unlock(&parent->d_inode->i_mutex); + goto out; + } + new = (*parent->d_inode->i_op->lookup)(parent->d_inode, + proc_dentry, NULL); + if (new) { + dput(proc_dentry); + proc_dentry = new; + } + } mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(proc_dentry)) @@ -161,11 +174,13 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, err = -ENOMEM; inode = get_inode(ino->i_sb, proc_dentry); if (!inode) - goto out; + goto out_dput; d_add(dentry, inode); return NULL; + out_dput: + dput(proc_dentry); out: return ERR_PTR(err); } @@ -675,10 +690,8 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) struct inode *proc_ino = dentry->d_inode; struct inode *inode = new_inode(sb); - if (!inode) { - dput(dentry); + if (!inode) return ERR_PTR(-ENOMEM); - } if (S_ISDIR(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_dir_iops; @@ -691,7 +704,7 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) inode->i_fop = &hppfs_file_fops; } - HPPFS_I(inode)->proc_dentry = dentry; + HPPFS_I(inode)->proc_dentry = dget(dentry); inode->i_uid = proc_ino->i_uid; inode->i_gid = proc_ino->i_gid; @@ -724,7 +737,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent) sb->s_fs_info = proc_mnt; err = -ENOMEM; - root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root)); + root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root); if (!root_inode) goto out_mntput; diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c index 275ca4749a2e..c88eab55aec9 100644 --- a/trunk/fs/libfs.c +++ b/trunk/fs/libfs.c @@ -822,7 +822,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, goto out; attr->set_buf[size] = '\0'; - val = simple_strtoll(attr->set_buf, NULL, 0); + val = simple_strtol(attr->set_buf, NULL, 0); ret = attr->set(attr->data, val); if (ret == 0) ret = len; /* on success, claim we got the whole input */ diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 14ab8d3f2f0c..5c867dd1c0b3 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -942,6 +942,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, * Don't forget we might have a non-mountpoint managed dentry * that wants to block transit. 
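
The __fscache_uncache_all_inode_pages() hunk above is about how the lookup cursor is advanced between pagevec batches: the loop must step past the last index it was handed and stop on an empty batch. A small self-contained sketch of that batched-cursor walk, with a toy index array standing in for an address_space and a hypothetical lookup_batch() in place of pagevec_lookup():

#include <stdio.h>
#include <stddef.h>

#define BATCH 4

/* Toy sparse index set standing in for a page-cache mapping. */
static const unsigned long indices[] = { 0, 1, 2, 5, 8, 13, 21, 34 };
#define NR (sizeof(indices) / sizeof(indices[0]))

/* Return up to BATCH indices >= cursor, like pagevec_lookup(). */
static size_t lookup_batch(unsigned long cursor, unsigned long out[BATCH])
{
	size_t i, n = 0;

	for (i = 0; i < NR && n < BATCH; i++)
		if (indices[i] >= cursor)
			out[n++] = indices[i];
	return n;
}

int main(void)
{
	unsigned long next = 0, batch[BATCH];
	size_t n, i;

	/* Advance the cursor past the last index of each batch; an empty
	 * batch terminates the walk even though the indices are sparse. */
	while ((n = lookup_batch(next, batch)) != 0) {
		for (i = 0; i < n; i++)
			printf("visit %lu\n", batch[i]);
		next = batch[n - 1] + 1;
	}
	return 0;
}
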
*/ + *inode = path->dentry->d_inode; if (unlikely(managed_dentry_might_block(path->dentry))) return false; @@ -954,12 +955,6 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, path->mnt = mounted; path->dentry = mounted->mnt_root; nd->seq = read_seqcount_begin(&path->dentry->d_seq); - /* - * Update the inode too. We don't need to re-check the - * dentry sequence number here after this d_inode read, - * because a mount-point is always pinned. - */ - *inode = path->dentry->d_inode; } return true; } diff --git a/trunk/fs/ufs/namei.c b/trunk/fs/ufs/namei.c index b57aab9a1184..29309e25417f 100644 --- a/trunk/fs/ufs/namei.c +++ b/trunk/fs/ufs/namei.c @@ -56,12 +56,16 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru lock_ufs(dir->i_sb); ino = ufs_inode_by_name(dir, &dentry->d_name); - if (ino) + if (ino) { inode = ufs_iget(dir->i_sb, ino); + if (IS_ERR(inode)) { + unlock_ufs(dir->i_sb); + return ERR_CAST(inode); + } + } unlock_ufs(dir->i_sb); - if (IS_ERR(inode)) - return ERR_CAST(inode); - return d_splice_alias(inode, dentry); + d_add(dentry, inode); + return NULL; } /* diff --git a/trunk/include/acpi/acpi_bus.h b/trunk/include/acpi/acpi_bus.h index 6cd5b6403a7b..3a10ef5914eb 100644 --- a/trunk/include/acpi/acpi_bus.h +++ b/trunk/include/acpi/acpi_bus.h @@ -210,7 +210,7 @@ struct acpi_device_power_state { struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; - struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ + struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ }; /* Performance Management */ diff --git a/trunk/include/acpi/acpiosxf.h b/trunk/include/acpi/acpiosxf.h index 4543b6f75867..a756bc8d866d 100644 --- a/trunk/include/acpi/acpiosxf.h +++ b/trunk/include/acpi/acpiosxf.h @@ -98,11 +98,8 @@ acpi_os_table_override(struct acpi_table_header *existing_table, /* * Spinlock primitives */ - -#ifndef acpi_os_create_lock acpi_status acpi_os_create_lock(acpi_spinlock *out_handle); -#endif void acpi_os_delete_lock(acpi_spinlock handle); diff --git a/trunk/include/acpi/platform/aclinux.h b/trunk/include/acpi/platform/aclinux.h index 2ce1be9f6291..5d2a5e9544d9 100644 --- a/trunk/include/acpi/platform/aclinux.h +++ b/trunk/include/acpi/platform/aclinux.h @@ -159,24 +159,6 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) } while (0) #endif -/* - * When lockdep is enabled, the spin_lock_init() macro stringifies it's - * argument and uses that as a name for the lock in debugging. - * By executing spin_lock_init() in a macro the key changes from "lock" for - * all locks to the name of the argument of acpi_os_create_lock(), which - * prevents lockdep from reporting false positives for ACPICA locks. - */ -#define acpi_os_create_lock(__handle) \ -({ \ - spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ - \ - if (lock) { \ - *(__handle) = lock; \ - spin_lock_init(*(__handle)); \ - } \ - lock ? 
AE_OK : AE_NO_MEMORY; \ -}) - #endif /* __KERNEL__ */ #endif /* __ACLINUX_H__ */ diff --git a/trunk/include/linux/acpi.h b/trunk/include/linux/acpi.h index 2312e850aab8..1deb2a73c2da 100644 --- a/trunk/include/linux/acpi.h +++ b/trunk/include/linux/acpi.h @@ -238,6 +238,7 @@ extern int acpi_paddr_to_node(u64 start_addr, u64 size); extern int pnpacpi_disabled; #define PXM_INVAL (-1) +#define NID_INVAL (-1) int acpi_check_resource_conflict(const struct resource *res); diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index 9e19477991ad..54b8b4d7b68f 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -1097,6 +1097,12 @@ struct net_device { #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) +#define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \ + NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_HIGHDMA | \ + NETIF_F_SCTP_CSUM | \ + NETIF_F_ALL_FCOE) + /* * If one device supports one of these features, then enable them * for all in netdev_increment_features. diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 14a6c7b545de..496770a96487 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -844,7 +844,6 @@ enum cpu_idle_type { #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ enum powersavings_balance_level { POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ @@ -894,21 +893,16 @@ static inline int sd_power_saving_flags(void) return 0; } -struct sched_group_power { +struct sched_group { + struct sched_group *next; /* Must be a circular list */ atomic_t ref; + /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a * single CPU. */ - unsigned int power, power_orig; -}; - -struct sched_group { - struct sched_group *next; /* Must be a circular list */ - atomic_t ref; - + unsigned int cpu_power, cpu_power_orig; unsigned int group_weight; - struct sched_group_power *sgp; /* * The CPUs this group covers. 
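
The struct sched_group hunk above folds cpu_power back into the group; the code it reverts had split those fields into a separate struct sched_group_power so that several group descriptors could share one reference-counted capacity object. A minimal user-space sketch of that ownership pattern, with hypothetical names and a plain int where the kernel uses atomic_t:

#include <stdio.h>
#include <stdlib.h>

/* Shared, reference-counted capacity data (cf. struct sched_group_power). */
struct group_power {
	int ref;
	unsigned int power, power_orig;
};

struct group {
	struct group *next;          /* circular list of groups */
	struct group_power *gp;      /* may be shared with other groups */
};

static struct group_power *power_get(struct group_power *gp)
{
	gp->ref++;                   /* the kernel uses atomic_inc() here */
	return gp;
}

static void power_put(struct group_power *gp)
{
	if (--gp->ref == 0)
		free(gp);
}

int main(void)
{
	struct group_power *gp = malloc(sizeof(*gp));
	struct group a = { &a, NULL }, b = { &a, NULL };

	gp->ref = 0;
	gp->power = gp->power_orig = 1024;

	a.gp = power_get(gp);        /* two groups share one power object */
	b.gp = power_get(gp);
	a.next = &b;                 /* a -> b -> a */

	printf("power = %u (ref %d)\n", a.gp->power, gp->ref);

	power_put(b.gp);             /* freed only when the last user drops it */
	power_put(a.gp);
	return 0;
}

Sharing one object this way matters for overlapping NUMA groups, where the same span (and therefore the same capacity data) is reachable from several domains.
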
@@ -1260,9 +1254,6 @@ struct task_struct { #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; char rcu_read_unlock_special; -#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) - int rcu_boosted; -#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */ struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TREE_PREEMPT_RCU diff --git a/trunk/include/linux/sdla.h b/trunk/include/linux/sdla.h index 9995c7fc3f60..564acd3a71c1 100644 --- a/trunk/include/linux/sdla.h +++ b/trunk/include/linux/sdla.h @@ -112,7 +112,11 @@ struct sdla_dlci_conf { short Tb_max; }; -#ifdef __KERNEL__ +#ifndef __KERNEL__ + +void sdla(void *cfg_info, char *dev, struct frad_conf *conf, int quiet); + +#else /* important Z80 window addresses */ #define SDLA_CONTROL_WND 0xE000 diff --git a/trunk/include/linux/thermal.h b/trunk/include/linux/thermal.h index 47b4a27e6e97..d3ec89fb4122 100644 --- a/trunk/include/linux/thermal.h +++ b/trunk/include/linux/thermal.h @@ -85,6 +85,22 @@ struct thermal_cooling_device { ((long)t-2732+5)/10 : ((long)t-2732-5)/10) #define CELSIUS_TO_KELVIN(t) ((t)*10+2732) +#if defined(CONFIG_THERMAL_HWMON) +/* thermal zone devices with the same type share one hwmon device */ +struct thermal_hwmon_device { + char type[THERMAL_NAME_LENGTH]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; +#endif + struct thermal_zone_device { int id; char type[THERMAL_NAME_LENGTH]; @@ -104,6 +120,12 @@ struct thermal_zone_device { struct mutex lock; /* protect cooling devices list */ struct list_head node; struct delayed_work poll_queue; +#if defined(CONFIG_THERMAL_HWMON) + struct list_head hwmon_node; + struct thermal_hwmon_device *hwmon; + struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ + struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ +#endif }; /* Adding event notification support elements */ #define THERMAL_GENL_FAMILY_NAME "thermal_event" diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 8aafbb80b8b0..75113cb7c4fb 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -68,7 +68,6 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); static struct rcu_state *rcu_state = &rcu_preempt_state; -static void rcu_read_unlock_special(struct task_struct *t); static int rcu_preempted_readers_exp(struct rcu_node *rnp); /* @@ -148,7 +147,7 @@ static void rcu_preempt_note_context_switch(int cpu) struct rcu_data *rdp; struct rcu_node *rnp; - if (t->rcu_read_lock_nesting > 0 && + if (t->rcu_read_lock_nesting && (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { /* Possibly blocking in an RCU read-side critical section. */ @@ -191,14 +190,6 @@ static void rcu_preempt_note_context_switch(int cpu) rnp->gp_tasks = &t->rcu_node_entry; } raw_spin_unlock_irqrestore(&rnp->lock, flags); - } else if (t->rcu_read_lock_nesting < 0 && - t->rcu_read_unlock_special) { - - /* - * Complete exit from RCU read-side critical section on - * behalf of preempted instance of __rcu_read_unlock(). - */ - rcu_read_unlock_special(t); } /* @@ -293,7 +284,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t, * notify RCU core processing or task having blocked during the RCU * read-side critical section. 
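
The struct thermal_hwmon_device hunk in include/linux/thermal.h above (and the matching unregister hunk near the top of this section) implement a find-or-create pattern: thermal zones of the same type attach to one shared hwmon device, and the device is torn down only when its last zone goes away. A simplified user-space sketch of that lifetime rule, using a counter instead of the tz_list and invented names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One shared "hwmon" per type name; zones of that type reference it. */
struct hwmon {
	struct hwmon *next;
	char type[20];
	int nr_zones;
};

struct zone {
	char type[20];
	struct hwmon *hwmon;
};

static struct hwmon *hwmons;             /* global registry */

static int zone_add(struct zone *tz)
{
	struct hwmon *h;

	for (h = hwmons; h; h = h->next)
		if (!strcmp(h->type, tz->type))
			goto found;              /* reuse the shared device */

	h = calloc(1, sizeof(*h));
	if (!h)
		return -1;
	strcpy(h->type, tz->type);
	h->next = hwmons;
	hwmons = h;
found:
	h->nr_zones++;
	tz->hwmon = h;
	return 0;
}

static void zone_remove(struct zone *tz)
{
	struct hwmon *h = tz->hwmon, **p;

	tz->hwmon = NULL;
	if (--h->nr_zones)                       /* other zones still use it */
		return;
	for (p = &hwmons; *p; p = &(*p)->next)
		if (*p == h) {
			*p = h->next;            /* last user: unlink and free */
			break;
		}
	free(h);
}

int main(void)
{
	struct zone a = { "acpitz", NULL }, b = { "acpitz", NULL };

	zone_add(&a);
	zone_add(&b);
	printf("shared: %s\n", a.hwmon == b.hwmon ? "yes" : "no");
	zone_remove(&a);                         /* hwmon survives */
	zone_remove(&b);                         /* hwmon freed here */
	return 0;
}
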
*/ -static noinline void rcu_read_unlock_special(struct task_struct *t) +static void rcu_read_unlock_special(struct task_struct *t) { int empty; int empty_exp; @@ -318,7 +309,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) } /* Hardware IRQ handlers cannot block. */ - if (in_irq() || in_serving_softirq()) { + if (in_irq()) { local_irq_restore(flags); return; } @@ -351,11 +342,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) #ifdef CONFIG_RCU_BOOST if (&t->rcu_node_entry == rnp->boost_tasks) rnp->boost_tasks = np; - /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */ - if (t->rcu_boosted) { - special |= RCU_READ_UNLOCK_BOOSTED; - t->rcu_boosted = 0; - } #endif /* #ifdef CONFIG_RCU_BOOST */ t->rcu_blocked_node = NULL; @@ -372,6 +358,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) #ifdef CONFIG_RCU_BOOST /* Unboost if we were boosted. */ if (special & RCU_READ_UNLOCK_BOOSTED) { + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED; rt_mutex_unlock(t->rcu_boost_mutex); t->rcu_boost_mutex = NULL; } @@ -400,22 +387,13 @@ void __rcu_read_unlock(void) struct task_struct *t = current; barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ - if (t->rcu_read_lock_nesting != 1) - --t->rcu_read_lock_nesting; - else { - t->rcu_read_lock_nesting = INT_MIN; - barrier(); /* assign before ->rcu_read_unlock_special load */ - if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) - rcu_read_unlock_special(t); - barrier(); /* ->rcu_read_unlock_special load before assign */ - t->rcu_read_lock_nesting = 0; - } + --t->rcu_read_lock_nesting; + barrier(); /* decrement before load of ->rcu_read_unlock_special */ + if (t->rcu_read_lock_nesting == 0 && + unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) + rcu_read_unlock_special(t); #ifdef CONFIG_PROVE_LOCKING - { - int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); - - WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); - } + WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0); #endif /* #ifdef CONFIG_PROVE_LOCKING */ } EXPORT_SYMBOL_GPL(__rcu_read_unlock); @@ -611,8 +589,7 @@ static void rcu_preempt_check_callbacks(int cpu) rcu_preempt_qs(cpu); return; } - if (t->rcu_read_lock_nesting > 0 && - per_cpu(rcu_preempt_data, cpu).qs_pending) + if (per_cpu(rcu_preempt_data, cpu).qs_pending) t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; } @@ -718,12 +695,9 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) raw_spin_lock_irqsave(&rnp->lock, flags); for (;;) { - if (!sync_rcu_preempt_exp_done(rnp)) { - raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (!sync_rcu_preempt_exp_done(rnp)) break; - } if (rnp->parent == NULL) { - raw_spin_unlock_irqrestore(&rnp->lock, flags); wake_up(&sync_rcu_preempt_exp_wq); break; } @@ -733,6 +707,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) raw_spin_lock(&rnp->lock); /* irqs already disabled */ rnp->expmask &= ~mask; } + raw_spin_unlock_irqrestore(&rnp->lock, flags); } /* @@ -1199,7 +1174,7 @@ static int rcu_boost(struct rcu_node *rnp) t = container_of(tb, struct task_struct, rcu_node_entry); rt_mutex_init_proxy_locked(&mtx, t); t->rcu_boost_mutex = &mtx; - t->rcu_boosted = 1; + t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED; raw_spin_unlock_irqrestore(&rnp->lock, flags); rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ rt_mutex_unlock(&mtx); /* Keep lockdep happy. 
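
The __rcu_read_unlock() hunk above trades a plain decrement for a protocol where the outermost unlock parks the nesting counter at INT_MIN while the special-work path runs, and only then drops it to zero. A single-threaded sketch of just that counter protocol (the real code additionally needs the barriers and per-task state shown in the hunk):

#include <limits.h>
#include <stdio.h>

static int nesting;              /* cf. t->rcu_read_lock_nesting */
static int special_pending;      /* cf. t->rcu_read_unlock_special */

static void read_lock(void)  { nesting++; }

static void do_special(void)
{
	/* While this runs, nesting is INT_MIN, so any read_unlock()
	 * reached from here just decrements and stays off the slow
	 * path instead of recursing into it. */
	printf("special work, nesting=%d\n", nesting);
	special_pending = 0;
}

static void read_unlock(void)
{
	if (nesting != 1) {
		--nesting;           /* nested or re-entered: fast path */
	} else {
		nesting = INT_MIN;   /* sentinel: outermost unlock in progress */
		if (special_pending)
			do_special();
		nesting = 0;         /* fully outside the critical section */
	}
}

int main(void)
{
	read_lock();
	read_lock();
	special_pending = 1;
	read_unlock();               /* inner: fast path */
	read_unlock();               /* outermost: runs the special work */
	printf("done, nesting=%d\n", nesting);
	return 0;
}
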
*/ diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index fde6ff903525..3dc716f6d8ad 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -2544,9 +2544,13 @@ static int ttwu_remote(struct task_struct *p, int wake_flags) } #ifdef CONFIG_SMP -static void sched_ttwu_do_pending(struct task_struct *list) +static void sched_ttwu_pending(void) { struct rq *rq = this_rq(); + struct task_struct *list = xchg(&rq->wake_list, NULL); + + if (!list) + return; raw_spin_lock(&rq->lock); @@ -2559,45 +2563,9 @@ static void sched_ttwu_do_pending(struct task_struct *list) raw_spin_unlock(&rq->lock); } -#ifdef CONFIG_HOTPLUG_CPU - -static void sched_ttwu_pending(void) -{ - struct rq *rq = this_rq(); - struct task_struct *list = xchg(&rq->wake_list, NULL); - - if (!list) - return; - - sched_ttwu_do_pending(list); -} - -#endif /* CONFIG_HOTPLUG_CPU */ - void scheduler_ipi(void) { - struct rq *rq = this_rq(); - struct task_struct *list = xchg(&rq->wake_list, NULL); - - if (!list) - return; - - /* - * Not all reschedule IPI handlers call irq_enter/irq_exit, since - * traditionally all their work was done from the interrupt return - * path. Now that we actually do some work, we need to make sure - * we do call them. - * - * Some archs already do call them, luckily irq_enter/exit nest - * properly. - * - * Arguably we should visit all archs and update all handlers, - * however a fair share of IPIs are still resched only so this would - * somewhat pessimize the simple resched case. - */ - irq_enter(); - sched_ttwu_do_pending(list); - irq_exit(); + sched_ttwu_pending(); } static void ttwu_queue_remote(struct task_struct *p, int cpu) @@ -6589,7 +6557,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, break; } - if (!group->sgp->power) { + if (!group->cpu_power) { printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); @@ -6613,9 +6581,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); printk(KERN_CONT " %s", str); - if (group->sgp->power != SCHED_POWER_SCALE) { + if (group->cpu_power != SCHED_POWER_SCALE) { printk(KERN_CONT " (cpu_power = %d)", - group->sgp->power); + group->cpu_power); } group = group->next; @@ -6806,39 +6774,11 @@ static struct root_domain *alloc_rootdomain(void) return rd; } -static void free_sched_groups(struct sched_group *sg, int free_sgp) -{ - struct sched_group *tmp, *first; - - if (!sg) - return; - - first = sg; - do { - tmp = sg->next; - - if (free_sgp && atomic_dec_and_test(&sg->sgp->ref)) - kfree(sg->sgp); - - kfree(sg); - sg = tmp; - } while (sg != first); -} - static void free_sched_domain(struct rcu_head *rcu) { struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); - - /* - * If its an overlapping domain it has private groups, iterate and - * nuke them all. 
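
The scheduler_ipi()/sched_ttwu_pending() hunk above drains a per-runqueue pending-wakeup list by exchanging the list head with NULL in one shot and then walking the private copy. A C11-atomics sketch of that handoff, with a hypothetical task type; the real code runs in IRQ context and takes rq->lock around the drain:

#include <stdatomic.h>
#include <stdio.h>

struct task {
	struct task *next;
	int id;
};

/* Per-runqueue pending wake list, filled by remote CPUs. */
static _Atomic(struct task *) wake_list;

static void queue_remote(struct task *t)
{
	/* lock-free push, cf. ttwu_queue_remote() */
	t->next = atomic_load(&wake_list);
	while (!atomic_compare_exchange_weak(&wake_list, &t->next, t))
		;
}

static void ipi_handler(void)
{
	/* grab the whole list in one shot, cf. sched_ttwu_pending() */
	struct task *t = atomic_exchange(&wake_list, NULL);

	while (t) {
		struct task *next = t->next;

		printf("wake task %d\n", t->id);
		t = next;
	}
}

int main(void)
{
	struct task a = { NULL, 1 }, b = { NULL, 2 };

	queue_remote(&a);
	queue_remote(&b);
	ipi_handler();          /* wakes 2 then 1 (LIFO push order) */
	ipi_handler();          /* list already empty: nothing to do */
	return 0;
}
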
- */ - if (sd->flags & SD_OVERLAP) { - free_sched_groups(sd->groups, 1); - } else if (atomic_dec_and_test(&sd->groups->ref)) { - kfree(sd->groups->sgp); + if (atomic_dec_and_test(&sd->groups->ref)) kfree(sd->groups); - } kfree(sd); } @@ -7005,7 +6945,6 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0; struct sd_data { struct sched_domain **__percpu sd; struct sched_group **__percpu sg; - struct sched_group_power **__percpu sgp; }; struct s_data { @@ -7025,73 +6964,15 @@ struct sched_domain_topology_level; typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); -#define SDTL_OVERLAP 0x01 - struct sched_domain_topology_level { sched_domain_init_f init; sched_domain_mask_f mask; - int flags; struct sd_data data; }; -static int -build_overlap_sched_groups(struct sched_domain *sd, int cpu) -{ - struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; - const struct cpumask *span = sched_domain_span(sd); - struct cpumask *covered = sched_domains_tmpmask; - struct sd_data *sdd = sd->private; - struct sched_domain *child; - int i; - - cpumask_clear(covered); - - for_each_cpu(i, span) { - struct cpumask *sg_span; - - if (cpumask_test_cpu(i, covered)) - continue; - - sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), - GFP_KERNEL, cpu_to_node(i)); - - if (!sg) - goto fail; - - sg_span = sched_group_cpus(sg); - - child = *per_cpu_ptr(sdd->sd, i); - if (child->child) { - child = child->child; - cpumask_copy(sg_span, sched_domain_span(child)); - } else - cpumask_set_cpu(i, sg_span); - - cpumask_or(covered, covered, sg_span); - - sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span)); - atomic_inc(&sg->sgp->ref); - - if (cpumask_test_cpu(cpu, sg_span)) - groups = sg; - - if (!first) - first = sg; - if (last) - last->next = sg; - last = sg; - last->next = first; - } - sd->groups = groups; - - return 0; - -fail: - free_sched_groups(first, 0); - - return -ENOMEM; -} - +/* + * Assumes the sched_domain tree is fully constructed + */ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) { struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); @@ -7100,24 +6981,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) if (child) cpu = cpumask_first(sched_domain_span(child)); - if (sg) { + if (sg) *sg = *per_cpu_ptr(sdd->sg, cpu); - (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu); - atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */ - } return cpu; } /* + * build_sched_groups takes the cpumask we wish to span, and a pointer + * to a function which identifies what group(along with sched group) a CPU + * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids + * (due to the fact that we keep track of groups covered with a struct cpumask). + * * build_sched_groups will build a circular linked list of the groups * covered by the given span, and will set each group's ->cpumask correctly, * and ->cpu_power to 0. 
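
build_overlap_sched_groups() above and build_sched_groups() just below both build a circular singly linked list of groups covering a CPU span, using a "covered" mask to skip CPUs already placed and closing the ring with last->next = first. A user-space sketch of that shape, with a plain bitmask instead of cpumask_t and an invented pair-the-siblings grouping rule purely for illustration:

#include <stdio.h>

#define NR_CPUS 8

struct group {
	struct group *next;      /* circular: last->next == first */
	unsigned int mask;       /* CPUs in this group */
};

static struct group *build_ring(struct group pool[], unsigned int span)
{
	struct group *first = NULL, *last = NULL;
	unsigned int covered = 0;
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int bit = 1u << cpu;

		if (!(span & bit) || (covered & bit))
			continue;        /* outside the span or already grouped */

		pool[n].mask = (bit | (bit << 1)) & span;
		covered |= pool[n].mask;

		if (!first)
			first = &pool[n];
		if (last)
			last->next = &pool[n];
		last = &pool[n];
		n++;
	}
	if (last)
		last->next = first;      /* close the circle */
	return first;
}

int main(void)
{
	struct group pool[NR_CPUS];
	struct group *g, *first = build_ring(pool, 0x3f);   /* CPUs 0-5 */

	g = first;
	do {                             /* canonical ring walk */
		printf("group mask 0x%x\n", g->mask);
		g = g->next;
	} while (g != first);
	return 0;
}
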
- * - * Assumes the sched_domain tree is fully constructed */ -static int -build_sched_groups(struct sched_domain *sd, int cpu) +static void +build_sched_groups(struct sched_domain *sd) { struct sched_group *first = NULL, *last = NULL; struct sd_data *sdd = sd->private; @@ -7125,12 +7006,6 @@ build_sched_groups(struct sched_domain *sd, int cpu) struct cpumask *covered; int i; - get_group(cpu, sdd, &sd->groups); - atomic_inc(&sd->groups->ref); - - if (cpu != cpumask_first(sched_domain_span(sd))) - return 0; - lockdep_assert_held(&sched_domains_mutex); covered = sched_domains_tmpmask; @@ -7145,7 +7020,7 @@ build_sched_groups(struct sched_domain *sd, int cpu) continue; cpumask_clear(sched_group_cpus(sg)); - sg->sgp->power = 0; + sg->cpu_power = 0; for_each_cpu(j, span) { if (get_group(j, sdd, NULL) != group) @@ -7162,8 +7037,6 @@ build_sched_groups(struct sched_domain *sd, int cpu) last = sg; } last->next = first; - - return 0; } /* @@ -7178,18 +7051,13 @@ build_sched_groups(struct sched_domain *sd, int cpu) */ static void init_sched_groups_power(int cpu, struct sched_domain *sd) { - struct sched_group *sg = sd->groups; + WARN_ON(!sd || !sd->groups); - WARN_ON(!sd || !sg); - - do { - sg->group_weight = cpumask_weight(sched_group_cpus(sg)); - sg = sg->next; - } while (sg != sd->groups); - - if (cpu != group_first_cpu(sg)) + if (cpu != group_first_cpu(sd->groups)) return; + sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); + update_group_power(sd, cpu); } @@ -7309,15 +7177,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, static void claim_allocations(int cpu, struct sched_domain *sd) { struct sd_data *sdd = sd->private; + struct sched_group *sg = sd->groups; WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); *per_cpu_ptr(sdd->sd, cpu) = NULL; - if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) + if (cpu == cpumask_first(sched_group_cpus(sg))) { + WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg); *per_cpu_ptr(sdd->sg, cpu) = NULL; - - if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref)) - *per_cpu_ptr(sdd->sgp, cpu) = NULL; + } } #ifdef CONFIG_SCHED_SMT @@ -7342,7 +7210,7 @@ static struct sched_domain_topology_level default_topology[] = { #endif { sd_init_CPU, cpu_cpu_mask, }, #ifdef CONFIG_NUMA - { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, }, + { sd_init_NODE, cpu_node_mask, }, { sd_init_ALLNODES, cpu_allnodes_mask, }, #endif { NULL, }, @@ -7366,14 +7234,9 @@ static int __sdt_alloc(const struct cpumask *cpu_map) if (!sdd->sg) return -ENOMEM; - sdd->sgp = alloc_percpu(struct sched_group_power *); - if (!sdd->sgp) - return -ENOMEM; - for_each_cpu(j, cpu_map) { struct sched_domain *sd; struct sched_group *sg; - struct sched_group_power *sgp; sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), GFP_KERNEL, cpu_to_node(j)); @@ -7388,13 +7251,6 @@ static int __sdt_alloc(const struct cpumask *cpu_map) return -ENOMEM; *per_cpu_ptr(sdd->sg, j) = sg; - - sgp = kzalloc_node(sizeof(struct sched_group_power), - GFP_KERNEL, cpu_to_node(j)); - if (!sgp) - return -ENOMEM; - - *per_cpu_ptr(sdd->sgp, j) = sgp; } } @@ -7410,15 +7266,11 @@ static void __sdt_free(const struct cpumask *cpu_map) struct sd_data *sdd = &tl->data; for_each_cpu(j, cpu_map) { - struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); - if (sd && (sd->flags & SD_OVERLAP)) - free_sched_groups(sd->groups, 0); + kfree(*per_cpu_ptr(sdd->sd, j)); kfree(*per_cpu_ptr(sdd->sg, j)); - kfree(*per_cpu_ptr(sdd->sgp, j)); } free_percpu(sdd->sd); free_percpu(sdd->sg); - free_percpu(sdd->sgp); } } @@ 
-7464,13 +7316,8 @@ static int build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_topology_level *tl; sd = NULL; - for (tl = sched_domain_topology; tl->init; tl++) { + for (tl = sched_domain_topology; tl->init; tl++) sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i); - if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) - sd->flags |= SD_OVERLAP; - if (cpumask_equal(cpu_map, sched_domain_span(sd))) - break; - } while (sd->child) sd = sd->child; @@ -7482,13 +7329,13 @@ static int build_sched_domains(const struct cpumask *cpu_map, for_each_cpu(i, cpu_map) { for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { sd->span_weight = cpumask_weight(sched_domain_span(sd)); - if (sd->flags & SD_OVERLAP) { - if (build_overlap_sched_groups(sd, i)) - goto error; - } else { - if (build_sched_groups(sd, i)) - goto error; - } + get_group(i, sd->private, &sd->groups); + atomic_inc(&sd->groups->ref); + + if (i != cpumask_first(sched_domain_span(sd))) + continue; + + build_sched_groups(sd); } } diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index c768588e180b..433491c2dc8f 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, } /* Adjust by relative CPU power of the group */ - avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power; + avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power; if (local_group) { this_load = avg_load; @@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu) power >>= SCHED_POWER_SHIFT; } - sdg->sgp->power_orig = power; + sdg->cpu_power_orig = power; if (sched_feat(ARCH_POWER)) power *= arch_scale_freq_power(sd, cpu); @@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu) power = 1; cpu_rq(cpu)->cpu_power = power; - sdg->sgp->power = power; + sdg->cpu_power = power; } static void update_group_power(struct sched_domain *sd, int cpu) @@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu) group = child->groups; do { - power += group->sgp->power; + power += group->cpu_power; group = group->next; } while (group != child->groups); - sdg->sgp->power = power; + sdg->cpu_power = power; } /* @@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) /* * If ~90% of the cpu_power is still there, we're good. 
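
The load-balancer hunks above repeatedly compute avg_load = load * SCHED_POWER_SCALE / power: raw load is normalised by the group's relative CPU power in fixed point, so a more capable group appears proportionally less loaded for the same raw figure. A tiny sketch of that arithmetic:

#include <stdio.h>

#define SCHED_POWER_SCALE 1024u          /* 1.0 in fixed point */

/* Normalise a raw group load by relative CPU power, as in
 * find_idlest_group()/update_sg_lb_stats(). */
static unsigned long scaled_load(unsigned long group_load,
				 unsigned int group_power)
{
	return group_load * SCHED_POWER_SCALE / group_power;
}

int main(void)
{
	printf("%lu\n", scaled_load(4096, 1024));   /* 4096: one CPU's worth */
	printf("%lu\n", scaled_load(4096, 2048));   /* 2048: spread over two CPUs */
	printf("%lu\n", scaled_load(4096,  512));   /* 8192: a reduced-capacity CPU */
	return 0;
}
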
*/ - if (group->sgp->power * 32 > group->sgp->power_orig * 29) + if (group->cpu_power * 32 > group->cpu_power_orig * 29) return 1; return 0; @@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, } /* Adjust by relative CPU power of the group */ - sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power; + sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power; /* * Consider the group unbalanced when the imbalance is larger @@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1) sgs->group_imb = 1; - sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power, + sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_POWER_SCALE); if (!sgs->group_capacity) sgs->group_capacity = fix_small_capacity(sd, group); @@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, return; sds->total_load += sgs.group_load; - sds->total_pwr += sg->sgp->power; + sds->total_pwr += sg->cpu_power; /* * In case the child domain prefers tasks go to siblings @@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd, if (this_cpu > busiest_cpu) return 0; - *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power, + *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power, SCHED_POWER_SCALE); return 1; } @@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds, scaled_busy_load_per_task = sds->busiest_load_per_task * SCHED_POWER_SCALE; - scaled_busy_load_per_task /= sds->busiest->sgp->power; + scaled_busy_load_per_task /= sds->busiest->cpu_power; if (sds->max_load - sds->this_load + scaled_busy_load_per_task >= (scaled_busy_load_per_task * imbn)) { @@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds, * moving them. 
*/ - pwr_now += sds->busiest->sgp->power * + pwr_now += sds->busiest->cpu_power * min(sds->busiest_load_per_task, sds->max_load); - pwr_now += sds->this->sgp->power * + pwr_now += sds->this->cpu_power * min(sds->this_load_per_task, sds->this_load); pwr_now /= SCHED_POWER_SCALE; /* Amount of load we'd subtract */ tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / - sds->busiest->sgp->power; + sds->busiest->cpu_power; if (sds->max_load > tmp) - pwr_move += sds->busiest->sgp->power * + pwr_move += sds->busiest->cpu_power * min(sds->busiest_load_per_task, sds->max_load - tmp); /* Amount of load we'd add */ - if (sds->max_load * sds->busiest->sgp->power < + if (sds->max_load * sds->busiest->cpu_power < sds->busiest_load_per_task * SCHED_POWER_SCALE) - tmp = (sds->max_load * sds->busiest->sgp->power) / - sds->this->sgp->power; + tmp = (sds->max_load * sds->busiest->cpu_power) / + sds->this->cpu_power; else tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / - sds->this->sgp->power; - pwr_move += sds->this->sgp->power * + sds->this->cpu_power; + pwr_move += sds->this->cpu_power * min(sds->this_load_per_task, sds->this_load + tmp); pwr_move /= SCHED_POWER_SCALE; @@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE); - load_above_capacity /= sds->busiest->sgp->power; + load_above_capacity /= sds->busiest->cpu_power; } /* @@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, max_pull = min(sds->max_load - sds->avg_load, load_above_capacity); /* How much load to actually move to equalise the imbalance */ - *imbalance = min(max_pull * sds->busiest->sgp->power, - (sds->avg_load - sds->this_load) * sds->this->sgp->power) + *imbalance = min(max_pull * sds->busiest->cpu_power, + (sds->avg_load - sds->this_load) * sds->this->cpu_power) / SCHED_POWER_SCALE; /* diff --git a/trunk/kernel/sched_features.h b/trunk/kernel/sched_features.h index 1e7066d76c26..be40f7371ee1 100644 --- a/trunk/kernel/sched_features.h +++ b/trunk/kernel/sched_features.h @@ -70,5 +70,3 @@ SCHED_FEAT(NONIRQ_POWER, 1) * using the scheduler IPI. Reduces rq->lock contention/bounces. 
*/ SCHED_FEAT(TTWU_QUEUE, 1) - -SCHED_FEAT(FORCE_SD_OVERLAP, 0) diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index 415d85d6f6c6..ff7678603328 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -1178,25 +1178,18 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, { struct sighand_struct *sighand; + rcu_read_lock(); for (;;) { - local_irq_save(*flags); - rcu_read_lock(); sighand = rcu_dereference(tsk->sighand); - if (unlikely(sighand == NULL)) { - rcu_read_unlock(); - local_irq_restore(*flags); + if (unlikely(sighand == NULL)) break; - } - spin_lock(&sighand->siglock); - if (likely(sighand == tsk->sighand)) { - rcu_read_unlock(); + spin_lock_irqsave(&sighand->siglock, *flags); + if (likely(sighand == tsk->sighand)) break; - } - spin_unlock(&sighand->siglock); - rcu_read_unlock(); - local_irq_restore(*flags); + spin_unlock_irqrestore(&sighand->siglock, *flags); } + rcu_read_unlock(); return sighand; } diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index fca82c32042b..40cf63ddd4b3 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -315,24 +315,16 @@ static inline void invoke_softirq(void) { if (!force_irqthreads) __do_softirq(); - else { - __local_bh_disable((unsigned long)__builtin_return_address(0), - SOFTIRQ_OFFSET); + else wakeup_softirqd(); - __local_bh_enable(SOFTIRQ_OFFSET); - } } #else static inline void invoke_softirq(void) { if (!force_irqthreads) do_softirq(); - else { - __local_bh_disable((unsigned long)__builtin_return_address(0), - SOFTIRQ_OFFSET); + else wakeup_softirqd(); - __local_bh_enable(SOFTIRQ_OFFSET); - } } #endif diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index d036e59d302b..5ed24b94c5e6 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -2310,8 +2310,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, for (i = 0; i <= classzone_idx; i++) present_pages += pgdat->node_zones[i].present_pages; - /* A special case here: if zone has no page, we think it's balanced */ - return balanced_pages >= (present_pages >> 2); + return balanced_pages > (present_pages >> 2); } /* is kswapd sleeping prematurely? 
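
Both versions of __lock_task_sighand() in the kernel/signal.c hunk above rely on the same lock-and-revalidate loop: load the pointer, take its lock, then re-check that the pointer is still current before trusting it, otherwise drop the lock and retry. A user-space sketch of that loop, with a pthread mutex standing in for siglock and a plain load standing in for rcu_dereference():

#include <pthread.h>
#include <stdio.h>

struct sighand {
	pthread_mutex_t lock;
};

struct task {
	struct sighand *sighand;     /* may be switched/cleared concurrently */
};

/* Lock tsk->sighand, guarding against it changing between the load and
 * the lock: take the lock, then re-check the pointer still matches. */
static struct sighand *lock_task_sighand(struct task *tsk)
{
	struct sighand *sh;

	for (;;) {
		sh = tsk->sighand;               /* kernel: rcu_dereference() */
		if (sh == NULL)
			return NULL;             /* task is exiting */

		pthread_mutex_lock(&sh->lock);
		if (sh == tsk->sighand)
			return sh;               /* still current: hold the lock */

		pthread_mutex_unlock(&sh->lock);
		/* raced with a switch; retry against the new pointer */
	}
}

int main(void)
{
	struct sighand sh = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { &sh };
	struct sighand *locked = lock_task_sighand(&t);

	if (locked) {
		printf("sighand locked\n");
		pthread_mutex_unlock(&locked->lock);
	}
	return 0;
}
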
*/ diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index 6e82148edfc8..86bff9b1ac47 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -528,11 +528,7 @@ static int vlan_dev_init(struct net_device *dev) (1<<__LINK_STATE_DORMANT))) | (1<<__LINK_STATE_PRESENT); - dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG | - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | - NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM | - NETIF_F_ALL_FCOE; - + dev->hw_features = NETIF_F_ALL_TX_OFFLOADS; dev->features |= real_dev->vlan_features | NETIF_F_LLTX; dev->gso_max_size = real_dev->gso_max_size; diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 7705e26e699f..ebff14c69078 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -620,8 +620,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) struct sock *parent = bt_sk(sk)->parent; rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); - if (parent) - parent->sk_data_ready(parent, 0); + parent->sk_data_ready(parent, 0); } else { sk->sk_state = BT_CONFIG; @@ -2324,7 +2323,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr sk = chan->sk; - if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) { + if ((bt_sk(sk)->defer_setup && sk->sk_state != BT_CONNECT2) || + (!bt_sk(sk)->defer_setup && sk->sk_state != BT_CONFIG)) { struct l2cap_cmd_rej rej; rej.reason = cpu_to_le16(0x0002); @@ -4010,8 +4010,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct sock *parent = bt_sk(sk)->parent; res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; - if (parent) - parent->sk_data_ready(parent, 0); + parent->sk_data_ready(parent, 0); } else { sk->sk_state = BT_CONFIG; res = L2CAP_CR_SUCCESS; diff --git a/trunk/net/ceph/ceph_fs.c b/trunk/net/ceph/ceph_fs.c index 41466ccb972a..a3a3a31d3c37 100644 --- a/trunk/net/ceph/ceph_fs.c +++ b/trunk/net/ceph/ceph_fs.c @@ -36,19 +36,16 @@ int ceph_flags_to_mode(int flags) if ((flags & O_DIRECTORY) == O_DIRECTORY) return CEPH_FILE_MODE_PIN; #endif + if ((flags & O_APPEND) == O_APPEND) + flags |= O_WRONLY; - switch (flags & O_ACCMODE) { - case O_WRONLY: + if ((flags & O_ACCMODE) == O_RDWR) + mode = CEPH_FILE_MODE_RDWR; + else if ((flags & O_ACCMODE) == O_WRONLY) mode = CEPH_FILE_MODE_WR; - break; - case O_RDONLY: + else mode = CEPH_FILE_MODE_RD; - break; - case O_RDWR: - case O_ACCMODE: /* this is what the VFS does */ - mode = CEPH_FILE_MODE_RDWR; - break; - } + #ifdef O_LAZY if (flags & O_LAZY) mode |= CEPH_FILE_MODE_LAZY; diff --git a/trunk/net/sctp/output.c b/trunk/net/sctp/output.c index 08b3cead6503..b4f3cf06d8da 100644 --- a/trunk/net/sctp/output.c +++ b/trunk/net/sctp/output.c @@ -500,20 +500,23 @@ int sctp_packet_transmit(struct sctp_packet *packet) * Note: Adler-32 is no longer applicable, as has been replaced * by CRC32-C as described in . */ - if (!sctp_checksum_disable) { - if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { - __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); + if (!sctp_checksum_disable && + !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) { + __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); - /* 3) Put the resultant value into the checksum field in the - * common header, and leave the rest of the bits unchanged. 
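
The ceph_flags_to_mode() hunk above turns the open-flag test into a switch on the O_ACCMODE bits, treating the reserved value 3 the way the VFS does (as read/write). A short user-space sketch of that mapping, with a hypothetical enum in place of the CEPH_FILE_MODE_* constants:

#include <fcntl.h>
#include <stdio.h>

enum file_mode { MODE_RD, MODE_WR, MODE_RDWR };

/* Map open(2) flags to an access mode by switching on O_ACCMODE. */
static enum file_mode flags_to_mode(int flags)
{
	switch (flags & O_ACCMODE) {
	case O_WRONLY:
		return MODE_WR;
	case O_RDWR:
	case O_ACCMODE:          /* 3 is what the VFS treats as read/write */
		return MODE_RDWR;
	case O_RDONLY:
	default:
		return MODE_RD;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       flags_to_mode(O_RDONLY),
	       flags_to_mode(O_WRONLY | O_APPEND),
	       flags_to_mode(O_RDWR));
	return 0;
}

Switching on the masked value avoids the pitfall of testing flags & O_RDONLY, which is always false because O_RDONLY is zero.
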
- */ - sh->checksum = sctp_end_cksum(crc32); - } else { + /* 3) Put the resultant value into the checksum field in the + * common header, and leave the rest of the bits unchanged. + */ + sh->checksum = sctp_end_cksum(crc32); + } else { + if (dst->dev->features & NETIF_F_SCTP_CSUM) { /* no need to seed pseudo checksum for SCTP */ nskb->ip_summed = CHECKSUM_PARTIAL; nskb->csum_start = (skb_transport_header(nskb) - nskb->head); nskb->csum_offset = offsetof(struct sctphdr, checksum); + } else { + nskb->ip_summed = CHECKSUM_UNNECESSARY; } } diff --git a/trunk/sound/soc/codecs/wm8994.c b/trunk/sound/soc/codecs/wm8994.c index 83014a7c2e14..c2fc0356c2a4 100644 --- a/trunk/sound/soc/codecs/wm8994.c +++ b/trunk/sound/soc/codecs/wm8994.c @@ -1190,6 +1190,7 @@ SND_SOC_DAPM_INPUT("DMIC1DAT"), SND_SOC_DAPM_INPUT("DMIC2DAT"), SND_SOC_DAPM_INPUT("Clock"), +SND_SOC_DAPM_MICBIAS("MICBIAS", WM8994_MICBIAS, 2, 0), SND_SOC_DAPM_SUPPLY_S("MICBIAS Supply", 1, SND_SOC_NOPM, 0, 0, micbias_ev, SND_SOC_DAPM_PRE_PMU), @@ -1508,10 +1509,8 @@ static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { { "AIF2DACDAT", NULL, "AIF1DACDAT" }, { "AIF1ADCDAT", NULL, "AIF2ADCDAT" }, { "AIF2ADCDAT", NULL, "AIF1ADCDAT" }, - { "MICBIAS1", NULL, "CLK_SYS" }, - { "MICBIAS1", NULL, "MICBIAS Supply" }, - { "MICBIAS2", NULL, "CLK_SYS" }, - { "MICBIAS2", NULL, "MICBIAS Supply" }, + { "MICBIAS", NULL, "CLK_SYS" }, + { "MICBIAS", NULL, "MICBIAS Supply" }, }; static const struct snd_soc_dapm_route wm8994_intercon[] = { @@ -2764,7 +2763,7 @@ static void wm8958_default_micdet(u16 status, void *data) report = SND_JACK_MICROPHONE; /* Everything else is buttons; just assign slots */ - if (status & 0x1c) + if (status & 0x1c0) report |= SND_JACK_BTN_0; done: diff --git a/trunk/sound/soc/sh/fsi-ak4642.c b/trunk/sound/soc/sh/fsi-ak4642.c index 770a71a15366..d6f4703b3c07 100644 --- a/trunk/sound/soc/sh/fsi-ak4642.c +++ b/trunk/sound/soc/sh/fsi-ak4642.c @@ -97,7 +97,7 @@ static int fsi_ak4642_remove(struct platform_device *pdev) static struct fsi_ak4642_data fsi_a_ak4642 = { .name = "AK4642", - .card = "FSIA-AK4642", + .card = "FSIA (AK4642)", .cpu_dai = "fsia-dai", .codec = "ak4642-codec.0-0012", .platform = "sh_fsi.0", @@ -106,7 +106,7 @@ static struct fsi_ak4642_data fsi_a_ak4642 = { static struct fsi_ak4642_data fsi_b_ak4642 = { .name = "AK4642", - .card = "FSIB-AK4642", + .card = "FSIB (AK4642)", .cpu_dai = "fsib-dai", .codec = "ak4642-codec.0-0012", .platform = "sh_fsi.0", @@ -115,7 +115,7 @@ static struct fsi_ak4642_data fsi_b_ak4642 = { static struct fsi_ak4642_data fsi_a_ak4643 = { .name = "AK4643", - .card = "FSIA-AK4643", + .card = "FSIA (AK4643)", .cpu_dai = "fsia-dai", .codec = "ak4642-codec.0-0013", .platform = "sh_fsi.0", @@ -124,7 +124,7 @@ static struct fsi_ak4642_data fsi_a_ak4643 = { static struct fsi_ak4642_data fsi_b_ak4643 = { .name = "AK4643", - .card = "FSIB-AK4643", + .card = "FSIB (AK4643)", .cpu_dai = "fsib-dai", .codec = "ak4642-codec.0-0013", .platform = "sh_fsi.0", @@ -133,7 +133,7 @@ static struct fsi_ak4642_data fsi_b_ak4643 = { static struct fsi_ak4642_data fsi2_a_ak4642 = { .name = "AK4642", - .card = "FSI2A-AK4642", + .card = "FSI2A (AK4642)", .cpu_dai = "fsia-dai", .codec = "ak4642-codec.0-0012", .platform = "sh_fsi2", @@ -142,7 +142,7 @@ static struct fsi_ak4642_data fsi2_a_ak4642 = { static struct fsi_ak4642_data fsi2_b_ak4642 = { .name = "AK4642", - .card = "FSI2B-AK4642", + .card = "FSI2B (AK4642)", .cpu_dai = "fsib-dai", .codec = "ak4642-codec.0-0012", .platform = "sh_fsi2", @@ -151,7 +151,7 @@ static 
struct fsi_ak4642_data fsi2_b_ak4642 = { static struct fsi_ak4642_data fsi2_a_ak4643 = { .name = "AK4643", - .card = "FSI2A-AK4643", + .card = "FSI2A (AK4643)", .cpu_dai = "fsia-dai", .codec = "ak4642-codec.0-0013", .platform = "sh_fsi2", @@ -160,7 +160,7 @@ static struct fsi_ak4642_data fsi2_a_ak4643 = { static struct fsi_ak4642_data fsi2_b_ak4643 = { .name = "AK4643", - .card = "FSI2B-AK4643", + .card = "FSI2B (AK4643)", .cpu_dai = "fsib-dai", .codec = "ak4642-codec.0-0013", .platform = "sh_fsi2", diff --git a/trunk/sound/soc/sh/fsi-da7210.c b/trunk/sound/soc/sh/fsi-da7210.c index 59553fd8c2fb..dbafd7ac5590 100644 --- a/trunk/sound/soc/sh/fsi-da7210.c +++ b/trunk/sound/soc/sh/fsi-da7210.c @@ -42,7 +42,7 @@ static struct snd_soc_dai_link fsi_da7210_dai = { }; static struct snd_soc_card fsi_soc_card = { - .name = "FSI-DA7210", + .name = "FSI (DA7210)", .dai_link = &fsi_da7210_dai, .num_links = 1, }; diff --git a/trunk/sound/soc/sh/fsi-hdmi.c b/trunk/sound/soc/sh/fsi-hdmi.c index d3d9fd880680..9719985eb82d 100644 --- a/trunk/sound/soc/sh/fsi-hdmi.c +++ b/trunk/sound/soc/sh/fsi-hdmi.c @@ -83,13 +83,13 @@ static int fsi_hdmi_remove(struct platform_device *pdev) static struct fsi_hdmi_data fsi2_a_hdmi = { .cpu_dai = "fsia-dai", - .card = "FSI2A-HDMI", + .card = "FSI2A (SH MOBILE HDMI)", .id = FSI_PORT_A, }; static struct fsi_hdmi_data fsi2_b_hdmi = { .cpu_dai = "fsib-dai", - .card = "FSI2B-HDMI", + .card = "FSI2B (SH MOBILE HDMI)", .id = FSI_PORT_B, };
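
The wm8958_default_micdet() hunk earlier in this series changes only which bits of the jack-detect status word are treated as button bits (0x1c versus 0x1c0). A tiny sketch of that kind of status-register decode; the bit layout here is invented purely for illustration:

#include <stdio.h>

#define MICD_MIC_PRESENT  0x002   /* hypothetical "mic present" bit */
#define MICD_BUTTON_MASK  0x1c0   /* button bits, as tested in the hunk */

/* Decode a microphone-detect status word: one bit reports the mic,
 * a contiguous group of bits reports a held button. */
static void report_micdet(unsigned int status)
{
	if (status & MICD_MIC_PRESENT)
		printf("microphone present\n");
	if (status & MICD_BUTTON_MASK)
		printf("button pressed (bits 0x%x)\n",
		       status & MICD_BUTTON_MASK);
}

int main(void)
{
	report_micdet(0x002);          /* mic only */
	report_micdet(0x042);          /* mic plus one bit inside 0x1c0 */
	return 0;
}
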