diff --git a/[refs] b/[refs] index 4419df8d0af5..f4812ae5e614 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: ef46222e7b56e728e423527d430cb2013c595491 +refs/heads/master: 185bf87393afe6b966881e36c459949d90930a7a diff --git a/trunk/Documentation/filesystems/proc.txt b/trunk/Documentation/filesystems/proc.txt index db3b1aba32a3..f48178024067 100644 --- a/trunk/Documentation/filesystems/proc.txt +++ b/trunk/Documentation/filesystems/proc.txt @@ -843,7 +843,6 @@ Provides counts of softirq handlers serviced since boot time, for each cpu. TASKLET: 0 0 0 290 SCHED: 27035 26983 26971 26746 HRTIMER: 0 0 0 0 - RCU: 1678 1769 2178 2250 1.3 IDE devices in /proc/ide diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index b12b8c13f53a..6c59eb90fdf4 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -6463,7 +6463,7 @@ M: Jiri Kosina L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git S: Maintained -F: Documentation/hid/hiddev.txt +F: Documentation/usb/hiddev.txt F: drivers/hid/usbhid/ USB ISP116X DRIVER @@ -7007,13 +7007,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86. S: Maintained F: drivers/platform/x86 -X86 MCE INFRASTRUCTURE -M: Tony Luck -M: Borislav Petkov -L: linux-edac@vger.kernel.org -S: Maintained -F: arch/x86/kernel/cpu/mcheck/* - XEN HYPERVISOR INTERFACE M: Jeremy Fitzhardinge M: Konrad Rzeszutek Wilk diff --git a/trunk/arch/arm/mach-omap1/Makefile b/trunk/arch/arm/mach-omap1/Makefile index 5b114d1558c8..af98117043d2 100644 --- a/trunk/arch/arm/mach-omap1/Makefile +++ b/trunk/arch/arm/mach-omap1/Makefile @@ -4,14 +4,14 @@ # Common support obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o -obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o +obj-y += clock.o clock_data.o opp_data.o reset.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o # Power Management -obj-$(CONFIG_PM) += pm.o sleep.o +obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o # DSP obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o diff --git a/trunk/arch/arm/mach-omap1/pm_bus.c b/trunk/arch/arm/mach-omap1/pm_bus.c index 334fb8871bc3..fe31d933f0ed 100644 --- a/trunk/arch/arm/mach-omap1/pm_bus.c +++ b/trunk/arch/arm/mach-omap1/pm_bus.c @@ -56,13 +56,9 @@ static struct dev_power_domain default_power_domain = { USE_PLATFORM_PM_SLEEP_OPS }, }; -#define OMAP1_PWR_DOMAIN (&default_power_domain) -#else -#define OMAP1_PWR_DOMAIN NULL -#endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = OMAP1_PWR_DOMAIN, + .pwr_domain = &default_power_domain, .con_ids = { "ick", "fck", NULL, }, }; @@ -76,4 +72,4 @@ static int __init omap1_pm_runtime_init(void) return 0; } core_initcall(omap1_pm_runtime_init); - +#endif /* CONFIG_PM_RUNTIME */ diff --git a/trunk/arch/arm/mach-omap2/board-omap3pandora.c b/trunk/arch/arm/mach-omap2/board-omap3pandora.c index 23f71d40883e..2a0bb4818cae 100644 --- a/trunk/arch/arm/mach-omap2/board-omap3pandora.c +++ b/trunk/arch/arm/mach-omap2/board-omap3pandora.c @@ -84,8 +84,7 @@ static struct mtd_partition omap3pandora_nand_partitions[] = { static struct omap_nand_platform_data pandora_nand_data = { .cs = 0, - .devsize = NAND_BUSWIDTH_16, - .xfer_type = NAND_OMAP_PREFETCH_DMA, + .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */ .parts = omap3pandora_nand_partitions, .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), }; diff --git a/trunk/arch/arm/mach-omap2/pm-debug.c 
b/trunk/arch/arm/mach-omap2/pm-debug.c index e01da45c0537..a5a83b358ddd 100644 --- a/trunk/arch/arm/mach-omap2/pm-debug.c +++ b/trunk/arch/arm/mach-omap2/pm-debug.c @@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir; static int pm_dbg_init_done; -static int pm_dbg_init(void); +static int __init pm_dbg_init(void); enum { DEBUG_FILE_COUNTERS = 0, @@ -595,7 +595,7 @@ static int option_set(void *data, u64 val) DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); -static int pm_dbg_init(void) +static int __init pm_dbg_init(void) { int i; struct dentry *d; diff --git a/trunk/arch/arm/mach-pxa/spitz_pm.c b/trunk/arch/arm/mach-pxa/spitz_pm.c index 094279aefe9c..7fe74067d85f 100644 --- a/trunk/arch/arm/mach-pxa/spitz_pm.c +++ b/trunk/arch/arm/mach-pxa/spitz_pm.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arm/plat-omap/omap_device.c b/trunk/arch/arm/plat-omap/omap_device.c index 49fc0df0c21f..a37b8eb65b76 100644 --- a/trunk/arch/arm/plat-omap/omap_device.c +++ b/trunk/arch/arm/plat-omap/omap_device.c @@ -84,7 +84,6 @@ #include #include #include -#include #include #include @@ -540,34 +539,20 @@ int omap_early_device_register(struct omap_device *od) static int _od_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - int ret; - - ret = pm_generic_runtime_suspend(dev); - - if (!ret) - omap_device_idle(pdev); - - return ret; -} -static int _od_runtime_idle(struct device *dev) -{ - return pm_generic_runtime_idle(dev); + return omap_device_idle(pdev); } static int _od_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - omap_device_enable(pdev); - - return pm_generic_runtime_resume(dev); + return omap_device_enable(pdev); } static struct dev_power_domain omap_device_power_domain = { .ops = { .runtime_suspend = _od_runtime_suspend, - .runtime_idle = _od_runtime_idle, .runtime_resume = _od_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS } diff --git a/trunk/arch/x86/include/asm/memblock.h b/trunk/arch/x86/include/asm/memblock.h index 0cd3800f33b9..19ae14ba6978 100644 --- a/trunk/arch/x86/include/asm/memblock.h +++ b/trunk/arch/x86/include/asm/memblock.h @@ -4,6 +4,7 @@ #define ARCH_DISCARD_MEMBLOCK u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); +void memblock_x86_to_bootmem(u64 start, u64 end); void memblock_x86_reserve_range(u64 start, u64 end, char *name); void memblock_x86_free_range(u64 start, u64 end); @@ -18,6 +19,5 @@ u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); u64 memblock_x86_memory_in_range(u64 addr, u64 limit); -bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align); #endif diff --git a/trunk/arch/x86/include/asm/pvclock.h b/trunk/arch/x86/include/asm/pvclock.h index a518c0a45044..31d84acc1512 100644 --- a/trunk/arch/x86/include/asm/pvclock.h +++ b/trunk/arch/x86/include/asm/pvclock.h @@ -22,8 +22,6 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) u64 product; #ifdef __i386__ u32 tmp1, tmp2; -#else - ulong tmp; #endif if (shift < 0) @@ -44,11 +42,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); #elif defined(__x86_64__) __asm__ ( - "mul %[mul_frac] ; shrd $32, %[hi], %[lo]" - : [lo]"=a"(product), - [hi]"=d"(tmp) - : 
"0"(delta), - [mul_frac]"rm"((u64)mul_frac)); + "mul %%rdx ; shrd $32,%%rdx,%%rax" + : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); #else #error implement me! #endif diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c index aee38623b768..bd14bb4c8594 100644 --- a/trunk/arch/x86/kvm/mmu.c +++ b/trunk/arch/x86/kvm/mmu.c @@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) { - return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); + return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); } static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) diff --git a/trunk/arch/x86/kvm/paging_tmpl.h b/trunk/arch/x86/kvm/paging_tmpl.h index 9d03ad4dd5ec..6c4dc010c4cb 100644 --- a/trunk/arch/x86/kvm/paging_tmpl.h +++ b/trunk/arch/x86/kvm/paging_tmpl.h @@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, gva_t addr, u32 access) { pt_element_t pte; - pt_element_t __user *uninitialized_var(ptep_user); + pt_element_t __user *ptep_user; gfn_t table_gfn; unsigned index, pt_access, uninitialized_var(pte_access); gpa_t pte_gpa; diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index d48ec60ea421..4c3fa0f67469 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -2047,8 +2047,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { - if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) - vmx_decache_cr3(vcpu); + vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, diff --git a/trunk/arch/x86/mm/memblock.c b/trunk/arch/x86/mm/memblock.c index 992da5ec5a64..aa1169392b83 100644 --- a/trunk/arch/x86/mm/memblock.c +++ b/trunk/arch/x86/mm/memblock.c @@ -8,7 +8,7 @@ #include /* Check for already reserved areas */ -bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align) +static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align) { struct memblock_region *r; u64 addr = *addrp, last; @@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) if (addr >= ei_last) continue; *sizep = ei_last - addr; - while (memblock_x86_check_reserved_size(&addr, sizep, align)) + while (check_with_memblock_reserved_size(&addr, sizep, align)) ; if (*sizep) diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index 474356b98ede..0d3a4fa34560 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -310,31 +310,14 @@ void __init efi_reserve_boot_services(void) for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; - u64 start = md->phys_addr; - u64 size = md->num_pages << EFI_PAGE_SHIFT; + unsigned long long start = md->phys_addr; + unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; if (md->type != EFI_BOOT_SERVICES_CODE && md->type != EFI_BOOT_SERVICES_DATA) continue; - /* Only reserve where possible: - * - Not within any already allocated areas - * - Not over any memory area (really needed, if above?) 
- * - Not within any part of the kernel - * - Not the bios reserved area - */ - if ((start+size >= virt_to_phys(_text) - && start <= virt_to_phys(_end)) || - !e820_all_mapped(start, start+size, E820_RAM) || - memblock_x86_check_reserved_size(&start, &size, - 1<num_pages = 0; - memblock_dbg(PFX "Could not reserve boot range " - "[0x%010llx-0x%010llx]\n", - start, start+size-1); - } else - memblock_x86_reserve_range(start, start+size, - "EFI Boot"); + + memblock_x86_reserve_range(start, start + size, "EFI Boot"); } } @@ -351,10 +334,6 @@ static void __init efi_free_boot_services(void) md->type != EFI_BOOT_SERVICES_DATA) continue; - /* Could not reserve boot area */ - if (!size) - continue; - free_bootmem_late(start, size); } } diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index 5525163a0398..dd7b88f2ec7a 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -1033,13 +1033,6 @@ static void xen_machine_halt(void) xen_reboot(SHUTDOWN_poweroff); } -static void xen_machine_power_off(void) -{ - if (pm_power_off) - pm_power_off(); - xen_reboot(SHUTDOWN_poweroff); -} - static void xen_crash_shutdown(struct pt_regs *regs) { xen_reboot(SHUTDOWN_crash); @@ -1065,7 +1058,7 @@ int xen_panic_handler_init(void) static const struct machine_ops xen_machine_ops __initconst = { .restart = xen_restart, .halt = xen_machine_halt, - .power_off = xen_machine_power_off, + .power_off = xen_machine_halt, .shutdown = xen_machine_halt, .crash_shutdown = xen_crash_shutdown, .emergency_restart = xen_emergency_restart, diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index 673e968df3cf..dc708dcc62f1 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include @@ -1232,7 +1231,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, { struct { struct mmuext_op op; - DECLARE_BITMAP(mask, num_processors); + DECLARE_BITMAP(mask, NR_CPUS); } *args; struct multicall_space mcs; @@ -1600,11 +1599,6 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; -#ifdef CONFIG_X86_32 - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; -#endif - if (!pte_none(pte_page[pteidx])) continue; @@ -1772,9 +1766,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, initial_kernel_pmd = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + - xen_start_info->nr_pt_frames * PAGE_SIZE + - 512*1024); + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index 60aeeb56948f..be1a464f6d66 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -227,7 +227,11 @@ char * __init xen_memory_setup(void) memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; +#ifdef CONFIG_X86_32 xen_extra_mem_start = mem_end; +#else + xen_extra_mem_start = max((1ULL << 32), mem_end); +#endif for (i = 0; i < memmap.nr_entries; i++) { unsigned long long end; @@ -262,12 +266,6 @@ char * __init xen_memory_setup(void) if (map[i].size > 0) e820_add_region(map[i].addr, map[i].size, map[i].type); } - /* Align the balloon area so that max_low_pfn does not get set - * to be at the _end_ of the PCI gap at the far end (fee01000). 
- * Note that xen_extra_mem_start gets set in the loop above to be - * past the last E820 region. */ - if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32))) - xen_extra_mem_start = (1ULL<<32); /* * In domU, the ISA region is normal, usable memory, but we diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index b4533a86d7e4..41038c01de40 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -205,18 +205,11 @@ static void __init xen_smp_prepare_boot_cpu(void) static void __init xen_smp_prepare_cpus(unsigned int max_cpus) { unsigned cpu; - unsigned int i; xen_init_lock_cpu(0); smp_store_cpu_info(0); cpu_data(0).x86_max_cores = 1; - - for_each_possible_cpu(i) { - zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); - zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); - zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); - } set_cpu_sibling_map(0); if (xen_smp_intr_init(0)) diff --git a/trunk/drivers/cpufreq/cpufreq_stats.c b/trunk/drivers/cpufreq/cpufreq_stats.c index faf7c5217848..853f92d23ddb 100644 --- a/trunk/drivers/cpufreq/cpufreq_stats.c +++ b/trunk/drivers/cpufreq/cpufreq_stats.c @@ -298,15 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb, old_index = stat->last_index; new_index = freq_table_get_index(stat, freq->new); - /* We can't do stat->time_in_state[-1]= .. */ - if (old_index == -1 || new_index == -1) - return 0; - cpufreq_stats_update(freq->cpu); - if (old_index == new_index) return 0; + if (old_index == -1 || new_index == -1) + return 0; + spin_lock(&cpufreq_stats_lock); stat->last_index = new_index; #ifdef CONFIG_CPU_FREQ_STAT_DETAILS diff --git a/trunk/drivers/cpufreq/powernow-k8.c b/trunk/drivers/cpufreq/powernow-k8.c index bce576d7478e..83479b6fb9a1 100644 --- a/trunk/drivers/cpufreq/powernow-k8.c +++ b/trunk/drivers/cpufreq/powernow-k8.c @@ -1079,9 +1079,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, } res = transition_fid_vid(data, fid, vid); - if (res) - return res; - freqs.new = find_khz_freq_from_fid(data->currfid); for_each_cpu(i, data->available_cores) { @@ -1104,8 +1101,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, /* get MSR index for hardware pstate transition */ pstate = index & HW_PSTATE_MASK; if (pstate > data->max_hw_pstate) - return -EINVAL; - + return 0; freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); diff --git a/trunk/drivers/gpio/gpio-omap.c b/trunk/drivers/gpio/gpio-omap.c index 35bebde23e83..01f74a8459d9 100644 --- a/trunk/drivers/gpio/gpio-omap.c +++ b/trunk/drivers/gpio/gpio-omap.c @@ -469,9 +469,8 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio, + OMAP24XX_GPIO_CLEARWKUENA); } } - /* This part needs to be executed always for OMAP{34xx, 44xx} */ - if (cpu_is_omap34xx() || cpu_is_omap44xx() || - (bank->non_wakeup_gpios & gpio_bit)) { + /* This part needs to be executed always for OMAP34xx */ + if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) { /* * Log the edge gpio and manually trigger the IRQ * after resume if the input level changes diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index 09292193dafe..0a9357c66ff8 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid) bad: if (raw_edid) { - printk(KERN_ERR "Raw EDID:\n"); + DRM_ERROR("Raw 
EDID:\n"); print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); - printk(KERN_ERR "\n"); + printk("\n"); } return 0; } @@ -258,17 +258,6 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, return ret == 2 ? 0 : -1; } -static bool drm_edid_is_zero(u8 *in_edid, int length) -{ - int i; - u32 *raw_edid = (u32 *)in_edid; - - for (i = 0; i < length / 4; i++) - if (*(raw_edid + i) != 0) - return false; - return true; -} - static u8 * drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { @@ -284,10 +273,6 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) goto out; if (drm_edid_block_valid(block)) break; - if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { - connector->null_edid_counter++; - goto carp; - } } if (i == 4) goto carp; diff --git a/trunk/drivers/gpu/drm/drm_ioc32.c b/trunk/drivers/gpu/drm/drm_ioc32.c index 4a058c7af6c0..d61d185cf040 100644 --- a/trunk/drivers/gpu/drm/drm_ioc32.c +++ b/trunk/drivers/gpu/drm/drm_ioc32.c @@ -28,7 +28,6 @@ * IN THE SOFTWARE. */ #include -#include #include "drmP.h" #include "drm_core.h" @@ -254,10 +253,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, return -EFAULT; m32.handle = (unsigned long)handle; - if (m32.handle != (unsigned long)handle) - printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle" - " %p for type %d offset %x\n", - handle, m32.type, m32.offset); + if (m32.handle != (unsigned long)handle && printk_ratelimit()) + printk(KERN_ERR "compat_drm_addmap truncated handle" + " %p for type %d offset %x\n", + handle, m32.type, m32.offset); if (copy_to_user(argp, &m32, sizeof(m32))) return -EFAULT; diff --git a/trunk/drivers/gpu/drm/drm_pci.c b/trunk/drivers/gpu/drm/drm_pci.c index b6a19cb07caf..e1aee4f6a7c6 100644 --- a/trunk/drivers/gpu/drm/drm_pci.c +++ b/trunk/drivers/gpu/drm/drm_pci.c @@ -251,7 +251,7 @@ int drm_pci_set_unique(struct drm_device *dev, } -static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) +int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) { if ((p->busnum >> 8) != drm_get_pci_domain(dev) || (p->busnum & 0xff) != dev->pdev->bus->number || @@ -292,7 +292,6 @@ static struct drm_bus drm_pci_bus = { .get_name = drm_pci_get_name, .set_busid = drm_pci_set_busid, .set_unique = drm_pci_set_unique, - .irq_by_busid = drm_pci_irq_by_busid, .agp_init = drm_pci_agp_init, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 9e34a1abeb61..b9fafe3b045b 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -1740,16 +1740,6 @@ void ironlake_irq_preinstall(struct drm_device *dev) INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); I915_WRITE(HWSTAM, 0xeffe); - if (IS_GEN6(dev)) { - /* Workaround stalls observed on Sandy Bridge GPUs by - * making the blitter command streamer generate a - * write to the Hardware Status Page for - * MI_USER_INTERRUPT. This appears to serialize the - * previous seqno write out before the interrupt - * happens. 
- */ - I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); - } /* XXX hotplug from PCH */ diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c index d98cee60b602..d3b903bce7c5 100644 --- a/trunk/drivers/gpu/drm/i915/intel_i2c.c +++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c @@ -401,7 +401,8 @@ int intel_setup_gmbus(struct drm_device *dev) bus->reg0 = i | GMBUS_RATE_100KHZ; /* XXX force bit banging until GMBUS is fully debugged */ - bus->force_bit = intel_gpio_create(dev_priv, i); + if (IS_GEN2(dev)) + bus->force_bit = intel_gpio_create(dev_priv, i); } intel_i2c_reset(dev_priv->dev); diff --git a/trunk/drivers/gpu/drm/radeon/atombios.h b/trunk/drivers/gpu/drm/radeon/atombios.h index 1b50ad8919d5..49611e2365d9 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios.h +++ b/trunk/drivers/gpu/drm/radeon/atombios.h @@ -1200,7 +1200,6 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 #define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 -#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14 // ucConfig #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 9541995e4b21..84a69e7fa11e 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -671,13 +671,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, DISPPLL_CONFIG_DUAL_LINK; } } - if (radeon_encoder_is_dp_bridge(encoder)) { - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); - args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id; - } else - args.v3.sInput.ucExtTransmitterID = 0; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 7e3d96e7ac04..86157b172c88 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -88,8 +88,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) /* get temperature in millidegrees */ int evergreen_get_temp(struct radeon_device *rdev) { - u32 temp, toffset; - int actual_temp = 0; + u32 temp, toffset, actual_temp = 0; if (rdev->family == CHIP_JUNIPER) { toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >> diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c index b2449629537d..9bd162fc9b0c 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c @@ -938,13 +938,6 @@ static struct radeon_asic cayman_asic = { int radeon_asic_init(struct radeon_device *rdev) { radeon_register_accessor_init(rdev); - - /* set the number of crtcs */ - if (rdev->flags & RADEON_SINGLE_CRTC) - rdev->num_crtc = 1; - else - rdev->num_crtc = 2; - switch (rdev->family) { case CHIP_R100: case CHIP_RV100: @@ -1024,11 +1017,6 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_JUNIPER: case CHIP_CYPRESS: case CHIP_HEMLOCK: - /* set num crtcs */ - if (rdev->family == CHIP_CEDAR) - rdev->num_crtc = 4; - else - rdev->num_crtc = 6; rdev->asic = &evergreen_asic; break; case CHIP_PALM: @@ -1039,17 +1027,10 @@ int 
radeon_asic_init(struct radeon_device *rdev) case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: - /* set num crtcs */ - if (rdev->family == CHIP_CAICOS) - rdev->num_crtc = 4; - else - rdev->num_crtc = 6; rdev->asic = &btc_asic; break; case CHIP_CAYMAN: rdev->asic = &cayman_asic; - /* set num crtcs */ - rdev->num_crtc = 6; break; default: /* FIXME: not supported yet */ @@ -1061,6 +1042,18 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->asic->set_memory_clock = NULL; } + /* set the number of crtcs */ + if (rdev->flags & RADEON_SINGLE_CRTC) + rdev->num_crtc = 1; + else { + if (ASIC_IS_DCE41(rdev)) + rdev->num_crtc = 2; + else if (ASIC_IS_DCE4(rdev)) + rdev->num_crtc = 6; + else + rdev->num_crtc = 2; + } + return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index e4594676a07c..797c8bcbb6a4 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -1553,12 +1553,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) (rdev->pdev->subsystem_device == 0x4a48)) { /* Mac X800 */ rdev->mode_info.connector_table = CT_MAC_X800; - } else if ((of_machine_is_compatible("PowerMac7,2") || - of_machine_is_compatible("PowerMac7,3")) && - (rdev->pdev->device == 0x4150) && - (rdev->pdev->subsystem_vendor == 0x1002) && - (rdev->pdev->subsystem_device == 0x4150)) { - /* Mac G5 tower 9600 */ + } else if (of_machine_is_compatible("PowerMac7,2") || + of_machine_is_compatible("PowerMac7,3")) { + /* Mac G5 9600 */ rdev->mode_info.connector_table = CT_MAC_G5_9600; } else #endif /* CONFIG_PPC_PMAC */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index cbfca3a24fdf..9c2929c7e79f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -44,8 +44,6 @@ extern void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector); -bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); - void radeon_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -838,13 +836,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", drm_get_connector_name(connector)); - /* rs690 seems to have a problem with connectors not existing and always - * return a block of 0's. 
If we see this just stop polling on this output */ - if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { - ret = connector_status_disconnected; - DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); - radeon_connector->ddc_bus = NULL; - } } else { radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); @@ -1072,11 +1063,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - struct drm_encoder *encoder = radeon_best_single_encoder(connector); int ret; - if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || - (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + struct drm_encoder *encoder; struct drm_display_mode *mode; if (!radeon_dig_connector->edp_on) @@ -1088,6 +1078,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector) ATOM_TRANSMITTER_ACTION_POWER_OFF); if (ret > 0) { + encoder = radeon_best_single_encoder(connector); if (encoder) { radeon_fixup_lvds_native_mode(encoder, connector); /* add scaled modes */ @@ -1111,14 +1102,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) /* add scaled modes */ radeon_add_common_modes(encoder, connector); } - } else { - /* need to setup ddc on the bridge */ - if (radeon_connector_encoder_is_dp_bridge(connector)) { - if (encoder) - radeon_atom_ext_encoder_setup_ddc(encoder); - } + } else ret = radeon_ddc_get_modes(radeon_connector); - } return ret; } @@ -1202,15 +1187,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) struct radeon_connector *radeon_connector = to_radeon_connector(connector); enum drm_connector_status ret = connector_status_disconnected; struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - struct drm_encoder *encoder = radeon_best_single_encoder(connector); if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } - if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || - (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + struct drm_encoder *encoder = radeon_best_single_encoder(connector); if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; @@ -1230,11 +1214,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); } else { - /* need to setup ddc on the bridge */ - if (radeon_connector_encoder_is_dp_bridge(connector)) { - if (encoder) - radeon_atom_ext_encoder_setup_ddc(encoder); - } radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { ret = connector_status_connected; @@ -1249,16 +1228,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; } } - - if ((ret == connector_status_disconnected) && - radeon_connector->dac_load_detect) { - struct drm_encoder *encoder = radeon_best_single_encoder(connector); - struct drm_encoder_helper_funcs *encoder_funcs; - if (encoder) { - encoder_funcs = encoder->helper_private; - ret = 
encoder_funcs->detect(encoder, connector); - } - } } radeon_connector_update_scratch_regs(connector, ret); @@ -1273,8 +1242,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, /* XXX check mode bandwidth */ - if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || - (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) @@ -1284,7 +1252,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; - /* AVIVO hardware supports downscaling modes larger than the panel + /* AVIVO hardware supports downscaling modes larger than the panel * to the panel size, but I'm not sure this is desirable. */ if ((mode->hdisplay > native_mode->hdisplay) || @@ -1433,10 +1401,6 @@ radeon_add_atom_connector(struct drm_device *dev, default: connector->interlace_allowed = true; connector->doublescan_allowed = true; - radeon_connector->dac_load_detect = true; - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1458,12 +1422,6 @@ radeon_add_atom_connector(struct drm_device *dev, connector->doublescan_allowed = true; else connector->doublescan_allowed = false; - if (connector_type == DRM_MODE_CONNECTOR_DVII) { - radeon_connector->dac_load_detect = true; - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); - } break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index 7cfaa7e2f3b5..e680501c78ea 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -215,8 +215,6 @@ int radeon_wb_init(struct radeon_device *rdev) return r; } - /* clear wb memory */ - memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); /* disable event_write fences */ rdev->wb.use_event = false; /* disabled via module param */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c index f55b64cb59d1..03f124d626c2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c @@ -367,8 +367,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, } if (ASIC_IS_DCE3(rdev) && - ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || - radeon_encoder_is_dp_bridge(encoder))) { + (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); radeon_dp_set_link_config(connector, mode); } @@ -661,16 +660,21 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if (radeon_encoder_is_dp_bridge(encoder)) return ATOM_ENCODER_MODE_DP; - /* DVO is always DVO */ - if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) - return ATOM_ENCODER_MODE_DVO; - connector = radeon_get_connector_for_encoder(encoder); - /* if we don't have an active device yet, just use one of - * the connectors tied to the encoder. 
- */ - if (!connector) - connector = radeon_get_connector_for_encoder_init(encoder); + if (!connector) { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + return ATOM_ENCODER_MODE_DVI; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + default: + return ATOM_ENCODER_MODE_CRT; + } + } radeon_connector = to_radeon_connector(connector); switch (connector->connector_type) { @@ -1522,29 +1526,26 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) } if (ext_encoder) { + int action; + switch (mode) { case DRM_MODE_DPMS_ON: default: - if (ASIC_IS_DCE41(rdev)) { - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); - } else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); + if (ASIC_IS_DCE41(rdev)) + action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; + else + action = ATOM_ENABLE; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev)) { - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); - } else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); + if (ASIC_IS_DCE41(rdev)) + action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; + else + action = ATOM_DISABLE; break; } + atombios_external_encoder_setup(encoder, ext_encoder, action); } radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); @@ -2003,65 +2004,6 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec return connector_status_disconnected; } -static enum drm_connector_status -radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - u32 bios_0_scratch; - - if (!ASIC_IS_DCE4(rdev)) - return connector_status_unknown; - - if (!ext_encoder) - return connector_status_unknown; - - if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) - return connector_status_unknown; - - /* load detect on the dp bridge */ - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); - - bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); - - DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); - if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { - if (bios_0_scratch & ATOM_S0_CRT1_MASK) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { - if (bios_0_scratch & ATOM_S0_CRT2_MASK) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { - if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { - if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) - return connector_status_connected; /* CTV */ - else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) - return connector_status_connected; /* STV */ - } - return connector_status_disconnected; -} - -void -radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) -{ - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - - if (ext_encoder) - /* ddc_setup on the dp bridge */ - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); - -} - static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -2225,7 +2167,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { .mode_set = radeon_atom_encoder_mode_set, .commit = radeon_atom_encoder_commit, .disable = radeon_atom_encoder_disable, - .detect = radeon_atom_dig_detect, + /* no detect for TMDS/LVDS yet */ }; static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c index 021d2b6b556f..1f8229436570 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c @@ -40,35 +40,6 @@ #include "radeon.h" #include "radeon_trace.h" -static void radeon_fence_write(struct radeon_device *rdev, u32 seq) -{ - if (rdev->wb.enabled) { - u32 scratch_index; - if (rdev->wb.use_event) - scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; - else - scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; - rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);; - } else - WREG32(rdev->fence_drv.scratch_reg, seq); -} - -static u32 radeon_fence_read(struct 
radeon_device *rdev) -{ - u32 seq; - - if (rdev->wb.enabled) { - u32 scratch_index; - if (rdev->wb.use_event) - scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; - else - scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; - seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); - } else - seq = RREG32(rdev->fence_drv.scratch_reg); - return seq; -} - int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) { unsigned long irq_flags; @@ -79,12 +50,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) return 0; } fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); - if (!rdev->cp.ready) + if (!rdev->cp.ready) { /* FIXME: cp is not running assume everythings is done right * away */ - radeon_fence_write(rdev, fence->seq); - else + WREG32(rdev->fence_drv.scratch_reg, fence->seq); + } else radeon_fence_ring_emit(rdev, fence); trace_radeon_fence_emit(rdev->ddev, fence->seq); @@ -102,7 +73,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) bool wake = false; unsigned long cjiffies; - seq = radeon_fence_read(rdev); + if (rdev->wb.enabled) { + u32 scratch_index; + if (rdev->wb.use_event) + scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + else + scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); + } else + seq = RREG32(rdev->fence_drv.scratch_reg); if (seq != rdev->fence_drv.last_seq) { rdev->fence_drv.last_seq = seq; rdev->fence_drv.last_jiffies = jiffies; @@ -272,7 +251,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) r = radeon_gpu_reset(rdev); if (r) return r; - radeon_fence_write(rdev, fence->seq); + WREG32(rdev->fence_drv.scratch_reg, fence->seq); rdev->gpu_lockup = false; } timeout = RADEON_FENCE_JIFFIES_TIMEOUT; @@ -372,7 +351,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); return r; } - radeon_fence_write(rdev, 0); + WREG32(rdev->fence_drv.scratch_reg, 0); atomic_set(&rdev->fence_drv.seq, 0); INIT_LIST_HEAD(&rdev->fence_drv.created); INIT_LIST_HEAD(&rdev->fence_drv.emited); @@ -412,7 +391,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) struct radeon_fence *fence; seq_printf(m, "Last signaled fence 0x%08X\n", - radeon_fence_read(rdev)); + RREG32(rdev->fence_drv.scratch_reg)); if (!list_empty(&rdev->fence_drv.emited)) { fence = list_entry(rdev->fence_drv.emited.prev, struct radeon_fence, list); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index 6df4e3cec0c2..977a341266b6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -483,8 +483,6 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev); extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set); -extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); -extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder); extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, u8 write_byte, u8 *read_byte); diff --git a/trunk/drivers/hid/Kconfig b/trunk/drivers/hid/Kconfig index 36ca465c00ce..67d2a7585934 100644 --- a/trunk/drivers/hid/Kconfig +++ b/trunk/drivers/hid/Kconfig @@ -305,7 +305,6 @@ 
config HID_MULTITOUCH - 3M PCT touch screens - ActionStar dual touch panels - Cando dual touch panels - - Chunghwa panels - CVTouch panels - Cypress TrueTouch panels - Elo TouchSystems IntelliTouch Plus panels diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c index f7440e8ce3e7..c957c4b4fe70 100644 --- a/trunk/drivers/hid/hid-core.c +++ b/trunk/drivers/hid/hid-core.c @@ -1359,7 +1359,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, - { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, diff --git a/trunk/drivers/hid/hid-ids.h b/trunk/drivers/hid/hid-ids.h index aecb5a4b8d6d..0b374a6d6db0 100644 --- a/trunk/drivers/hid/hid-ids.h +++ b/trunk/drivers/hid/hid-ids.h @@ -173,9 +173,6 @@ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 -#define USB_VENDOR_ID_CHUNGHWAT 0x2247 -#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 - #define USB_VENDOR_ID_CIDC 0x1677 #define USB_VENDOR_ID_CMEDIA 0x0d8c @@ -625,7 +622,6 @@ #define USB_VENDOR_ID_UCLOGIC 0x5543 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 #define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 -#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005 diff --git a/trunk/drivers/hid/hid-magicmouse.c b/trunk/drivers/hid/hid-magicmouse.c index 0ec91c18a421..a5eda4c8127a 100644 --- a/trunk/drivers/hid/hid-magicmouse.c +++ b/trunk/drivers/hid/hid-magicmouse.c @@ -501,9 +501,17 @@ static int magicmouse_probe(struct hid_device *hdev, } report->size = 6; + /* + * The device reponds with 'invalid report id' when feature + * report switching it into multitouch mode is sent to it. + * + * This results in -EIO from the _raw low-level transport callback, + * but there seems to be no other way of switching the mode. + * Thus the super-ugly hacky success check below. 
+ */ ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), HID_FEATURE_REPORT); - if (ret != sizeof(feature)) { + if (ret != -EIO) { hid_err(hdev, "unable to request touch data (%d)\n", ret); goto err_stop_hw; } diff --git a/trunk/drivers/hid/hid-multitouch.c b/trunk/drivers/hid/hid-multitouch.c index 0b2dcd0ee591..ecd4d2db9e80 100644 --- a/trunk/drivers/hid/hid-multitouch.c +++ b/trunk/drivers/hid/hid-multitouch.c @@ -64,7 +64,6 @@ struct mt_device { struct mt_class *mtclass; /* our mt device class */ unsigned last_field_index; /* last field index of the report */ unsigned last_slot_field; /* the last field of a slot */ - int last_mt_collection; /* last known mt-related collection */ __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ __u8 num_received; /* how many contacts we received */ __u8 num_expected; /* expected last contact index */ @@ -226,10 +225,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, cls->sn_move); /* touchscreen emulation */ set_abs(hi->input, ABS_X, field, cls->sn_move); - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_GD_Y: if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) @@ -240,10 +237,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, cls->sn_move); /* touchscreen emulation */ set_abs(hi->input, ABS_Y, field, cls->sn_move); - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; } return 0; @@ -251,40 +246,31 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, case HID_UP_DIGITIZER: switch (usage->hid) { case HID_DG_INRANGE: - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_DG_CONFIDENCE: - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_DG_TIPSWITCH: hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); input_set_capability(hi->input, EV_KEY, BTN_TOUCH); - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_DG_CONTACTID: input_mt_init_slots(hi->input, td->maxcontacts); td->last_slot_field = usage->hid; td->last_field_index = field->index; - td->last_mt_collection = usage->collection_index; return 1; case HID_DG_WIDTH: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOUCH_MAJOR); set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, cls->sn_width); - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_DG_HEIGHT: hid_map_usage(hi, usage, bit, max, @@ -293,10 +279,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, cls->sn_height); input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 0, 1, 0, 
0); - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_DG_TIPPRESSURE: if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) @@ -308,20 +292,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, /* touchscreen emulation */ set_abs(hi->input, ABS_PRESSURE, field, cls->sn_pressure); - if (td->last_mt_collection == usage->collection_index) { - td->last_slot_field = usage->hid; - td->last_field_index = field->index; - } + td->last_slot_field = usage->hid; + td->last_field_index = field->index; return 1; case HID_DG_CONTACTCOUNT: - if (td->last_mt_collection == usage->collection_index) - td->last_field_index = field->index; + td->last_field_index = field->index; return 1; case HID_DG_CONTACTMAX: /* we don't set td->last_slot_field as contactcount and * contact max are global to the report */ - if (td->last_mt_collection == usage->collection_index) - td->last_field_index = field->index; + td->last_field_index = field->index; return -1; } /* let hid-input decide for the others */ @@ -536,7 +516,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) } td->mtclass = mtclass; td->inputmode = -1; - td->last_mt_collection = -1; hid_set_drvdata(hdev, td); ret = hid_parse(hdev); @@ -614,11 +593,6 @@ static const struct hid_device_id mt_devices[] = { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, - /* Chunghwa Telecom touch panels */ - { .driver_data = MT_CLS_DEFAULT, - HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, - USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, - /* CVTouch panels */ { .driver_data = MT_CLS_DEFAULT, HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, diff --git a/trunk/drivers/hid/usbhid/hid-quirks.c b/trunk/drivers/hid/usbhid/hid-quirks.c index 621959d5cc42..0e30b140edca 100644 --- a/trunk/drivers/hid/usbhid/hid-quirks.c +++ b/trunk/drivers/hid/usbhid/hid-quirks.c @@ -74,7 +74,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, - { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT }, diff --git a/trunk/drivers/hid/usbhid/hiddev.c b/trunk/drivers/hid/usbhid/hiddev.c index 7c1188b53c3e..ff3c644888b1 100644 --- a/trunk/drivers/hid/usbhid/hiddev.c +++ b/trunk/drivers/hid/usbhid/hiddev.c @@ -248,15 +248,12 @@ static int hiddev_release(struct inode * inode, struct file * file) usbhid_close(list->hiddev->hid); usbhid_put_power(list->hiddev->hid); } else { - mutex_unlock(&list->hiddev->existancelock); kfree(list->hiddev); - kfree(list); - return 0; } } - mutex_unlock(&list->hiddev->existancelock); kfree(list); + mutex_unlock(&list->hiddev->existancelock); return 0; } @@ -926,11 +923,10 @@ void hiddev_disconnect(struct hid_device *hid) usb_deregister_dev(usbhid->intf, &hiddev_class); if (hiddev->open) { - mutex_unlock(&hiddev->existancelock); usbhid_close(hiddev->hid); wake_up_interruptible(&hiddev->wait); } else { - 
mutex_unlock(&hiddev->existancelock); kfree(hiddev); } + mutex_unlock(&hiddev->existancelock); } diff --git a/trunk/drivers/hwmon/asus_atk0110.c b/trunk/drivers/hwmon/asus_atk0110.c index dcb78a7a8047..b5e892017e0c 100644 --- a/trunk/drivers/hwmon/asus_atk0110.c +++ b/trunk/drivers/hwmon/asus_atk0110.c @@ -268,7 +268,6 @@ static struct device_attribute atk_name_attr = static void atk_init_attribute(struct device_attribute *attr, char *name, sysfs_show_func show) { - sysfs_attr_init(&attr->attr); attr->attr.name = name; attr->attr.mode = 0444; attr->show = show; @@ -1189,15 +1188,19 @@ static int atk_create_files(struct atk_data *data) int err; list_for_each_entry(s, &data->sensor_list, list) { + sysfs_attr_init(&s->input_attr.attr); err = device_create_file(data->hwmon_dev, &s->input_attr); if (err) return err; + sysfs_attr_init(&s->label_attr.attr); err = device_create_file(data->hwmon_dev, &s->label_attr); if (err) return err; + sysfs_attr_init(&s->limit1_attr.attr); err = device_create_file(data->hwmon_dev, &s->limit1_attr); if (err) return err; + sysfs_attr_init(&s->limit2_attr.attr); err = device_create_file(data->hwmon_dev, &s->limit2_attr); if (err) return err; diff --git a/trunk/drivers/hwmon/coretemp.c b/trunk/drivers/hwmon/coretemp.c index 0070d5476dd0..85e937984ff7 100644 --- a/trunk/drivers/hwmon/coretemp.c +++ b/trunk/drivers/hwmon/coretemp.c @@ -97,7 +97,9 @@ struct platform_data { struct pdev_entry { struct list_head list; struct platform_device *pdev; + unsigned int cpu; u16 phys_proc_id; + u16 cpu_core_id; }; static LIST_HEAD(pdev_list); @@ -651,7 +653,9 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) } pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); + pdev_entry->cpu_core_id = TO_CORE_ID(cpu); list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); diff --git a/trunk/drivers/hwmon/ibmaem.c b/trunk/drivers/hwmon/ibmaem.c index 1a409c5bc9bc..537409d07ee7 100644 --- a/trunk/drivers/hwmon/ibmaem.c +++ b/trunk/drivers/hwmon/ibmaem.c @@ -947,7 +947,6 @@ static int aem_register_sensors(struct aem_data *data, /* Set up read-only sensors */ while (ro->label) { - sysfs_attr_init(&sensors->dev_attr.attr); sensors->dev_attr.attr.name = ro->label; sensors->dev_attr.attr.mode = S_IRUGO; sensors->dev_attr.show = ro->show; @@ -964,7 +963,6 @@ static int aem_register_sensors(struct aem_data *data, /* Set up read-write sensors */ while (rw->label) { - sysfs_attr_init(&sensors->dev_attr.attr); sensors->dev_attr.attr.name = rw->label; sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR; sensors->dev_attr.show = rw->show; diff --git a/trunk/drivers/hwmon/ibmpex.c b/trunk/drivers/hwmon/ibmpex.c index 41dbf8161ed7..06d4eafcf76b 100644 --- a/trunk/drivers/hwmon/ibmpex.c +++ b/trunk/drivers/hwmon/ibmpex.c @@ -358,7 +358,6 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type, else if (type == POWER_SENSOR) sprintf(n, power_sensor_name_templates[func], "power", counter); - sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr); data->sensors[sensor].attr[func].dev_attr.attr.name = n; data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO; data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor; diff --git a/trunk/drivers/hwmon/s3c-hwmon.c b/trunk/drivers/hwmon/s3c-hwmon.c index b39f52e2752a..92b42db43bcf 100644 --- a/trunk/drivers/hwmon/s3c-hwmon.c +++ b/trunk/drivers/hwmon/s3c-hwmon.c @@ -232,7 +232,6 @@ static int s3c_hwmon_create_attr(struct device *dev, attr = &attrs->in; 
attr->index = channel; - sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.name = attrs->in_name; attr->dev_attr.attr.mode = S_IRUGO; attr->dev_attr.show = s3c_hwmon_ch_show; @@ -250,7 +249,6 @@ static int s3c_hwmon_create_attr(struct device *dev, attr = &attrs->label; attr->index = channel; - sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.name = attrs->label_name; attr->dev_attr.attr.mode = S_IRUGO; attr->dev_attr.show = s3c_hwmon_label_show; diff --git a/trunk/drivers/input/evdev.c b/trunk/drivers/input/evdev.c index 4cf25347b015..be0921ef6b52 100644 --- a/trunk/drivers/input/evdev.c +++ b/trunk/drivers/input/evdev.c @@ -111,8 +111,7 @@ static void evdev_event(struct input_handle *handle, rcu_read_unlock(); - if (type == EV_SYN && code == SYN_REPORT) - wake_up_interruptible(&evdev->wait); + wake_up_interruptible(&evdev->wait); } static int evdev_fasync(int fd, struct file *file, int on) diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c index da38d97a51b1..75e11c7b70fd 100644 --- a/trunk/drivers/input/input.c +++ b/trunk/drivers/input/input.c @@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev) } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, - mt_slots = clamp(mt_slots, 2, 32); + clamp(mt_slots, 2, 32); } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { mt_slots = 2; } else { diff --git a/trunk/drivers/input/keyboard/omap-keypad.c b/trunk/drivers/input/keyboard/omap-keypad.c index 33d0bdc837c0..f23a743817db 100644 --- a/trunk/drivers/input/keyboard/omap-keypad.c +++ b/trunk/drivers/input/keyboard/omap-keypad.c @@ -209,7 +209,6 @@ static void omap_kp_tasklet(unsigned long data) #endif } } - input_sync(omap_kp_data->input); memcpy(keypad_state, new_state, sizeof(keypad_state)); if (key_down) { diff --git a/trunk/drivers/input/keyboard/sh_keysc.c b/trunk/drivers/input/keyboard/sh_keysc.c index 6876700a4469..834cf98e7efb 100644 --- a/trunk/drivers/input/keyboard/sh_keysc.c +++ b/trunk/drivers/input/keyboard/sh_keysc.c @@ -32,7 +32,7 @@ static const struct { [SH_KEYSC_MODE_3] = { 2, 4, 7 }, [SH_KEYSC_MODE_4] = { 3, 6, 6 }, [SH_KEYSC_MODE_5] = { 4, 6, 7 }, - [SH_KEYSC_MODE_6] = { 5, 8, 8 }, + [SH_KEYSC_MODE_6] = { 5, 7, 7 }, }; struct sh_keysc_priv { diff --git a/trunk/drivers/input/mousedev.c b/trunk/drivers/input/mousedev.c index 0110b5a3a167..257e033986e4 100644 --- a/trunk/drivers/input/mousedev.c +++ b/trunk/drivers/input/mousedev.c @@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, if (size == 0) size = xres ? : 1; - value = clamp(value, min, max); + clamp(value, min, max); mousedev->packet.x = ((value - min) * xres) / size; mousedev->packet.abs_event = 1; @@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, if (size == 0) size = yres ? 
: 1; - value = clamp(value, min, max); + clamp(value, min, max); mousedev->packet.y = yres - ((value - min) * yres) / size; mousedev->packet.abs_event = 1; diff --git a/trunk/drivers/pcmcia/pxa2xx_vpac270.c b/trunk/drivers/pcmcia/pxa2xx_vpac270.c index 712baab3c83d..435002dfc3ca 100644 --- a/trunk/drivers/pcmcia/pxa2xx_vpac270.c +++ b/trunk/drivers/pcmcia/pxa2xx_vpac270.c @@ -11,7 +11,6 @@ * */ -#include #include #include diff --git a/trunk/drivers/spi/spi_bfin5xx.c b/trunk/drivers/spi/spi_bfin5xx.c index cc880c95e7de..f706dba165cf 100644 --- a/trunk/drivers/spi/spi_bfin5xx.c +++ b/trunk/drivers/spi/spi_bfin5xx.c @@ -681,14 +681,13 @@ static void bfin_spi_pump_transfers(unsigned long data) drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ - bits_per_word = transfer->bits_per_word ? : - message->spi->bits_per_word ? : 8; - if (bits_per_word % 16 == 0) { + bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; + if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) { drv_data->n_bytes = bits_per_word/8; drv_data->len = (transfer->len) >> 1; cr_width = BIT_CTL_WORDSIZE; drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; - } else if (bits_per_word % 8 == 0) { + } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) { drv_data->n_bytes = bits_per_word/8; drv_data->len = transfer->len; cr_width = 0; diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c index 30df85d8fca8..553da68bd510 100644 --- a/trunk/drivers/xen/events.c +++ b/trunk/drivers/xen/events.c @@ -395,9 +395,9 @@ static void unmask_evtchn(int port) static void xen_irq_init(unsigned irq) { struct irq_info *info; -#ifdef CONFIG_SMP struct irq_desc *desc = irq_to_desc(irq); +#ifdef CONFIG_SMP /* By default all event channels notify CPU#0. */ cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); #endif diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index 300628795fdb..378b5b4443f3 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -967,12 +967,6 @@ struct btrfs_fs_info { struct srcu_struct subvol_srcu; spinlock_t trans_lock; - /* - * the reloc mutex goes with the trans lock, it is taken - * during commit to protect us from the relocation code - */ - struct mutex reloc_mutex; - struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -1178,14 +1172,6 @@ struct btrfs_root { u32 type; u64 highest_objectid; - - /* btrfs_record_root_in_trans is a multi-step process, - * and it can race with the balancing code. But the - * race is very small, and only the first time the root - * is added to each transaction. 
So in_trans_setup - * is used to tell us when more checks are required - */ - unsigned long in_trans_setup; int ref_cows; int track_dirty; int in_radix; @@ -1195,6 +1181,7 @@ struct btrfs_root { struct btrfs_key defrag_max; int defrag_running; char *name; + int in_sysfs; /* the dirty list is only used by non-reference counted roots */ struct list_head dirty_list; diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c index f1cbd028f7b3..6462c29d2d37 100644 --- a/trunk/fs/btrfs/delayed-inode.c +++ b/trunk/fs/btrfs/delayed-inode.c @@ -297,6 +297,7 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) item->data_len = data_len; item->ins_or_del = 0; item->bytes_reserved = 0; + item->block_rsv = NULL; item->delayed_node = NULL; atomic_set(&item->refs, 1); } @@ -592,8 +593,10 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, num_bytes = btrfs_calc_trans_metadata_size(root, 1); ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); - if (!ret) + if (!ret) { item->bytes_reserved = num_bytes; + item->block_rsv = dst_rsv; + } return ret; } @@ -601,13 +604,10 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, struct btrfs_delayed_item *item) { - struct btrfs_block_rsv *rsv; - if (!item->bytes_reserved) return; - rsv = &root->fs_info->global_block_rsv; - btrfs_block_rsv_release(root, rsv, + btrfs_block_rsv_release(root, item->block_rsv, item->bytes_reserved); } @@ -1014,7 +1014,6 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_root *delayed_root; struct btrfs_delayed_node *curr_node, *prev_node; struct btrfs_path *path; - struct btrfs_block_rsv *block_rsv; int ret = 0; path = btrfs_alloc_path(); @@ -1022,9 +1021,6 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; - block_rsv = trans->block_rsv; - trans->block_rsv = &root->fs_info->global_block_rsv; - delayed_root = btrfs_get_delayed_root(root); curr_node = btrfs_first_delayed_node(delayed_root); @@ -1049,7 +1045,6 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, } btrfs_free_path(path); - trans->block_rsv = block_rsv; return ret; } @@ -1057,7 +1052,6 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_node *node) { struct btrfs_path *path; - struct btrfs_block_rsv *block_rsv; int ret; path = btrfs_alloc_path(); @@ -1065,9 +1059,6 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; - block_rsv = trans->block_rsv; - trans->block_rsv = &node->root->fs_info->global_block_rsv; - ret = btrfs_insert_delayed_items(trans, path, node->root, node); if (!ret) ret = btrfs_delete_delayed_items(trans, path, node->root, node); @@ -1075,7 +1066,6 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, ret = btrfs_update_delayed_inode(trans, node->root, path, node); btrfs_free_path(path); - trans->block_rsv = block_rsv; return ret; } @@ -1126,7 +1116,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) struct btrfs_path *path; struct btrfs_delayed_node *delayed_node = NULL; struct btrfs_root *root; - struct btrfs_block_rsv *block_rsv; unsigned long nr = 0; int need_requeue = 0; int ret; @@ -1145,9 +1134,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) if (IS_ERR(trans)) goto 
free_path; - block_rsv = trans->block_rsv; - trans->block_rsv = &root->fs_info->global_block_rsv; - ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); if (!ret) ret = btrfs_delete_delayed_items(trans, path, root, @@ -1190,7 +1176,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) nr = trans->blocks_used; - trans->block_rsv = block_rsv; btrfs_end_transaction_dmeta(trans, root); __btrfs_btree_balance_dirty(root, nr); free_path: @@ -1237,13 +1222,6 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, return 0; } -void btrfs_assert_delayed_root_empty(struct btrfs_root *root) -{ - struct btrfs_delayed_root *delayed_root; - delayed_root = btrfs_get_delayed_root(root); - WARN_ON(btrfs_first_delayed_node(delayed_root)); -} - void btrfs_balance_delayed_items(struct btrfs_root *root) { struct btrfs_delayed_root *delayed_root; diff --git a/trunk/fs/btrfs/delayed-inode.h b/trunk/fs/btrfs/delayed-inode.h index d1a6a2915c66..eb7d240aa648 100644 --- a/trunk/fs/btrfs/delayed-inode.h +++ b/trunk/fs/btrfs/delayed-inode.h @@ -75,6 +75,7 @@ struct btrfs_delayed_item { struct list_head tree_list; /* used for batch insert/delete items */ struct list_head readdir_list; /* used for readdir items */ u64 bytes_reserved; + struct btrfs_block_rsv *block_rsv; struct btrfs_delayed_node *delayed_node; atomic_t refs; int ins_or_del; @@ -137,8 +138,4 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, /* for init */ int __init btrfs_delayed_inode_init(void); void btrfs_delayed_inode_exit(void); - -/* for debugging */ -void btrfs_assert_delayed_root_empty(struct btrfs_root *root); - #endif diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 1ac8db5dc0a3..9f68c6898653 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -1044,6 +1044,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->last_trans = 0; root->highest_objectid = 0; root->name = NULL; + root->in_sysfs = 0; root->inode_tree = RB_ROOT; INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; @@ -1299,21 +1300,19 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, return root; root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); + if (!root->free_ino_ctl) + goto fail; root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), GFP_NOFS); - if (!root->free_ino_pinned || !root->free_ino_ctl) { - ret = -ENOMEM; + if (!root->free_ino_pinned) goto fail; - } btrfs_init_free_ino_ctl(root); mutex_init(&root->fs_commit_mutex); spin_lock_init(&root->cache_lock); init_waitqueue_head(&root->cache_wait); - ret = set_anon_super(&root->anon_super, NULL); - if (ret) - goto fail; + set_anon_super(&root->anon_super, NULL); if (btrfs_root_refs(&root->root_item) == 0) { ret = -ENOENT; @@ -1619,7 +1618,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); - mutex_init(&fs_info->reloc_mutex); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 1f61bf5b4960..b42efc2ded51 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -3314,6 +3314,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, if (reserved == 0) return 0; + /* nothing to shrink - nothing to reclaim */ + if 
(root->fs_info->delalloc_bytes == 0) + return 0; + max_reclaim = min(reserved, to_reclaim); while (loops < 1024) { diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index 0a9b10c5b0a7..751ddf8fc58a 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -3076,7 +3076,6 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); - btrfs_free_path(path); return 0; } diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index a3c4751e07db..b793d112d1f6 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -482,10 +482,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); BUG_ON(ret); - spin_lock(&root->fs_info->trans_lock); list_add(&pending_snapshot->list, &trans->transaction->pending_snapshots); - spin_unlock(&root->fs_info->trans_lock); if (async_transid) { *async_transid = trans->transid; ret = btrfs_commit_transaction_async(trans, diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c index 5e0a3dc79a45..b1ef27cc673b 100644 --- a/trunk/fs/btrfs/relocation.c +++ b/trunk/fs/btrfs/relocation.c @@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, int ret; if (!root->reloc_root) - goto out; + return 0; reloc_root = root->reloc_root; root_item = &reloc_root->root_item; @@ -1390,8 +1390,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, ret = btrfs_update_root(trans, root->fs_info->tree_root, &reloc_root->root_key, root_item); BUG_ON(ret); - -out: return 0; } @@ -2144,11 +2142,10 @@ int prepare_to_merge(struct reloc_control *rc, int err) u64 num_bytes = 0; int ret; - mutex_lock(&root->fs_info->reloc_mutex); + spin_lock(&root->fs_info->trans_lock); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += rc->nodes_relocated * 2; - mutex_unlock(&root->fs_info->reloc_mutex); - + spin_unlock(&root->fs_info->trans_lock); again: if (!err) { num_bytes = rc->merging_rsv_size; @@ -2217,16 +2214,9 @@ int merge_reloc_roots(struct reloc_control *rc) int ret; again: root = rc->extent_root; - - /* - * this serializes us with btrfs_record_root_in_transaction, - * we have to make sure nobody is in the middle of - * adding their roots to the list while we are - * doing this splice - */ - mutex_lock(&root->fs_info->reloc_mutex); + spin_lock(&root->fs_info->trans_lock); list_splice_init(&rc->reloc_roots, &reloc_roots); - mutex_unlock(&root->fs_info->reloc_mutex); + spin_unlock(&root->fs_info->trans_lock); while (!list_empty(&reloc_roots)) { found = 1; @@ -3600,19 +3590,17 @@ int find_next_extent(struct btrfs_trans_handle *trans, static void set_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - - mutex_lock(&fs_info->reloc_mutex); + spin_lock(&fs_info->trans_lock); fs_info->reloc_ctl = rc; - mutex_unlock(&fs_info->reloc_mutex); + spin_unlock(&fs_info->trans_lock); } static void unset_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - - mutex_lock(&fs_info->reloc_mutex); + spin_lock(&fs_info->trans_lock); fs_info->reloc_ctl = NULL; - mutex_unlock(&fs_info->reloc_mutex); + spin_unlock(&fs_info->trans_lock); } static int check_extent_flags(u64 flags) diff --git a/trunk/fs/btrfs/sysfs.c b/trunk/fs/btrfs/sysfs.c index daac9ae6d731..c3c223ae6691 100644 --- a/trunk/fs/btrfs/sysfs.c +++ b/trunk/fs/btrfs/sysfs.c @@ -28,6 +28,152 @@ #include 
"disk-io.h" #include "transaction.h" +static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)btrfs_root_used(&root->root_item)); +} + +static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)btrfs_root_limit(&root->root_item)); +} + +static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf) +{ + + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)btrfs_super_bytes_used(&fs->super_copy)); +} + +static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)btrfs_super_total_bytes(&fs->super_copy)); +} + +static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)btrfs_super_sectorsize(&fs->super_copy)); +} + +/* this is for root attrs (subvols/snapshots) */ +struct btrfs_root_attr { + struct attribute attr; + ssize_t (*show)(struct btrfs_root *, char *); + ssize_t (*store)(struct btrfs_root *, const char *, size_t); +}; + +#define ROOT_ATTR(name, mode, show, store) \ +static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \ + show, store) + +ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL); +ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL); + +static struct attribute *btrfs_root_attrs[] = { + &btrfs_root_attr_blocks_used.attr, + &btrfs_root_attr_block_limit.attr, + NULL, +}; + +/* this is for super attrs (actual full fs) */ +struct btrfs_super_attr { + struct attribute attr; + ssize_t (*show)(struct btrfs_fs_info *, char *); + ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t); +}; + +#define SUPER_ATTR(name, mode, show, store) \ +static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \ + show, store) + +SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL); +SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL); +SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL); + +static struct attribute *btrfs_super_attrs[] = { + &btrfs_super_attr_blocks_used.attr, + &btrfs_super_attr_total_blocks.attr, + &btrfs_super_attr_blocksize.attr, + NULL, +}; + +static ssize_t btrfs_super_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, + super_kobj); + struct btrfs_super_attr *a = container_of(attr, + struct btrfs_super_attr, + attr); + + return a->show ? a->show(fs, buf) : 0; +} + +static ssize_t btrfs_super_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, + super_kobj); + struct btrfs_super_attr *a = container_of(attr, + struct btrfs_super_attr, + attr); + + return a->store ? a->store(fs, buf, len) : 0; +} + +static ssize_t btrfs_root_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct btrfs_root *root = container_of(kobj, struct btrfs_root, + root_kobj); + struct btrfs_root_attr *a = container_of(attr, + struct btrfs_root_attr, + attr); + + return a->show ? 
a->show(root, buf) : 0; +} + +static ssize_t btrfs_root_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct btrfs_root *root = container_of(kobj, struct btrfs_root, + root_kobj); + struct btrfs_root_attr *a = container_of(attr, + struct btrfs_root_attr, + attr); + return a->store ? a->store(root, buf, len) : 0; +} + +static void btrfs_super_release(struct kobject *kobj) +{ + struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, + super_kobj); + complete(&fs->kobj_unregister); +} + +static void btrfs_root_release(struct kobject *kobj) +{ + struct btrfs_root *root = container_of(kobj, struct btrfs_root, + root_kobj); + complete(&root->kobj_unregister); +} + +static const struct sysfs_ops btrfs_super_attr_ops = { + .show = btrfs_super_attr_show, + .store = btrfs_super_attr_store, +}; + +static const struct sysfs_ops btrfs_root_attr_ops = { + .show = btrfs_root_attr_show, + .store = btrfs_root_attr_store, +}; + /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c index 51dcec86757f..2b3590b9fe98 100644 --- a/trunk/fs/btrfs/transaction.c +++ b/trunk/fs/btrfs/transaction.c @@ -126,85 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -static int record_root_in_trans(struct btrfs_trans_handle *trans, +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); - /* - * see below for in_trans_setup usage rules - * we have the reloc mutex held now, so there - * is only one writer in this function - */ - root->in_trans_setup = 1; - - /* make sure readers find in_trans_setup before - * they find our root->last_trans update - */ - smp_wmb(); - spin_lock(&root->fs_info->fs_roots_radix_lock); if (root->last_trans == trans->transid) { spin_unlock(&root->fs_info->fs_roots_radix_lock); return 0; } + root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&root->fs_info->fs_roots_radix_lock); - root->last_trans = trans->transid; - - /* this is pretty tricky. We don't want to - * take the relocation lock in btrfs_record_root_in_trans - * unless we're really doing the first setup for this root in - * this transaction. - * - * Normally we'd use root->last_trans as a flag to decide - * if we want to take the expensive mutex. - * - * But, we have to set root->last_trans before we - * init the relocation root, otherwise, we trip over warnings - * in ctree.c. The solution used here is to flag ourselves - * with root->in_trans_setup. When this is 1, we're still - * fixing up the reloc trees and everyone must wait. - * - * When this is zero, they can trust root->last_trans and fly - * through btrfs_record_root_in_trans without having to take the - * lock. 
smp_wmb() makes sure that all the writes above are - * done before we pop in the zero below - */ btrfs_init_reloc_root(trans, root); - smp_wmb(); - root->in_trans_setup = 0; } return 0; } - -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - if (!root->ref_cows) - return 0; - - /* - * see record_root_in_trans for comments about in_trans_setup usage - * and barriers - */ - smp_rmb(); - if (root->last_trans == trans->transid && - !root->in_trans_setup) - return 0; - - mutex_lock(&root->fs_info->reloc_mutex); - record_root_in_trans(trans, root); - mutex_unlock(&root->fs_info->reloc_mutex); - - return 0; -} - /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. @@ -939,7 +882,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - record_root_in_trans(trans, parent_root); + btrfs_record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -957,16 +900,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - /* - * pull in the delayed directory update - * and the delayed inode item - * otherwise we corrupt the FS during - * snapshot - */ - ret = btrfs_run_delayed_items(trans, root); - BUG_ON(ret); - - record_root_in_trans(trans, root); + btrfs_record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1027,6 +961,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { + /* + * We must deal with the delayed items before creating + * snapshots, or we will create a snapthot with inconsistent + * information. + */ + ret = btrfs_run_delayed_items(trans, fs_info->fs_root); + BUG_ON(ret); + ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1299,42 +1241,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, schedule_timeout(1); finish_wait(&cur_trans->writer_wait, &wait); + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); - /* - * Ok now we need to make sure to block out any other joins while we - * commit the transaction. We could have started a join before setting - * no_join so make sure to wait for num_writers to == 1 again. 
- */ - spin_lock(&root->fs_info->trans_lock); - root->fs_info->trans_no_join = 1; - spin_unlock(&root->fs_info->trans_lock); - wait_event(cur_trans->writer_wait, - atomic_read(&cur_trans->num_writers) == 1); - - /* - * the reloc mutex makes sure that we stop - * the balancing code from coming in and moving - * extents around in the middle of the commit - */ - mutex_lock(&root->fs_info->reloc_mutex); - - ret = btrfs_run_delayed_items(trans, root); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); - ret = create_pending_snapshots(trans, root->fs_info); + ret = btrfs_run_delayed_items(trans, root); BUG_ON(ret); ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); - /* - * make sure none of the code above managed to slip in a - * delayed item - */ - btrfs_assert_delayed_root_empty(root); - WARN_ON(cur_trans != trans->transaction); btrfs_scrub_pause(root); @@ -1391,7 +1312,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, root->fs_info->running_transaction = NULL; root->fs_info->trans_no_join = 0; spin_unlock(&root->fs_info->trans_lock); - mutex_unlock(&root->fs_info->reloc_mutex); wake_up(&root->fs_info->transaction_wait); diff --git a/trunk/fs/btrfs/tree-log.c b/trunk/fs/btrfs/tree-log.c index 4ce8a9f41d1e..592396c6dc47 100644 --- a/trunk/fs/btrfs/tree-log.c +++ b/trunk/fs/btrfs/tree-log.c @@ -3177,7 +3177,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) tmp_key.offset = (u64)-1; wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); - BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); + BUG_ON(!wc.replay_dest); wc.replay_dest->log_root = log; btrfs_record_root_in_trans(trans, wc.replay_dest); diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c index 6075a1e727ae..97e0d52d72fd 100644 --- a/trunk/fs/exec.c +++ b/trunk/fs/exec.c @@ -1996,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file) * is a special value that we use to trap recursive * core dumps */ -static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) +static int umh_pipe_setup(struct subprocess_info *info) { struct file *rp, *wp; struct fdtable *fdt; diff --git a/trunk/fs/isofs/inode.c b/trunk/fs/isofs/inode.c index b3cc8586984e..3db5ba4568fc 100644 --- a/trunk/fs/isofs/inode.c +++ b/trunk/fs/isofs/inode.c @@ -974,7 +974,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) out_no_read: printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n", __func__, s->s_id, iso_blknum, block); - goto out_freebh; + goto out_freesbi; out_bad_zone_size: printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n", sbi->s_log_zone_size); @@ -989,7 +989,6 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) out_freebh: brelse(bh); - brelse(pri_bh); out_freesbi: kfree(opt.iocharset); kfree(sbi); diff --git a/trunk/fs/timerfd.c b/trunk/fs/timerfd.c index dffeb3795af1..f67acbdda5e8 100644 --- a/trunk/fs/timerfd.c +++ b/trunk/fs/timerfd.c @@ -61,9 +61,7 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) /* * Called when the clock was set to cancel the timers in the cancel - * list. This will wake up processes waiting on these timers. The - * wake-up requires ctx->ticks to be non zero, therefore we increment - * it before calling wake_up_locked(). + * list. 
*/ void timerfd_clock_was_set(void) { @@ -78,7 +76,6 @@ void timerfd_clock_was_set(void) spin_lock_irqsave(&ctx->wqh.lock, flags); if (ctx->moffs.tv64 != moffs.tv64) { ctx->moffs.tv64 = KTIME_MAX; - ctx->ticks++; wake_up_locked(&ctx->wqh); } spin_unlock_irqrestore(&ctx->wqh.lock, flags); diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c index 8c892c2d5300..529be0582029 100644 --- a/trunk/fs/ubifs/super.c +++ b/trunk/fs/ubifs/super.c @@ -2146,6 +2146,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, if (IS_ERR(sb)) { err = PTR_ERR(sb); kfree(c); + goto out_close; } if (sb->s_root) { diff --git a/trunk/fs/xfs/linux-2.6/xfs_file.c b/trunk/fs/xfs/linux-2.6/xfs_file.c index 7f782af286bf..f4213ba1ff85 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_file.c +++ b/trunk/fs/xfs/linux-2.6/xfs_file.c @@ -131,34 +131,19 @@ xfs_file_fsync( { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; int error = 0; int log_flushed = 0; trace_xfs_file_fsync(ip); - if (XFS_FORCED_SHUTDOWN(mp)) + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -XFS_ERROR(EIO); xfs_iflags_clear(ip, XFS_ITRUNCATED); xfs_ioend_wait(ip); - if (mp->m_flags & XFS_MOUNT_BARRIER) { - /* - * If we have an RT and/or log subvolume we need to make sure - * to flush the write cache the device used for file data - * first. This is to ensure newly written file data make - * it to disk before logging the new inode size in case of - * an extending write. - */ - if (XFS_IS_REALTIME_INODE(ip)) - xfs_blkdev_issue_flush(mp->m_rtdev_targp); - else if (mp->m_logdev_targp != mp->m_ddev_targp) - xfs_blkdev_issue_flush(mp->m_ddev_targp); - } - /* * We always need to make sure that the required inode state is safe on * disk. The inode might be clean but we still might need to force the @@ -190,9 +175,9 @@ xfs_file_fsync( * updates. The sync transaction will also force the log. */ xfs_iunlock(ip, XFS_ILOCK_SHARED); - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); error = xfs_trans_reserve(tp, 0, - XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0); if (error) { xfs_trans_cancel(tp, 0); return -error; @@ -224,25 +209,28 @@ xfs_file_fsync( * force the log. */ if (xfs_ipincount(ip)) { - error = _xfs_log_force_lsn(mp, + error = _xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, XFS_LOG_SYNC, &log_flushed); } xfs_iunlock(ip, XFS_ILOCK_SHARED); } - /* - * If we only have a single device, and the log force about was - * a no-op we might have to flush the data device cache here. - * This can only happen for fdatasync/O_DSYNC if we were overwriting - * an already allocated file and thus do not have any metadata to - * commit. - */ - if ((mp->m_flags & XFS_MOUNT_BARRIER) && - mp->m_logdev_targp == mp->m_ddev_targp && - !XFS_IS_REALTIME_INODE(ip) && - !log_flushed) - xfs_blkdev_issue_flush(mp->m_ddev_targp); + if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) { + /* + * If the log write didn't issue an ordered tag we need + * to flush the disk cache for the data device now. + */ + if (!log_flushed) + xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp); + + /* + * If this inode is on the RT dev we need to flush that + * cache as well. 
+ */ + if (XFS_IS_REALTIME_INODE(ip)) + xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); + } return -error; } diff --git a/trunk/fs/xfs/linux-2.6/xfs_iops.c b/trunk/fs/xfs/linux-2.6/xfs_iops.c index d44d92cd12b1..dd21784525a8 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_iops.c +++ b/trunk/fs/xfs/linux-2.6/xfs_iops.c @@ -182,7 +182,7 @@ xfs_vn_mknod( if (IS_POSIXACL(dir)) { default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(default_acl)) - return PTR_ERR(default_acl); + return -PTR_ERR(default_acl); if (!default_acl) mode &= ~current_umask(); diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.c b/trunk/fs/xfs/linux-2.6/xfs_super.c index a1a881e68a9a..1e3a7ce804dc 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.c +++ b/trunk/fs/xfs/linux-2.6/xfs_super.c @@ -627,6 +627,68 @@ xfs_blkdev_put( blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } +/* + * Try to write out the superblock using barriers. + */ +STATIC int +xfs_barrier_test( + xfs_mount_t *mp) +{ + xfs_buf_t *sbp = xfs_getsb(mp, 0); + int error; + + XFS_BUF_UNDONE(sbp); + XFS_BUF_UNREAD(sbp); + XFS_BUF_UNDELAYWRITE(sbp); + XFS_BUF_WRITE(sbp); + XFS_BUF_UNASYNC(sbp); + XFS_BUF_ORDERED(sbp); + + xfsbdstrat(mp, sbp); + error = xfs_buf_iowait(sbp); + + /* + * Clear all the flags we set and possible error state in the + * buffer. We only did the write to try out whether barriers + * worked and shouldn't leave any traces in the superblock + * buffer. + */ + XFS_BUF_DONE(sbp); + XFS_BUF_ERROR(sbp, 0); + XFS_BUF_UNORDERED(sbp); + + xfs_buf_relse(sbp); + return error; +} + +STATIC void +xfs_mountfs_check_barriers(xfs_mount_t *mp) +{ + int error; + + if (mp->m_logdev_targp != mp->m_ddev_targp) { + xfs_notice(mp, + "Disabling barriers, not supported with external log device"); + mp->m_flags &= ~XFS_MOUNT_BARRIER; + return; + } + + if (xfs_readonly_buftarg(mp->m_ddev_targp)) { + xfs_notice(mp, + "Disabling barriers, underlying device is readonly"); + mp->m_flags &= ~XFS_MOUNT_BARRIER; + return; + } + + error = xfs_barrier_test(mp); + if (error) { + xfs_notice(mp, + "Disabling barriers, trial barrier write failed"); + mp->m_flags &= ~XFS_MOUNT_BARRIER; + return; + } +} + void xfs_blkdev_issue_flush( xfs_buftarg_t *buftarg) @@ -1178,6 +1240,14 @@ xfs_fs_remount( switch (token) { case Opt_barrier: mp->m_flags |= XFS_MOUNT_BARRIER; + + /* + * Test if barriers are actually working if we can, + * else delay this check until the filesystem is + * marked writeable. 
+ */ + if (!(mp->m_flags & XFS_MOUNT_RDONLY)) + xfs_mountfs_check_barriers(mp); break; case Opt_nobarrier: mp->m_flags &= ~XFS_MOUNT_BARRIER; @@ -1212,6 +1282,8 @@ xfs_fs_remount( /* ro -> rw */ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { mp->m_flags &= ~XFS_MOUNT_RDONLY; + if (mp->m_flags & XFS_MOUNT_BARRIER) + xfs_mountfs_check_barriers(mp); /* * If this is the first remount to writeable state we @@ -1393,6 +1465,9 @@ xfs_fs_fill_super( if (error) goto out_free_sb; + if (mp->m_flags & XFS_MOUNT_BARRIER) + xfs_mountfs_check_barriers(mp); + error = xfs_filestream_mount(mp); if (error) goto out_free_sb; diff --git a/trunk/fs/xfs/xfs_log.c b/trunk/fs/xfs/xfs_log.c index 41d5b8f2bf92..211930246f20 100644 --- a/trunk/fs/xfs/xfs_log.c +++ b/trunk/fs/xfs/xfs_log.c @@ -1372,17 +1372,8 @@ xlog_sync(xlog_t *log, XFS_BUF_ASYNC(bp); bp->b_flags |= XBF_LOG_BUFFER; - if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { - /* - * If we have an external log device, flush the data device - * before flushing the log to make sure all meta data - * written back from the AIL actually made it to disk - * before writing out the new log tail LSN in the log buffer. - */ - if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) - xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); + if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) XFS_BUF_ORDERED(bp); - } ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); diff --git a/trunk/include/asm-generic/gpio.h b/trunk/include/asm-generic/gpio.h index d494001b1226..fcdcb5d5c995 100644 --- a/trunk/include/asm-generic/gpio.h +++ b/trunk/include/asm-generic/gpio.h @@ -170,6 +170,16 @@ extern int __gpio_cansleep(unsigned gpio); extern int __gpio_to_irq(unsigned gpio); +#define GPIOF_DIR_OUT (0 << 0) +#define GPIOF_DIR_IN (1 << 0) + +#define GPIOF_INIT_LOW (0 << 1) +#define GPIOF_INIT_HIGH (1 << 1) + +#define GPIOF_IN (GPIOF_DIR_IN) +#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) +#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) + /** * struct gpio - a structure describing a GPIO with configuration * @gpio: the GPIO number diff --git a/trunk/include/drm/drm_crtc.h b/trunk/include/drm/drm_crtc.h index 33d12f87f0e0..9573e0ce3120 100644 --- a/trunk/include/drm/drm_crtc.h +++ b/trunk/include/drm/drm_crtc.h @@ -520,8 +520,6 @@ struct drm_connector { uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; uint32_t force_encoder_id; struct drm_encoder *encoder; /* currently active encoder */ - - int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ }; /** diff --git a/trunk/include/linux/clocksource.h b/trunk/include/linux/clocksource.h index 18a1baf31f2d..d4646b48dc4a 100644 --- a/trunk/include/linux/clocksource.h +++ b/trunk/include/linux/clocksource.h @@ -188,7 +188,6 @@ struct clocksource { #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ struct list_head wd_list; - cycle_t cs_last; cycle_t wd_last; #endif } ____cacheline_aligned; diff --git a/trunk/include/linux/gpio.h b/trunk/include/linux/gpio.h index 17b5a0d80e42..32d47e710661 100644 --- a/trunk/include/linux/gpio.h +++ b/trunk/include/linux/gpio.h @@ -3,17 +3,6 @@ /* see Documentation/gpio.txt */ -/* make these flag values available regardless of GPIO kconfig options */ -#define GPIOF_DIR_OUT (0 << 0) -#define GPIOF_DIR_IN (1 << 0) - -#define GPIOF_INIT_LOW (0 << 1) -#define GPIOF_INIT_HIGH (1 << 1) - -#define GPIOF_IN (GPIOF_DIR_IN) -#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | 
GPIOF_INIT_LOW) -#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) - #ifdef CONFIG_GENERIC_GPIO #include diff --git a/trunk/include/linux/input/sh_keysc.h b/trunk/include/linux/input/sh_keysc.h index 5d253cd93691..649dc7f12925 100644 --- a/trunk/include/linux/input/sh_keysc.h +++ b/trunk/include/linux/input/sh_keysc.h @@ -1,7 +1,7 @@ #ifndef __SH_KEYSC_H__ #define __SH_KEYSC_H__ -#define SH_KEYSC_MAXKEYS 64 +#define SH_KEYSC_MAXKEYS 49 struct sh_keysc_info { enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index f6efed0039ed..6c12989839d9 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -414,7 +414,6 @@ enum TASKLET_SOFTIRQ, SCHED_SOFTIRQ, HRTIMER_SOFTIRQ, - RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ NR_SOFTIRQS }; diff --git a/trunk/include/linux/kmod.h b/trunk/include/linux/kmod.h index 0da38cf7db7b..d4a5c84c503d 100644 --- a/trunk/include/linux/kmod.h +++ b/trunk/include/linux/kmod.h @@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; #endif -struct cred; +struct key; struct file; enum umh_wait { @@ -62,7 +62,7 @@ struct subprocess_info { char **envp; enum umh_wait wait; int retval; - int (*init)(struct subprocess_info *info, struct cred *new); + int (*init)(struct subprocess_info *info); void (*cleanup)(struct subprocess_info *info); void *data; }; @@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, /* Set various pieces of state into the subprocess_info structure */ void call_usermodehelper_setfns(struct subprocess_info *info, - int (*init)(struct subprocess_info *info, struct cred *new), + int (*init)(struct subprocess_info *info), void (*cleanup)(struct subprocess_info *info), void *data); @@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info); static inline int call_usermodehelper_fns(char *path, char **argv, char **envp, enum umh_wait wait, - int (*init)(struct subprocess_info *info, struct cred *new), + int (*init)(struct subprocess_info *info), void (*cleanup)(struct subprocess_info *), void *data) { struct subprocess_info *info; diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h index 8cc38d3bab0c..7ad824d510a2 100644 --- a/trunk/include/linux/smp.h +++ b/trunk/include/linux/smp.h @@ -85,15 +85,12 @@ int smp_call_function_any(const struct cpumask *mask, * Generic and arch helpers */ #ifdef CONFIG_USE_GENERIC_SMP_HELPERS -void __init call_function_init(void); void generic_smp_call_function_single_interrupt(void); void generic_smp_call_function_interrupt(void); void ipi_call_lock(void); void ipi_call_unlock(void); void ipi_call_lock_irq(void); void ipi_call_unlock_irq(void); -#else -static inline void call_function_init(void) { } #endif /* @@ -137,7 +134,7 @@ static inline void smp_send_reschedule(int cpu) { } #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_many(mask, func, info, wait) \ (up_smp_call_function(func, info)) -static inline void call_function_init(void) { } +static inline void init_call_single_data(void) { } static inline int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, diff --git a/trunk/include/trace/events/irq.h b/trunk/include/trace/events/irq.h index 1c09820df585..ae045ca7d356 100644 --- a/trunk/include/trace/events/irq.h +++ b/trunk/include/trace/events/irq.h @@ -20,8 +20,7 @@ struct softirq_action; 
softirq_name(BLOCK_IOPOLL), \ softirq_name(TASKLET), \ softirq_name(SCHED), \ - softirq_name(HRTIMER), \ - softirq_name(RCU)) + softirq_name(HRTIMER)) /** * irq_handler_entry - called immediately before the irq action handler diff --git a/trunk/init/main.c b/trunk/init/main.c index d7211faed2ad..cafba67c13bf 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -542,7 +542,6 @@ asmlinkage void __init start_kernel(void) timekeeping_init(); time_init(); profile_init(); - call_function_init(); if (!irqs_disabled()) printk(KERN_CRIT "start_kernel(): bug: interrupts were " "enabled early\n"); diff --git a/trunk/kernel/kmod.c b/trunk/kernel/kmod.c index 47613dfb7b28..ad6a81c58b44 100644 --- a/trunk/kernel/kmod.c +++ b/trunk/kernel/kmod.c @@ -156,6 +156,12 @@ static int ____call_usermodehelper(void *data) */ set_user_nice(current, 0); + if (sub_info->init) { + retval = sub_info->init(sub_info); + if (retval) + goto fail; + } + retval = -ENOMEM; new = prepare_kernel_cred(current); if (!new) @@ -167,14 +173,6 @@ static int ____call_usermodehelper(void *data) new->cap_inheritable); spin_unlock(&umh_sysctl_lock); - if (sub_info->init) { - retval = sub_info->init(sub_info, new); - if (retval) { - abort_creds(new); - goto fail; - } - } - commit_creds(new); retval = kernel_execve(sub_info->path, @@ -390,7 +388,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup); * context in which call_usermodehelper_exec is called. */ void call_usermodehelper_setfns(struct subprocess_info *info, - int (*init)(struct subprocess_info *info, struct cred *new), + int (*init)(struct subprocess_info *info), void (*cleanup)(struct subprocess_info *info), void *data) { diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index 7e59ffb3d0ba..89419ff92e99 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -87,8 +87,6 @@ static struct rcu_state *rcu_state; int rcu_scheduler_active __read_mostly; EXPORT_SYMBOL_GPL(rcu_scheduler_active); -#ifdef CONFIG_RCU_BOOST - /* * Control variables for per-CPU and per-rcu_node kthreads. These * handle all flavors of RCU. @@ -100,11 +98,8 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); static char rcu_kthreads_spawnable; -#endif /* #ifdef CONFIG_RCU_BOOST */ - static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); -static void invoke_rcu_core(void); -static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); +static void invoke_rcu_cpu_kthread(void); #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ @@ -1093,8 +1088,14 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) int need_report = 0; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp; + struct task_struct *t; - rcu_stop_cpu_kthread(cpu); + /* Stop the CPU's kthread. */ + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t != NULL) { + per_cpu(rcu_cpu_kthread_task, cpu) = NULL; + kthread_stop(t); + } /* Exclude any attempts to start a new grace period. */ raw_spin_lock_irqsave(&rsp->onofflock, flags); @@ -1230,7 +1231,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) /* Re-raise the RCU softirq if there are callbacks remaining. 
*/ if (cpu_has_callbacks_ready_to_invoke(rdp)) - invoke_rcu_core(); + invoke_rcu_cpu_kthread(); } /* @@ -1276,7 +1277,7 @@ void rcu_check_callbacks(int cpu, int user) } rcu_preempt_check_callbacks(cpu); if (rcu_pending(cpu)) - invoke_rcu_core(); + invoke_rcu_cpu_kthread(); } #ifdef CONFIG_SMP @@ -1441,14 +1442,13 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) } /* If there are callbacks ready, invoke them. */ - if (cpu_has_callbacks_ready_to_invoke(rdp)) - invoke_rcu_callbacks(rsp, rdp); + rcu_do_batch(rsp, rdp); } /* * Do softirq processing for the current CPU. */ -static void rcu_process_callbacks(struct softirq_action *unused) +static void rcu_process_callbacks(void) { __rcu_process_callbacks(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); @@ -1465,20 +1465,342 @@ static void rcu_process_callbacks(struct softirq_action *unused) * the current CPU with interrupts disabled, the rcu_cpu_kthread_task * cannot disappear out from under us. */ -static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) +static void invoke_rcu_cpu_kthread(void) +{ + unsigned long flags; + + local_irq_save(flags); + __this_cpu_write(rcu_cpu_has_work, 1); + if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { + local_irq_restore(flags); + return; + } + wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); + local_irq_restore(flags); +} + +/* + * Wake up the specified per-rcu_node-structure kthread. + * Because the per-rcu_node kthreads are immortal, we don't need + * to do anything to keep them alive. + */ +static void invoke_rcu_node_kthread(struct rcu_node *rnp) +{ + struct task_struct *t; + + t = rnp->node_kthread_task; + if (t != NULL) + wake_up_process(t); +} + +/* + * Set the specified CPU's kthread to run RT or not, as specified by + * the to_rt argument. The CPU-hotplug locks are held, so the task + * is not going away. + */ +static void rcu_cpu_kthread_setrt(int cpu, int to_rt) +{ + int policy; + struct sched_param sp; + struct task_struct *t; + + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t == NULL) + return; + if (to_rt) { + policy = SCHED_FIFO; + sp.sched_priority = RCU_KTHREAD_PRIO; + } else { + policy = SCHED_NORMAL; + sp.sched_priority = 0; + } + sched_setscheduler_nocheck(t, policy, &sp); +} + +/* + * Timer handler to initiate the waking up of per-CPU kthreads that + * have yielded the CPU due to excess numbers of RCU callbacks. + * We wake up the per-rcu_node kthread, which in turn will wake up + * the booster kthread. + */ +static void rcu_cpu_kthread_timer(unsigned long arg) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); + struct rcu_node *rnp = rdp->mynode; + + atomic_or(rdp->grpmask, &rnp->wakemask); + invoke_rcu_node_kthread(rnp); +} + +/* + * Drop to non-real-time priority and yield, but only after posting a + * timer that will cause us to regain our real-time priority if we + * remain preempted. Either way, we restore our real-time priority + * before returning. + */ +static void rcu_yield(void (*f)(unsigned long), unsigned long arg) +{ + struct sched_param sp; + struct timer_list yield_timer; + + setup_timer_on_stack(&yield_timer, f, arg); + mod_timer(&yield_timer, jiffies + 2); + sp.sched_priority = 0; + sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); + set_user_nice(current, 19); + schedule(); + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); + del_timer(&yield_timer); +} + +/* + * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. 
+ * This can happen while the corresponding CPU is either coming online + * or going offline. We cannot wait until the CPU is fully online + * before starting the kthread, because the various notifier functions + * can wait for RCU grace periods. So we park rcu_cpu_kthread() until + * the corresponding CPU is online. + * + * Return 1 if the kthread needs to stop, 0 otherwise. + * + * Caller must disable bh. This function can momentarily enable it. + */ +static int rcu_cpu_kthread_should_stop(int cpu) +{ + while (cpu_is_offline(cpu) || + !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || + smp_processor_id() != cpu) { + if (kthread_should_stop()) + return 1; + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; + per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); + local_bh_enable(); + schedule_timeout_uninterruptible(1); + if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + local_bh_disable(); + } + per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; + return 0; +} + +/* + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the + * earlier RCU softirq. + */ +static int rcu_cpu_kthread(void *arg) +{ + int cpu = (int)(long)arg; + unsigned long flags; + int spincnt = 0; + unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); + char work; + char *workp = &per_cpu(rcu_cpu_has_work, cpu); + + for (;;) { + *statusp = RCU_KTHREAD_WAITING; + rcu_wait(*workp != 0 || kthread_should_stop()); + local_bh_disable(); + if (rcu_cpu_kthread_should_stop(cpu)) { + local_bh_enable(); + break; + } + *statusp = RCU_KTHREAD_RUNNING; + per_cpu(rcu_cpu_kthread_loops, cpu)++; + local_irq_save(flags); + work = *workp; + *workp = 0; + local_irq_restore(flags); + if (work) + rcu_process_callbacks(); + local_bh_enable(); + if (*workp != 0) + spincnt++; + else + spincnt = 0; + if (spincnt > 10) { + *statusp = RCU_KTHREAD_YIELDING; + rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); + spincnt = 0; + } + } + *statusp = RCU_KTHREAD_STOPPED; + return 0; +} + +/* + * Spawn a per-CPU kthread, setting up affinity and priority. + * Because the CPU hotplug lock is held, no other CPU will be attempting + * to manipulate rcu_cpu_kthread_task. There might be another CPU + * attempting to access it during boot, but the locking in kthread_bind() + * will enforce sufficient ordering. + */ +static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) { - if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); + struct sched_param sp; + struct task_struct *t; + + if (!rcu_kthreads_spawnable || + per_cpu(rcu_cpu_kthread_task, cpu) != NULL) + return 0; + t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); + if (IS_ERR(t)) + return PTR_ERR(t); + kthread_bind(t, cpu); + per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; + WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); + per_cpu(rcu_cpu_kthread_task, cpu) = t; + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + return 0; +} + +/* + * Per-rcu_node kthread, which is in charge of waking up the per-CPU + * kthreads when needed. We ignore requests to wake up kthreads + * for offline CPUs, which is OK because force_quiescent_state() + * takes care of this case. 
+ */ +static int rcu_node_kthread(void *arg) +{ + int cpu; + unsigned long flags; + unsigned long mask; + struct rcu_node *rnp = (struct rcu_node *)arg; + struct sched_param sp; + struct task_struct *t; + + for (;;) { + rnp->node_kthread_status = RCU_KTHREAD_WAITING; + rcu_wait(atomic_read(&rnp->wakemask) != 0); + rnp->node_kthread_status = RCU_KTHREAD_RUNNING; + raw_spin_lock_irqsave(&rnp->lock, flags); + mask = atomic_xchg(&rnp->wakemask, 0); + rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ + for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { + if ((mask & 0x1) == 0) + continue; + preempt_disable(); + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (!cpu_online(cpu) || t == NULL) { + preempt_enable(); + continue; + } + per_cpu(rcu_cpu_has_work, cpu) = 1; + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + preempt_enable(); + } + } + /* NOTREACHED */ + rnp->node_kthread_status = RCU_KTHREAD_STOPPED; + return 0; +} + +/* + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. The CPU hotplug lock is still + * held, so the value of rnp->qsmaskinit will be stable. + * + * We don't include outgoingcpu in the affinity set, use -1 if there is + * no outgoing CPU. If there are no CPUs left in the affinity set, + * this function allows the kthread to execute on any CPU. + */ +static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +{ + cpumask_var_t cm; + int cpu; + unsigned long mask = rnp->qsmaskinit; + + if (rnp->node_kthread_task == NULL) + return; + if (!alloc_cpumask_var(&cm, GFP_KERNEL)) return; + cpumask_clear(cm); + for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) + if ((mask & 0x1) && cpu != outgoingcpu) + cpumask_set_cpu(cpu, cm); + if (cpumask_weight(cm) == 0) { + cpumask_setall(cm); + for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) + cpumask_clear_cpu(cpu, cm); + WARN_ON_ONCE(cpumask_weight(cm) == 0); } - invoke_rcu_callbacks_kthread(); + set_cpus_allowed_ptr(rnp->node_kthread_task, cm); + rcu_boost_kthread_setaffinity(rnp, cm); + free_cpumask_var(cm); } -static void invoke_rcu_core(void) +/* + * Spawn a per-rcu_node kthread, setting priority and affinity. + * Called during boot before online/offline can happen, or, if + * during runtime, with the main CPU-hotplug locks held. So only + * one of these can be executing at a time. + */ +static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, + struct rcu_node *rnp) { - raise_softirq(RCU_SOFTIRQ); + unsigned long flags; + int rnp_index = rnp - &rsp->node[0]; + struct sched_param sp; + struct task_struct *t; + + if (!rcu_kthreads_spawnable || + rnp->qsmaskinit == 0) + return 0; + if (rnp->node_kthread_task == NULL) { + t = kthread_create(rcu_node_kthread, (void *)rnp, + "rcun%d", rnp_index); + if (IS_ERR(t)) + return PTR_ERR(t); + raw_spin_lock_irqsave(&rnp->lock, flags); + rnp->node_kthread_task = t; + raw_spin_unlock_irqrestore(&rnp->lock, flags); + sp.sched_priority = 99; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + } + return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); } +static void rcu_wake_one_boost_kthread(struct rcu_node *rnp); + +/* + * Spawn all kthreads -- called as soon as the scheduler is running. 
+ */ +static int __init rcu_spawn_kthreads(void) +{ + int cpu; + struct rcu_node *rnp; + struct task_struct *t; + + rcu_kthreads_spawnable = 1; + for_each_possible_cpu(cpu) { + per_cpu(rcu_cpu_has_work, cpu) = 0; + if (cpu_online(cpu)) { + (void)rcu_spawn_one_cpu_kthread(cpu); + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t) + wake_up_process(t); + } + } + rnp = rcu_get_root(rcu_state); + (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + if (rnp->node_kthread_task) + wake_up_process(rnp->node_kthread_task); + if (NUM_RCU_NODES > 1) { + rcu_for_each_leaf_node(rcu_state, rnp) { + (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + t = rnp->node_kthread_task; + if (t) + wake_up_process(t); + rcu_wake_one_boost_kthread(rnp); + } + } + return 0; +} +early_initcall(rcu_spawn_kthreads); + static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_state *rsp) @@ -1885,6 +2207,44 @@ static void __cpuinit rcu_prepare_cpu(int cpu) rcu_preempt_init_percpu_data(cpu); } +static void __cpuinit rcu_prepare_kthreads(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); + struct rcu_node *rnp = rdp->mynode; + + /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ + if (rcu_kthreads_spawnable) { + (void)rcu_spawn_one_cpu_kthread(cpu); + if (rnp->node_kthread_task == NULL) + (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + } +} + +/* + * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state, + * but the RCU threads are woken on demand, and if demand is low this + * could be a while triggering the hung task watchdog. + * + * In order to avoid this, poke all tasks once the CPU is fully + * up and running. + */ +static void __cpuinit rcu_online_kthreads(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); + struct rcu_node *rnp = rdp->mynode; + struct task_struct *t; + + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t) + wake_up_process(t); + + t = rnp->node_kthread_task; + if (t) + wake_up_process(t); + + rcu_wake_one_boost_kthread(rnp); +} + /* * Handle CPU online/offline notification events. */ @@ -1902,6 +2262,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, rcu_prepare_kthreads(cpu); break; case CPU_ONLINE: + rcu_online_kthreads(cpu); case CPU_DOWN_FAILED: rcu_node_kthread_setaffinity(rnp, -1); rcu_cpu_kthread_setrt(cpu, 1); @@ -2049,7 +2410,6 @@ void __init rcu_init(void) rcu_init_one(&rcu_sched_state, &rcu_sched_data); rcu_init_one(&rcu_bh_state, &rcu_bh_data); __rcu_init_preempt(); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* * We don't need protection against CPU-hotplug here because diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h index 01b2ccda26fb..7b9a08b4aaea 100644 --- a/trunk/kernel/rcutree.h +++ b/trunk/kernel/rcutree.h @@ -369,7 +369,6 @@ struct rcu_state { /* period because */ /* force_quiescent_state() */ /* was running. */ - u8 boost; /* Subject to priority boost. */ unsigned long gpnum; /* Current gp number. */ unsigned long completed; /* # of last completed gp. 
*/ @@ -427,7 +426,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags); -static void rcu_stop_cpu_kthread(int cpu); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_print_detail_task_stall(struct rcu_state *rsp); static void rcu_print_task_stall(struct rcu_node *rnp); @@ -452,19 +450,11 @@ static void rcu_preempt_send_cbs_to_online(void); static void __init __rcu_init_preempt(void); static void rcu_needs_cpu_flush(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); -static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); -#ifdef CONFIG_RCU_BOOST -static void rcu_preempt_do_callbacks(void); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, cpumask_var_t cm); +static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp, int rnp_index); -static void invoke_rcu_node_kthread(struct rcu_node *rnp); -static void rcu_yield(void (*f)(unsigned long), unsigned long arg); -#endif /* #ifdef CONFIG_RCU_BOOST */ -static void rcu_cpu_kthread_setrt(int cpu, int to_rt); -static void __cpuinit rcu_prepare_kthreads(int cpu); #endif /* #ifndef RCU_TREE_NONCORE */ diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 14dc7dd00902..c8bff3099a89 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -602,15 +602,6 @@ static void rcu_preempt_process_callbacks(void) &__get_cpu_var(rcu_preempt_data)); } -#ifdef CONFIG_RCU_BOOST - -static void rcu_preempt_do_callbacks(void) -{ - rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); -} - -#endif /* #ifdef CONFIG_RCU_BOOST */ - /* * Queue a preemptible-RCU callback for invocation after a grace period. */ @@ -1257,23 +1248,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } } -/* - * Wake up the per-CPU kthread to invoke RCU callbacks. - */ -static void invoke_rcu_callbacks_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { - local_irq_restore(flags); - return; - } - wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); - local_irq_restore(flags); -} - /* * Set the affinity of the boost kthread. The CPU-hotplug locks are * held, so no one should be messing with the existence of the boost @@ -1314,7 +1288,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, if (&rcu_preempt_state != rsp) return 0; - rsp->boost = 1; if (rnp->boost_kthread_task != NULL) return 0; t = kthread_create(rcu_boost_kthread, (void *)rnp, @@ -1326,372 +1299,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, raw_spin_unlock_irqrestore(&rnp->lock, flags); sp.sched_priority = RCU_KTHREAD_PRIO; sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ return 0; } -#ifdef CONFIG_HOTPLUG_CPU - -/* - * Stop the RCU's per-CPU kthread when its CPU goes offline,. - */ -static void rcu_stop_cpu_kthread(int cpu) +static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) { - struct task_struct *t; - - /* Stop the CPU's kthread. 
*/ - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t != NULL) { - per_cpu(rcu_cpu_kthread_task, cpu) = NULL; - kthread_stop(t); - } -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); - rcu_preempt_do_callbacks(); -} - -/* - * Wake up the specified per-rcu_node-structure kthread. - * Because the per-rcu_node kthreads are immortal, we don't need - * to do anything to keep them alive. - */ -static void invoke_rcu_node_kthread(struct rcu_node *rnp) -{ - struct task_struct *t; - - t = rnp->node_kthread_task; - if (t != NULL) - wake_up_process(t); -} - -/* - * Set the specified CPU's kthread to run RT or not, as specified by - * the to_rt argument. The CPU-hotplug locks are held, so the task - * is not going away. - */ -static void rcu_cpu_kthread_setrt(int cpu, int to_rt) -{ - int policy; - struct sched_param sp; - struct task_struct *t; - - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t == NULL) - return; - if (to_rt) { - policy = SCHED_FIFO; - sp.sched_priority = RCU_KTHREAD_PRIO; - } else { - policy = SCHED_NORMAL; - sp.sched_priority = 0; - } - sched_setscheduler_nocheck(t, policy, &sp); -} - -/* - * Timer handler to initiate the waking up of per-CPU kthreads that - * have yielded the CPU due to excess numbers of RCU callbacks. - * We wake up the per-rcu_node kthread, which in turn will wake up - * the booster kthread. - */ -static void rcu_cpu_kthread_timer(unsigned long arg) -{ - struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); - struct rcu_node *rnp = rdp->mynode; - - atomic_or(rdp->grpmask, &rnp->wakemask); - invoke_rcu_node_kthread(rnp); -} - -/* - * Drop to non-real-time priority and yield, but only after posting a - * timer that will cause us to regain our real-time priority if we - * remain preempted. Either way, we restore our real-time priority - * before returning. - */ -static void rcu_yield(void (*f)(unsigned long), unsigned long arg) -{ - struct sched_param sp; - struct timer_list yield_timer; - - setup_timer_on_stack(&yield_timer, f, arg); - mod_timer(&yield_timer, jiffies + 2); - sp.sched_priority = 0; - sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); - set_user_nice(current, 19); - schedule(); - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); - del_timer(&yield_timer); -} - -/* - * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. - * This can happen while the corresponding CPU is either coming online - * or going offline. We cannot wait until the CPU is fully online - * before starting the kthread, because the various notifier functions - * can wait for RCU grace periods. So we park rcu_cpu_kthread() until - * the corresponding CPU is online. - * - * Return 1 if the kthread needs to stop, 0 otherwise. - * - * Caller must disable bh. This function can momentarily enable it. 
- */ -static int rcu_cpu_kthread_should_stop(int cpu) -{ - while (cpu_is_offline(cpu) || - !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) || - smp_processor_id() != cpu) { - if (kthread_should_stop()) - return 1; - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; - per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); - local_bh_enable(); - schedule_timeout_uninterruptible(1); - if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu))) - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - local_bh_disable(); - } - per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; - return 0; -} - -/* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * earlier RCU softirq. - */ -static int rcu_cpu_kthread(void *arg) -{ - int cpu = (int)(long)arg; - unsigned long flags; - int spincnt = 0; - unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); - char work; - char *workp = &per_cpu(rcu_cpu_has_work, cpu); - - for (;;) { - *statusp = RCU_KTHREAD_WAITING; - rcu_wait(*workp != 0 || kthread_should_stop()); - local_bh_disable(); - if (rcu_cpu_kthread_should_stop(cpu)) { - local_bh_enable(); - break; - } - *statusp = RCU_KTHREAD_RUNNING; - per_cpu(rcu_cpu_kthread_loops, cpu)++; - local_irq_save(flags); - work = *workp; - *workp = 0; - local_irq_restore(flags); - if (work) - rcu_kthread_do_work(); - local_bh_enable(); - if (*workp != 0) - spincnt++; - else - spincnt = 0; - if (spincnt > 10) { - *statusp = RCU_KTHREAD_YIELDING; - rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); - spincnt = 0; - } - } - *statusp = RCU_KTHREAD_STOPPED; - return 0; -} - -/* - * Spawn a per-CPU kthread, setting up affinity and priority. - * Because the CPU hotplug lock is held, no other CPU will be attempting - * to manipulate rcu_cpu_kthread_task. There might be another CPU - * attempting to access it during boot, but the locking in kthread_bind() - * will enforce sufficient ordering. - * - * Please note that we cannot simply refuse to wake up the per-CPU - * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, - * which can result in softlockup complaints if the task ends up being - * idle for more than a couple of minutes. - * - * However, please note also that we cannot bind the per-CPU kthread to its - * CPU until that CPU is fully online. We also cannot wait until the - * CPU is fully online before we create its per-CPU kthread, as this would - * deadlock the system when CPU notifiers tried waiting for grace - * periods. So we bind the per-CPU kthread to its CPU only if the CPU - * is online. If its CPU is not yet fully online, then the code in - * rcu_cpu_kthread() will wait until it is fully online, and then do - * the binding. - */ -static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) -{ - struct sched_param sp; - struct task_struct *t; - - if (!rcu_kthreads_spawnable || - per_cpu(rcu_cpu_kthread_task, cpu) != NULL) - return 0; - t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); - if (IS_ERR(t)) - return PTR_ERR(t); - if (cpu_online(cpu)) - kthread_bind(t, cpu); - per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; - WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - per_cpu(rcu_cpu_kthread_task, cpu) = t; - wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ - return 0; -} - -/* - * Per-rcu_node kthread, which is in charge of waking up the per-CPU - * kthreads when needed.
We ignore requests to wake up kthreads - * for offline CPUs, which is OK because force_quiescent_state() - * takes care of this case. - */ -static int rcu_node_kthread(void *arg) -{ - int cpu; - unsigned long flags; - unsigned long mask; - struct rcu_node *rnp = (struct rcu_node *)arg; - struct sched_param sp; - struct task_struct *t; - - for (;;) { - rnp->node_kthread_status = RCU_KTHREAD_WAITING; - rcu_wait(atomic_read(&rnp->wakemask) != 0); - rnp->node_kthread_status = RCU_KTHREAD_RUNNING; - raw_spin_lock_irqsave(&rnp->lock, flags); - mask = atomic_xchg(&rnp->wakemask, 0); - rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { - if ((mask & 0x1) == 0) - continue; - preempt_disable(); - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (!cpu_online(cpu) || t == NULL) { - preempt_enable(); - continue; - } - per_cpu(rcu_cpu_has_work, cpu) = 1; - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - preempt_enable(); - } - } - /* NOTREACHED */ - rnp->node_kthread_status = RCU_KTHREAD_STOPPED; - return 0; -} - -/* - * Set the per-rcu_node kthread's affinity to cover all CPUs that are - * served by the rcu_node in question. The CPU hotplug lock is still - * held, so the value of rnp->qsmaskinit will be stable. - * - * We don't include outgoingcpu in the affinity set, use -1 if there is - * no outgoing CPU. If there are no CPUs left in the affinity set, - * this function allows the kthread to execute on any CPU. - */ -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) -{ - cpumask_var_t cm; - int cpu; - unsigned long mask = rnp->qsmaskinit; - - if (rnp->node_kthread_task == NULL) - return; - if (!alloc_cpumask_var(&cm, GFP_KERNEL)) - return; - cpumask_clear(cm); - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) - if ((mask & 0x1) && cpu != outgoingcpu) - cpumask_set_cpu(cpu, cm); - if (cpumask_weight(cm) == 0) { - cpumask_setall(cm); - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) - cpumask_clear_cpu(cpu, cm); - WARN_ON_ONCE(cpumask_weight(cm) == 0); - } - set_cpus_allowed_ptr(rnp->node_kthread_task, cm); - rcu_boost_kthread_setaffinity(rnp, cm); - free_cpumask_var(cm); -} - -/* - * Spawn a per-rcu_node kthread, setting priority and affinity. - * Called during boot before online/offline can happen, or, if - * during runtime, with the main CPU-hotplug locks held. So only - * one of these can be executing at a time. - */ -static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, - struct rcu_node *rnp) -{ - unsigned long flags; - int rnp_index = rnp - &rsp->node[0]; - struct sched_param sp; - struct task_struct *t; - - if (!rcu_kthreads_spawnable || - rnp->qsmaskinit == 0) - return 0; - if (rnp->node_kthread_task == NULL) { - t = kthread_create(rcu_node_kthread, (void *)rnp, - "rcun%d", rnp_index); - if (IS_ERR(t)) - return PTR_ERR(t); - raw_spin_lock_irqsave(&rnp->lock, flags); - rnp->node_kthread_task = t; - raw_spin_unlock_irqrestore(&rnp->lock, flags); - sp.sched_priority = 99; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ - } - return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); -} - -/* - * Spawn all kthreads -- called as soon as the scheduler is running. 
- */ -static int __init rcu_spawn_kthreads(void) -{ - int cpu; - struct rcu_node *rnp; - - rcu_kthreads_spawnable = 1; - for_each_possible_cpu(cpu) { - per_cpu(rcu_cpu_has_work, cpu) = 0; - if (cpu_online(cpu)) - (void)rcu_spawn_one_cpu_kthread(cpu); - } - rnp = rcu_get_root(rcu_state); - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - if (NUM_RCU_NODES > 1) { - rcu_for_each_leaf_node(rcu_state, rnp) - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - } - return 0; -} -early_initcall(rcu_spawn_kthreads); - -static void __cpuinit rcu_prepare_kthreads(int cpu) -{ - struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); - struct rcu_node *rnp = rdp->mynode; - - /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ - if (rcu_kthreads_spawnable) { - (void)rcu_spawn_one_cpu_kthread(cpu); - if (rnp->node_kthread_task == NULL) - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - } + if (rnp->boost_kthread_task) + wake_up_process(rnp->boost_kthread_task); } #else /* #ifdef CONFIG_RCU_BOOST */ @@ -1701,32 +1315,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore(&rnp->lock, flags); } -static void invoke_rcu_callbacks_kthread(void) +static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, + cpumask_var_t cm) { - WARN_ON_ONCE(1); } static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) { } -#ifdef CONFIG_HOTPLUG_CPU - -static void rcu_stop_cpu_kthread(int cpu) -{ -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) -{ -} - -static void rcu_cpu_kthread_setrt(int cpu, int to_rt) +static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + struct rcu_node *rnp, + int rnp_index) { + return 0; } -static void __cpuinit rcu_prepare_kthreads(int cpu) +static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) { } @@ -1904,7 +1509,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); * * Because it is not legal to invoke rcu_process_callbacks() with irqs * disabled, we do one pass of force_quiescent_state(), then do a - * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked + * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. */ int rcu_needs_cpu(int cpu) @@ -1955,7 +1560,7 @@ int rcu_needs_cpu(int cpu) /* If RCU callbacks are still pending, RCU still needs this CPU. 
*/ if (c) - invoke_rcu_core(); + invoke_rcu_cpu_kthread(); return c; } diff --git a/trunk/kernel/rcutree_trace.c b/trunk/kernel/rcutree_trace.c index 4e144876dc68..9678cc3650f5 100644 --- a/trunk/kernel/rcutree_trace.c +++ b/trunk/kernel/rcutree_trace.c @@ -46,8 +46,6 @@ #define RCU_TREE_NONCORE #include "rcutree.h" -#ifdef CONFIG_RCU_BOOST - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); @@ -60,8 +58,6 @@ static char convert_kthread_status(unsigned int kthread_status) return "SRWOY"[kthread_status]; } -#endif /* #ifdef CONFIG_RCU_BOOST */ - static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) @@ -80,7 +76,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); - seq_printf(m, " ql=%ld qs=%c%c%c%c", + seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]], @@ -88,16 +84,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->nxttail[RCU_NEXT_READY_TAIL]], ".W"[rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_WAIT_TAIL]], - ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); -#ifdef CONFIG_RCU_BOOST - seq_printf(m, " kt=%d/%c/%d ktl=%x", + ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, rdp->cpu)), per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), - per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); -#endif /* #ifdef CONFIG_RCU_BOOST */ - seq_printf(m, " b=%ld", rdp->blimit); + per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, + rdp->blimit); seq_printf(m, " ci=%lu co=%lu ca=%lu\n", rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } @@ -154,21 +147,18 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); - seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, + seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]], ".R"[rdp->nxttail[RCU_WAIT_TAIL] != rdp->nxttail[RCU_NEXT_READY_TAIL]], ".W"[rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_WAIT_TAIL]], - ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); -#ifdef CONFIG_RCU_BOOST - seq_printf(m, ",%d,\"%c\"", + ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, - rdp->cpu))); -#endif /* #ifdef CONFIG_RCU_BOOST */ - seq_printf(m, ",%ld", rdp->blimit); + rdp->cpu)), + rdp->blimit); seq_printf(m, ",%lu,%lu,%lu\n", rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } @@ -179,11 +169,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) #ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); #endif /* #ifdef CONFIG_NO_HZ */ - seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); -#ifdef CONFIG_RCU_BOOST - seq_puts(m, "\"kt\",\"ktl\""); -#endif /* #ifdef CONFIG_RCU_BOOST */ - seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n"); + seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "\"rcu_preempt:\"\n"); PRINT_RCU_DATA(rcu_preempt_data, 
print_one_rcu_data_csv, m); diff --git a/trunk/kernel/smp.c b/trunk/kernel/smp.c index fb67dfa8394e..73a195193558 100644 --- a/trunk/kernel/smp.c +++ b/trunk/kernel/smp.c @@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = { .notifier_call = hotplug_cfd, }; -void __init call_function_init(void) +static int __cpuinit init_call_single_data(void) { void *cpu = (void *)(long)smp_processor_id(); int i; @@ -88,7 +88,10 @@ void __init call_function_init(void) hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); register_cpu_notifier(&hotplug_cfd_notifier); + + return 0; } +early_initcall(init_call_single_data); /* * csd_lock/csd_unlock used to serialize access to per-cpu csd resources diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index 40cf63ddd4b3..13960170cad4 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); char *softirq_to_name[NR_SOFTIRQS] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", - "TASKLET", "SCHED", "HRTIMER", "RCU" + "TASKLET", "SCHED", "HRTIMER" }; /* diff --git a/trunk/kernel/time/clocksource.c b/trunk/kernel/time/clocksource.c index e0980f0d9a0a..1c95fd677328 100644 --- a/trunk/kernel/time/clocksource.c +++ b/trunk/kernel/time/clocksource.c @@ -185,6 +185,7 @@ static struct clocksource *watchdog; static struct timer_list watchdog_timer; static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); static DEFINE_SPINLOCK(watchdog_lock); +static cycle_t watchdog_last; static int watchdog_running; static int clocksource_watchdog_kthread(void *data); @@ -253,6 +254,11 @@ static void clocksource_watchdog(unsigned long data) if (!watchdog_running) goto out; + wdnow = watchdog->read(watchdog); + wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask, + watchdog->mult, watchdog->shift); + watchdog_last = wdnow; + list_for_each_entry(cs, &watchdog_list, wd_list) { /* Clocksource already marked unstable? */ @@ -262,28 +268,19 @@ static void clocksource_watchdog(unsigned long data) continue; } - local_irq_disable(); csnow = cs->read(cs); - wdnow = watchdog->read(watchdog); - local_irq_enable(); /* Clocksource initialized ? */ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { cs->flags |= CLOCK_SOURCE_WATCHDOG; - cs->wd_last = wdnow; - cs->cs_last = csnow; + cs->wd_last = csnow; continue; } - wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask, - watchdog->mult, watchdog->shift); - - cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) & - cs->mask, cs->mult, cs->shift); - cs->cs_last = csnow; - cs->wd_last = wdnow; - /* Check the deviation from the watchdog clocksource. 
*/ + cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & + cs->mask, cs->mult, cs->shift); + cs->wd_last = csnow; if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { clocksource_unstable(cs, cs_nsec - wd_nsec); continue; @@ -321,6 +318,7 @@ static inline void clocksource_start_watchdog(void) return; init_timer(&watchdog_timer); watchdog_timer.function = clocksource_watchdog; + watchdog_last = watchdog->read(watchdog); watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); watchdog_running = 1; diff --git a/trunk/kernel/trace/trace_printk.c b/trunk/kernel/trace/trace_printk.c index 1f06468a10d7..dff763b7baf1 100644 --- a/trunk/kernel/trace/trace_printk.c +++ b/trunk/kernel/trace/trace_printk.c @@ -240,10 +240,13 @@ static const char **find_next(void *v, loff_t *pos) const char **fmt = v; int start_index; + if (!fmt) + fmt = __start___trace_bprintk_fmt + *pos; + start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; if (*pos < start_index) - return __start___trace_bprintk_fmt + *pos; + return fmt; return find_next_mod_format(start_index, v, fmt, pos); } diff --git a/trunk/mm/rmap.c b/trunk/mm/rmap.c index 27dfd3b82b0f..0eb463ea88dd 100644 --- a/trunk/mm/rmap.c +++ b/trunk/mm/rmap.c @@ -112,9 +112,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma) kmem_cache_free(anon_vma_cachep, anon_vma); } -static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) +static inline struct anon_vma_chain *anon_vma_chain_alloc(void) { - return kmem_cache_alloc(anon_vma_chain_cachep, gfp); + return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); } static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) @@ -159,7 +159,7 @@ int anon_vma_prepare(struct vm_area_struct *vma) struct mm_struct *mm = vma->vm_mm; struct anon_vma *allocated; - avc = anon_vma_chain_alloc(GFP_KERNEL); + avc = anon_vma_chain_alloc(); if (!avc) goto out_enomem; @@ -200,32 +200,6 @@ int anon_vma_prepare(struct vm_area_struct *vma) return -ENOMEM; } -/* - * This is a useful helper function for locking the anon_vma root as - * we traverse the vma->anon_vma_chain, looping over anon_vma's that - * have the same vma. - * - * Such anon_vma's should have the same root, so you'd expect to see - * just a single mutex_lock for the whole traversal. - */ -static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) -{ - struct anon_vma *new_root = anon_vma->root; - if (new_root != root) { - if (WARN_ON_ONCE(root)) - mutex_unlock(&root->mutex); - root = new_root; - mutex_lock(&root->mutex); - } - return root; -} - -static inline void unlock_anon_vma_root(struct anon_vma *root) -{ - if (root) - mutex_unlock(&root->mutex); -} - static void anon_vma_chain_link(struct vm_area_struct *vma, struct anon_vma_chain *avc, struct anon_vma *anon_vma) @@ -234,11 +208,13 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, avc->anon_vma = anon_vma; list_add(&avc->same_vma, &vma->anon_vma_chain); + anon_vma_lock(anon_vma); /* * It's critical to add new vmas to the tail of the anon_vma, * see comment in huge_memory.c:__split_huge_page(). 
*/ list_add_tail(&avc->same_anon_vma, &anon_vma->head); + anon_vma_unlock(anon_vma); } /* @@ -248,24 +224,13 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; - struct anon_vma *root = NULL; list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { - struct anon_vma *anon_vma; - - avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); - if (unlikely(!avc)) { - unlock_anon_vma_root(root); - root = NULL; - avc = anon_vma_chain_alloc(GFP_KERNEL); - if (!avc) - goto enomem_failure; - } - anon_vma = pavc->anon_vma; - root = lock_anon_vma_root(root, anon_vma); - anon_vma_chain_link(dst, avc, anon_vma); + avc = anon_vma_chain_alloc(); + if (!avc) + goto enomem_failure; + anon_vma_chain_link(dst, avc, pavc->anon_vma); } - unlock_anon_vma_root(root); return 0; enomem_failure: @@ -298,7 +263,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) anon_vma = anon_vma_alloc(); if (!anon_vma) goto out_error; - avc = anon_vma_chain_alloc(GFP_KERNEL); + avc = anon_vma_chain_alloc(); if (!avc) goto out_error_free_anon_vma; @@ -315,9 +280,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) get_anon_vma(anon_vma->root); /* Mark this anon_vma as the one where our new (COWed) pages go. */ vma->anon_vma = anon_vma; - anon_vma_lock(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); - anon_vma_unlock(anon_vma); return 0; @@ -328,43 +291,36 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) return -ENOMEM; } -void unlink_anon_vmas(struct vm_area_struct *vma) +static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain) { - struct anon_vma_chain *avc, *next; - struct anon_vma *root = NULL; + struct anon_vma *anon_vma = anon_vma_chain->anon_vma; + int empty; - /* - * Unlink each anon_vma chained to the VMA. This list is ordered - * from newest to oldest, ensuring the root anon_vma gets freed last. - */ - list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { - struct anon_vma *anon_vma = avc->anon_vma; + /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */ + if (!anon_vma) + return; - root = lock_anon_vma_root(root, anon_vma); - list_del(&avc->same_anon_vma); + anon_vma_lock(anon_vma); + list_del(&anon_vma_chain->same_anon_vma); - /* - * Leave empty anon_vmas on the list - we'll need - * to free them outside the lock. - */ - if (list_empty(&anon_vma->head)) - continue; + /* We must garbage collect the anon_vma if it's empty */ + empty = list_empty(&anon_vma->head); + anon_vma_unlock(anon_vma); - list_del(&avc->same_vma); - anon_vma_chain_free(avc); - } - unlock_anon_vma_root(root); + if (empty) + put_anon_vma(anon_vma); +} + +void unlink_anon_vmas(struct vm_area_struct *vma) +{ + struct anon_vma_chain *avc, *next; /* - * Iterate the list once more, it now only contains empty and unlinked - * anon_vmas, destroy them. Could not do before due to __put_anon_vma() - * needing to acquire the anon_vma->root->mutex. + * Unlink each anon_vma chained to the VMA. This list is ordered + * from newest to oldest, ensuring the root anon_vma gets freed last. 
*/ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { - struct anon_vma *anon_vma = avc->anon_vma; - - put_anon_vma(anon_vma); - + anon_vma_unlink(avc); list_del(&avc->same_vma); anon_vma_chain_free(avc); } diff --git a/trunk/security/keys/request_key.c b/trunk/security/keys/request_key.c index 8e319a416eec..d31862e0aa1c 100644 --- a/trunk/security/keys/request_key.c +++ b/trunk/security/keys/request_key.c @@ -71,8 +71,9 @@ EXPORT_SYMBOL(complete_request_key); * This is called in context of freshly forked kthread before kernel_execve(), * so we can simply install the desired session_keyring at this point. */ -static int umh_keys_init(struct subprocess_info *info, struct cred *cred) +static int umh_keys_init(struct subprocess_info *info) { + struct cred *cred = (struct cred*)current_cred(); struct key *keyring = info->data; return install_session_keyring_to_cred(cred, keyring); diff --git a/trunk/sound/firewire/isight.c b/trunk/sound/firewire/isight.c index 440030818db7..86ee16ca365e 100644 --- a/trunk/sound/firewire/isight.c +++ b/trunk/sound/firewire/isight.c @@ -209,7 +209,6 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle, isight->packet_index = -1; return; } - fw_iso_context_queue_flush(isight->context); if (++index >= QUEUE_LENGTH) index = 0; diff --git a/trunk/sound/pci/emu10k1/emu10k1_main.c b/trunk/sound/pci/emu10k1/emu10k1_main.c index 15f0161ce4a2..5e619a84da06 100644 --- a/trunk/sound/pci/emu10k1/emu10k1_main.c +++ b/trunk/sound/pci/emu10k1/emu10k1_main.c @@ -1440,14 +1440,6 @@ static struct snd_emu_chip_details emu_chip_details[] = { .ca0102_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */ - /* EMU0404 PCIe */ - {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102, - .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]", - .id = "EMU0404", - .emu10k2_chip = 1, - .ca0108_chip = 1, - .spk71 = 1, - .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */ /* Note that all E-mu cards require kernel 2.6 or newer. */ {.vendor = 0x1102, .device = 0x0008, .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]", diff --git a/trunk/sound/pci/hda/hda_beep.h b/trunk/sound/pci/hda/hda_beep.h index 55f0647458c7..f1de1bac042c 100644 --- a/trunk/sound/pci/hda/hda_beep.h +++ b/trunk/sound/pci/hda/hda_beep.h @@ -50,12 +50,7 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable); int snd_hda_attach_beep_device(struct hda_codec *codec, int nid); void snd_hda_detach_beep_device(struct hda_codec *codec); #else -static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid) -{ - return 0; -} -static inline void snd_hda_detach_beep_device(struct hda_codec *codec) -{ -} +#define snd_hda_attach_beep_device(...) 0 +#define snd_hda_detach_beep_device(...) 
#endif #endif diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index 61a774b3d3cb..43fcfbd32847 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ b/trunk/sound/pci/hda/patch_realtek.c @@ -13316,8 +13316,9 @@ static void alc268_acer_lc_setup(struct hda_codec *codec) struct alc_spec *spec = codec->spec; spec->autocfg.hp_pins[0] = 0x15; spec->autocfg.speaker_pins[0] = 0x14; + spec->automute_mixer_nid[0] = 0x0f; spec->automute = 1; - spec->automute_mode = ALC_AUTOMUTE_AMP; + spec->automute_mode = ALC_AUTOMUTE_MIXER; spec->ext_mic.pin = 0x18; spec->ext_mic.mux_idx = 0; spec->int_mic.pin = 0x12; diff --git a/trunk/sound/pci/hda/patch_via.c b/trunk/sound/pci/hda/patch_via.c index c952582fb218..605c99e1e520 100644 --- a/trunk/sound/pci/hda/patch_via.c +++ b/trunk/sound/pci/hda/patch_via.c @@ -832,13 +832,10 @@ static int via_hp_build(struct hda_codec *codec) knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; knew->private_value = nid; - nid = side_mute_channel(spec); - if (nid) { - knew = via_clone_control(spec, &via_hp_mixer[1]); - if (knew == NULL) - return -ENOMEM; - knew->subdevice = nid; - } + knew = via_clone_control(spec, &via_hp_mixer[1]); + if (knew == NULL) + return -ENOMEM; + knew->subdevice = side_mute_channel(spec); return 0; } diff --git a/trunk/sound/pci/lola/lola.c b/trunk/sound/pci/lola/lola.c index 2692e5ae5f2d..34b24286d279 100644 --- a/trunk/sound/pci/lola/lola.c +++ b/trunk/sound/pci/lola/lola.c @@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip) lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */ } -static int __devinit lola_parse_tree(struct lola *chip) +static int lola_parse_tree(struct lola *chip) { unsigned int val; int nid, err; diff --git a/trunk/sound/pci/rme9652/hdspm.c b/trunk/sound/pci/rme9652/hdspm.c index 3f08afc0f0d3..949691a876d3 100644 --- a/trunk/sound/pci/rme9652/hdspm.c +++ b/trunk/sound/pci/rme9652/hdspm.c @@ -521,7 +521,6 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}"); #define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024) /* revisions >= 230 indicate AES32 card */ -#define HDSPM_MADI_OLD_REV 207 #define HDSPM_MADI_REV 210 #define HDSPM_RAYDAT_REV 211 #define HDSPM_AIO_REV 212 @@ -1144,7 +1143,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm) /* if wordclock has synced freq and wordclock is valid */ if ((status2 & HDSPM_wcLock) != 0 && - (status2 & HDSPM_SelSyncRef0) == 0) { + (status & HDSPM_SelSyncRef0) == 0) { rate_bits = status2 & HDSPM_wcFreqMask; @@ -1640,14 +1639,12 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi) } } hmidi->pending = 0; - spin_unlock_irqrestore(&hmidi->lock, flags); - spin_lock_irqsave(&hmidi->hdspm->lock, flags); hmidi->hdspm->control_register |= hmidi->ie; hdspm_write(hmidi->hdspm, HDSPM_controlRegister, hmidi->hdspm->control_register); - spin_unlock_irqrestore(&hmidi->hdspm->lock, flags); + spin_unlock_irqrestore (&hmidi->lock, flags); return snd_hdspm_midi_output_write (hmidi); } @@ -6380,7 +6377,6 @@ static int __devinit snd_hdspm_create(struct snd_card *card, switch (hdspm->firmware_rev) { case HDSPM_MADI_REV: - case HDSPM_MADI_OLD_REV: hdspm->io_type = MADI; hdspm->card_name = "RME MADI"; hdspm->midiPorts = 3; diff --git a/trunk/sound/usb/6fire/firmware.c b/trunk/sound/usb/6fire/firmware.c index 1e3ae3327dd3..a91719d5918b 100644 --- a/trunk/sound/usb/6fire/firmware.c +++ b/trunk/sound/usb/6fire/firmware.c @@ -270,6 +270,7 @@ static int usb6fire_fw_ezusb_upload( data = 0x00; /* resume ezusb cpu */ ret = 
usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1); if (ret < 0) { + release_firmware(fw); snd_printk(KERN_ERR PREFIX "unable to upload ezusb " "firmware %s: end message.\n", fwname); return ret; diff --git a/trunk/sound/usb/6fire/pcm.c b/trunk/sound/usb/6fire/pcm.c index d144cdb2f159..b137b25865cc 100644 --- a/trunk/sound/usb/6fire/pcm.c +++ b/trunk/sound/usb/6fire/pcm.c @@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub) alsa_rt->hw = pcm_hw; if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) { - if (rt->rate < ARRAY_SIZE(rates)) + if (rt->rate >= 0) alsa_rt->hw.rates = rates_alsaid[rt->rate]; alsa_rt->hw.channels_max = OUT_N_CHANNELS; sub = &rt->playback; } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) { - if (rt->rate < ARRAY_SIZE(rates)) + if (rt->rate >= 0) alsa_rt->hw.rates = rates_alsaid[rt->rate]; alsa_rt->hw.channels_max = IN_N_CHANNELS; sub = &rt->capture; diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 940257b5774e..032ba6398a5c 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix)) SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) -LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group +LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS) ALL_CFLAGS += $(BASIC_CFLAGS) ALL_CFLAGS += $(ARCH_CFLAGS) diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c index 0a7ed5b5e281..1e88485c16a0 100644 --- a/trunk/tools/perf/util/trace-event-parse.c +++ b/trunk/tools/perf/util/trace-event-parse.c @@ -2187,7 +2187,6 @@ static const struct flag flags[] = { { "TASKLET_SOFTIRQ", 6 }, { "SCHED_SOFTIRQ", 7 }, { "HRTIMER_SOFTIRQ", 8 }, - { "RCU_SOFTIRQ", 9 }, { "HRTIMER_NORESTART", 0 }, { "HRTIMER_RESTART", 1 },