diff --git a/[refs] b/[refs]
index 5d06064fc110..eacc984af210 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 85d45adef06caa988506686527a5fedf856dc550
+refs/heads/master: 8323fa6ba313ae2664420ec34d56a7fb0bbbe525
diff --git a/trunk/Documentation/filesystems/proc.txt b/trunk/Documentation/filesystems/proc.txt
index db3b1aba32a3..f48178024067 100644
--- a/trunk/Documentation/filesystems/proc.txt
+++ b/trunk/Documentation/filesystems/proc.txt
@@ -843,7 +843,6 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
     TASKLET:          0          0          0        290
       SCHED:      27035      26983      26971      26746
     HRTIMER:          0          0          0          0
-        RCU:       1678       1769       2178       2250


1.3 IDE devices in /proc/ide
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index f0358cd91de3..502f2dd761eb 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2291,7 +2291,8 @@ F:	drivers/scsi/eata_pio.*
 
 EBTABLES
 M:	Bart De Schuymer
-L:	netfilter-devel@vger.kernel.org
+L:	ebtables-user@lists.sourceforge.net
+L:	ebtables-devel@lists.sourceforge.net
 W:	http://ebtables.sourceforge.net/
 S:	Maintained
 F:	include/linux/netfilter_bridge/ebt_*.h
@@ -7006,13 +7007,6 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:	Maintained
 F:	drivers/platform/x86
 
-X86 MCE INFRASTRUCTURE
-M:	Tony Luck
-M:	Borislav Petkov
-L:	linux-edac@vger.kernel.org
-S:	Maintained
-F:	arch/x86/kernel/cpu/mcheck/*
-
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge
 M:	Konrad Rzeszutek Wilk
diff --git a/trunk/arch/arm/mach-msm/timer.c b/trunk/arch/arm/mach-msm/timer.c
index 63621f152c98..38b95e949d13 100644
--- a/trunk/arch/arm/mach-msm/timer.c
+++ b/trunk/arch/arm/mach-msm/timer.c
@@ -23,8 +23,6 @@
 #include
 #include
 
-#include
-
 #include
 #include
 
@@ -57,12 +55,10 @@ enum timer_location {
 #if defined(CONFIG_ARCH_QSD8X50)
 #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
 #define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30)
+#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
+      defined(CONFIG_ARCH_MSM8960)
 #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
 #define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
-#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
-#define MSM_DGT_SHIFT (0)
 #else
 #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
 #define MSM_DGT_SHIFT (5)
@@ -104,11 +100,7 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
 {
 	struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
 
-	/*
-	 * Shift timer count down by a constant due to unreliable lower bits
-	 * on some targets.
-	 */
-	return readl(clk->global_counter) >> clk->shift;
+	return readl(clk->global_counter);
 }
 
 static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
diff --git a/trunk/arch/x86/include/asm/memblock.h b/trunk/arch/x86/include/asm/memblock.h
index 0cd3800f33b9..19ae14ba6978 100644
--- a/trunk/arch/x86/include/asm/memblock.h
+++ b/trunk/arch/x86/include/asm/memblock.h
@@ -4,6 +4,7 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
+void memblock_x86_to_bootmem(u64 start, u64 end);
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
 
@@ -18,6 +19,5 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
-bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
diff --git a/trunk/arch/x86/include/asm/pvclock.h b/trunk/arch/x86/include/asm/pvclock.h
index a518c0a45044..31d84acc1512 100644
--- a/trunk/arch/x86/include/asm/pvclock.h
+++ b/trunk/arch/x86/include/asm/pvclock.h
@@ -22,8 +22,6 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	u64 product;
 #ifdef __i386__
 	u32 tmp1, tmp2;
-#else
-	ulong tmp;
 #endif
 
 	if (shift < 0)
@@ -44,11 +42,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
 	__asm__ (
-		"mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
-		: [lo]"=a"(product),
-		  [hi]"=d"(tmp)
-		: "0"(delta),
-		  [mul_frac]"rm"((u64)mul_frac));
+		"mul %%rdx ; shrd $32,%%rdx,%%rax"
+		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
 #else
 #error implement me!
 #endif
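For reference, both asm variants above compute the same fixed-point scaling: (delta * mul_frac) >> 32, after the shift adjustment. A minimal portable C sketch (not kernel code; unsigned __int128 is a GCC/Clang extension standing in for the rdx:rax product the "mul ; shrd" pair produces):

#include <stdint.h>

/* Sketch only: the scaling pvclock_scale_delta() performs, written
 * portably. The 64x64->128 multiply keeps full precision; shifting the
 * 128-bit product right by 32 extracts bits 32..95, exactly what
 * "mul %%rdx ; shrd $32,%%rdx,%%rax" does on x86-64. */
static uint64_t scale_delta_sketch(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}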
diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c
index aee38623b768..bd14bb4c8594 100644
--- a/trunk/arch/x86/kvm/mmu.c
+++ b/trunk/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+	return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/trunk/arch/x86/kvm/paging_tmpl.h b/trunk/arch/x86/kvm/paging_tmpl.h
index 9d03ad4dd5ec..6c4dc010c4cb 100644
--- a/trunk/arch/x86/kvm/paging_tmpl.h
+++ b/trunk/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    gva_t addr, u32 access)
 {
 	pt_element_t pte;
-	pt_element_t __user *uninitialized_var(ptep_user);
+	pt_element_t __user *ptep_user;
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c
index d48ec60ea421..4c3fa0f67469 100644
--- a/trunk/arch/x86/kvm/vmx.c
+++ b/trunk/arch/x86/kvm/vmx.c
@@ -2047,8 +2047,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
-	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
-		vmx_decache_cr3(vcpu);
+	vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/trunk/arch/x86/mm/memblock.c b/trunk/arch/x86/mm/memblock.c
index 992da5ec5a64..aa1169392b83 100644
--- a/trunk/arch/x86/mm/memblock.c
+++ b/trunk/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include
 
 /* Check for already reserved areas */
-bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
 	struct memblock_region *r;
 	u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
 		if (addr >= ei_last)
 			continue;
 		*sizep = ei_last - addr;
-		while (memblock_x86_check_reserved_size(&addr, sizep, align))
+		while (check_with_memblock_reserved_size(&addr, sizep, align))
 			;
 
 		if (*sizep)
diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c
index 474356b98ede..0d3a4fa34560 100644
--- a/trunk/arch/x86/platform/efi/efi.c
+++ b/trunk/arch/x86/platform/efi/efi.c
@@ -310,31 +310,14 @@ void __init efi_reserve_boot_services(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		efi_memory_desc_t *md = p;
-		u64 start = md->phys_addr;
-		u64 size = md->num_pages << EFI_PAGE_SHIFT;
+		unsigned long long start = md->phys_addr;
+		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 
 		if (md->type != EFI_BOOT_SERVICES_CODE &&
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
-		/* Only reserve where possible:
-		 * - Not within any already allocated areas
-		 * - Not over any memory area (really needed, if above?)
-		 * - Not within any part of the kernel
-		 * - Not the bios reserved area
-		 */
-		if ((start+size >= virt_to_phys(_text)
-				&& start <= virt_to_phys(_end)) ||
-			!e820_all_mapped(start, start+size, E820_RAM) ||
-			memblock_x86_check_reserved_size(&start, &size,
-							1<<EFI_PAGE_SHIFT)) {
-			/* Could not reserve, skip it. */
-			md->num_pages = 0;
-			memblock_dbg(PFX "Could not reserve boot range "
-					"[0x%010llx-0x%010llx]\n",
-						start, start+size-1);
-		} else
-			memblock_x86_reserve_range(start, start+size,
-							"EFI Boot");
+
+		memblock_x86_reserve_range(start, start + size, "EFI Boot");
 	}
 }
 
@@ -351,10 +334,6 @@ static void __init efi_free_boot_services(void)
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
 
-		/* Could not reserve boot area */
-		if (!size)
-			continue;
-
 		free_bootmem_late(start, size);
 	}
 }
diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c
index 5525163a0398..dd7b88f2ec7a 100644
--- a/trunk/arch/x86/xen/enlighten.c
+++ b/trunk/arch/x86/xen/enlighten.c
@@ -1033,13 +1033,6 @@ static void xen_machine_halt(void)
 	xen_reboot(SHUTDOWN_poweroff);
 }
 
-static void xen_machine_power_off(void)
-{
-	if (pm_power_off)
-		pm_power_off();
-	xen_reboot(SHUTDOWN_poweroff);
-}
-
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
 	xen_reboot(SHUTDOWN_crash);
@@ -1065,7 +1058,7 @@ int xen_panic_handler_init(void)
 static const struct machine_ops xen_machine_ops __initconst = {
 	.restart = xen_restart,
 	.halt = xen_machine_halt,
-	.power_off = xen_machine_power_off,
+	.power_off = xen_machine_halt,
 	.shutdown = xen_machine_halt,
 	.crash_shutdown = xen_crash_shutdown,
 	.emergency_restart = xen_emergency_restart,
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index 673e968df3cf..dc708dcc62f1 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -59,7 +59,6 @@
 #include
 #include
 #include
-#include
 
 #include
 #include
@@ -1232,7 +1231,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
 	struct {
 		struct mmuext_op op;
-		DECLARE_BITMAP(mask, num_processors);
+		DECLARE_BITMAP(mask, NR_CPUS);
 	} *args;
 	struct multicall_space mcs;
 
@@ -1600,11 +1599,6 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 			pte_t pte;
 
-#ifdef CONFIG_X86_32
-			if (pfn > max_pfn_mapped)
-				max_pfn_mapped = pfn;
-#endif
-
 			if (!pte_none(pte_page[pteidx]))
 				continue;
 
@@ -1772,9 +1766,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c
index 60aeeb56948f..be1a464f6d66 100644
--- a/trunk/arch/x86/xen/setup.c
+++ b/trunk/arch/x86/xen/setup.c
@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void)
 
 	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
+#ifdef CONFIG_X86_32
 	xen_extra_mem_start = mem_end;
+#else
+	xen_extra_mem_start = max((1ULL << 32), mem_end);
+#endif
 	for (i = 0; i < memmap.nr_entries; i++) {
 		unsigned long long end;
 
@@ -262,12 +266,6 @@ char * __init xen_memory_setup(void)
 		if (map[i].size > 0)
 			e820_add_region(map[i].addr, map[i].size, map[i].type);
 	}
-	/* Align the balloon area so that max_low_pfn does not get set
-	 * to be at the _end_ of the PCI gap at the far end (fee01000).
-	 * Note that xen_extra_mem_start gets set in the loop above to be
-	 * past the last E820 region. */
-	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
-		xen_extra_mem_start = (1ULL<<32);
 
 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c
index b4533a86d7e4..41038c01de40 100644
--- a/trunk/arch/x86/xen/smp.c
+++ b/trunk/arch/x86/xen/smp.c
@@ -205,18 +205,11 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
-	unsigned int i;
 
 	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
-
-	for_each_possible_cpu(i) {
-		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
-	}
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
diff --git a/trunk/drivers/bluetooth/btmrvl_debugfs.c b/trunk/drivers/bluetooth/btmrvl_debugfs.c
index 8ecf4c6c2874..fd6305bf953e 100644
--- a/trunk/drivers/bluetooth/btmrvl_debugfs.c
+++ b/trunk/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,8 +64,6 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.hscfgcmd = result;
 
@@ -110,8 +108,6 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.psmode = result;
 
@@ -151,8 +147,6 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.pscmd = result;
 
@@ -197,8 +191,6 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 16, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.gpio_gap = result;
 
@@ -238,8 +230,6 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.hscmd = result;
 	if (priv->btmrvl_dev.hscmd) {
@@ -282,8 +272,6 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.hsmode = result;
 
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
index f55b64cb59d1..012f80251e50 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1431,7 +1431,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+			/* some early dce3.2 boards have a bug in their transmitter control table */
+			if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+			else
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
 				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
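The btmrvl hunks above drop the error check after strict_strtol(), so a failed parse would let a stale or garbage value reach the device. A standalone userspace sketch of the check being removed (strtol() plus errno standing in for the kernel helper, which returns a negative errno on a bad parse):

#include <errno.h>
#include <stdlib.h>

/* Sketch only: parse a long and refuse to hand back a value unless the
 * parse actually succeeded, mirroring the "if (ret) return ret;" lines
 * the hunks delete. */
static int parse_long_checked(const char *buf, int base, long *out)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(buf, &end, base);
	if (errno || end == buf)	/* overflow, or nothing parsed */
		return -EINVAL;

	*out = val;			/* only reached on success */
	return 0;
}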
diff --git a/trunk/drivers/input/evdev.c b/trunk/drivers/input/evdev.c
index 4cf25347b015..be0921ef6b52 100644
--- a/trunk/drivers/input/evdev.c
+++ b/trunk/drivers/input/evdev.c
@@ -111,8 +111,7 @@ static void evdev_event(struct input_handle *handle,
 
 	rcu_read_unlock();
 
-	if (type == EV_SYN && code == SYN_REPORT)
-		wake_up_interruptible(&evdev->wait);
+	wake_up_interruptible(&evdev->wait);
 }
 
 static int evdev_fasync(int fd, struct file *file, int on)
diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c
index da38d97a51b1..75e11c7b70fd 100644
--- a/trunk/drivers/input/input.c
+++ b/trunk/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
 	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
 		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
 			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
-		mt_slots = clamp(mt_slots, 2, 32);
+		clamp(mt_slots, 2, 32);
 	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
 		mt_slots = 2;
 	} else {
diff --git a/trunk/drivers/input/keyboard/omap-keypad.c b/trunk/drivers/input/keyboard/omap-keypad.c
index 33d0bdc837c0..f23a743817db 100644
--- a/trunk/drivers/input/keyboard/omap-keypad.c
+++ b/trunk/drivers/input/keyboard/omap-keypad.c
@@ -209,7 +209,6 @@ static void omap_kp_tasklet(unsigned long data)
 #endif
 			}
 		}
-		input_sync(omap_kp_data->input);
 
 		memcpy(keypad_state, new_state, sizeof(keypad_state));
 
 		if (key_down) {
diff --git a/trunk/drivers/input/keyboard/sh_keysc.c b/trunk/drivers/input/keyboard/sh_keysc.c
index 6876700a4469..834cf98e7efb 100644
--- a/trunk/drivers/input/keyboard/sh_keysc.c
+++ b/trunk/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
 	[SH_KEYSC_MODE_3] = { 2, 4, 7 },
 	[SH_KEYSC_MODE_4] = { 3, 6, 6 },
 	[SH_KEYSC_MODE_5] = { 4, 6, 7 },
-	[SH_KEYSC_MODE_6] = { 5, 8, 8 },
+	[SH_KEYSC_MODE_6] = { 5, 7, 7 },
 };
 
 struct sh_keysc_priv {
diff --git a/trunk/drivers/input/mousedev.c b/trunk/drivers/input/mousedev.c
index 0110b5a3a167..257e033986e4 100644
--- a/trunk/drivers/input/mousedev.c
+++ b/trunk/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
 		if (size == 0)
 			size = xres ? : 1;
 
-		value = clamp(value, min, max);
+		clamp(value, min, max);
 
 		mousedev->packet.x = ((value - min) * xres) / size;
 		mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
 		if (size == 0)
 			size = yres ? : 1;
 
-		value = clamp(value, min, max);
+		clamp(value, min, max);
 
 		mousedev->packet.y = yres - ((value - min) * yres) / size;
 		mousedev->packet.abs_event = 1;
 
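The input.c and mousedev.c hunks above turn "value = clamp(...)" into a bare "clamp(...)". The kernel's clamp() is an expression macro, so discarding its return value makes the call a no-op. A self-contained sketch of why the assignment matters (the macro below is a simplified stand-in, not the kernel's type-checked version):

/* Sketch only: clamp() evaluates to the bounded value; it does not
 * modify its argument in place. */
#define clamp_sketch(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int demo(int value)
{
	clamp_sketch(value, 2, 32);		/* no effect: result discarded */
	value = clamp_sketch(value, 2, 32);	/* correct: 2 <= value <= 32 */
	return value;
}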
diff --git a/trunk/drivers/isdn/gigaset/interface.c b/trunk/drivers/isdn/gigaset/interface.c
index e35058bcd7b9..59de638225fe 100644
--- a/trunk/drivers/isdn/gigaset/interface.c
+++ b/trunk/drivers/isdn/gigaset/interface.c
@@ -156,10 +156,8 @@ static int if_open(struct tty_struct *tty, struct file *filp)
 	if (!cs || !try_module_get(cs->driver->owner))
 		return -ENODEV;
 
-	if (mutex_lock_interruptible(&cs->mutex)) {
-		module_put(cs->driver->owner);
+	if (mutex_lock_interruptible(&cs->mutex))
 		return -ERESTARTSYS;
-	}
 	tty->driver_data = cs;
 
 	++cs->open_count;
diff --git a/trunk/drivers/misc/sgi-xp/xpnet.c b/trunk/drivers/misc/sgi-xp/xpnet.c
index 42f067347bc7..ee5109a3cd98 100644
--- a/trunk/drivers/misc/sgi-xp/xpnet.c
+++ b/trunk/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	if (atomic_dec_return(&queued_msg->use_count) == 0) {
 		dev_kfree_skb(skb);
 		kfree(queued_msg);
 	}
 
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
 	return NETDEV_TX_OK;
 }
 
diff --git a/trunk/drivers/net/3c503.c b/trunk/drivers/net/3c503.c
index 5b732988d493..d84f6e8903a5 100644
--- a/trunk/drivers/net/3c503.c
+++ b/trunk/drivers/net/3c503.c
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
 	    outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
 	    outb_p(0x00, E33G_IDCFR);
 	    msleep(1);
-	    free_irq(*irqp, &seen);
+	    free_irq(*irqp, el2_probe_interrupt);
 	    if (!seen)
 		continue;
 
@@ -422,7 +422,6 @@ el2_open(struct net_device *dev)
 		continue;
 	    if (retval < 0)
 		goto err_disable;
-	    break;
 	} while (*++irqp);
 
 	if (*irqp == 0) {
diff --git a/trunk/drivers/net/bfin_mac.c b/trunk/drivers/net/bfin_mac.c
index 6c019e148546..68d45ba2d9b9 100644
--- a/trunk/drivers/net/bfin_mac.c
+++ b/trunk/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
 
 #if defined(CONFIG_BFIN_MAC_USE_L1)
-# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
-# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
+# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
+# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
 #else
-# define bfin_mac_alloc(dma_handle, size, num) \
-	dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
-# define bfin_mac_free(dma_handle, ptr, num) \
-	dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
+# define bfin_mac_alloc(dma_handle, size) \
+	dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr) \
+	dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
 #endif
 
 #define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
 				t = t->next;
 			}
 		}
-		bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
+		bfin_mac_free(dma_handle, tx_desc);
 	}
 
 	if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
 				r = r->next;
 			}
 		}
-		bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
+		bfin_mac_free(dma_handle, rx_desc);
 	}
 }
 
@@ -126,13 +126,13 @@ static int desc_list_init(void)
 #endif
 
 	tx_desc = bfin_mac_alloc(&dma_handle,
-				sizeof(struct net_dma_desc_tx),
+				sizeof(struct net_dma_desc_tx) *
 				CONFIG_BFIN_TX_DESC_NUM);
 	if (tx_desc == NULL)
 		goto init_error;
 
 	rx_desc = bfin_mac_alloc(&dma_handle,
-				sizeof(struct net_dma_desc_rx),
+				sizeof(struct net_dma_desc_rx) *
 				CONFIG_BFIN_RX_DESC_NUM);
 	if (rx_desc == NULL)
 		goto init_error;
 
diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c
index eafe44a528ac..652b30e525d0 100644
--- a/trunk/drivers/net/bonding/bond_main.c
+++ b/trunk/drivers/net/bonding/bond_main.c
@@ -1297,7 +1297,6 @@ static inline int slave_enable_netpoll(struct slave *slave)
 		goto out;
 
 	np->dev = slave->dev;
-	strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
 	err = __netpoll_setup(np);
 	if (err) {
 		kfree(np);
diff --git a/trunk/drivers/net/fs_enet/mac-fcc.c b/trunk/drivers/net/fs_enet/mac-fcc.c
index 7583a9572bcc..7a84e45487e8 100644
--- a/trunk/drivers/net/fs_enet/mac-fcc.c
+++ b/trunk/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
 		goto out_ep;
 
 	fep->fcc.mem = (void __iomem *)cpm2_immr;
-	fpi->dpram_offset = cpm_dpalloc(128, 32);
+	fpi->dpram_offset = cpm_dpalloc(128, 8);
 	if (IS_ERR_VALUE(fpi->dpram_offset)) {
 		ret = fpi->dpram_offset;
 		goto out_fcccp;
diff --git a/trunk/drivers/net/hp100.c b/trunk/drivers/net/hp100.c
index c3ecb118c1df..8e10d2f6a5ad 100644
--- a/trunk/drivers/net/hp100.c
+++ b/trunk/drivers/net/hp100.c
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
 	hp100_outl(ringptr->pdl_paddr, TX_PDA_L);	/* Low Prio. Queue */
 
 	lp->txrcommit++;
 
+	spin_unlock_irqrestore(&lp->lock, flags);
+
 	/* Update statistics */
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 
-	spin_unlock_irqrestore(&lp->lock, flags);
-
 	return NETDEV_TX_OK;
 
 drop:
diff --git a/trunk/drivers/net/hplance.c b/trunk/drivers/net/hplance.c
index a900d5bf2948..b6060f7538df 100644
--- a/trunk/drivers/net/hplance.c
+++ b/trunk/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
 }
 
 /* Initialise a single lance board at the given DIO device */
-static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
+static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
 {
 	unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
 	struct hplance_private *lp;
diff --git a/trunk/drivers/net/netxen/netxen_nic_main.c b/trunk/drivers/net/netxen/netxen_nic_main.c
index c0788a31ff0f..b644383017f9 100644
--- a/trunk/drivers/net/netxen/netxen_nic_main.c
+++ b/trunk/drivers/net/netxen/netxen_nic_main.c
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	netxen_tso_check(netdev, tx_ring, first_desc, skb);
 
+	netxen_nic_update_cmd_producer(adapter, tx_ring);
+
 	adapter->stats.txbytes += skb->len;
 	adapter->stats.xmitcalled++;
 
-	netxen_nic_update_cmd_producer(adapter, tx_ring);
-
 	return NETDEV_TX_OK;
 
 drop_packet:
diff --git a/trunk/drivers/net/phy/Kconfig b/trunk/drivers/net/phy/Kconfig
index a70244306c94..392a6c4b72e5 100644
--- a/trunk/drivers/net/phy/Kconfig
+++ b/trunk/drivers/net/phy/Kconfig
@@ -58,7 +58,6 @@ config BROADCOM_PHY
 
 config BCM63XX_PHY
 	tristate "Drivers for Broadcom 63xx SOCs internal PHY"
-	depends on BCM63XX
 	---help---
 	  Currently supports the 6348 and 6358 PHYs.
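The xpnet, hp100 and netxen hunks above all move TX statistics accounting relative to the point where the skb is handed off (freed, or made visible to the hardware via the producer index); once ownership passes, a later skb->len read can be a use-after-free. A self-contained sketch of the safe ordering, with generic stand-in types (nothing here is kernel API):

#include <stdlib.h>

struct packet { unsigned int len; /* ... payload ... */ };
struct stats { unsigned long tx_packets, tx_bytes; };

/* hw_submit() stands in for the real handoff (ring post, free, ...);
 * after it returns, pkt must be treated as gone. */
static void hw_submit(struct packet *pkt) { free(pkt); }

static void xmit(struct packet *pkt, struct stats *st)
{
	unsigned int len = pkt->len;	/* snapshot before the handoff */

	hw_submit(pkt);			/* pkt may be freed from here on */

	st->tx_packets++;
	st->tx_bytes += len;		/* safe: uses the snapshot, not pkt->len */
}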
diff --git a/trunk/drivers/net/phy/dp83640.c b/trunk/drivers/net/phy/dp83640.c
index 2cd8dc5847b4..b0c9522bb535 100644
--- a/trunk/drivers/net/phy/dp83640.c
+++ b/trunk/drivers/net/phy/dp83640.c
@@ -543,20 +543,11 @@ static void recalibrate(struct dp83640_clock *clock)
 
 /* time stamping methods */
 
-static int decode_evnt(struct dp83640_private *dp83640,
-		       void *data, u16 ests)
+static void decode_evnt(struct dp83640_private *dp83640,
+			struct phy_txts *phy_txts, u16 ests)
 {
-	struct phy_txts *phy_txts;
 	struct ptp_clock_event event;
 	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
-	u16 ext_status = 0;
-
-	if (ests & MULT_EVNT) {
-		ext_status = *(u16 *) data;
-		data += sizeof(ext_status);
-	}
-
-	phy_txts = data;
 
 	switch (words) { /* fall through in every case */
 	case 3:
@@ -574,9 +565,6 @@ static int decode_evnt(struct dp83640_private *dp83640,
 
 	event.timestamp = phy2txts(&dp83640->edata);
 	ptp_clock_event(dp83640->clock->ptp_clock, &event);
-
-	words = ext_status ? words + 2 : words + 1;
-	return words * sizeof(u16);
 }
 
 static void decode_rxts(struct dp83640_private *dp83640,
@@ -655,7 +643,9 @@ static void decode_status_frame(struct dp83640_private *dp83640,
 
 		} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
 
-			size = decode_evnt(dp83640, ptr, ests);
+			phy_txts = (struct phy_txts *) ptr;
+			decode_evnt(dp83640, phy_txts, ests);
+			size = sizeof(*phy_txts);
 
 		} else {
 			size = 0;
@@ -1044,8 +1034,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 
 	if (is_status_frame(skb, type)) {
 		decode_status_frame(dp83640, skb);
-		kfree_skb(skb);
-		return true;
+		/* Let the stack drop this frame. */
+		return false;
 	}
 
 	SKB_PTP_TYPE(skb) = type;
diff --git a/trunk/drivers/net/ppp_async.c b/trunk/drivers/net/ppp_async.c
index c554a397e558..a1b82c9c67d2 100644
--- a/trunk/drivers/net/ppp_async.c
+++ b/trunk/drivers/net/ppp_async.c
@@ -523,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
 #define PUT_BYTE(ap, buf, c, islcp)	do {		\
 	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
 		*buf++ = PPP_ESCAPE;			\
-		*buf++ = c ^ PPP_TRANS;			\
+		*buf++ = c ^ 0x20;			\
 	} else						\
 		*buf++ = c;				\
 } while (0)
@@ -896,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
 				sp = skb_put(skb, n);
 				memcpy(sp, buf, n);
 				if (ap->state & SC_ESCAPE) {
-					sp[0] ^= PPP_TRANS;
+					sp[0] ^= 0x20;
 					ap->state &= ~SC_ESCAPE;
 				}
 			}
diff --git a/trunk/drivers/net/pxa168_eth.c b/trunk/drivers/net/pxa168_eth.c
index 5f597ca592bb..89f7540d90f9 100644
--- a/trunk/drivers/net/pxa168_eth.c
+++ b/trunk/drivers/net/pxa168_eth.c
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
 
-	stats->tx_bytes += length;
+	stats->tx_bytes += skb->len;
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c
index 05d81780d1fd..ef1ce2ebeb4a 100644
--- a/trunk/drivers/net/r8169.c
+++ b/trunk/drivers/net/r8169.c
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 	 *
 	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
 	 */
-	static const struct rtl_mac_info {
+	static const struct {
 		u32 mask;
 		u32 val;
 		int mac_version;
@@ -1689,8 +1689,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 
 		/* Catch-all */
 		{ 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE   }
-	};
-	const struct rtl_mac_info *p = mac_info;
+	}, *p = mac_info;
 	u32 reg;
 
 	reg = RTL_R32(TxConfig);
@@ -3682,7 +3681,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 {
-	static const struct rtl_cfg2_info {
+	static const struct {
 		u32 mac_version;
 		u32 clk;
 		u32 val;
@@ -3691,8 +3690,7 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
 		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
 		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
-	};
-	const struct rtl_cfg2_info *p = cfg2_info;
+	}, *p = cfg2_info;
 	unsigned int i;
 	u32 clk;
 
diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c
index 5235f48be1be..74e94054ab1a 100644
--- a/trunk/drivers/net/tun.c
+++ b/trunk/drivers/net/tun.c
@@ -460,23 +460,7 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
 
 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
-	/*
-	 * Tun only receives frames when:
-	 * 1) the char device endpoint gets data from user space
-	 * 2) the tun socket gets a sendmsg call from user space
-	 * Since both of those are syncronous operations, we are guaranteed
-	 * never to have pending data when we poll for it
-	 * so theres nothing to do here but return.
-	 * We need this though so netpoll recognizes us as an interface that
-	 * supports polling, which enables bridge devices in virt setups to
-	 * still use netconsole
-	 */
-	return;
-}
-#endif
+
 static const struct net_device_ops tun_netdev_ops = {
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
@@ -484,9 +468,6 @@ static const struct net_device_ops tun_netdev_ops = {
 	.ndo_start_xmit		= tun_net_xmit,
 	.ndo_change_mtu		= tun_net_change_mtu,
 	.ndo_fix_features	= tun_net_fix_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tun_poll_controller,
-#endif
 };
 
 static const struct net_device_ops tap_netdev_ops = {
@@ -499,9 +480,6 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_set_multicast_list	= tun_net_mclist,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tun_poll_controller,
-#endif
 };
 
 /* Initialize net device. */
diff --git a/trunk/drivers/net/usb/Kconfig b/trunk/drivers/net/usb/Kconfig
index 84d4608153c9..9d4f9117260f 100644
--- a/trunk/drivers/net/usb/Kconfig
+++ b/trunk/drivers/net/usb/Kconfig
@@ -385,16 +385,6 @@ config USB_NET_CX82310_ETH
 	  router with USB ethernet port. This driver is for routers only,
 	  it will not work with ADSL modems (use cxacru driver instead).
 
-config USB_NET_KALMIA
-	tristate "Samsung Kalmia based LTE USB modem"
-	depends on USB_USBNET
-	help
-	  Choose this option if you have a Samsung Kalmia based USB modem
-	  as Samsung GT-B3730.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called kalmia.
-
 config USB_HSO
 	tristate "Option USB High Speed Mobile Devices"
 	depends on USB && RFKILL
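The ppp_async hunks above replace PPP_TRANS with its literal value 0x20 on both the escape and unescape paths. A self-contained sketch of the async-HDLC byte stuffing involved (constants from RFC 1662; the helper names are mine, not the driver's):

#include <stdint.h>

#define PPP_ESCAPE 0x7d	/* escape marker */
#define PPP_TRANS  0x20	/* XOR mask; the literal 0x20 in the hunks above */

/* Sender: a byte that must not appear on the wire is emitted as
 * 0x7d, c ^ 0x20. Receiver: the byte after an escape is XORed with
 * 0x20 to restore it, so any byte round-trips. */
static int put_byte(uint8_t *buf, uint8_t c, int must_escape)
{
	if (must_escape) {
		buf[0] = PPP_ESCAPE;
		buf[1] = c ^ PPP_TRANS;
		return 2;
	}
	buf[0] = c;
	return 1;
}

static uint8_t unescape(uint8_t c)
{
	return c ^ PPP_TRANS;	/* applied to the byte following 0x7d */
}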
diff --git a/trunk/drivers/net/usb/Makefile b/trunk/drivers/net/usb/Makefile
index c203fa21f6b1..c7ec8a5f0a90 100644
--- a/trunk/drivers/net/usb/Makefile
+++ b/trunk/drivers/net/usb/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830)	+= mcs7830.o
 obj-$(CONFIG_USB_USBNET)	+= usbnet.o
 obj-$(CONFIG_USB_NET_INT51X1)	+= int51x1.o
 obj-$(CONFIG_USB_CDC_PHONET)	+= cdc-phonet.o
-obj-$(CONFIG_USB_NET_KALMIA)	+= kalmia.o
 obj-$(CONFIG_USB_IPHETH)	+= ipheth.o
 obj-$(CONFIG_USB_SIERRA_NET)	+= sierra_net.o
 obj-$(CONFIG_USB_NET_CX82310_ETH)	+= cx82310_eth.o
diff --git a/trunk/drivers/net/usb/kalmia.c b/trunk/drivers/net/usb/kalmia.c
deleted file mode 100644
index d965fb1e013e..000000000000
--- a/trunk/drivers/net/usb/kalmia.c
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * USB network interface driver for Samsung Kalmia based LTE USB modem like the
- * Samsung GT-B3730 and GT-B3710.
- *
- * Copyright (C) 2011 Marius Bjoernstad Kotsbak
- *
- * Sponsored by Quicklink Video Distribution Services Ltd.
- *
- * Based on the cdc_eem module.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/*
- * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
- * handled by the "option" module and an ethernet data port handled by this
- * module.
- *
- * The stick must first be switched into modem mode by usb_modeswitch
- * or similar tool. Then the modem gets sent two initialization packets by
- * this module, which gives the MAC address of the device. User space can then
- * connect the modem using AT commands through the ACM port and then use
- * DHCP on the network interface exposed by this module. Network packets are
- * sent to and from the modem in a proprietary format discovered after watching
- * the behavior of the windows driver for the modem.
- *
- * More information about the use of the modem is available in usb_modeswitch
- * forum and the project page:
- *
- * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
- * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
- */
-
-/* #define	DEBUG */
-/* #define	VERBOSE */
-
-#define KALMIA_HEADER_LENGTH 6
-#define KALMIA_ALIGN_SIZE 4
-#define KALMIA_USB_TIMEOUT 10000
-
-/*-------------------------------------------------------------------------*/
-
-static int
-kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
-	u8 *buffer, u8 expected_len)
-{
-	int act_len;
-	int status;
-
-	netdev_dbg(dev->net, "Sending init packet");
-
-	status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
-		init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
-	if (status != 0) {
-		netdev_err(dev->net,
-			"Error sending init packet. Status %i, length %i\n",
-			status, act_len);
-		return status;
-	}
-	else if (act_len != init_msg_len) {
-		netdev_err(dev->net,
-			"Did not send all of init packet. Bytes sent: %i",
-			act_len);
-	}
-	else {
-		netdev_dbg(dev->net, "Successfully sent init packet.");
-	}
-
-	status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
-		buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
-
-	if (status != 0)
-		netdev_err(dev->net,
-			"Error receiving init result. Status %i, length %i\n",
-			status, act_len);
-	else if (act_len != expected_len)
-		netdev_err(dev->net, "Unexpected init result length: %i\n",
-			act_len);
-
-	return status;
-}
-
-static int
-kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
-{
-	char init_msg_1[] =
-		{ 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
-		0x00, 0x00 };
-	char init_msg_2[] =
-		{ 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
-		0x00, 0x00 };
-	char receive_buf[28];
-	int status;
-
-	status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
-		/ sizeof(init_msg_1[0]), receive_buf, 24);
-	if (status != 0)
-		return status;
-
-	status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
-		/ sizeof(init_msg_2[0]), receive_buf, 28);
-	if (status != 0)
-		return status;
-
-	memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
-
-	return status;
-}
-
-static int
-kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
-{
-	u8 status;
-	u8 ethernet_addr[ETH_ALEN];
-
-	/* Don't bind to AT command interface */
-	if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
-		return -EINVAL;
-
-	dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
-	dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
-	dev->status = NULL;
-
-	dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
-	dev->hard_mtu = 1400;
-	dev->rx_urb_size = dev->hard_mtu * 10; // Found as optimal after testing
-
-	status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
-
-	if (status < 0) {
-		usb_set_intfdata(intf, NULL);
-		usb_driver_release_interface(driver_of(intf), intf);
-		return status;
-	}
-
-	memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
-	memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
-
-	return status;
-}
-
-static struct sk_buff *
-kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
-{
-	struct sk_buff *skb2 = NULL;
-	u16 content_len;
-	unsigned char *header_start;
-	unsigned char ether_type_1, ether_type_2;
-	u8 remainder, padlen = 0;
-
-	if (!skb_cloned(skb)) {
-		int headroom = skb_headroom(skb);
-		int tailroom = skb_tailroom(skb);
-
-		if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
-			>= KALMIA_HEADER_LENGTH))
-			goto done;
-
-		if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
-			+ KALMIA_ALIGN_SIZE)) {
-			skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
-				skb->data, skb->len);
-			skb_set_tail_pointer(skb, skb->len);
-			goto done;
-		}
-	}
-
-	skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
-		KALMIA_ALIGN_SIZE, flags);
-	if (!skb2)
-		return NULL;
-
-	dev_kfree_skb_any(skb);
-	skb = skb2;
-
-done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
-	ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
-	ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
-
-	netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
-		ether_type_2);
-
-	/* According to empiric data for data packages */
-	header_start[0] = 0x57;
-	header_start[1] = 0x44;
-	content_len = skb->len - KALMIA_HEADER_LENGTH;
-	header_start[2] = (content_len & 0xff); /* low byte */
-	header_start[3] = (content_len >> 8); /* high byte */
-
-	header_start[4] = ether_type_1;
-	header_start[5] = ether_type_2;
-
-	/* Align to 4 bytes by padding with zeros */
-	remainder = skb->len % KALMIA_ALIGN_SIZE;
-	if (remainder > 0) {
-		padlen = KALMIA_ALIGN_SIZE - remainder;
-		memset(skb_put(skb, padlen), 0, padlen);
-	}
-
-	netdev_dbg(
-		dev->net,
-		"Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
-		content_len, padlen, header_start[0], header_start[1],
-		header_start[2], header_start[3], header_start[4],
-		header_start[5]);
-
-	return skb;
-}
-
-static int
-kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
-{
-	/*
-	 * Our task here is to strip off framing, leaving skb with one
-	 * data frame for the usbnet framework code to process.
-	 */
-	const u8 HEADER_END_OF_USB_PACKET[] =
-		{ 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
-	const u8 EXPECTED_UNKNOWN_HEADER_1[] =
-		{ 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
-	const u8 EXPECTED_UNKNOWN_HEADER_2[] =
-		{ 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
-	u8 i = 0;
-
-	/* incomplete header? */
-	if (skb->len < KALMIA_HEADER_LENGTH)
-		return 0;
-
-	do {
-		struct sk_buff *skb2 = NULL;
-		u8 *header_start;
-		u16 usb_packet_length, ether_packet_length;
-		int is_last;
-
-		header_start = skb->data;
-
-		if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
-			if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
-				sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
-				header_start, EXPECTED_UNKNOWN_HEADER_2,
-				sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
-				netdev_dbg(
-					dev->net,
-					"Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-					header_start[0], header_start[1],
-					header_start[2], header_start[3],
-					header_start[4], header_start[5],
-					skb->len - KALMIA_HEADER_LENGTH);
-			}
-			else {
-				netdev_err(
-					dev->net,
-					"Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-					header_start[0], header_start[1],
-					header_start[2], header_start[3],
-					header_start[4], header_start[5],
-					skb->len - KALMIA_HEADER_LENGTH);
-				return 0;
-			}
-		}
-		else
-			netdev_dbg(
-				dev->net,
-				"Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-				header_start[0], header_start[1], header_start[2],
-				header_start[3], header_start[4], header_start[5],
-				skb->len - KALMIA_HEADER_LENGTH);
-
-		/* subtract start header and end header */
-		usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
-		ether_packet_length = header_start[2] + (header_start[3] << 8);
-		skb_pull(skb, KALMIA_HEADER_LENGTH);
-
-		/* Some small packets misses end marker */
-		if (usb_packet_length < ether_packet_length) {
-			ether_packet_length = usb_packet_length
-				+ KALMIA_HEADER_LENGTH;
-			is_last = true;
-		}
-		else {
-			netdev_dbg(dev->net, "Correct package length #%i", i
-				+ 1);
-
-			is_last = (memcmp(skb->data + ether_packet_length,
-				HEADER_END_OF_USB_PACKET,
-				sizeof(HEADER_END_OF_USB_PACKET)) == 0);
-			if (!is_last) {
-				header_start = skb->data + ether_packet_length;
-				netdev_dbg(
-					dev->net,
-					"End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-					header_start[0], header_start[1],
-					header_start[2], header_start[3],
-					header_start[4], header_start[5],
-					skb->len - KALMIA_HEADER_LENGTH);
-			}
-		}
-
-		if (is_last) {
-			skb2 = skb;
-		}
-		else {
-			skb2 = skb_clone(skb, GFP_ATOMIC);
-			if (unlikely(!skb2))
-				return 0;
-		}
-
-		skb_trim(skb2, ether_packet_length);
-
-		if (is_last) {
-			return 1;
-		}
-		else {
-			usbnet_skb_return(dev, skb2);
-			skb_pull(skb, ether_packet_length);
-		}
-
-		i++;
-	}
-	while (skb->len);
-
-	return 1;
-}
-
-static const struct driver_info kalmia_info = {
-	.description = "Samsung Kalmia LTE USB dongle",
-	.flags = FLAG_WWAN,
-	.bind = kalmia_bind,
-	.rx_fixup = kalmia_rx_fixup,
-	.tx_fixup = kalmia_tx_fixup
-};
-
-/*-------------------------------------------------------------------------*/
-
-static const struct usb_device_id products[] = {
-	/* The unswitched USB ID, to get the module auto loaded: */
-	{ USB_DEVICE(0x04e8, 0x689a) },
-	/* The stick swithed into modem (by e.g. usb_modeswitch): */
-	{ USB_DEVICE(0x04e8, 0x6889),
-		.driver_info = (unsigned long) &kalmia_info, },
-	{ /* EMPTY == end of list */} };
-MODULE_DEVICE_TABLE( usb, products);
-
-static struct usb_driver kalmia_driver = {
-	.name = "kalmia",
-	.id_table = products,
-	.probe = usbnet_probe,
-	.disconnect = usbnet_disconnect,
-	.suspend = usbnet_suspend,
-	.resume = usbnet_resume
-};
-
-static int __init kalmia_init(void)
-{
-	return usb_register(&kalmia_driver);
-}
-module_init( kalmia_init);
-
-static void __exit kalmia_exit(void)
-{
-	usb_deregister(&kalmia_driver);
-}
-module_exit( kalmia_exit);
-
-MODULE_AUTHOR("Marius Bjoernstad Kotsbak ");
-MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
-MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/net/wan/farsync.c b/trunk/drivers/net/wan/farsync.c
index 777d1a4e81b2..e050bd65e037 100644
--- a/trunk/drivers/net/wan/farsync.c
+++ b/trunk/drivers/net/wan/farsync.c
@@ -2203,10 +2203,8 @@ fst_open(struct net_device *dev)
 
 	if (port->mode != FST_RAW) {
 		err = hdlc_open(dev);
-		if (err) {
-			module_put(THIS_MODULE);
+		if (err)
 			return err;
-		}
 	}
 
 	fst_openport(port);
diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c
index 687c1f223497..660831ce293c 100644
--- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,8 +1288,6 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
 
 	*(unsigned long *) wdev_priv = (unsigned long) priv;
 
-	set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
-
 	ret = wiphy_register(wdev->wiphy);
 	if (ret < 0) {
 		dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
diff --git a/trunk/drivers/net/wireless/mwl8k.c b/trunk/drivers/net/wireless/mwl8k.c
index aeac3cc4dbe4..32261189bcef 100644
--- a/trunk/drivers/net/wireless/mwl8k.c
+++ b/trunk/drivers/net/wireless/mwl8k.c
@@ -2474,7 +2474,6 @@ struct mwl8k_cmd_set_hw_spec {
  * faster client.
  */
 #define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY	0x00000400
-#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR	0x00000200
 #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT		0x00000080
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP	0x00000020
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON		0x00000010
@@ -2511,8 +2510,7 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
 	cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
 				 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
 				 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
-				 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
-				 MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
+				 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY);
 	cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
 	cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
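kalmia_tx_fixup() in the deleted driver above prepends a 6-byte header (magic 0x57 0x44, little-endian payload length, the frame's EtherType) and zero-pads to a 4-byte boundary. A self-contained sketch of just that framing arithmetic (the function name and buffer convention are mine; the constants come from the driver):

#include <stdint.h>
#include <string.h>

#define HDR_LEN    6	/* KALMIA_HEADER_LENGTH in the deleted driver */
#define ALIGN_SIZE 4	/* KALMIA_ALIGN_SIZE */

/* Sketch only: buf already holds the payload at buf + HDR_LEN and has
 * room for up to ALIGN_SIZE - 1 pad bytes; returns the padded total. */
static size_t kalmia_frame_sketch(uint8_t *buf, size_t payload_len,
				  uint8_t ethertype_hi, uint8_t ethertype_lo)
{
	size_t total = HDR_LEN + payload_len;
	size_t rem = total % ALIGN_SIZE;

	buf[0] = 0x57;				/* magic */
	buf[1] = 0x44;
	buf[2] = payload_len & 0xff;		/* length, low byte */
	buf[3] = (payload_len >> 8) & 0xff;	/* length, high byte */
	buf[4] = ethertype_hi;
	buf[5] = ethertype_lo;

	if (rem) {				/* zero-pad to 4-byte boundary */
		memset(buf + total, 0, ALIGN_SIZE - rem);
		total += ALIGN_SIZE - rem;
	}
	return total;
}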
diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c
index 30df85d8fca8..553da68bd510 100644
--- a/trunk/drivers/xen/events.c
+++ b/trunk/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
 static void xen_irq_init(unsigned irq)
 {
 	struct irq_info *info;
-#ifdef CONFIG_SMP
 	struct irq_desc *desc = irq_to_desc(irq);
 
+#ifdef CONFIG_SMP
 	/* By default all event channels notify CPU#0. */
 	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 #endif
diff --git a/trunk/fs/bad_inode.c b/trunk/fs/bad_inode.c
index bfcb18feb1df..9ad2369d9e35 100644
--- a/trunk/fs/bad_inode.c
+++ b/trunk/fs/bad_inode.c
@@ -231,6 +231,9 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
 
 static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
 {
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	return -EIO;
 }
 
diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h
index 300628795fdb..378b5b4443f3 100644
--- a/trunk/fs/btrfs/ctree.h
+++ b/trunk/fs/btrfs/ctree.h
@@ -967,12 +967,6 @@ struct btrfs_fs_info {
 	struct srcu_struct subvol_srcu;
 	spinlock_t trans_lock;
 
-	/*
-	 * the reloc mutex goes with the trans lock, it is taken
-	 * during commit to protect us from the relocation code
-	 */
-	struct mutex reloc_mutex;
-
 	struct list_head trans_list;
 	struct list_head hashers;
 	struct list_head dead_roots;
@@ -1178,14 +1172,6 @@ struct btrfs_root {
 	u32 type;
 
 	u64 highest_objectid;
-
-	/* btrfs_record_root_in_trans is a multi-step process,
-	 * and it can race with the balancing code.  But the
-	 * race is very small, and only the first time the root
-	 * is added to each transaction.  So in_trans_setup
-	 * is used to tell us when more checks are required
-	 */
-	unsigned long in_trans_setup;
 	int ref_cows;
 	int track_dirty;
 	int in_radix;
@@ -1195,6 +1181,7 @@ struct btrfs_root {
 	struct btrfs_key defrag_max;
 	int defrag_running;
 	char *name;
+	int in_sysfs;
 
 	/* the dirty list is only used by non-reference counted roots */
 	struct list_head dirty_list;
diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c
index f1cbd028f7b3..6462c29d2d37 100644
--- a/trunk/fs/btrfs/delayed-inode.c
+++ b/trunk/fs/btrfs/delayed-inode.c
@@ -297,6 +297,7 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 		item->data_len = data_len;
 		item->ins_or_del = 0;
 		item->bytes_reserved = 0;
+		item->block_rsv = NULL;
 		item->delayed_node = NULL;
 		atomic_set(&item->refs, 1);
 	}
@@ -592,8 +593,10 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 
 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
-	if (!ret)
+	if (!ret) {
 		item->bytes_reserved = num_bytes;
+		item->block_rsv = dst_rsv;
+	}
 
 	return ret;
 }
@@ -601,13 +604,10 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 						struct btrfs_delayed_item *item)
 {
-	struct btrfs_block_rsv *rsv;
-
 	if (!item->bytes_reserved)
 		return;
 
-	rsv = &root->fs_info->global_block_rsv;
-	btrfs_block_rsv_release(root, rsv,
+	btrfs_block_rsv_release(root, item->block_rsv,
 				item->bytes_reserved);
 }
 
@@ -1014,7 +1014,6 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_root *delayed_root;
 	struct btrfs_delayed_node *curr_node, *prev_node;
 	struct btrfs_path *path;
-	struct btrfs_block_rsv *block_rsv;
 	int ret = 0;
 
 	path = btrfs_alloc_path();
@@ -1022,9 +1021,6 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
-	block_rsv = trans->block_rsv;
-	trans->block_rsv = &root->fs_info->global_block_rsv;
-
 	delayed_root = btrfs_get_delayed_root(root);
 
 	curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1049,7 +1045,6 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 	}
 
 	btrfs_free_path(path);
-	trans->block_rsv = block_rsv;
 	return ret;
 }
 
@@ -1057,7 +1052,6 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 					      struct btrfs_delayed_node *node)
 {
 	struct btrfs_path *path;
-	struct btrfs_block_rsv *block_rsv;
 	int ret;
 
 	path = btrfs_alloc_path();
@@ -1065,9 +1059,6 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
-	block_rsv = trans->block_rsv;
-	trans->block_rsv = &node->root->fs_info->global_block_rsv;
-
 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
 	if (!ret)
 		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1075,7 +1066,6 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
 	btrfs_free_path(path);
 
-	trans->block_rsv = block_rsv;
 	return ret;
 }
 
@@ -1126,7 +1116,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
 	struct btrfs_path *path;
 	struct btrfs_delayed_node *delayed_node = NULL;
 	struct btrfs_root *root;
-	struct btrfs_block_rsv *block_rsv;
 	unsigned long nr = 0;
 	int need_requeue = 0;
 	int ret;
@@ -1145,9 +1134,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
 	if (IS_ERR(trans))
 		goto free_path;
 
-	block_rsv = trans->block_rsv;
-	trans->block_rsv = &root->fs_info->global_block_rsv;
-
 	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
 	if (!ret)
 		ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1190,7 +1176,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
 
 	nr = trans->blocks_used;
 
-	trans->block_rsv = block_rsv;
 	btrfs_end_transaction_dmeta(trans, root);
 	__btrfs_btree_balance_dirty(root, nr);
 free_path:
@@ -1237,13 +1222,6 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 	return 0;
 }
 
-void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
-{
-	struct btrfs_delayed_root *delayed_root;
-	delayed_root = btrfs_get_delayed_root(root);
-	WARN_ON(btrfs_first_delayed_node(delayed_root));
-}
-
 void btrfs_balance_delayed_items(struct btrfs_root *root)
 {
 	struct btrfs_delayed_root *delayed_root;
diff --git a/trunk/fs/btrfs/delayed-inode.h b/trunk/fs/btrfs/delayed-inode.h
index d1a6a2915c66..eb7d240aa648 100644
--- a/trunk/fs/btrfs/delayed-inode.h
+++ b/trunk/fs/btrfs/delayed-inode.h
@@ -75,6 +75,7 @@ struct btrfs_delayed_item {
 	struct list_head tree_list;	/* used for batch insert/delete items */
 	struct list_head readdir_list;	/* used for readdir items */
 	u64 bytes_reserved;
+	struct btrfs_block_rsv *block_rsv;
 	struct btrfs_delayed_node *delayed_node;
 	atomic_t refs;
 	int ins_or_del;
@@ -137,8 +138,4 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
 /* for init */
 int __init btrfs_delayed_inode_init(void);
 void btrfs_delayed_inode_exit(void);
-
-/* for debugging */
-void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
-
 #endif
diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c
index 1ac8db5dc0a3..9f68c6898653 100644
--- a/trunk/fs/btrfs/disk-io.c
+++ b/trunk/fs/btrfs/disk-io.c
@@ -1044,6 +1044,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	root->last_trans = 0;
 	root->highest_objectid = 0;
 	root->name = NULL;
+	root->in_sysfs = 0;
 	root->inode_tree = RB_ROOT;
 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
 	root->block_rsv = NULL;
@@ -1299,21 +1300,19 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 		return root;
 
 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
+	if (!root->free_ino_ctl)
+		goto fail;
 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
 					GFP_NOFS);
-	if (!root->free_ino_pinned || !root->free_ino_ctl) {
-		ret = -ENOMEM;
+	if (!root->free_ino_pinned)
 		goto fail;
-	}
 
 	btrfs_init_free_ino_ctl(root);
 	mutex_init(&root->fs_commit_mutex);
 	spin_lock_init(&root->cache_lock);
 	init_waitqueue_head(&root->cache_wait);
 
-	ret = set_anon_super(&root->anon_super, NULL);
-	if (ret)
-		goto fail;
+	set_anon_super(&root->anon_super, NULL);
 
 	if (btrfs_root_refs(&root->root_item) == 0) {
 		ret = -ENOENT;
@@ -1619,7 +1618,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
 	spin_lock_init(&fs_info->delayed_iput_lock);
 	spin_lock_init(&fs_info->defrag_inodes_lock);
-	mutex_init(&fs_info->reloc_mutex);
 
 	init_completion(&fs_info->kobj_unregister);
 	fs_info->tree_root = tree_root;
diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c
index 1f61bf5b4960..b42efc2ded51 100644
--- a/trunk/fs/btrfs/extent-tree.c
+++ b/trunk/fs/btrfs/extent-tree.c
@@ -3314,6 +3314,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	if (reserved == 0)
 		return 0;
 
+	/* nothing to shrink - nothing to reclaim */
+	if (root->fs_info->delalloc_bytes == 0)
+		return 0;
+
 	max_reclaim = min(reserved, to_reclaim);
 
 	while (loops < 1024) {
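The delayed-inode hunks above delete a save/switch/restore of trans->block_rsv around the delayed-item work: the removed lines stash the caller's reservation, point the handle at the global reserve, and put the original back before returning. A minimal generic sketch of that pattern ("handle"/"rsv" are stand-ins, not the btrfs structures):

/* Sketch of the save/switch/restore the removed lines implement. */
struct rsv;
struct handle { struct rsv *block_rsv; };

static int run_with_global_rsv(struct handle *h, struct rsv *global_rsv,
			       int (*work)(struct handle *))
{
	struct rsv *saved = h->block_rsv;	/* remember caller's reserve */
	int ret;

	h->block_rsv = global_rsv;		/* charge work to the global reserve */
	ret = work(h);
	h->block_rsv = saved;			/* restore before returning */
	return ret;
}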
diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c
index 0a9b10c5b0a7..751ddf8fc58a 100644
--- a/trunk/fs/btrfs/inode.c
+++ b/trunk/fs/btrfs/inode.c
@@ -3076,7 +3076,6 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	ret = btrfs_update_inode(trans, root, dir);
 	BUG_ON(ret);
 
-	btrfs_free_path(path);
 	return 0;
 }
 
diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c
index a3c4751e07db..b793d112d1f6 100644
--- a/trunk/fs/btrfs/ioctl.c
+++ b/trunk/fs/btrfs/ioctl.c
@@ -482,10 +482,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
 	ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
 	BUG_ON(ret);
 
-	spin_lock(&root->fs_info->trans_lock);
 	list_add(&pending_snapshot->list,
 		 &trans->transaction->pending_snapshots);
-	spin_unlock(&root->fs_info->trans_lock);
 	if (async_transid) {
 		*async_transid = trans->transid;
 		ret = btrfs_commit_transaction_async(trans,
diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c
index 5e0a3dc79a45..b1ef27cc673b 100644
--- a/trunk/fs/btrfs/relocation.c
+++ b/trunk/fs/btrfs/relocation.c
@@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 	int ret;
 
 	if (!root->reloc_root)
-		goto out;
+		return 0;
 
 	reloc_root = root->reloc_root;
 	root_item = &reloc_root->root_item;
@@ -1390,8 +1390,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 	ret = btrfs_update_root(trans, root->fs_info->tree_root,
 				&reloc_root->root_key, root_item);
 	BUG_ON(ret);
-
-out:
 	return 0;
 }
 
@@ -2144,11 +2142,10 @@ int prepare_to_merge(struct reloc_control *rc, int err)
 	u64 num_bytes = 0;
 	int ret;
 
-	mutex_lock(&root->fs_info->reloc_mutex);
+	spin_lock(&root->fs_info->trans_lock);
 	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
 	rc->merging_rsv_size += rc->nodes_relocated * 2;
-	mutex_unlock(&root->fs_info->reloc_mutex);
-
+	spin_unlock(&root->fs_info->trans_lock);
 again:
 	if (!err) {
 		num_bytes = rc->merging_rsv_size;
@@ -2217,16 +2214,9 @@ int merge_reloc_roots(struct reloc_control *rc)
 	int ret;
 again:
 	root = rc->extent_root;
-
-	/*
-	 * this serializes us with btrfs_record_root_in_transaction,
-	 * we have to make sure nobody is in the middle of
-	 * adding their roots to the list while we are
-	 * doing this splice
-	 */
-	mutex_lock(&root->fs_info->reloc_mutex);
+	spin_lock(&root->fs_info->trans_lock);
 	list_splice_init(&rc->reloc_roots, &reloc_roots);
-	mutex_unlock(&root->fs_info->reloc_mutex);
+	spin_unlock(&root->fs_info->trans_lock);
 
 	while (!list_empty(&reloc_roots)) {
 		found = 1;
@@ -3600,19 +3590,17 @@ int find_next_extent(struct btrfs_trans_handle *trans,
 static void set_reloc_control(struct reloc_control *rc)
 {
 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-
-	mutex_lock(&fs_info->reloc_mutex);
+	spin_lock(&fs_info->trans_lock);
 	fs_info->reloc_ctl = rc;
-	mutex_unlock(&fs_info->reloc_mutex);
+	spin_unlock(&fs_info->trans_lock);
 }
 
 static void unset_reloc_control(struct reloc_control *rc)
 {
 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-
-	mutex_lock(&fs_info->reloc_mutex);
+	spin_lock(&fs_info->trans_lock);
 	fs_info->reloc_ctl = NULL;
-	mutex_unlock(&fs_info->reloc_mutex);
+	spin_unlock(&fs_info->trans_lock);
 }
 
 static int check_extent_flags(u64 flags)
diff --git a/trunk/fs/btrfs/sysfs.c b/trunk/fs/btrfs/sysfs.c
index daac9ae6d731..c3c223ae6691 100644
--- a/trunk/fs/btrfs/sysfs.c
+++ b/trunk/fs/btrfs/sysfs.c
@@ -28,6 +28,152 @@
 #include "disk-io.h"
 #include "transaction.h"
 
+static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_root_used(&root->root_item));
+}
+
+static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_root_limit(&root->root_item));
+}
+
+static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
+{
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
+}
+
+static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
+}
+
+static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
+}
+
+/* this is for root attrs (subvols/snapshots) */
+struct btrfs_root_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct btrfs_root *, char *);
+	ssize_t (*store)(struct btrfs_root *, const char *, size_t);
+};
+
+#define ROOT_ATTR(name, mode, show, store) \
+static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
+							       show, store)
+
+ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
+ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
+
+static struct attribute *btrfs_root_attrs[] = {
+	&btrfs_root_attr_blocks_used.attr,
+	&btrfs_root_attr_block_limit.attr,
+	NULL,
+};
+
+/* this is for super attrs (actual full fs) */
+struct btrfs_super_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct btrfs_fs_info *, char *);
+	ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
+};
+
+#define SUPER_ATTR(name, mode, show, store) \
+static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
+								 show, store)
+
+SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
+SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
+SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
+
+static struct attribute *btrfs_super_attrs[] = {
+	&btrfs_super_attr_blocks_used.attr,
+	&btrfs_super_attr_total_blocks.attr,
+	&btrfs_super_attr_blocksize.attr,
+	NULL,
+};
+
+static ssize_t btrfs_super_attr_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
+						super_kobj);
+	struct btrfs_super_attr *a = container_of(attr,
+						  struct btrfs_super_attr,
+						  attr);
+
+	return a->show ? a->show(fs, buf) : 0;
+}
+
+static ssize_t btrfs_super_attr_store(struct kobject *kobj,
+				      struct attribute *attr,
+				      const char *buf, size_t len)
+{
+	struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
+						super_kobj);
+	struct btrfs_super_attr *a = container_of(attr,
+						  struct btrfs_super_attr,
+						  attr);
+
+	return a->store ? a->store(fs, buf, len) : 0;
+}
+
+static ssize_t btrfs_root_attr_show(struct kobject *kobj,
+				    struct attribute *attr, char *buf)
+{
+	struct btrfs_root *root = container_of(kobj, struct btrfs_root,
+					       root_kobj);
+	struct btrfs_root_attr *a = container_of(attr,
+						 struct btrfs_root_attr,
+						 attr);
+
+	return a->show ? a->show(root, buf) : 0;
+}
+
+static ssize_t btrfs_root_attr_store(struct kobject *kobj,
+				     struct attribute *attr,
+				     const char *buf, size_t len)
+{
+	struct btrfs_root *root = container_of(kobj, struct btrfs_root,
+					       root_kobj);
+	struct btrfs_root_attr *a = container_of(attr,
+						 struct btrfs_root_attr,
+						 attr);
+	return a->store ? a->store(root, buf, len) : 0;
+}
+
+static void btrfs_super_release(struct kobject *kobj)
+{
+	struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
+						super_kobj);
+	complete(&fs->kobj_unregister);
+}
+
+static void btrfs_root_release(struct kobject *kobj)
+{
+	struct btrfs_root *root = container_of(kobj, struct btrfs_root,
+					       root_kobj);
+	complete(&root->kobj_unregister);
+}
+
+static const struct sysfs_ops btrfs_super_attr_ops = {
+	.show	= btrfs_super_attr_show,
+	.store	= btrfs_super_attr_store,
+};
+
+static const struct sysfs_ops btrfs_root_attr_ops = {
+	.show	= btrfs_root_attr_show,
+	.store	= btrfs_root_attr_store,
+};
+
 /* /sys/fs/btrfs/ entry */
 static struct kset *btrfs_kset;
a->show(root, buf) : 0; +} + +static ssize_t btrfs_root_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct btrfs_root *root = container_of(kobj, struct btrfs_root, + root_kobj); + struct btrfs_root_attr *a = container_of(attr, + struct btrfs_root_attr, + attr); + return a->store ? a->store(root, buf, len) : 0; +} + +static void btrfs_super_release(struct kobject *kobj) +{ + struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, + super_kobj); + complete(&fs->kobj_unregister); +} + +static void btrfs_root_release(struct kobject *kobj) +{ + struct btrfs_root *root = container_of(kobj, struct btrfs_root, + root_kobj); + complete(&root->kobj_unregister); +} + +static const struct sysfs_ops btrfs_super_attr_ops = { + .show = btrfs_super_attr_show, + .store = btrfs_super_attr_store, +}; + +static const struct sysfs_ops btrfs_root_attr_ops = { + .show = btrfs_root_attr_show, + .store = btrfs_root_attr_store, +}; + /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c index 51dcec86757f..2b3590b9fe98 100644 --- a/trunk/fs/btrfs/transaction.c +++ b/trunk/fs/btrfs/transaction.c @@ -126,85 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -static int record_root_in_trans(struct btrfs_trans_handle *trans, +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); - /* - * see below for in_trans_setup usage rules - * we have the reloc mutex held now, so there - * is only one writer in this function - */ - root->in_trans_setup = 1; - - /* make sure readers find in_trans_setup before - * they find our root->last_trans update - */ - smp_wmb(); - spin_lock(&root->fs_info->fs_roots_radix_lock); if (root->last_trans == trans->transid) { spin_unlock(&root->fs_info->fs_roots_radix_lock); return 0; } + root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&root->fs_info->fs_roots_radix_lock); - root->last_trans = trans->transid; - - /* this is pretty tricky. We don't want to - * take the relocation lock in btrfs_record_root_in_trans - * unless we're really doing the first setup for this root in - * this transaction. - * - * Normally we'd use root->last_trans as a flag to decide - * if we want to take the expensive mutex. - * - * But, we have to set root->last_trans before we - * init the relocation root, otherwise, we trip over warnings - * in ctree.c. The solution used here is to flag ourselves - * with root->in_trans_setup. When this is 1, we're still - * fixing up the reloc trees and everyone must wait. - * - * When this is zero, they can trust root->last_trans and fly - * through btrfs_record_root_in_trans without having to take the - * lock. 
smp_wmb() makes sure that all the writes above are - * done before we pop in the zero below - */ btrfs_init_reloc_root(trans, root); - smp_wmb(); - root->in_trans_setup = 0; } return 0; } - -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - if (!root->ref_cows) - return 0; - - /* - * see record_root_in_trans for comments about in_trans_setup usage - * and barriers - */ - smp_rmb(); - if (root->last_trans == trans->transid && - !root->in_trans_setup) - return 0; - - mutex_lock(&root->fs_info->reloc_mutex); - record_root_in_trans(trans, root); - mutex_unlock(&root->fs_info->reloc_mutex); - - return 0; -} - /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. @@ -939,7 +882,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - record_root_in_trans(trans, parent_root); + btrfs_record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -957,16 +900,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - /* - * pull in the delayed directory update - * and the delayed inode item - * otherwise we corrupt the FS during - * snapshot - */ - ret = btrfs_run_delayed_items(trans, root); - BUG_ON(ret); - - record_root_in_trans(trans, root); + btrfs_record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1027,6 +961,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { + /* + * We must deal with the delayed items before creating + * snapshots, or we will create a snapshot with inconsistent + * information. + */ + ret = btrfs_run_delayed_items(trans, fs_info->fs_root); + BUG_ON(ret); + ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1299,42 +1241,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, schedule_timeout(1); finish_wait(&cur_trans->writer_wait, &wait); + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); - /* - * Ok now we need to make sure to block out any other joins while we - * commit the transaction. We could have started a join before setting - * no_join so make sure to wait for num_writers to == 1 again. 
- */ - spin_lock(&root->fs_info->trans_lock); - root->fs_info->trans_no_join = 1; - spin_unlock(&root->fs_info->trans_lock); - wait_event(cur_trans->writer_wait, - atomic_read(&cur_trans->num_writers) == 1); - - /* - * the reloc mutex makes sure that we stop - * the balancing code from coming in and moving - * extents around in the middle of the commit - */ - mutex_lock(&root->fs_info->reloc_mutex); - - ret = btrfs_run_delayed_items(trans, root); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); - ret = create_pending_snapshots(trans, root->fs_info); + ret = btrfs_run_delayed_items(trans, root); BUG_ON(ret); ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); - /* - * make sure none of the code above managed to slip in a - * delayed item - */ - btrfs_assert_delayed_root_empty(root); - WARN_ON(cur_trans != trans->transaction); btrfs_scrub_pause(root); @@ -1391,7 +1312,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, root->fs_info->running_transaction = NULL; root->fs_info->trans_no_join = 0; spin_unlock(&root->fs_info->trans_lock); - mutex_unlock(&root->fs_info->reloc_mutex); wake_up(&root->fs_info->transaction_wait); diff --git a/trunk/fs/btrfs/tree-log.c b/trunk/fs/btrfs/tree-log.c index 4ce8a9f41d1e..592396c6dc47 100644 --- a/trunk/fs/btrfs/tree-log.c +++ b/trunk/fs/btrfs/tree-log.c @@ -3177,7 +3177,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) tmp_key.offset = (u64)-1; wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); - BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); + BUG_ON(!wc.replay_dest); wc.replay_dest->log_root = log; btrfs_record_root_in_trans(trans, wc.replay_dest); diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c index 2f0c58646c10..e9def996e383 100644 --- a/trunk/fs/cifs/cifsfs.c +++ b/trunk/fs/cifs/cifsfs.c @@ -257,6 +257,9 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags) { struct cifs_sb_info *cifs_sb; + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + cifs_sb = CIFS_SB(inode->i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { diff --git a/trunk/fs/coda/pioctl.c b/trunk/fs/coda/pioctl.c index cb140ef293e4..6cbb3afb36dc 100644 --- a/trunk/fs/coda/pioctl.c +++ b/trunk/fs/coda/pioctl.c @@ -43,6 +43,8 @@ const struct file_operations coda_ioctl_operations = { /* the coda pioctl inode ops */ static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) { + if (flags & IPERM_FLAG_RCU) + return -ECHILD; return (mask & MAY_EXEC) ? 
-EACCES : 0; } diff --git a/trunk/fs/logfs/dir.c b/trunk/fs/logfs/dir.c index 1afae26cf236..9ed89d1663f8 100644 --- a/trunk/fs/logfs/dir.c +++ b/trunk/fs/logfs/dir.c @@ -555,6 +555,13 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry, return __logfs_create(dir, dentry, inode, target, destlen); } +static int logfs_permission(struct inode *inode, int mask, unsigned int flags) +{ + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + return generic_permission(inode, mask, flags, NULL); +} + static int logfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { @@ -813,6 +820,7 @@ const struct inode_operations logfs_dir_iops = { .mknod = logfs_mknod, .rename = logfs_rename, .rmdir = logfs_rmdir, + .permission = logfs_permission, .symlink = logfs_symlink, .unlink = logfs_unlink, }; diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 0223c41fb114..9e425e7e6c8f 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -238,8 +238,7 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags, /* * Read/write DACs are always overridable. - * Executable DACs are overridable for all directories and - * for non-directories that have least one exec bit set. + * Executable DACs are overridable if at least one exec bit is set. */ if (!(mask & MAY_EXEC) || execute_ok(inode)) if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) @@ -1012,6 +1011,9 @@ static int follow_dotdot_rcu(struct nameidata *nd) * Follow down to the covering mount currently visible to userspace. At each * point, the filesystem owning that dentry may be queried as to whether the * caller is permitted to proceed or not. + * + * Care must be taken as namespace_sem may be held (indicated by mounting_here + * being true). */ int follow_down(struct path *path) { diff --git a/trunk/fs/nfsd/Kconfig b/trunk/fs/nfsd/Kconfig index fbb2a5ef5817..18b3e8975fe0 100644 --- a/trunk/fs/nfsd/Kconfig +++ b/trunk/fs/nfsd/Kconfig @@ -82,7 +82,6 @@ config NFSD_V4 select NFSD_V3 select FS_POSIX_ACL select SUNRPC_GSS - select CRYPTO help This option enables support in your system's NFS server for version 4 of the NFS protocol (RFC 3530). 
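[The next hunk, against fs/nfsd/nfsctl.c, drops nfsd's hard-coded KRB5_SUPPORTED_ENCTYPES string (its header, gss_krb5_enctypes.h, is deleted later in this diff) and instead asks the registered krb5 GSS mechanism for its enctype list at runtime. A minimal sketch of that pattern follows, assuming only the gss_api interfaces visible in the hunk itself — gss_mech_get_by_name(), gss_mech_put(), and the gm_upcall_enctypes string; the function name enctypes_show is illustrative, the patch's own routine is supported_enctypes_show.]

#include <linux/seq_file.h>
#include <linux/sunrpc/gss_api.h>

/*
 * Sketch: print the krb5 mechanism's supported-enctypes string through
 * a seq_file.  Degrades to empty output when the krb5 mechanism is not
 * registered or does not advertise upcall enctypes.
 */
static int enctypes_show(struct seq_file *m, void *v)
{
	struct gss_api_mech *mech = gss_mech_get_by_name("krb5");

	if (mech == NULL)
		return 0;			/* mechanism not loaded */
	if (mech->gm_upcall_enctypes != NULL)
		seq_printf(m, "%s", mech->gm_upcall_enctypes);
	gss_mech_put(mech);			/* drop the module reference */
	return 0;
}

[Compared with the shared macro, this keeps the enctype list in one place — the sunrpc krb5 code — at the cost of taking and dropping a reference on the mechanism module for each read.]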
diff --git a/trunk/fs/nfsd/nfsctl.c b/trunk/fs/nfsd/nfsctl.c index 2b1449dd2f49..1f5eae40f34e 100644 --- a/trunk/fs/nfsd/nfsctl.c +++ b/trunk/fs/nfsd/nfsctl.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "idmap.h" #include "nfsd.h" @@ -190,10 +189,18 @@ static struct file_operations export_features_operations = { .release = single_release, }; -#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) +#ifdef CONFIG_SUNRPC_GSS static int supported_enctypes_show(struct seq_file *m, void *v) { - seq_printf(m, KRB5_SUPPORTED_ENCTYPES); + struct gss_api_mech *k5mech; + + k5mech = gss_mech_get_by_name("krb5"); + if (k5mech == NULL) + goto out; + if (k5mech->gm_upcall_enctypes != NULL) + seq_printf(m, k5mech->gm_upcall_enctypes); + gss_mech_put(k5mech); +out: return 0; } @@ -208,7 +215,7 @@ static struct file_operations supported_enctypes_ops = { .llseek = seq_lseek, .release = single_release, }; -#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */ +#endif /* CONFIG_SUNRPC_GSS */ extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); @@ -1420,9 +1427,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent) [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, -#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) +#ifdef CONFIG_SUNRPC_GSS [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, -#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */ +#endif /* CONFIG_SUNRPC_GSS */ #ifdef CONFIG_NFSD_V4 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, diff --git a/trunk/fs/nfsd/vfs.c b/trunk/fs/nfsd/vfs.c index fd0acca5370a..d5718273bb32 100644 --- a/trunk/fs/nfsd/vfs.c +++ b/trunk/fs/nfsd/vfs.c @@ -696,15 +696,7 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor } #endif /* CONFIG_NFSD_V3 */ -static int nfsd_open_break_lease(struct inode *inode, int access) -{ - unsigned int mode; - if (access & NFSD_MAY_NOT_BREAK_LEASE) - return 0; - mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY; - return break_lease(inode, mode | O_NONBLOCK); -} /* * Open an existing file or directory. @@ -752,7 +744,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (!inode->i_fop) goto out; - host_err = nfsd_open_break_lease(inode, access); + /* + * Check to see if there are any leases on this file. + * This may block while leases are broken. + */ + if (!(access & NFSD_MAY_NOT_BREAK_LEASE)) + host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? 
O_WRONLY : 0)); if (host_err) /* NOMEM or WOULDBLOCK */ goto out_nfserr; @@ -1663,10 +1660,8 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, if (!dold->d_inode) goto out_drop_write; host_err = nfsd_break_lease(dold->d_inode); - if (host_err) { - err = nfserrno(host_err); + if (host_err) goto out_drop_write; - } host_err = vfs_link(dold, dirp, dnew); if (!host_err) { err = nfserrno(commit_metadata(ffhp)); diff --git a/trunk/fs/nilfs2/inode.c b/trunk/fs/nilfs2/inode.c index b9b45fc2903e..b954878ad6ce 100644 --- a/trunk/fs/nilfs2/inode.c +++ b/trunk/fs/nilfs2/inode.c @@ -801,7 +801,12 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) int nilfs_permission(struct inode *inode, int mask, unsigned int flags) { - struct nilfs_root *root = NILFS_I(inode)->i_root; + struct nilfs_root *root; + + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + + root = NILFS_I(inode)->i_root; if ((mask & MAY_WRITE) && root && root->cno != NILFS_CPTREE_CURRENT_CNO) return -EROFS; /* snapshot is not writable */ diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c index 8a84210ca080..14def991d9dd 100644 --- a/trunk/fs/proc/base.c +++ b/trunk/fs/proc/base.c @@ -2169,7 +2169,11 @@ static const struct file_operations proc_fd_operations = { */ static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) { - int rv = generic_permission(inode, mask, flags, NULL); + int rv; + + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + rv = generic_permission(inode, mask, flags, NULL); if (rv == 0) return 0; if (task_pid(current) == proc_pid(inode)) diff --git a/trunk/fs/proc/proc_sysctl.c b/trunk/fs/proc/proc_sysctl.c index d167de365a8d..f50133c11c24 100644 --- a/trunk/fs/proc/proc_sysctl.c +++ b/trunk/fs/proc/proc_sysctl.c @@ -304,6 +304,9 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags) struct ctl_table *table; int error; + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + /* Executable files are not allowed under /proc/sys/ */ if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) return -EACCES; diff --git a/trunk/fs/reiserfs/xattr.c b/trunk/fs/reiserfs/xattr.c index d78089690965..e8a62f41b458 100644 --- a/trunk/fs/reiserfs/xattr.c +++ b/trunk/fs/reiserfs/xattr.c @@ -954,6 +954,8 @@ static int xattr_mount_check(struct super_block *s) int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) { + if (flags & IPERM_FLAG_RCU) + return -ECHILD; /* * We don't do permission checks on the internal objects. * Permissions are determined by the "owning" object. diff --git a/trunk/fs/timerfd.c b/trunk/fs/timerfd.c index dffeb3795af1..f67acbdda5e8 100644 --- a/trunk/fs/timerfd.c +++ b/trunk/fs/timerfd.c @@ -61,9 +61,7 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) /* * Called when the clock was set to cancel the timers in the cancel - * list. This will wake up processes waiting on these timers. The - * wake-up requires ctx->ticks to be non zero, therefore we increment - * it before calling wake_up_locked(). + * list. 
*/ void timerfd_clock_was_set(void) { @@ -78,7 +76,6 @@ void timerfd_clock_was_set(void) spin_lock_irqsave(&ctx->wqh.lock, flags); if (ctx->moffs.tv64 != moffs.tv64) { ctx->moffs.tv64 = KTIME_MAX; - ctx->ticks++; wake_up_locked(&ctx->wqh); } spin_unlock_irqrestore(&ctx->wqh.lock, flags); diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c index 529be0582029..8c892c2d5300 100644 --- a/trunk/fs/ubifs/super.c +++ b/trunk/fs/ubifs/super.c @@ -2146,7 +2146,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, if (IS_ERR(sb)) { err = PTR_ERR(sb); kfree(c); - goto out_close; } if (sb->s_root) { diff --git a/trunk/include/linux/clocksource.h b/trunk/include/linux/clocksource.h index 18a1baf31f2d..d4646b48dc4a 100644 --- a/trunk/include/linux/clocksource.h +++ b/trunk/include/linux/clocksource.h @@ -188,7 +188,6 @@ struct clocksource { #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ struct list_head wd_list; - cycle_t cs_last; cycle_t wd_last; #endif } ____cacheline_aligned; diff --git a/trunk/include/linux/device_cgroup.h b/trunk/include/linux/device_cgroup.h index 7aad1f440867..0b0d9c39ed67 100644 --- a/trunk/include/linux/device_cgroup.h +++ b/trunk/include/linux/device_cgroup.h @@ -2,16 +2,8 @@ #include #ifdef CONFIG_CGROUP_DEVICE -extern int __devcgroup_inode_permission(struct inode *inode, int mask); +extern int devcgroup_inode_permission(struct inode *inode, int mask); extern int devcgroup_inode_mknod(int mode, dev_t dev); -static inline int devcgroup_inode_permission(struct inode *inode, int mask) -{ - if (likely(!inode->i_rdev)) - return 0; - if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)) - return 0; - return __devcgroup_inode_permission(inode, mask); -} #else static inline int devcgroup_inode_permission(struct inode *inode, int mask) { return 0; } diff --git a/trunk/include/linux/input/sh_keysc.h b/trunk/include/linux/input/sh_keysc.h index 5d253cd93691..649dc7f12925 100644 --- a/trunk/include/linux/input/sh_keysc.h +++ b/trunk/include/linux/input/sh_keysc.h @@ -1,7 +1,7 @@ #ifndef __SH_KEYSC_H__ #define __SH_KEYSC_H__ -#define SH_KEYSC_MAXKEYS 64 +#define SH_KEYSC_MAXKEYS 49 struct sh_keysc_info { enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index f6efed0039ed..6c12989839d9 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -414,7 +414,6 @@ enum TASKLET_SOFTIRQ, SCHED_SOFTIRQ, HRTIMER_SOFTIRQ, - RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ NR_SOFTIRQS }; diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h index 8cc38d3bab0c..7ad824d510a2 100644 --- a/trunk/include/linux/smp.h +++ b/trunk/include/linux/smp.h @@ -85,15 +85,12 @@ int smp_call_function_any(const struct cpumask *mask, * Generic and arch helpers */ #ifdef CONFIG_USE_GENERIC_SMP_HELPERS -void __init call_function_init(void); void generic_smp_call_function_single_interrupt(void); void generic_smp_call_function_interrupt(void); void ipi_call_lock(void); void ipi_call_unlock(void); void ipi_call_lock_irq(void); void ipi_call_unlock_irq(void); -#else -static inline void call_function_init(void) { } #endif /* @@ -137,7 +134,7 @@ static inline void smp_send_reschedule(int cpu) { } #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_many(mask, func, info, wait) \ (up_smp_call_function(func, info)) -static inline void call_function_init(void) { } +static inline void 
init_call_single_data(void) { } static inline int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, diff --git a/trunk/include/linux/sunrpc/gss_krb5_enctypes.h b/trunk/include/linux/sunrpc/gss_krb5_enctypes.h deleted file mode 100644 index ec6234eee89c..000000000000 --- a/trunk/include/linux/sunrpc/gss_krb5_enctypes.h +++ /dev/null @@ -1,4 +0,0 @@ -/* - * Dumb way to share this static piece of information with nfsd - */ -#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" diff --git a/trunk/include/net/netfilter/nf_conntrack.h b/trunk/include/net/netfilter/nf_conntrack.h index 5d4f8e586e32..c7c42e7acc31 100644 --- a/trunk/include/net/netfilter/nf_conntrack.h +++ b/trunk/include/net/netfilter/nf_conntrack.h @@ -307,12 +307,6 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct) return test_bit(IPS_UNTRACKED_BIT, &ct->status); } -/* Packet is received from loopback */ -static inline bool nf_is_loopback_packet(const struct sk_buff *skb) -{ - return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; -} - extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); extern unsigned int nf_conntrack_htable_size; extern unsigned int nf_conntrack_max; diff --git a/trunk/include/trace/events/irq.h b/trunk/include/trace/events/irq.h index 1c09820df585..ae045ca7d356 100644 --- a/trunk/include/trace/events/irq.h +++ b/trunk/include/trace/events/irq.h @@ -20,8 +20,7 @@ struct softirq_action; softirq_name(BLOCK_IOPOLL), \ softirq_name(TASKLET), \ softirq_name(SCHED), \ - softirq_name(HRTIMER), \ - softirq_name(RCU)) + softirq_name(HRTIMER)) /** * irq_handler_entry - called immediately before the irq action handler diff --git a/trunk/init/main.c b/trunk/init/main.c index d7211faed2ad..cafba67c13bf 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -542,7 +542,6 @@ asmlinkage void __init start_kernel(void) timekeeping_init(); time_init(); profile_init(); - call_function_init(); if (!irqs_disabled()) printk(KERN_CRIT "start_kernel(): bug: interrupts were " "enabled early\n"); diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index 7e59ffb3d0ba..89419ff92e99 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -87,8 +87,6 @@ static struct rcu_state *rcu_state; int rcu_scheduler_active __read_mostly; EXPORT_SYMBOL_GPL(rcu_scheduler_active); -#ifdef CONFIG_RCU_BOOST - /* * Control variables for per-CPU and per-rcu_node kthreads. These * handle all flavors of RCU. @@ -100,11 +98,8 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); static char rcu_kthreads_spawnable; -#endif /* #ifdef CONFIG_RCU_BOOST */ - static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); -static void invoke_rcu_core(void); -static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); +static void invoke_rcu_cpu_kthread(void); #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ @@ -1093,8 +1088,14 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) int need_report = 0; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp; + struct task_struct *t; - rcu_stop_cpu_kthread(cpu); + /* Stop the CPU's kthread. */ + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t != NULL) { + per_cpu(rcu_cpu_kthread_task, cpu) = NULL; + kthread_stop(t); + } /* Exclude any attempts to start a new grace period. 
*/ raw_spin_lock_irqsave(&rsp->onofflock, flags); @@ -1230,7 +1231,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) /* Re-raise the RCU softirq if there are callbacks remaining. */ if (cpu_has_callbacks_ready_to_invoke(rdp)) - invoke_rcu_core(); + invoke_rcu_cpu_kthread(); } /* @@ -1276,7 +1277,7 @@ void rcu_check_callbacks(int cpu, int user) } rcu_preempt_check_callbacks(cpu); if (rcu_pending(cpu)) - invoke_rcu_core(); + invoke_rcu_cpu_kthread(); } #ifdef CONFIG_SMP @@ -1441,14 +1442,13 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) } /* If there are callbacks ready, invoke them. */ - if (cpu_has_callbacks_ready_to_invoke(rdp)) - invoke_rcu_callbacks(rsp, rdp); + rcu_do_batch(rsp, rdp); } /* * Do softirq processing for the current CPU. */ -static void rcu_process_callbacks(struct softirq_action *unused) +static void rcu_process_callbacks(void) { __rcu_process_callbacks(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); @@ -1465,20 +1465,342 @@ static void rcu_process_callbacks(struct softirq_action *unused) * the current CPU with interrupts disabled, the rcu_cpu_kthread_task * cannot disappear out from under us. */ -static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) +static void invoke_rcu_cpu_kthread(void) +{ + unsigned long flags; + + local_irq_save(flags); + __this_cpu_write(rcu_cpu_has_work, 1); + if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { + local_irq_restore(flags); + return; + } + wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); + local_irq_restore(flags); +} + +/* + * Wake up the specified per-rcu_node-structure kthread. + * Because the per-rcu_node kthreads are immortal, we don't need + * to do anything to keep them alive. + */ +static void invoke_rcu_node_kthread(struct rcu_node *rnp) +{ + struct task_struct *t; + + t = rnp->node_kthread_task; + if (t != NULL) + wake_up_process(t); +} + +/* + * Set the specified CPU's kthread to run RT or not, as specified by + * the to_rt argument. The CPU-hotplug locks are held, so the task + * is not going away. + */ +static void rcu_cpu_kthread_setrt(int cpu, int to_rt) +{ + int policy; + struct sched_param sp; + struct task_struct *t; + + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t == NULL) + return; + if (to_rt) { + policy = SCHED_FIFO; + sp.sched_priority = RCU_KTHREAD_PRIO; + } else { + policy = SCHED_NORMAL; + sp.sched_priority = 0; + } + sched_setscheduler_nocheck(t, policy, &sp); +} + +/* + * Timer handler to initiate the waking up of per-CPU kthreads that + * have yielded the CPU due to excess numbers of RCU callbacks. + * We wake up the per-rcu_node kthread, which in turn will wake up + * the booster kthread. + */ +static void rcu_cpu_kthread_timer(unsigned long arg) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); + struct rcu_node *rnp = rdp->mynode; + + atomic_or(rdp->grpmask, &rnp->wakemask); + invoke_rcu_node_kthread(rnp); +} + +/* + * Drop to non-real-time priority and yield, but only after posting a + * timer that will cause us to regain our real-time priority if we + * remain preempted. Either way, we restore our real-time priority + * before returning. 
+ */ +static void rcu_yield(void (*f)(unsigned long), unsigned long arg) +{ + struct sched_param sp; + struct timer_list yield_timer; + + setup_timer_on_stack(&yield_timer, f, arg); + mod_timer(&yield_timer, jiffies + 2); + sp.sched_priority = 0; + sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); + set_user_nice(current, 19); + schedule(); + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); + del_timer(&yield_timer); +} + +/* + * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. + * This can happen while the corresponding CPU is either coming online + * or going offline. We cannot wait until the CPU is fully online + * before starting the kthread, because the various notifier functions + * can wait for RCU grace periods. So we park rcu_cpu_kthread() until + * the corresponding CPU is online. + * + * Return 1 if the kthread needs to stop, 0 otherwise. + * + * Caller must disable bh. This function can momentarily enable it. + */ +static int rcu_cpu_kthread_should_stop(int cpu) +{ + while (cpu_is_offline(cpu) || + !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) || + smp_processor_id() != cpu) { + if (kthread_should_stop()) + return 1; + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; + per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); + local_bh_enable(); + schedule_timeout_uninterruptible(1); + if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu))) + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + local_bh_disable(); + } + per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; + return 0; +} + +/* + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the + * earlier RCU softirq. + */ +static int rcu_cpu_kthread(void *arg) +{ + int cpu = (int)(long)arg; + unsigned long flags; + int spincnt = 0; + unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); + char work; + char *workp = &per_cpu(rcu_cpu_has_work, cpu); + + for (;;) { + *statusp = RCU_KTHREAD_WAITING; + rcu_wait(*workp != 0 || kthread_should_stop()); + local_bh_disable(); + if (rcu_cpu_kthread_should_stop(cpu)) { + local_bh_enable(); + break; + } + *statusp = RCU_KTHREAD_RUNNING; + per_cpu(rcu_cpu_kthread_loops, cpu)++; + local_irq_save(flags); + work = *workp; + *workp = 0; + local_irq_restore(flags); + if (work) + rcu_process_callbacks(); + local_bh_enable(); + if (*workp != 0) + spincnt++; + else + spincnt = 0; + if (spincnt > 10) { + *statusp = RCU_KTHREAD_YIELDING; + rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); + spincnt = 0; + } + } + *statusp = RCU_KTHREAD_STOPPED; + return 0; +} + +/* + * Spawn a per-CPU kthread, setting up affinity and priority. + * Because the CPU hotplug lock is held, no other CPU will be attempting + * to manipulate rcu_cpu_kthread_task. There might be another CPU + * attempting to access it during boot, but the locking in kthread_bind() + * will enforce sufficient ordering. 
+ */ +static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) { - if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); + struct sched_param sp; + struct task_struct *t; + + if (!rcu_kthreads_spawnable || + per_cpu(rcu_cpu_kthread_task, cpu) != NULL) + return 0; + t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); + if (IS_ERR(t)) + return PTR_ERR(t); + kthread_bind(t, cpu); + per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; + WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); + per_cpu(rcu_cpu_kthread_task, cpu) = t; + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + return 0; +} + +/* + * Per-rcu_node kthread, which is in charge of waking up the per-CPU + * kthreads when needed. We ignore requests to wake up kthreads + * for offline CPUs, which is OK because force_quiescent_state() + * takes care of this case. + */ +static int rcu_node_kthread(void *arg) +{ + int cpu; + unsigned long flags; + unsigned long mask; + struct rcu_node *rnp = (struct rcu_node *)arg; + struct sched_param sp; + struct task_struct *t; + + for (;;) { + rnp->node_kthread_status = RCU_KTHREAD_WAITING; + rcu_wait(atomic_read(&rnp->wakemask) != 0); + rnp->node_kthread_status = RCU_KTHREAD_RUNNING; + raw_spin_lock_irqsave(&rnp->lock, flags); + mask = atomic_xchg(&rnp->wakemask, 0); + rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ + for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { + if ((mask & 0x1) == 0) + continue; + preempt_disable(); + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (!cpu_online(cpu) || t == NULL) { + preempt_enable(); + continue; + } + per_cpu(rcu_cpu_has_work, cpu) = 1; + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + preempt_enable(); + } + } + /* NOTREACHED */ + rnp->node_kthread_status = RCU_KTHREAD_STOPPED; + return 0; +} + +/* + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. The CPU hotplug lock is still + * held, so the value of rnp->qsmaskinit will be stable. + * + * We don't include outgoingcpu in the affinity set, use -1 if there is + * no outgoing CPU. If there are no CPUs left in the affinity set, + * this function allows the kthread to execute on any CPU. + */ +static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +{ + cpumask_var_t cm; + int cpu; + unsigned long mask = rnp->qsmaskinit; + + if (rnp->node_kthread_task == NULL) + return; + if (!alloc_cpumask_var(&cm, GFP_KERNEL)) return; + cpumask_clear(cm); + for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) + if ((mask & 0x1) && cpu != outgoingcpu) + cpumask_set_cpu(cpu, cm); + if (cpumask_weight(cm) == 0) { + cpumask_setall(cm); + for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) + cpumask_clear_cpu(cpu, cm); + WARN_ON_ONCE(cpumask_weight(cm) == 0); } - invoke_rcu_callbacks_kthread(); + set_cpus_allowed_ptr(rnp->node_kthread_task, cm); + rcu_boost_kthread_setaffinity(rnp, cm); + free_cpumask_var(cm); } -static void invoke_rcu_core(void) +/* + * Spawn a per-rcu_node kthread, setting priority and affinity. + * Called during boot before online/offline can happen, or, if + * during runtime, with the main CPU-hotplug locks held. So only + * one of these can be executing at a time. 
+ */ +static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, + struct rcu_node *rnp) { - raise_softirq(RCU_SOFTIRQ); + unsigned long flags; + int rnp_index = rnp - &rsp->node[0]; + struct sched_param sp; + struct task_struct *t; + + if (!rcu_kthreads_spawnable || + rnp->qsmaskinit == 0) + return 0; + if (rnp->node_kthread_task == NULL) { + t = kthread_create(rcu_node_kthread, (void *)rnp, + "rcun%d", rnp_index); + if (IS_ERR(t)) + return PTR_ERR(t); + raw_spin_lock_irqsave(&rnp->lock, flags); + rnp->node_kthread_task = t; + raw_spin_unlock_irqrestore(&rnp->lock, flags); + sp.sched_priority = 99; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + } + return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); } +static void rcu_wake_one_boost_kthread(struct rcu_node *rnp); + +/* + * Spawn all kthreads -- called as soon as the scheduler is running. + */ +static int __init rcu_spawn_kthreads(void) +{ + int cpu; + struct rcu_node *rnp; + struct task_struct *t; + + rcu_kthreads_spawnable = 1; + for_each_possible_cpu(cpu) { + per_cpu(rcu_cpu_has_work, cpu) = 0; + if (cpu_online(cpu)) { + (void)rcu_spawn_one_cpu_kthread(cpu); + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t) + wake_up_process(t); + } + } + rnp = rcu_get_root(rcu_state); + (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + if (rnp->node_kthread_task) + wake_up_process(rnp->node_kthread_task); + if (NUM_RCU_NODES > 1) { + rcu_for_each_leaf_node(rcu_state, rnp) { + (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + t = rnp->node_kthread_task; + if (t) + wake_up_process(t); + rcu_wake_one_boost_kthread(rnp); + } + } + return 0; +} +early_initcall(rcu_spawn_kthreads); + static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_state *rsp) @@ -1885,6 +2207,44 @@ static void __cpuinit rcu_prepare_cpu(int cpu) rcu_preempt_init_percpu_data(cpu); } +static void __cpuinit rcu_prepare_kthreads(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); + struct rcu_node *rnp = rdp->mynode; + + /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ + if (rcu_kthreads_spawnable) { + (void)rcu_spawn_one_cpu_kthread(cpu); + if (rnp->node_kthread_task == NULL) + (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + } +} + +/* + * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state, + * but the RCU threads are woken on demand, and if demand is low this + * could be a while triggering the hung task watchdog. + * + * In order to avoid this, poke all tasks once the CPU is fully + * up and running. + */ +static void __cpuinit rcu_online_kthreads(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); + struct rcu_node *rnp = rdp->mynode; + struct task_struct *t; + + t = per_cpu(rcu_cpu_kthread_task, cpu); + if (t) + wake_up_process(t); + + t = rnp->node_kthread_task; + if (t) + wake_up_process(t); + + rcu_wake_one_boost_kthread(rnp); +} + /* * Handle CPU online/offline notification events. 
*/ @@ -1902,6 +2262,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, rcu_prepare_kthreads(cpu); break; case CPU_ONLINE: + rcu_online_kthreads(cpu); case CPU_DOWN_FAILED: rcu_node_kthread_setaffinity(rnp, -1); rcu_cpu_kthread_setrt(cpu, 1); @@ -2049,7 +2410,6 @@ void __init rcu_init(void) rcu_init_one(&rcu_sched_state, &rcu_sched_data); rcu_init_one(&rcu_bh_state, &rcu_bh_data); __rcu_init_preempt(); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* * We don't need protection against CPU-hotplug here because diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h index 01b2ccda26fb..7b9a08b4aaea 100644 --- a/trunk/kernel/rcutree.h +++ b/trunk/kernel/rcutree.h @@ -369,7 +369,6 @@ struct rcu_state { /* period because */ /* force_quiescent_state() */ /* was running. */ - u8 boost; /* Subject to priority boost. */ unsigned long gpnum; /* Current gp number. */ unsigned long completed; /* # of last completed gp. */ @@ -427,7 +426,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags); -static void rcu_stop_cpu_kthread(int cpu); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_print_detail_task_stall(struct rcu_state *rsp); static void rcu_print_task_stall(struct rcu_node *rnp); @@ -452,19 +450,11 @@ static void rcu_preempt_send_cbs_to_online(void); static void __init __rcu_init_preempt(void); static void rcu_needs_cpu_flush(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); -static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); -#ifdef CONFIG_RCU_BOOST -static void rcu_preempt_do_callbacks(void); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, cpumask_var_t cm); +static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp, int rnp_index); -static void invoke_rcu_node_kthread(struct rcu_node *rnp); -static void rcu_yield(void (*f)(unsigned long), unsigned long arg); -#endif /* #ifdef CONFIG_RCU_BOOST */ -static void rcu_cpu_kthread_setrt(int cpu, int to_rt); -static void __cpuinit rcu_prepare_kthreads(int cpu); #endif /* #ifndef RCU_TREE_NONCORE */ diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 14dc7dd00902..c8bff3099a89 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -602,15 +602,6 @@ static void rcu_preempt_process_callbacks(void) &__get_cpu_var(rcu_preempt_data)); } -#ifdef CONFIG_RCU_BOOST - -static void rcu_preempt_do_callbacks(void) -{ - rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); -} - -#endif /* #ifdef CONFIG_RCU_BOOST */ - /* * Queue a preemptible-RCU callback for invocation after a grace period. */ @@ -1257,23 +1248,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } } -/* - * Wake up the per-CPU kthread to invoke RCU callbacks. - */ -static void invoke_rcu_callbacks_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { - local_irq_restore(flags); - return; - } - wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); - local_irq_restore(flags); -} - /* * Set the affinity of the boost kthread. 
The CPU-hotplug locks are * held, so no one should be messing with the existence of the boost @@ -1314,7 +1288,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, if (&rcu_preempt_state != rsp) return 0; - rsp->boost = 1; if (rnp->boost_kthread_task != NULL) return 0; t = kthread_create(rcu_boost_kthread, (void *)rnp, @@ -1326,372 +1299,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, raw_spin_unlock_irqrestore(&rnp->lock, flags); sp.sched_priority = RCU_KTHREAD_PRIO; sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ return 0; } -#ifdef CONFIG_HOTPLUG_CPU - -/* - * Stop the RCU's per-CPU kthread when its CPU goes offline,. - */ -static void rcu_stop_cpu_kthread(int cpu) +static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) { - struct task_struct *t; - - /* Stop the CPU's kthread. */ - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t != NULL) { - per_cpu(rcu_cpu_kthread_task, cpu) = NULL; - kthread_stop(t); - } -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); - rcu_preempt_do_callbacks(); -} - -/* - * Wake up the specified per-rcu_node-structure kthread. - * Because the per-rcu_node kthreads are immortal, we don't need - * to do anything to keep them alive. - */ -static void invoke_rcu_node_kthread(struct rcu_node *rnp) -{ - struct task_struct *t; - - t = rnp->node_kthread_task; - if (t != NULL) - wake_up_process(t); -} - -/* - * Set the specified CPU's kthread to run RT or not, as specified by - * the to_rt argument. The CPU-hotplug locks are held, so the task - * is not going away. - */ -static void rcu_cpu_kthread_setrt(int cpu, int to_rt) -{ - int policy; - struct sched_param sp; - struct task_struct *t; - - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t == NULL) - return; - if (to_rt) { - policy = SCHED_FIFO; - sp.sched_priority = RCU_KTHREAD_PRIO; - } else { - policy = SCHED_NORMAL; - sp.sched_priority = 0; - } - sched_setscheduler_nocheck(t, policy, &sp); -} - -/* - * Timer handler to initiate the waking up of per-CPU kthreads that - * have yielded the CPU due to excess numbers of RCU callbacks. - * We wake up the per-rcu_node kthread, which in turn will wake up - * the booster kthread. - */ -static void rcu_cpu_kthread_timer(unsigned long arg) -{ - struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); - struct rcu_node *rnp = rdp->mynode; - - atomic_or(rdp->grpmask, &rnp->wakemask); - invoke_rcu_node_kthread(rnp); -} - -/* - * Drop to non-real-time priority and yield, but only after posting a - * timer that will cause us to regain our real-time priority if we - * remain preempted. Either way, we restore our real-time priority - * before returning. - */ -static void rcu_yield(void (*f)(unsigned long), unsigned long arg) -{ - struct sched_param sp; - struct timer_list yield_timer; - - setup_timer_on_stack(&yield_timer, f, arg); - mod_timer(&yield_timer, jiffies + 2); - sp.sched_priority = 0; - sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); - set_user_nice(current, 19); - schedule(); - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); - del_timer(&yield_timer); -} - -/* - * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. - * This can happen while the corresponding CPU is either coming online - * or going offline. 
We cannot wait until the CPU is fully online - * before starting the kthread, because the various notifier functions - * can wait for RCU grace periods. So we park rcu_cpu_kthread() until - * the corresponding CPU is online. - * - * Return 1 if the kthread needs to stop, 0 otherwise. - * - * Caller must disable bh. This function can momentarily enable it. - */ -static int rcu_cpu_kthread_should_stop(int cpu) -{ - while (cpu_is_offline(cpu) || - !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) || - smp_processor_id() != cpu) { - if (kthread_should_stop()) - return 1; - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; - per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); - local_bh_enable(); - schedule_timeout_uninterruptible(1); - if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu))) - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - local_bh_disable(); - } - per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; - return 0; -} - -/* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * earlier RCU softirq. - */ -static int rcu_cpu_kthread(void *arg) -{ - int cpu = (int)(long)arg; - unsigned long flags; - int spincnt = 0; - unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); - char work; - char *workp = &per_cpu(rcu_cpu_has_work, cpu); - - for (;;) { - *statusp = RCU_KTHREAD_WAITING; - rcu_wait(*workp != 0 || kthread_should_stop()); - local_bh_disable(); - if (rcu_cpu_kthread_should_stop(cpu)) { - local_bh_enable(); - break; - } - *statusp = RCU_KTHREAD_RUNNING; - per_cpu(rcu_cpu_kthread_loops, cpu)++; - local_irq_save(flags); - work = *workp; - *workp = 0; - local_irq_restore(flags); - if (work) - rcu_kthread_do_work(); - local_bh_enable(); - if (*workp != 0) - spincnt++; - else - spincnt = 0; - if (spincnt > 10) { - *statusp = RCU_KTHREAD_YIELDING; - rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); - spincnt = 0; - } - } - *statusp = RCU_KTHREAD_STOPPED; - return 0; -} - -/* - * Spawn a per-CPU kthread, setting up affinity and priority. - * Because the CPU hotplug lock is held, no other CPU will be attempting - * to manipulate rcu_cpu_kthread_task. There might be another CPU - * attempting to access it during boot, but the locking in kthread_bind() - * will enforce sufficient ordering. - * - * Please note that we cannot simply refuse to wake up the per-CPU - * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, - * which can result in softlockup complaints if the task ends up being - * idle for more than a couple of minutes. - * - * However, please note also that we cannot bind the per-CPU kthread to its - * CPU until that CPU is fully online. We also cannot wait until the - * CPU is fully online before we create its per-CPU kthread, as this would - * deadlock the system when CPU notifiers tried waiting for grace - * periods. So we bind the per-CPU kthread to its CPU only if the CPU - * is online. If its CPU is not yet fully online, then the code in - * rcu_cpu_kthread() will wait until it is fully online, and then do - * the binding. 
- */ -static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) -{ - struct sched_param sp; - struct task_struct *t; - - if (!rcu_kthreads_spawnable || - per_cpu(rcu_cpu_kthread_task, cpu) != NULL) - return 0; - t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); - if (IS_ERR(t)) - return PTR_ERR(t); - if (cpu_online(cpu)) - kthread_bind(t, cpu); - per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; - WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - per_cpu(rcu_cpu_kthread_task, cpu) = t; - wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ - return 0; -} - -/* - * Per-rcu_node kthread, which is in charge of waking up the per-CPU - * kthreads when needed. We ignore requests to wake up kthreads - * for offline CPUs, which is OK because force_quiescent_state() - * takes care of this case. - */ -static int rcu_node_kthread(void *arg) -{ - int cpu; - unsigned long flags; - unsigned long mask; - struct rcu_node *rnp = (struct rcu_node *)arg; - struct sched_param sp; - struct task_struct *t; - - for (;;) { - rnp->node_kthread_status = RCU_KTHREAD_WAITING; - rcu_wait(atomic_read(&rnp->wakemask) != 0); - rnp->node_kthread_status = RCU_KTHREAD_RUNNING; - raw_spin_lock_irqsave(&rnp->lock, flags); - mask = atomic_xchg(&rnp->wakemask, 0); - rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { - if ((mask & 0x1) == 0) - continue; - preempt_disable(); - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (!cpu_online(cpu) || t == NULL) { - preempt_enable(); - continue; - } - per_cpu(rcu_cpu_has_work, cpu) = 1; - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - preempt_enable(); - } - } - /* NOTREACHED */ - rnp->node_kthread_status = RCU_KTHREAD_STOPPED; - return 0; -} - -/* - * Set the per-rcu_node kthread's affinity to cover all CPUs that are - * served by the rcu_node in question. The CPU hotplug lock is still - * held, so the value of rnp->qsmaskinit will be stable. - * - * We don't include outgoingcpu in the affinity set, use -1 if there is - * no outgoing CPU. If there are no CPUs left in the affinity set, - * this function allows the kthread to execute on any CPU. - */ -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) -{ - cpumask_var_t cm; - int cpu; - unsigned long mask = rnp->qsmaskinit; - - if (rnp->node_kthread_task == NULL) - return; - if (!alloc_cpumask_var(&cm, GFP_KERNEL)) - return; - cpumask_clear(cm); - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) - if ((mask & 0x1) && cpu != outgoingcpu) - cpumask_set_cpu(cpu, cm); - if (cpumask_weight(cm) == 0) { - cpumask_setall(cm); - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) - cpumask_clear_cpu(cpu, cm); - WARN_ON_ONCE(cpumask_weight(cm) == 0); - } - set_cpus_allowed_ptr(rnp->node_kthread_task, cm); - rcu_boost_kthread_setaffinity(rnp, cm); - free_cpumask_var(cm); -} - -/* - * Spawn a per-rcu_node kthread, setting priority and affinity. - * Called during boot before online/offline can happen, or, if - * during runtime, with the main CPU-hotplug locks held. So only - * one of these can be executing at a time. 
- */ -static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, - struct rcu_node *rnp) -{ - unsigned long flags; - int rnp_index = rnp - &rsp->node[0]; - struct sched_param sp; - struct task_struct *t; - - if (!rcu_kthreads_spawnable || - rnp->qsmaskinit == 0) - return 0; - if (rnp->node_kthread_task == NULL) { - t = kthread_create(rcu_node_kthread, (void *)rnp, - "rcun%d", rnp_index); - if (IS_ERR(t)) - return PTR_ERR(t); - raw_spin_lock_irqsave(&rnp->lock, flags); - rnp->node_kthread_task = t; - raw_spin_unlock_irqrestore(&rnp->lock, flags); - sp.sched_priority = 99; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ - } - return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); -} - -/* - * Spawn all kthreads -- called as soon as the scheduler is running. - */ -static int __init rcu_spawn_kthreads(void) -{ - int cpu; - struct rcu_node *rnp; - - rcu_kthreads_spawnable = 1; - for_each_possible_cpu(cpu) { - per_cpu(rcu_cpu_has_work, cpu) = 0; - if (cpu_online(cpu)) - (void)rcu_spawn_one_cpu_kthread(cpu); - } - rnp = rcu_get_root(rcu_state); - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - if (NUM_RCU_NODES > 1) { - rcu_for_each_leaf_node(rcu_state, rnp) - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - } - return 0; -} -early_initcall(rcu_spawn_kthreads); - -static void __cpuinit rcu_prepare_kthreads(int cpu) -{ - struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); - struct rcu_node *rnp = rdp->mynode; - - /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ - if (rcu_kthreads_spawnable) { - (void)rcu_spawn_one_cpu_kthread(cpu); - if (rnp->node_kthread_task == NULL) - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - } + if (rnp->boost_kthread_task) + wake_up_process(rnp->boost_kthread_task); } #else /* #ifdef CONFIG_RCU_BOOST */ @@ -1701,32 +1315,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore(&rnp->lock, flags); } -static void invoke_rcu_callbacks_kthread(void) +static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, + cpumask_var_t cm) { - WARN_ON_ONCE(1); } static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) { } -#ifdef CONFIG_HOTPLUG_CPU - -static void rcu_stop_cpu_kthread(int cpu) -{ -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) -{ -} - -static void rcu_cpu_kthread_setrt(int cpu, int to_rt) +static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + struct rcu_node *rnp, + int rnp_index) { + return 0; } -static void __cpuinit rcu_prepare_kthreads(int cpu) +static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) { } @@ -1904,7 +1509,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); * * Because it is not legal to invoke rcu_process_callbacks() with irqs * disabled, we do one pass of force_quiescent_state(), then do a - * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked + * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. */ int rcu_needs_cpu(int cpu) @@ -1955,7 +1560,7 @@ int rcu_needs_cpu(int cpu) /* If RCU callbacks are still pending, RCU still needs this CPU. 
*/ if (c) - invoke_rcu_core(); + invoke_rcu_cpu_kthread(); return c; } diff --git a/trunk/kernel/rcutree_trace.c b/trunk/kernel/rcutree_trace.c index 4e144876dc68..9678cc3650f5 100644 --- a/trunk/kernel/rcutree_trace.c +++ b/trunk/kernel/rcutree_trace.c @@ -46,8 +46,6 @@ #define RCU_TREE_NONCORE #include "rcutree.h" -#ifdef CONFIG_RCU_BOOST - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); @@ -60,8 +58,6 @@ static char convert_kthread_status(unsigned int kthread_status) return "SRWOY"[kthread_status]; } -#endif /* #ifdef CONFIG_RCU_BOOST */ - static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) @@ -80,7 +76,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); - seq_printf(m, " ql=%ld qs=%c%c%c%c", + seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]], @@ -88,16 +84,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->nxttail[RCU_NEXT_READY_TAIL]], ".W"[rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_WAIT_TAIL]], - ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); -#ifdef CONFIG_RCU_BOOST - seq_printf(m, " kt=%d/%c/%d ktl=%x", + ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, rdp->cpu)), per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), - per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); -#endif /* #ifdef CONFIG_RCU_BOOST */ - seq_printf(m, " b=%ld", rdp->blimit); + per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, + rdp->blimit); seq_printf(m, " ci=%lu co=%lu ca=%lu\n", rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } @@ -154,21 +147,18 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); - seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, + seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]], ".R"[rdp->nxttail[RCU_WAIT_TAIL] != rdp->nxttail[RCU_NEXT_READY_TAIL]], ".W"[rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_WAIT_TAIL]], - ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); -#ifdef CONFIG_RCU_BOOST - seq_printf(m, ",%d,\"%c\"", + ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, - rdp->cpu))); -#endif /* #ifdef CONFIG_RCU_BOOST */ - seq_printf(m, ",%ld", rdp->blimit); + rdp->cpu)), + rdp->blimit); seq_printf(m, ",%lu,%lu,%lu\n", rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } @@ -179,11 +169,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) #ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); #endif /* #ifdef CONFIG_NO_HZ */ - seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); -#ifdef CONFIG_RCU_BOOST - seq_puts(m, "\"kt\",\"ktl\""); -#endif /* #ifdef CONFIG_RCU_BOOST */ - seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n"); + seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "\"rcu_preempt:\"\n"); PRINT_RCU_DATA(rcu_preempt_data, 
print_one_rcu_data_csv, m); diff --git a/trunk/kernel/smp.c b/trunk/kernel/smp.c index fb67dfa8394e..73a195193558 100644 --- a/trunk/kernel/smp.c +++ b/trunk/kernel/smp.c @@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = { .notifier_call = hotplug_cfd, }; -void __init call_function_init(void) +static int __cpuinit init_call_single_data(void) { void *cpu = (void *)(long)smp_processor_id(); int i; @@ -88,7 +88,10 @@ void __init call_function_init(void) hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); register_cpu_notifier(&hotplug_cfd_notifier); + + return 0; } +early_initcall(init_call_single_data); /* * csd_lock/csd_unlock used to serialize access to per-cpu csd resources diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index 40cf63ddd4b3..13960170cad4 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); char *softirq_to_name[NR_SOFTIRQS] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", - "TASKLET", "SCHED", "HRTIMER", "RCU" + "TASKLET", "SCHED", "HRTIMER" }; /* diff --git a/trunk/kernel/time/clocksource.c b/trunk/kernel/time/clocksource.c index e0980f0d9a0a..1c95fd677328 100644 --- a/trunk/kernel/time/clocksource.c +++ b/trunk/kernel/time/clocksource.c @@ -185,6 +185,7 @@ static struct clocksource *watchdog; static struct timer_list watchdog_timer; static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); static DEFINE_SPINLOCK(watchdog_lock); +static cycle_t watchdog_last; static int watchdog_running; static int clocksource_watchdog_kthread(void *data); @@ -253,6 +254,11 @@ static void clocksource_watchdog(unsigned long data) if (!watchdog_running) goto out; + wdnow = watchdog->read(watchdog); + wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask, + watchdog->mult, watchdog->shift); + watchdog_last = wdnow; + list_for_each_entry(cs, &watchdog_list, wd_list) { /* Clocksource already marked unstable? */ @@ -262,28 +268,19 @@ static void clocksource_watchdog(unsigned long data) continue; } - local_irq_disable(); csnow = cs->read(cs); - wdnow = watchdog->read(watchdog); - local_irq_enable(); /* Clocksource initialized ? */ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { cs->flags |= CLOCK_SOURCE_WATCHDOG; - cs->wd_last = wdnow; - cs->cs_last = csnow; + cs->wd_last = csnow; continue; } - wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask, - watchdog->mult, watchdog->shift); - - cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) & - cs->mask, cs->mult, cs->shift); - cs->cs_last = csnow; - cs->wd_last = wdnow; - /* Check the deviation from the watchdog clocksource. 
diff --git a/trunk/kernel/trace/trace_printk.c b/trunk/kernel/trace/trace_printk.c
index 1f06468a10d7..dff763b7baf1 100644
--- a/trunk/kernel/trace/trace_printk.c
+++ b/trunk/kernel/trace/trace_printk.c
@@ -240,10 +240,13 @@ static const char **find_next(void *v, loff_t *pos)
 	const char **fmt = v;
 	int start_index;
 
+	if (!fmt)
+		fmt = __start___trace_bprintk_fmt + *pos;
+
 	start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
 
 	if (*pos < start_index)
-		return __start___trace_bprintk_fmt + *pos;
+		return fmt;
 
 	return find_next_mod_format(start_index, v, fmt, pos);
 }
diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c
index 917ecb93ea28..c7a581a96894 100644
--- a/trunk/net/8021q/vlan.c
+++ b/trunk/net/8021q/vlan.c
@@ -205,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
 	grp->nr_vlans++;
 
 	if (ngrp) {
-		if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
+		if (ops->ndo_vlan_rx_register)
 			ops->ndo_vlan_rx_register(real_dev, ngrp);
 		rcu_assign_pointer(real_dev->vlgrp, ngrp);
 	}
diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c
index 77930aa522e3..f13ddbf858ba 100644
--- a/trunk/net/bluetooth/hci_event.c
+++ b/trunk/net/bluetooth/hci_event.c
@@ -477,16 +477,14 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 	 * command otherwise */
 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
 
-	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
-	 * any event mask for pre 1.2 devices */
-	if (hdev->lmp_ver <= 1)
-		return;
-
-	events[4] |= 0x01; /* Flow Specification Complete */
-	events[4] |= 0x02; /* Inquiry Result with RSSI */
-	events[4] |= 0x04; /* Read Remote Extended Features Complete */
-	events[5] |= 0x08; /* Synchronous Connection Complete */
-	events[5] |= 0x10; /* Synchronous Connection Changed */
+	/* Events for 1.2 and newer controllers */
+	if (hdev->lmp_ver > 1) {
+		events[4] |= 0x01; /* Flow Specification Complete */
+		events[4] |= 0x02; /* Inquiry Result with RSSI */
+		events[4] |= 0x04; /* Read Remote Extended Features Complete */
+		events[5] |= 0x08; /* Synchronous Connection Complete */
+		events[5] |= 0x10; /* Synchronous Connection Changed */
+	}
 
 	if (hdev->features[3] & LMP_RSSI_INQ)
 		events[4] |= 0x04; /* Inquiry Result with RSSI */
diff --git a/trunk/net/bluetooth/l2cap_sock.c b/trunk/net/bluetooth/l2cap_sock.c
index 8248303f44e8..18dc9888d8c2 100644
--- a/trunk/net/bluetooth/l2cap_sock.c
+++ b/trunk/net/bluetooth/l2cap_sock.c
@@ -413,7 +413,6 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
 			break;
 		}
 
-		memset(&cinfo, 0, sizeof(cinfo));
 		cinfo.hci_handle = chan->conn->hcon->handle;
 		memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
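Aside on the memset() removal above (and the matching one in rfcomm below): zeroing the struct before filling it is what keeps compiler-inserted padding bytes from leaking kernel stack contents when the struct is later copied to user space. A standalone sketch, not part of the patch; the struct and helper are hypothetical:

#include <string.h>

struct conn_info {
	unsigned short handle;
	unsigned char dev_class[3];
	/* the compiler may insert padding here */
};

void fill_conn_info(struct conn_info *ci, unsigned short handle,
		    const unsigned char *cls)
{
	memset(ci, 0, sizeof(*ci));	/* clear padding before exporting */
	ci->handle = handle;
	memcpy(ci->dev_class, cls, 3);
}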
diff --git a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c
index 1b10727ce523..386cfaffd4b7 100644
--- a/trunk/net/bluetooth/rfcomm/sock.c
+++ b/trunk/net/bluetooth/rfcomm/sock.c
@@ -788,7 +788,6 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
 
 		l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
 
-		memset(&cinfo, 0, sizeof(cinfo));
 		cinfo.hci_handle = conn->hcon->handle;
 		memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
diff --git a/trunk/net/bluetooth/sco.c b/trunk/net/bluetooth/sco.c
index cb4fb7837e5c..42fdffd1d76c 100644
--- a/trunk/net/bluetooth/sco.c
+++ b/trunk/net/bluetooth/sco.c
@@ -369,15 +369,6 @@ static void __sco_sock_close(struct sock *sk)
 
 	case BT_CONNECTED:
 	case BT_CONFIG:
-		if (sco_pi(sk)->conn) {
-			sk->sk_state = BT_DISCONN;
-			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
-			hci_conn_put(sco_pi(sk)->conn->hcon);
-			sco_pi(sk)->conn->hcon = NULL;
-		} else
-			sco_chan_del(sk, ECONNRESET);
-		break;
-
 	case BT_CONNECT:
 	case BT_DISCONN:
 		sco_chan_del(sk, ECONNRESET);
@@ -828,9 +819,7 @@ static void sco_chan_del(struct sock *sk, int err)
 		conn->sk = NULL;
 		sco_pi(sk)->conn = NULL;
 		sco_conn_unlock(conn);
-
-		if (conn->hcon)
-			hci_conn_put(conn->hcon);
+		hci_conn_put(conn->hcon);
 	}
 
 	sk->sk_state = BT_CLOSED;
diff --git a/trunk/net/bridge/br_device.c b/trunk/net/bridge/br_device.c
index c188c803c09c..a6b2f86378c7 100644
--- a/trunk/net/bridge/br_device.c
+++ b/trunk/net/bridge/br_device.c
@@ -243,7 +243,6 @@ int br_netpoll_enable(struct net_bridge_port *p)
 		goto out;
 
 	np->dev = p->dev;
-	strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
 
 	err = __netpoll_setup(np);
 	if (err) {
diff --git a/trunk/net/bridge/br_multicast.c b/trunk/net/bridge/br_multicast.c
index 29b9812c8da0..2f14eafdeeab 100644
--- a/trunk/net/bridge/br_multicast.c
+++ b/trunk/net/bridge/br_multicast.c
@@ -1424,7 +1424,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	switch (ih->type) {
 	case IGMP_HOST_MEMBERSHIP_REPORT:
 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
-		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
 		err = br_ip4_multicast_add_group(br, port, ih->group);
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1543,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 			goto out;
 		}
 		mld = (struct mld_msg *)skb_transport_header(skb2);
-		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
 		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
 		break;
 	}
diff --git a/trunk/net/caif/cfmuxl.c b/trunk/net/caif/cfmuxl.c
index c23979e79dfa..3a66b8c10e09 100644
--- a/trunk/net/caif/cfmuxl.c
+++ b/trunk/net/caif/cfmuxl.c
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
 
-			if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
+			if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
 			     ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
 			     layer->id != 0) {
diff --git a/trunk/net/ieee802154/nl-phy.c b/trunk/net/ieee802154/nl-phy.c
index 02548b292b53..ed0eab39f531 100644
--- a/trunk/net/ieee802154/nl-phy.c
+++ b/trunk/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
 	pr_debug("%s\n", __func__);
 
 	if (!buf)
-		return -EMSGSIZE;
+		goto out;
 
 	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
 		IEEE802154_LIST_PHY);
@@ -65,7 +65,6 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
 			pages * sizeof(uint32_t), buf);
 
 	mutex_unlock(&phy->pib_lock);
-	kfree(buf);
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
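Aside on the ieee802154_nl_fill_phy() hunks above: the two sides differ over which exit path owns buf, which is exactly the kind of leak a single cleanup label avoids. A standalone sketch of that single-exit pattern, not part of the patch, with hypothetical names:

#include <stdlib.h>
#include <string.h>

/* Every allocation is released on exactly one path, so neither the
 * success nor the failure exit can leak. */
int build_message(char **out)
{
	int err = -1;
	char *buf = malloc(64);

	if (!buf)
		goto out;

	strcpy(buf, "payload");
	*out = buf;
	buf = NULL;		/* ownership transferred to caller */
	err = 0;
out:
	free(buf);		/* no-op on success, frees on failure */
	return err;
}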
diff --git a/trunk/net/ipv4/af_inet.c b/trunk/net/ipv4/af_inet.c
index eae1f676f870..9c1926027a26 100644
--- a/trunk/net/ipv4/af_inet.c
+++ b/trunk/net/ipv4/af_inet.c
@@ -676,7 +676,6 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
 	lock_sock(sk2);
 
-	sock_rps_record_flow(sk2);
 	WARN_ON(!((1 << sk2->sk_state) &
 		  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
diff --git a/trunk/net/ipv4/inet_diag.c b/trunk/net/ipv4/inet_diag.c
index 3267d3898437..6ffe94ca5bc9 100644
--- a/trunk/net/ipv4/inet_diag.c
+++ b/trunk/net/ipv4/inet_diag.c
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
 			return 0;
 		if (cc == len)
 			return 1;
-		if (op->yes < 4 || op->yes & 3)
+		if (op->yes < 4)
 			return 0;
 		len -= op->yes;
 		bc  += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
 
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
-	const void *bc = bytecode;
+	const unsigned char *bc = bytecode;
 	int  len = bytecode_len;
 
 	while (len > 0) {
-		const struct inet_diag_bc_op *op = bc;
+		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
 
//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
 		switch (op->code) {
@@ -462,20 +462,22 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 		case INET_DIAG_BC_S_LE:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
+			if (op->yes < 4 || op->yes > len + 4)
+				return -EINVAL;
 		case INET_DIAG_BC_JMP:
-			if (op->no < 4 || op->no > len + 4 || op->no & 3)
+			if (op->no < 4 || op->no > len + 4)
 				return -EINVAL;
 			if (op->no < len &&
 			    !valid_cc(bytecode, bytecode_len, len - op->no))
 				return -EINVAL;
 			break;
 		case INET_DIAG_BC_NOP:
+			if (op->yes < 4 || op->yes > len + 4)
+				return -EINVAL;
 			break;
 		default:
 			return -EINVAL;
 		}
-		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
-			return -EINVAL;
 		bc  += op->yes;
 		len -= op->yes;
 	}
diff --git a/trunk/net/ipv4/netfilter/ip_queue.c b/trunk/net/ipv4/netfilter/ip_queue.c
index 5c9b9d963918..f7f9bd7ba12d 100644
--- a/trunk/net/ipv4/netfilter/ip_queue.c
+++ b/trunk/net/ipv4/netfilter/ip_queue.c
@@ -203,8 +203,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev &&
-	    entry->skb->mac_header != entry->skb->network_header) {
+	if (entry->indev && entry->skb->dev) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
diff --git a/trunk/net/ipv4/netfilter/ip_tables.c b/trunk/net/ipv4/netfilter/ip_tables.c
index 24e556e83a3b..764743843503 100644
--- a/trunk/net/ipv4/netfilter/ip_tables.c
+++ b/trunk/net/ipv4/netfilter/ip_tables.c
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
 	const struct xt_entry_target *t;
 
 	if (!ip_checkentry(&e->ip)) {
-		duprintf("ip check failed %p %s.\n", e, name);
+		duprintf("ip check failed %p %s.\n", e, par->match->name);
 		return -EINVAL;
 	}
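Aside on the inet_diag bytecode audit above: the "& 3" tests on the left-hand side enforce that jump offsets into the 4-byte-aligned op array are themselves multiples of 4, on top of the range checks. A standalone sketch of that combined check, not part of the patch:

#include <stdio.h>

/* An offset is acceptable only if it is at least one op (4 bytes),
 * stays within the remaining buffer, and is 4-byte aligned;
 * "(off & 3) != 0" catches misalignment. */
static int offset_ok(unsigned int off, unsigned int remaining)
{
	return off >= 4 && off <= remaining && (off & 3) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       offset_ok(8, 16),	/* 1: aligned, in range */
	       offset_ok(6, 16),	/* 0: not a multiple of 4 */
	       offset_ok(20, 16));	/* 0: past the end */
	return 0;
}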
diff --git a/trunk/net/ipv4/netfilter/ipt_ecn.c b/trunk/net/ipv4/netfilter/ipt_ecn.c
index 2b57e52c746c..af6e9c778345 100644
--- a/trunk/net/ipv4/netfilter/ipt_ecn.c
+++ b/trunk/net/ipv4/netfilter/ipt_ecn.c
@@ -25,8 +25,7 @@ MODULE_LICENSE("GPL");
 static inline bool match_ip(const struct sk_buff *skb,
 			    const struct ipt_ecn_info *einfo)
 {
-	return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
-	       !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
+	return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
 }
 
 static inline bool match_tcp(const struct sk_buff *skb,
@@ -77,6 +76,8 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		return false;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
+		if (ip_hdr(skb)->protocol != IPPROTO_TCP)
+			return false;
 		if (!match_tcp(skb, info, &par->hotdrop))
 			return false;
 	}
@@ -96,7 +97,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
 		return -EINVAL;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
+	    ip->proto != IPPROTO_TCP) {
 		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
 		return -EINVAL;
 	}
diff --git a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21113a1..db10075dd88e 100644
--- a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -121,9 +121,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 		return ret;
 	}
 
-	/* adjust seqs for loopback traffic only in outgoing direction */
-	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
-	    !nf_is_loopback_packet(skb)) {
+	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
 		typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
 		seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
diff --git a/trunk/net/ipv4/ping.c b/trunk/net/ipv4/ping.c
index 39b403f854c6..9aaa67165f42 100644
--- a/trunk/net/ipv4/ping.c
+++ b/trunk/net/ipv4/ping.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c
index aa13ef105110..045f0ec6a4a0 100644
--- a/trunk/net/ipv4/route.c
+++ b/trunk/net/ipv4/route.c
@@ -1902,7 +1902,9 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
 	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
-	return IS_ERR(rth) ? PTR_ERR(rth) : 0;
+	err = 0;
+	if (IS_ERR(rth))
+		err = PTR_ERR(rth);
 
 e_nobufs:
 	return -ENOBUFS;
diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c
index 708dc203b034..a7d6671e33b8 100644
--- a/trunk/net/ipv4/tcp_ipv4.c
+++ b/trunk/net/ipv4/tcp_ipv4.c
@@ -1589,7 +1589,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			goto discard;
 
 		if (nsk != sk) {
-			sock_rps_save_rxhash(nsk, skb->rxhash);
 			if (tcp_child_process(sk, nsk, skb)) {
 				rsk = nsk;
 				goto reset;
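Aside on match_ip() above: the left-hand side folds the rule's invert option into the result with an XOR against a normalized 0/1 flag. A standalone sketch of the idiom, not part of the patch, with made-up values:

#include <stdbool.h>
#include <stdio.h>

/* XOR with a 0/1 flag flips a predicate only when inversion is
 * requested; "!!" collapses any non-zero flag bit to exactly 1 so
 * the XOR is well defined. */
static bool match(unsigned char tos, unsigned char want, unsigned int invert)
{
	return ((tos & 0x3) == want) ^ !!invert;
}

int main(void)
{
	printf("%d %d\n",
	       match(0x2, 0x2, 0),	/* 1: plain match */
	       match(0x2, 0x2, 4));	/* 0: inverted match */
	return 0;
}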
diff --git a/trunk/net/ipv6/netfilter/ip6_queue.c b/trunk/net/ipv6/netfilter/ip6_queue.c
index 249394863284..065fe405fb58 100644
--- a/trunk/net/ipv6/netfilter/ip6_queue.c
+++ b/trunk/net/ipv6/netfilter/ip6_queue.c
@@ -204,8 +204,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev &&
-	    entry->skb->mac_header != entry->skb->network_header) {
+	if (entry->indev && entry->skb->dev) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
 	}
diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c
index 87551ca568cd..d1fd28711ba5 100644
--- a/trunk/net/ipv6/tcp_ipv6.c
+++ b/trunk/net/ipv6/tcp_ipv6.c
@@ -1644,7 +1644,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		 * the new socket..
 		 */
 		if(nsk != sk) {
-			sock_rps_save_rxhash(nsk, skb->rxhash);
 			if (tcp_child_process(sk, nsk, skb))
 				goto reset;
 			if (opt_skb)
diff --git a/trunk/net/netfilter/ipvs/ip_vs_conn.c b/trunk/net/netfilter/ipvs/ip_vs_conn.c
index 782db275ac53..bf28ac2fc99b 100644
--- a/trunk/net/netfilter/ipvs/ip_vs_conn.c
+++ b/trunk/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,16 +776,8 @@ static void ip_vs_conn_expire(unsigned long data)
 		if (cp->control)
 			ip_vs_control_del(cp);
 
-		if (cp->flags & IP_VS_CONN_F_NFCT) {
+		if (cp->flags & IP_VS_CONN_F_NFCT)
 			ip_vs_conn_drop_conntrack(cp);
-			/* Do not access conntracks during subsys cleanup
-			 * because nf_conntrack_find_get can not be used after
-			 * conntrack cleanup for the net.
-			 */
-			smp_rmb();
-			if (ipvs->enable)
-				ip_vs_conn_drop_conntrack(cp);
-		}
 
 		ip_vs_pe_put(cp->pe);
 		kfree(cp->pe_data);
diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c
index 24c28d238dcb..55af2242bccd 100644
--- a/trunk/net/netfilter/ipvs/ip_vs_core.c
+++ b/trunk/net/netfilter/ipvs/ip_vs_core.c
@@ -1945,7 +1945,6 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
 	EnterFunction(2);
 	net_ipvs(net)->enable = 0;	/* Disable packet reception */
-	smp_wmb();
 	__ip_vs_sync_cleanup(net);
 	LeaveFunction(2);
 }
diff --git a/trunk/net/netfilter/nfnetlink_log.c b/trunk/net/netfilter/nfnetlink_log.c
index 2e7ccbb43ddb..e0ee010935e7 100644
--- a/trunk/net/netfilter/nfnetlink_log.c
+++ b/trunk/net/netfilter/nfnetlink_log.c
@@ -456,8 +456,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 	if (skb->mark)
 		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
 
-	if (indev && skb->dev &&
-	    skb->mac_header != skb->network_header) {
+	if (indev && skb->dev) {
 		struct nfulnl_msg_packet_hw phw;
 		int len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
diff --git a/trunk/net/netfilter/nfnetlink_queue.c b/trunk/net/netfilter/nfnetlink_queue.c
index fdd2fafe0a14..b83123f12b42 100644
--- a/trunk/net/netfilter/nfnetlink_queue.c
+++ b/trunk/net/netfilter/nfnetlink_queue.c
@@ -335,8 +335,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	if (entskb->mark)
 		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
 
-	if (indev && entskb->dev &&
-	    entskb->mac_header != entskb->network_header) {
+	if (indev && entskb->dev) {
 		struct nfqnl_msg_packet_hw phw;
 		int len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
diff --git a/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c b/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c
index c3b75333b821..0a9a2ec2e469 100644
--- a/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,7 +43,6 @@
 #include
 #include
 #include
-#include
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -751,7 +750,7 @@ static struct gss_api_mech gss_kerberos_mech = {
 	.gm_ops		= &gss_kerberos_ops,
 	.gm_pf_num	= ARRAY_SIZE(gss_kerberos_pfs),
 	.gm_pfs		= gss_kerberos_pfs,
-	.gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
+	.gm_upcall_enctypes = "18,17,16,23,3,1,2",
 };
 
 static int __init init_kerberos_module(void)
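Aside on the ip_vs hunks above: the left-hand side pairs an smp_wmb() after clearing enable in the cleanup path with an smp_rmb() plus an ipvs->enable re-check in the expiry path, so expiry never touches conntrack state that is mid-teardown. A rough userspace analogue of that pairing using C11 release/acquire atomics, not part of the patch; all names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool subsys_enabled = true;

void subsys_cleanup(void)
{
	/* publish "disabled" before tearing anything down,
	 * playing the role of the enable = 0 / smp_wmb() pair */
	atomic_store_explicit(&subsys_enabled, false, memory_order_release);
	/* ... free per-net state ... */
}

bool may_touch_shared_state(void)
{
	/* readers observe the store above before proceeding,
	 * playing the role of smp_rmb() + the enable re-check */
	return atomic_load_explicit(&subsys_enabled, memory_order_acquire);
}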
diff --git a/trunk/security/device_cgroup.c b/trunk/security/device_cgroup.c
index 1be68269e1c2..cd1f779fa51d 100644
--- a/trunk/security/device_cgroup.c
+++ b/trunk/security/device_cgroup.c
@@ -474,11 +474,17 @@ struct cgroup_subsys devices_subsys = {
 	.subsys_id = devices_subsys_id,
 };
 
-int __devcgroup_inode_permission(struct inode *inode, int mask)
+int devcgroup_inode_permission(struct inode *inode, int mask)
 {
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
+	dev_t device = inode->i_rdev;
+	if (!device)
+		return 0;
+	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+		return 0;
+
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile
index 940257b5774e..032ba6398a5c 100644
--- a/trunk/tools/perf/Makefile
+++ b/trunk/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c
index 0a7ed5b5e281..1e88485c16a0 100644
--- a/trunk/tools/perf/util/trace-event-parse.c
+++ b/trunk/tools/perf/util/trace-event-parse.c
@@ -2187,7 +2187,6 @@ static const struct flag flags[] = {
 	{ "TASKLET_SOFTIRQ", 6 },
 	{ "SCHED_SOFTIRQ", 7 },
 	{ "HRTIMER_SOFTIRQ", 8 },
-	{ "RCU_SOFTIRQ", 9 },
 
 	{ "HRTIMER_NORESTART", 0 },
 	{ "HRTIMER_RESTART", 1 },
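Aside on the devcgroup_inode_permission() hunk above: the added early exits mean only inodes that actually carry a device number, and are block or character nodes, reach the whitelist walk. A standalone userspace sketch of the same mode test via stat(), not part of the patch; the helper name is hypothetical:

#include <sys/stat.h>
#include <stdio.h>

/* Mirrors the kernel check: skip anything without a device number,
 * then accept only block (S_ISBLK) or character (S_ISCHR) nodes. */
int needs_devcgroup_check(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return 0;
	if (st.st_rdev == 0)
		return 0;	/* no device number at all */
	return S_ISBLK(st.st_mode) || S_ISCHR(st.st_mode);
}

int main(void)
{
	printf("/dev/null: %d\n", needs_devcgroup_check("/dev/null"));   /* 1 */
	printf("/etc/hosts: %d\n", needs_devcgroup_check("/etc/hosts")); /* 0 */
	return 0;
}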