diff --git a/[refs] b/[refs]
index 75d4c2dcfde6..e557193b5dd4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d2bac6ab932a407008a95d1993702f0a92f8f35e
+refs/heads/master: 55205c916e179e09773d98d290334d319f45ac6b
diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt
index e2a4b5287361..7945b0bd35e2 100644
--- a/trunk/Documentation/virtual/kvm/api.txt
+++ b/trunk/Documentation/virtual/kvm/api.txt
@@ -1100,15 +1100,6 @@ emulate them efficiently. The fields in each entry are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
 
-The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
-as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
-support. Instead it is reported via
-
-  ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
-
-if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
-feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
-
 4.47 KVM_PPC_GET_PVINFO
 
 Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1160,13 +1151,6 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
-The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
-isolation of the device. Usages not specifying this flag are deprecated.
-
-Only PCI header type 0 devices with PCI BAR resources are supported by
-device assignment. The user requesting this ioctl must have read/write
-access to the PCI sysfs resource files associated with the device.
-
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 0e7a80aefa0c..6afba60c3904 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2700,7 +2700,7 @@ FIREWIRE SUBSYSTEM
 M:	Stefan Richter
 L:	linux1394-devel@lists.sourceforge.net
 W:	http://ieee1394.wiki.kernel.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 S:	Maintained
 F:	drivers/firewire/
 F:	include/linux/firewire*.h
diff --git a/trunk/Makefile b/trunk/Makefile
index ea51081812f3..a43733df3978 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/trunk/arch/arm/oprofile/common.c b/trunk/arch/arm/oprofile/common.c
index c074e66ad224..4e0a371630b3 100644
--- a/trunk/arch/arm/oprofile/common.c
+++ b/trunk/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
 	oprofile_perf_exit();
 }
diff --git a/trunk/arch/ia64/include/asm/cputime.h b/trunk/arch/ia64/include/asm/cputime.h
index 5a274af31b2b..6073b187528a 100644
--- a/trunk/arch/ia64/include/asm/cputime.h
+++ b/trunk/arch/ia64/include/asm/cputime.h
@@ -60,7 +60,6 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)		((__ct) / NSEC_PER_USEC)
 #define usecs_to_cputime(__usecs)	((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)	usecs_to_cputime(__usecs)
 
 /*
  * Convert cputime <-> seconds
diff --git a/trunk/arch/powerpc/include/asm/cputime.h b/trunk/arch/powerpc/include/asm/cputime.h
index 98b7c4b49c9d..1cf20bdfbeca 100644
--- a/trunk/arch/powerpc/include/asm/cputime.h
+++
b/trunk/arch/powerpc/include/asm/cputime.h @@ -150,8 +150,6 @@ static inline cputime_t usecs_to_cputime(const unsigned long us) return ct; } -#define usecs_to_cputime64(us) usecs_to_cputime(us) - /* * Convert cputime <-> seconds */ diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s.h b/trunk/arch/powerpc/include/asm/kvm_book3s.h index 69c7377d2071..d4df013ad779 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s.h @@ -381,6 +381,39 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) } #endif +static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, + unsigned long pte_index) +{ + unsigned long rb, va_low; + + rb = (v & ~0x7fUL) << 16; /* AVA field */ + va_low = pte_index >> 3; + if (v & HPTE_V_SECONDARY) + va_low = ~va_low; + /* xor vsid from AVA */ + if (!(v & HPTE_V_1TB_SEG)) + va_low ^= v >> 12; + else + va_low ^= v >> 24; + va_low &= 0x7ff; + if (v & HPTE_V_LARGE) { + rb |= 1; /* L field */ + if (cpu_has_feature(CPU_FTR_ARCH_206) && + (r & 0xff000)) { + /* non-16MB large page, must be 64k */ + /* (masks depend on page size) */ + rb |= 0x1000; /* page encoding in LP field */ + rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ + rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ + } + } else { + /* 4kB page */ + rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ + } + rb |= (v >> 54) & 0x300; /* B field */ + return rb; +} + /* Magic register values loaded into r3 and r4 before the 'sc' assembly * instruction for the OSI hypercalls */ #define OSI_SC_MAGIC_R3 0x113724FA diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h index d0ac94f98f9e..e43fe42b9875 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h @@ -29,37 +29,4 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) #define SPAPR_TCE_SHIFT 12 -static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, - unsigned long pte_index) -{ - unsigned long rb, va_low; - - rb = (v & ~0x7fUL) << 16; /* AVA field */ - va_low = pte_index >> 3; - if (v & HPTE_V_SECONDARY) - va_low = ~va_low; - /* xor vsid from AVA */ - if (!(v & HPTE_V_1TB_SEG)) - va_low ^= v >> 12; - else - va_low ^= v >> 24; - va_low &= 0x7ff; - if (v & HPTE_V_LARGE) { - rb |= 1; /* L field */ - if (cpu_has_feature(CPU_FTR_ARCH_206) && - (r & 0xff000)) { - /* non-16MB large page, must be 64k */ - /* (masks depend on page size) */ - rb |= 0x1000; /* page encoding in LP field */ - rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ - rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ - } - } else { - /* 4kB page */ - rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ - } - rb |= (v >> 54) & 0x300; /* B field */ - return rb; -} - #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/trunk/arch/powerpc/kvm/book3s_hv.c b/trunk/arch/powerpc/kvm/book3s_hv.c index 336983da9e72..0cb137a9b038 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv.c +++ b/trunk/arch/powerpc/kvm/book3s_hv.c @@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) tpaca->kvm_hstate.napping = 0; vcpu->cpu = vc->pcpu; smp_wmb(); -#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) +#ifdef CONFIG_PPC_ICP_NATIVE if (vcpu->arch.ptid) { tpaca->cpu_start = 0x80; wmb(); diff --git a/trunk/arch/powerpc/kvm/book3s_pr.c b/trunk/arch/powerpc/kvm/book3s_pr.c index e2cfb9e1e20e..3c791e1eb675 
100644 --- a/trunk/arch/powerpc/kvm/book3s_pr.c +++ b/trunk/arch/powerpc/kvm/book3s_pr.c @@ -658,12 +658,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; -#ifdef CONFIG_KVM_BOOK3S_64_PR if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; } -#endif run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { diff --git a/trunk/arch/powerpc/kvm/e500.c b/trunk/arch/powerpc/kvm/e500.c index 8c0d45a6faf7..26d20903f2bc 100644 --- a/trunk/arch/powerpc/kvm/e500.c +++ b/trunk/arch/powerpc/kvm/e500.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/s390/include/asm/cputime.h b/trunk/arch/s390/include/asm/cputime.h index b9acaaa175d8..081434878296 100644 --- a/trunk/arch/s390/include/asm/cputime.h +++ b/trunk/arch/s390/include/asm/cputime.h @@ -87,8 +87,6 @@ usecs_to_cputime(const unsigned int m) return (cputime_t) m * 4096; } -#define usecs_to_cputime64(m) usecs_to_cputime(m) - /* * Convert cputime to milliseconds and back. */ diff --git a/trunk/arch/sh/oprofile/common.c b/trunk/arch/sh/oprofile/common.c index b4c2d2b946dd..e4dd5d5a1115 100644 --- a/trunk/arch/sh/oprofile/common.c +++ b/trunk/arch/sh/oprofile/common.c @@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void __exit oprofile_arch_exit(void) +void oprofile_arch_exit(void) { oprofile_perf_exit(); kfree(sh_pmu_op_name); @@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = sh_backtrace; return -ENODEV; } -void __exit oprofile_arch_exit(void) {} +void oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/trunk/arch/sparc/kernel/pci_sun4v.c b/trunk/arch/sparc/kernel/pci_sun4v.c index af5755d20fbe..b272cda35a01 100644 --- a/trunk/arch/sparc/kernel/pci_sun4v.c +++ b/trunk/arch/sparc/kernel/pci_sun4v.c @@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, if (!irq) return -ENOMEM; - if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) - return -EINVAL; if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) return -EINVAL; + if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) + return -EINVAL; return irq; } diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c index 405f2620392f..76e3f1cd0369 100644 --- a/trunk/arch/x86/kvm/i8254.c +++ b/trunk/arch/x86/kvm/i8254.c @@ -338,15 +338,11 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) +static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) { - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; struct kvm_timer *pt = &ps->pit_timer; s64 interval; - if (!irqchip_in_kernel(kvm)) - return; - interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); @@ -398,13 +394,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) /* FIXME: enhance mode 4 precision */ case 4: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { - create_pit_timer(kvm, val, 0); + create_pit_timer(ps, val, 0); } break; case 2: case 3: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ - create_pit_timer(kvm, val, 1); + create_pit_timer(ps, val, 1); } break; default: diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 4c938da2ba00..c38efd7b792e 100644 --- 
a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -602,6 +602,7 @@ static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct kvm_lapic *apic = vcpu->arch.apic; + u32 timer_mode_mask; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) @@ -614,12 +615,15 @@ static void update_cpuid(struct kvm_vcpu *vcpu) best->ecx |= bit(X86_FEATURE_OSXSAVE); } - if (apic) { - if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER)) - apic->lapic_timer.timer_mode_mask = 3 << 17; - else - apic->lapic_timer.timer_mode_mask = 1 << 17; - } + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + best->function == 0x1) { + best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER); + timer_mode_mask = 3 << 17; + } else + timer_mode_mask = 1 << 17; + + if (apic) + apic->lapic_timer.timer_mode_mask = timer_mode_mask; } int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -2131,9 +2135,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; - case KVM_CAP_TSC_DEADLINE_TIMER: - r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); - break; default: r = 0; break; diff --git a/trunk/arch/x86/net/bpf_jit_comp.c b/trunk/arch/x86/net/bpf_jit_comp.c index 7b65f752c5f8..bfab3fa10edc 100644 --- a/trunk/arch/x86/net/bpf_jit_comp.c +++ b/trunk/arch/x86/net/bpf_jit_comp.c @@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; break; } if (filter[i].jt != 0) { - if (filter[i].jf && f_offset) - t_offset += is_near(f_offset) ? 2 : 5; + if (filter[i].jf) + t_offset += is_near(f_offset) ? 2 : 6; EMIT_COND_JMP(t_op, t_offset); if (filter[i].jf) EMIT_JMP(f_offset); diff --git a/trunk/block/blk-map.c b/trunk/block/blk-map.c index 623e1cd4cffe..164cd0059706 100644 --- a/trunk/block/blk-map.c +++ b/trunk/block/blk-map.c @@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (IS_ERR(bio)) return PTR_ERR(bio); - if (!reading) + if (rq_data_dir(rq) == WRITE) bio->bi_rw |= REQ_WRITE; if (do_copy) diff --git a/trunk/block/blk-tag.c b/trunk/block/blk-tag.c index 4af6f5cc1167..e74d6d13838f 100644 --- a/trunk/block/blk-tag.c +++ b/trunk/block/blk-tag.c @@ -282,9 +282,18 @@ EXPORT_SYMBOL(blk_queue_resize_tags); void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; - unsigned tag = rq->tag; /* negative tags invalid */ + int tag = rq->tag; - BUG_ON(tag >= bqt->real_max_depth); + BUG_ON(tag == -1); + + if (unlikely(tag >= bqt->max_depth)) { + /* + * This can happen after tag depth has been reduced. + * But tag shouldn't be larger than real_max_depth. + */ + WARN_ON(tag >= bqt->real_max_depth); + return; + } list_del_init(&rq->queuelist); rq->cmd_flags &= ~REQ_QUEUED; diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3548705b04e4..4c12869fcf77 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1655,8 +1655,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { struct cfq_queue *cfqq = RQ_CFQQ(rq); - struct cfq_data *cfqd = q->elevator->elevator_data; - /* * reposition in fifo if next is older than rq */ @@ -1671,16 +1669,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, cfq_remove_request(next); cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next), rq_is_sync(next)); - - cfqq = RQ_CFQQ(next); - /* - * all requests of this queue are merged to other queues, delete it - * from the service tree. 
If it's the active_queue, - * cfq_dispatch_requests() will choose to expire it or do idle - */ - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && - cfqq != cfqd->active_queue) - cfq_del_cfqq_rr(cfqd, cfqq); } static int cfq_allow_merge(struct request_queue *q, struct request *rq, diff --git a/trunk/drivers/ata/Kconfig b/trunk/drivers/ata/Kconfig index cf047c406d92..6bdedd7cca2c 100644 --- a/trunk/drivers/ata/Kconfig +++ b/trunk/drivers/ata/Kconfig @@ -820,7 +820,7 @@ config PATA_PLATFORM config PATA_OF_PLATFORM tristate "OpenFirmware platform device PATA support" - depends on PATA_PLATFORM && OF && OF_IRQ + depends on PATA_PLATFORM && OF help This option enables support for generic directly connected ATA devices commonly found on embedded systems with OpenFirmware diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b9da8900ae4e..c681dc149d2a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -756,9 +756,9 @@ intel_enable_semaphores(struct drm_device *dev) if (i915_semaphores >= 0) return i915_semaphores; - /* Disable semaphores on SNB */ + /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6) - return 0; + return !intel_iommu_enabled; return 1; } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index daa5743ccbd6..d809b038ca88 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -7922,11 +7922,13 @@ static bool intel_enable_rc6(struct drm_device *dev) return 0; /* - * Disable rc6 on Sandybridge + * Enable rc6 on Sandybridge if DMA remapping is disabled */ if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); - return 0; + DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", + intel_iommu_enabled ? "true" : "false", + !intel_iommu_enabled ? "en" : "dis"); + return !intel_iommu_enabled; } DRM_DEBUG_DRIVER("RC6 enabled\n"); return 1; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 92c9628c572d..5e00d1670aa9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -3276,18 +3276,6 @@ int evergreen_init(struct radeon_device *rdev) rdev->accel_working = false; } } - - /* Don't start up if the MC ucode is missing on BTC parts. - * The default clocks and voltages before the MC ucode - * is loaded are not suffient for advanced operations. - */ - if (ASIC_IS_DCE5(rdev)) { - if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { - DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; - } - } - return 0; } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f94b33ae2215..8aa1dbb45c67 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1093,6 +1093,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; struct ttm_base_object *user_obj; + u64 required_size; int ret; /** @@ -1101,9 +1102,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, * requested framebuffer. 
*/ - if (!vmw_kms_validate_mode_vram(dev_priv, - mode_cmd->pitch, - mode_cmd->height)) { + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/md/bitmap.c b/trunk/drivers/md/bitmap.c index 6d03774b176e..b6907118283a 100644 --- a/trunk/drivers/md/bitmap.c +++ b/trunk/drivers/md/bitmap.c @@ -1393,6 +1393,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto atomic_read(&bitmap->behind_writes), bitmap->mddev->bitmap_info.max_write_behind); } + if (bitmap->mddev->degraded) + /* Never clear bits or update events_cleared when degraded */ + success = 0; while (sectors) { sector_t blocks; @@ -1406,7 +1409,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto return; } - if (success && !bitmap->mddev->degraded && + if (success && bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; diff --git a/trunk/drivers/md/linear.c b/trunk/drivers/md/linear.c index 627456542fb3..c3273efd08cb 100644 --- a/trunk/drivers/md/linear.c +++ b/trunk/drivers/md/linear.c @@ -230,7 +230,6 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) return -EINVAL; rdev->raid_disk = rdev->saved_raid_disk; - rdev->saved_raid_disk = -1; newconf = linear_conf(mddev,mddev->raid_disks+1); diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index f47f1f8ac44b..ee981737edfc 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -7360,7 +7360,8 @@ static int remove_and_add_spares(struct mddev *mddev) spares++; md_new_event(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); - } + } else + break; } } } diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index 858fdbb7eb07..31670f8d6b65 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -3065,17 +3065,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } } else if (test_bit(In_sync, &rdev->flags)) set_bit(R5_Insync, &dev->flags); - else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) + else { /* in sync if before recovery_offset */ - set_bit(R5_Insync, &dev->flags); - else if (test_bit(R5_UPTODATE, &dev->flags) && - test_bit(R5_Expanded, &dev->flags)) - /* If we've reshaped into here, we assume it is Insync. - * We will shortly update recovery_offset to make - * it official. - */ - set_bit(R5_Insync, &dev->flags); - + if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) + set_bit(R5_Insync, &dev->flags); + } if (rdev && test_bit(R5_WriteError, &dev->flags)) { clear_bit(R5_Insync, &dev->flags); if (!test_bit(Faulty, &rdev->flags)) { diff --git a/trunk/drivers/media/video/gspca/gspca.c b/trunk/drivers/media/video/gspca/gspca.c index 512f32ff446a..881e04c7ffe6 100644 --- a/trunk/drivers/media/video/gspca/gspca.c +++ b/trunk/drivers/media/video/gspca/gspca.c @@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) gspca_dev->usb_err = 0; /* do the specific subdriver stuff before endpoint selection */ - intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); - gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0; + gspca_dev->alt = 0; if (gspca_dev->sd_desc->isoc_init) { ret = gspca_dev->sd_desc->isoc_init(gspca_dev); if (ret < 0) goto unlock; } + intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); xfer = gspca_dev->cam.bulk ? 
USB_ENDPOINT_XFER_BULK : USB_ENDPOINT_XFER_ISOC; diff --git a/trunk/drivers/media/video/omap3isp/ispccdc.c b/trunk/drivers/media/video/omap3isp/ispccdc.c index 54a4a3f22e2e..b0b0fa5a3572 100644 --- a/trunk/drivers/media/video/omap3isp/ispccdc.c +++ b/trunk/drivers/media/video/omap3isp/ispccdc.c @@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc) { struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->video_out.video.entity); - struct video_device *vdev = ccdc->subdev.devnode; + struct video_device *vdev = &ccdc->subdev.devnode; struct v4l2_event event; memset(&event, 0, sizeof(event)); diff --git a/trunk/drivers/media/video/omap3isp/ispstat.c b/trunk/drivers/media/video/omap3isp/ispstat.c index bc0b2c7349b9..68d539456c55 100644 --- a/trunk/drivers/media/video/omap3isp/ispstat.c +++ b/trunk/drivers/media/video/omap3isp/ispstat.c @@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) static void isp_stat_queue_event(struct ispstat *stat, int err) { - struct video_device *vdev = stat->subdev.devnode; + struct video_device *vdev = &stat->subdev.devnode; struct v4l2_event event; struct omap3isp_stat_event_status *status = (void *)event.u.data; diff --git a/trunk/drivers/mfd/ab5500-debugfs.c b/trunk/drivers/mfd/ab5500-debugfs.c index b7b2d3483fd4..43c0ebb81956 100644 --- a/trunk/drivers/mfd/ab5500-debugfs.c +++ b/trunk/drivers/mfd/ab5500-debugfs.c @@ -4,7 +4,7 @@ * Debugfs support for the AB5500 MFD driver */ -#include +#include #include #include #include diff --git a/trunk/drivers/mfd/ab8500-core.c b/trunk/drivers/mfd/ab8500-core.c index d3d572b2317b..1e9173804ede 100644 --- a/trunk/drivers/mfd/ab8500-core.c +++ b/trunk/drivers/mfd/ab8500-core.c @@ -620,7 +620,6 @@ static struct resource __devinitdata ab8500_fg_resources[] = { static struct resource __devinitdata ab8500_chargalg_resources[] = {}; -#ifdef CONFIG_DEBUG_FS static struct resource __devinitdata ab8500_debug_resources[] = { { .name = "IRQ_FIRST", @@ -635,7 +634,6 @@ static struct resource __devinitdata ab8500_debug_resources[] = { .flags = IORESOURCE_IRQ, }, }; -#endif static struct resource __devinitdata ab8500_usb_resources[] = { { diff --git a/trunk/drivers/mfd/adp5520.c b/trunk/drivers/mfd/adp5520.c index 8d816cce8322..f1d88483112c 100644 --- a/trunk/drivers/mfd/adp5520.c +++ b/trunk/drivers/mfd/adp5520.c @@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask) ret = __adp5520_read(chip->client, reg, ®_val); - if (!ret && ((reg_val & bit_mask) != bit_mask)) { + if (!ret && ((reg_val & bit_mask) == 0)) { reg_val |= bit_mask; ret = __adp5520_write(chip->client, reg, reg_val); } diff --git a/trunk/drivers/mfd/da903x.c b/trunk/drivers/mfd/da903x.c index 1924b857a0fb..1b79c37fd599 100644 --- a/trunk/drivers/mfd/da903x.c +++ b/trunk/drivers/mfd/da903x.c @@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask) if (ret) goto out; - if ((reg_val & bit_mask) != bit_mask) { + if ((reg_val & bit_mask) == 0) { reg_val |= bit_mask; ret = __da903x_write(chip->client, reg, reg_val); } @@ -549,7 +549,6 @@ static int __devexit da903x_remove(struct i2c_client *client) struct da903x_chip *chip = i2c_get_clientdata(client); da903x_remove_subdevs(chip); - free_irq(client->irq, chip); kfree(chip); return 0; } diff --git a/trunk/drivers/mfd/jz4740-adc.c b/trunk/drivers/mfd/jz4740-adc.c index ef39528088f2..1e9ee533eacb 100644 --- a/trunk/drivers/mfd/jz4740-adc.c +++ b/trunk/drivers/mfd/jz4740-adc.c @@ -16,7 +16,6 @@ */ #include -#include 
#include #include #include diff --git a/trunk/drivers/mfd/tps6586x.c b/trunk/drivers/mfd/tps6586x.c index a5ddf31b60ca..bba26d96c240 100644 --- a/trunk/drivers/mfd/tps6586x.c +++ b/trunk/drivers/mfd/tps6586x.c @@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask) if (ret) goto out; - if ((reg_val & bit_mask) != bit_mask) { + if ((reg_val & bit_mask) == 0) { reg_val |= bit_mask; ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val); } diff --git a/trunk/drivers/mfd/tps65910.c b/trunk/drivers/mfd/tps65910.c index c1da84bc1573..6f5b8cf2f652 100644 --- a/trunk/drivers/mfd/tps65910.c +++ b/trunk/drivers/mfd/tps65910.c @@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask) goto out; } - data &= ~mask; + data &= mask; err = tps65910_i2c_write(tps65910, reg, 1, &data); if (err) dev_err(tps65910->dev, "write to reg %x failed\n", reg); diff --git a/trunk/drivers/mfd/twl-core.c b/trunk/drivers/mfd/twl-core.c index 61e70cfaa774..bfbd66021afd 100644 --- a/trunk/drivers/mfd/twl-core.c +++ b/trunk/drivers/mfd/twl-core.c @@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - if (unlikely(!inuse)) { - pr_err("%s: not initialized\n", DRIVER_NAME); - return -EPERM; - } sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; + if (unlikely(!inuse)) { + pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + return -EPERM; + } mutex_lock(&twl->xfer_lock); /* * [MSG1]: fill the register address data @@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - if (unlikely(!inuse)) { - pr_err("%s: not initialized\n", DRIVER_NAME); - return -EPERM; - } sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; + if (unlikely(!inuse)) { + pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + return -EPERM; + } mutex_lock(&twl->xfer_lock); /* [MSG1] fill the register address data */ msg = &twl->xfer_msg[0]; diff --git a/trunk/drivers/mfd/twl4030-irq.c b/trunk/drivers/mfd/twl4030-irq.c index 29f11e0765fe..f062c8cc6c38 100644 --- a/trunk/drivers/mfd/twl4030-irq.c +++ b/trunk/drivers/mfd/twl4030-irq.c @@ -432,7 +432,6 @@ struct sih_agent { u32 edge_change; struct mutex irq_lock; - char *irq_name; }; /*----------------------------------------------------------------------*/ @@ -590,7 +589,7 @@ static inline int sih_read_isr(const struct sih *sih) * Generic handler for SIH interrupts ... we "know" this is called * in task context, with IRQs enabled. 
*/ -static irqreturn_t handle_twl4030_sih(int irq, void *data) +static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) { struct sih_agent *agent = irq_get_handler_data(irq); const struct sih *sih = agent->sih; @@ -603,7 +602,7 @@ static irqreturn_t handle_twl4030_sih(int irq, void *data) pr_err("twl4030: %s SIH, read ISR error %d\n", sih->name, isr); /* REVISIT: recover; eventually mask it all, etc */ - return IRQ_HANDLED; + return; } while (isr) { @@ -617,7 +616,6 @@ static irqreturn_t handle_twl4030_sih(int irq, void *data) pr_err("twl4030: %s SIH, invalid ISR bit %d\n", sih->name, irq); } - return IRQ_HANDLED; } static unsigned twl4030_irq_next; @@ -670,19 +668,18 @@ int twl4030_sih_setup(int module) activate_irq(irq); } + status = irq_base; twl4030_irq_next += i; /* replace generic PIH handler (handle_simple_irq) */ irq = sih_mod + twl4030_irq_base; irq_set_handler_data(irq, agent); - agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name); - status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0, - agent->irq_name ?: sih->name, NULL); + irq_set_chained_handler(irq, handle_twl4030_sih); pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, irq, irq_base, twl4030_irq_next - 1); - return status < 0 ? status : irq_base; + return status; } /* FIXME need a call to reverse twl4030_sih_setup() ... */ @@ -736,9 +733,8 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) } /* install an irq handler to demultiplex the TWL4030 interrupt */ - status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, - IRQF_ONESHOT, - "TWL4030-PIH", NULL); + status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0, + "TWL4030-PIH", NULL); if (status < 0) { pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status); goto fail_rqirq; diff --git a/trunk/drivers/mfd/wm8994-core.c b/trunk/drivers/mfd/wm8994-core.c index 61894fced8ea..5d6ba132837e 100644 --- a/trunk/drivers/mfd/wm8994-core.c +++ b/trunk/drivers/mfd/wm8994-core.c @@ -239,7 +239,6 @@ static int wm8994_suspend(struct device *dev) switch (wm8994->type) { case WM8958: - case WM1811: ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1); if (ret < 0) { dev_err(dev, "Failed to read power status: %d\n", ret); diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index c8f47f17186f..67bf07819992 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -477,6 +477,7 @@ enum rtl_register_content { /* Config1 register p.24 */ LEDS1 = (1 << 7), LEDS0 = (1 << 6), + MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ Speed_down = (1 << 4), MEMMAP = (1 << 3), IOMAP = (1 << 2), @@ -484,7 +485,6 @@ enum rtl_register_content { PMEnable = (1 << 0), /* Power Management Enable */ /* Config2 register p. 25 */ - MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ PCI_Clock_66MHz = 0x01, PCI_Clock_33MHz = 0x00, @@ -3426,24 +3426,22 @@ static const struct rtl_cfg_info { }; /* Cfg9346_Unlock assumed. */ -static unsigned rtl_try_msi(struct rtl8169_private *tp, +static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, const struct rtl_cfg_info *cfg) { - void __iomem *ioaddr = tp->mmio_addr; unsigned msi = 0; u8 cfg2; cfg2 = RTL_R8(Config2) & ~MSIEnable; if (cfg->features & RTL_FEATURE_MSI) { - if (pci_enable_msi(tp->pci_dev)) { - netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n"); + if (pci_enable_msi(pdev)) { + dev_info(&pdev->dev, "no MSI. 
Back to INTx.\n"); } else { cfg2 |= MSIEnable; msi = RTL_FEATURE_MSI; } } - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - RTL_W8(Config2, cfg2); + RTL_W8(Config2, cfg2); return msi; } @@ -4079,7 +4077,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->features |= RTL_FEATURE_WOL; if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) tp->features |= RTL_FEATURE_WOL; - tp->features |= rtl_try_msi(tp, cfg); + tp->features |= rtl_try_msi(pdev, ioaddr, cfg); RTL_W8(Cfg9346, Cfg9346_Lock); if (rtl_tbi_enabled(tp)) { diff --git a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c index c97d2f590855..dca9d3369cdd 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c @@ -836,13 +836,11 @@ int cpdma_chan_stop(struct cpdma_chan *chan) chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); /* handle completed packets */ - spin_unlock_irqrestore(&chan->lock, flags); do { ret = __cpdma_chan_process(chan); if (ret < 0) break; } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0); - spin_lock_irqsave(&chan->lock, flags); /* remaining packets haven't been tx/rx'ed, clean them up */ while (chan->head) { diff --git a/trunk/drivers/net/usb/asix.c b/trunk/drivers/net/usb/asix.c index e95f0e60a9bc..e6fed4d4cb77 100644 --- a/trunk/drivers/net/usb/asix.c +++ b/trunk/drivers/net/usb/asix.c @@ -1655,10 +1655,6 @@ static const struct usb_device_id products [] = { // ASIX 88772a USB_DEVICE(0x0db0, 0xa877), .driver_info = (unsigned long) &ax88772_info, -}, { - // Asus USB Ethernet Adapter - USB_DEVICE (0x0b95, 0x7e2b), - .driver_info = (unsigned long) &ax88772_info, }, { }, // END }; diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c index 528d5f3e868c..888abc2be3a5 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -1271,9 +1271,7 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->max_valid_rate = k; ath_rc_sort_validrates(rate_table, ath_rc_priv); - ath_rc_priv->rate_max_phy = (k > 4) ? 
- ath_rc_priv->valid_rate_index[k-4] : - ath_rc_priv->valid_rate_index[k-1]; + ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; ath_rc_priv->rate_table = rate_table; ath_dbg(common, ATH_DBG_CONFIG, diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 5c7c17c7166a..a7a6def40d05 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) if (ctx->ht.enabled) { /* if HT40 is used, it should not change * after associated except channel switch */ - if (!ctx->ht.is_40mhz || - !iwl_is_associated_ctx(ctx)) + if (iwl_is_associated_ctx(ctx) && + !ctx->ht.is_40mhz) iwlagn_config_ht40(conf, ctx); } else ctx->ht.is_40mhz = false; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index df1540ca6102..35a6b71f358c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -91,10 +91,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } else { - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - else - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c index e0e9a3dfbc00..bacc06c95e7a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2850,9 +2850,6 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw, int ret; u8 sta_id; - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return 0; - IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->shrd->mutex); @@ -2901,9 +2898,6 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw, struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *ctx = vif_priv->ctx; - if (ctx->ctxid != IWL_RXON_CTX_PAN) - return; - IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->shrd->mutex); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 5f17ab8e76ba..ce918980e977 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c @@ -1197,7 +1197,9 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); /* Set up entry for this TFD in Tx byte-count array */ - iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); + if (is_agg) + iwl_trans_txq_update_byte_cnt_tbl(trans, txq, + le16_to_cpu(tx_cmd->len)); dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen, DMA_BIDIRECTIONAL); diff --git a/trunk/drivers/net/wireless/mwifiex/cmdevt.c b/trunk/drivers/net/wireless/mwifiex/cmdevt.c index 6e0a3eaecf70..ac278156d390 100644 --- a/trunk/drivers/net/wireless/mwifiex/cmdevt.c +++ b/trunk/drivers/net/wireless/mwifiex/cmdevt.c @@ -939,6 +939,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; unsigned long cmd_flags; + unsigned long cmd_pending_q_flags; unsigned long scan_pending_q_flags; uint16_t cancel_scan_cmd = false; @@ -948,9 +949,12 @@ 
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) cmd_node = adapter->curr_cmd; cmd_node->wait_q_enabled = false; cmd_node->cmd_flag |= CMD_F_CANCELED; + spin_lock_irqsave(&adapter->cmd_pending_q_lock, + cmd_pending_q_flags); + list_del(&cmd_node->list); + spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, + cmd_pending_q_flags); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - mwifiex_complete_cmd(adapter, adapter->curr_cmd); - adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); } @@ -977,6 +981,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); } adapter->cmd_wait_q.status = -1; + mwifiex_complete_cmd(adapter, adapter->curr_cmd); } /* diff --git a/trunk/drivers/usb/dwc3/core.c b/trunk/drivers/usb/dwc3/core.c index 600d82348511..717ebc9ff941 100644 --- a/trunk/drivers/usb/dwc3/core.c +++ b/trunk/drivers/usb/dwc3/core.c @@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc) ret = -ENODEV; goto err0; } - dwc->revision = reg; + dwc->revision = reg & DWC3_GSNPSREV_MASK; dwc3_core_soft_reset(dwc); diff --git a/trunk/drivers/usb/gadget/epautoconf.c b/trunk/drivers/usb/gadget/epautoconf.c index 4dff83d2f265..596a0b464e61 100644 --- a/trunk/drivers/usb/gadget/epautoconf.c +++ b/trunk/drivers/usb/gadget/epautoconf.c @@ -130,6 +130,9 @@ ep_matches ( num_req_streams = ep_comp->bmAttributes & 0x1f; if (num_req_streams > ep->max_streams) return 0; + /* Update the ep_comp descriptor if needed */ + if (num_req_streams != ep->max_streams) + ep_comp->bmAttributes = ep->max_streams; } } diff --git a/trunk/drivers/usb/host/isp1760-if.c b/trunk/drivers/usb/host/isp1760-if.c index 2ac4ac2e4ef9..a7dc1e1d45f2 100644 --- a/trunk/drivers/usb/host/isp1760-if.c +++ b/trunk/drivers/usb/host/isp1760-if.c @@ -18,7 +18,7 @@ #include "isp1760-hcd.h" -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF #include #include #include @@ -31,7 +31,7 @@ #include #endif -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF struct isp1760 { struct usb_hcd *hcd; int rst_gpio; @@ -437,7 +437,7 @@ static int __init isp1760_init(void) ret = platform_driver_register(&isp1760_plat_driver); if (!ret) any_ret = 0; -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF ret = platform_driver_register(&isp1760_of_driver); if (!ret) any_ret = 0; @@ -457,7 +457,7 @@ module_init(isp1760_init); static void __exit isp1760_exit(void) { platform_driver_unregister(&isp1760_plat_driver); -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +#ifdef CONFIG_OF platform_driver_unregister(&isp1760_of_driver); #endif #ifdef CONFIG_PCI diff --git a/trunk/drivers/usb/musb/musb_host.c b/trunk/drivers/usb/musb/musb_host.c index 79cb0af779fa..60ddba8066ea 100644 --- a/trunk/drivers/usb/musb/musb_host.c +++ b/trunk/drivers/usb/musb/musb_host.c @@ -774,10 +774,6 @@ static void musb_ep_program(struct musb *musb, u8 epnum, if (musb->double_buffer_not_ok) musb_writew(epio, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); - else if (can_bulk_split(musb, qh->type)) - musb_writew(epio, MUSB_TXMAXP, packet_sz - | ((hw_ep->max_packet_sz_tx / - packet_sz) - 1) << 11); else musb_writew(epio, MUSB_TXMAXP, qh->maxpacket | diff --git a/trunk/firmware/README.AddingFirmware b/trunk/firmware/README.AddingFirmware index ea78c3a17eec..e24cd8986d8b 100644 --- a/trunk/firmware/README.AddingFirmware +++ b/trunk/firmware/README.AddingFirmware @@ -12,7 +12,7 @@ here. 
This directory is _NOT_ for adding arbitrary new firmware images. The place to add those is the separate linux-firmware repository: - git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git + git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git That repository contains all these firmware images which have been extracted from older drivers, as well various new firmware images which @@ -22,7 +22,6 @@ been permitted to redistribute under separate cover. To submit firmware to that repository, please send either a git binary diff or preferably a git pull request to: David Woodhouse - Ben Hutchings Your commit should include an update to the WHENCE file clearly identifying the licence under which the firmware is available, and diff --git a/trunk/fs/btrfs/async-thread.c b/trunk/fs/btrfs/async-thread.c index 0b394580d860..cb97174e2366 100644 --- a/trunk/fs/btrfs/async-thread.c +++ b/trunk/fs/btrfs/async-thread.c @@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) struct list_head *fallback; int ret; - spin_lock_irqsave(&workers->lock, flags); again: + spin_lock_irqsave(&workers->lock, flags); worker = next_worker(workers); if (!worker) { @@ -579,7 +579,6 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) spin_unlock_irqrestore(&workers->lock, flags); /* we're below the limit, start another worker */ ret = __btrfs_start_workers(workers); - spin_lock_irqsave(&workers->lock, flags); if (ret) goto fallback; goto again; diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index fd1a06df5bc6..0a6b928813a4 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -4590,6 +4590,10 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans, int err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, dentry->d_name.len, backref, index); + if (!err) { + d_instantiate(dentry, inode); + return 0; + } if (err > 0) err = -EEXIST; return err; @@ -4651,7 +4655,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, else { init_special_inode(inode, inode->i_mode, rdev); btrfs_update_inode(trans, root, inode); - d_instantiate(dentry, inode); } out_unlock: nr = trans->blocks_used; @@ -4719,7 +4722,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_mapping->a_ops = &btrfs_aops; inode->i_mapping->backing_dev_info = &root->fs_info->bdi; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; - d_instantiate(dentry, inode); } out_unlock: nr = trans->blocks_used; @@ -4777,7 +4779,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *parent = dentry->d_parent; err = btrfs_update_inode(trans, root, inode); BUG_ON(err); - d_instantiate(dentry, inode); btrfs_log_new_name(trans, inode, NULL, parent); } @@ -7244,8 +7245,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, drop_inode = 1; out_unlock: - if (!err) - d_instantiate(dentry, inode); nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); if (drop_inode) { diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c index 517f211a3bd4..ac86f8b3e3cb 100644 --- a/trunk/fs/fs-writeback.c +++ b/trunk/fs/fs-writeback.c @@ -47,6 +47,17 @@ struct wb_writeback_work { struct completion *done; /* set if the caller waits */ }; +const char *wb_reason_name[] = { + [WB_REASON_BACKGROUND] = "background", + [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages", + [WB_REASON_SYNC] = "sync", + [WB_REASON_PERIODIC] = "periodic", + 
[WB_REASON_LAPTOP_TIMER] = "laptop_timer", + [WB_REASON_FREE_MORE_MEM] = "free_more_memory", + [WB_REASON_FS_FREE_SPACE] = "fs_free_space", + [WB_REASON_FORKER_THREAD] = "forker_thread" +}; + /* * Include the creation of the trace points after defining the * wb_writeback_work structure so that the definition remains local to this diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 637694bf3a03..3b0d05dcd7c1 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -1205,8 +1205,6 @@ int __break_lease(struct inode *inode, unsigned int mode) int want_write = (mode & O_ACCMODE) != O_RDONLY; new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK); - if (IS_ERR(new_fl)) - return PTR_ERR(new_fl); lock_flocks(); @@ -1223,6 +1221,12 @@ int __break_lease(struct inode *inode, unsigned int mode) if (fl->fl_owner == current->files) i_have_this_lease = 1; + if (IS_ERR(new_fl) && !i_have_this_lease + && ((mode & O_NONBLOCK) == 0)) { + error = PTR_ERR(new_fl); + goto out; + } + break_time = 0; if (lease_break_time > 0) { break_time = jiffies + lease_break_time * HZ; @@ -1280,7 +1284,8 @@ int __break_lease(struct inode *inode, unsigned int mode) out: unlock_flocks(); - locks_free_lock(new_fl); + if (!IS_ERR(new_fl)) + locks_free_lock(new_fl); return error; } diff --git a/trunk/fs/proc/stat.c b/trunk/fs/proc/stat.c index 0855e6f20391..2a30d67dd6b8 100644 --- a/trunk/fs/proc/stat.c +++ b/trunk/fs/proc/stat.c @@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu) idle = kstat_cpu(cpu).cpustat.idle; idle = cputime64_add(idle, arch_idle_time(cpu)); } else - idle = usecs_to_cputime64(idle_time); + idle = nsecs_to_jiffies64(1000 * idle_time); return idle; } @@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu) /* !NO_HZ so we can rely on cpustat.iowait */ iowait = kstat_cpu(cpu).cpustat.iowait; else - iowait = usecs_to_cputime64(iowait_time); + iowait = nsecs_to_jiffies64(1000 * iowait_time); return iowait; } diff --git a/trunk/fs/xfs/xfs_super.c b/trunk/fs/xfs/xfs_super.c index 8a899496fd5f..3eca58f51ae9 100644 --- a/trunk/fs/xfs/xfs_super.c +++ b/trunk/fs/xfs/xfs_super.c @@ -868,6 +868,27 @@ xfs_fs_dirty_inode( XFS_I(inode)->i_update_core = 1; } +STATIC int +xfs_log_inode( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return xfs_trans_commit(tp, 0); +} + STATIC int xfs_fs_write_inode( struct inode *inode, @@ -881,8 +902,10 @@ xfs_fs_write_inode( if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); + if (!ip->i_update_core) + return 0; - if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { + if (wbc->sync_mode == WB_SYNC_ALL) { /* * Make sure the inode has made it it into the log. Instead * of forcing it all the way to stable storage using a @@ -890,14 +913,11 @@ xfs_fs_write_inode( * ->sync_fs call do that for thus, which reduces the number * of synchronous log forces dramatically. */ - error = xfs_log_dirty_inode(ip, NULL, 0); + error = xfs_log_inode(ip); if (error) goto out; return 0; } else { - if (!ip->i_update_core) - return 0; - /* * We make this non-blocking if the inode is contended, return * EAGAIN to indicate to the caller that they did not succeed. 
diff --git a/trunk/fs/xfs/xfs_sync.c b/trunk/fs/xfs/xfs_sync.c index f0994aedcd15..be5c51d8f757 100644 --- a/trunk/fs/xfs/xfs_sync.c +++ b/trunk/fs/xfs/xfs_sync.c @@ -336,32 +336,6 @@ xfs_sync_fsdata( return error; } -int -xfs_log_dirty_inode( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - if (!ip->i_update_core) - return 0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp, 0); -} - /* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute. This first phase is syncing the data before we @@ -385,16 +359,6 @@ xfs_quiesce_data( { int error, error2 = 0; - /* - * Log all pending size and timestamp updates. The vfs writeback - * code is supposed to do this, but due to its overagressive - * livelock detection it will skip inodes where appending writes - * were written out in the first non-blocking sync phase if their - * completion took long enough that it happened after taking the - * timestamp for the cut-off in the blocking phase. - */ - xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); - xfs_qm_sync(mp, SYNC_TRYLOCK); xfs_qm_sync(mp, SYNC_WAIT); diff --git a/trunk/fs/xfs/xfs_sync.h b/trunk/fs/xfs/xfs_sync.h index fa965479d788..941202e7ac6e 100644 --- a/trunk/fs/xfs/xfs_sync.h +++ b/trunk/fs/xfs/xfs_sync.h @@ -34,8 +34,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); -int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags); - int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); int xfs_reclaim_inodes_count(struct xfs_mount *mp); void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); diff --git a/trunk/include/asm-generic/cputime.h b/trunk/include/asm-generic/cputime.h index 12a1764f612b..62ce6823c0f2 100644 --- a/trunk/include/asm-generic/cputime.h +++ b/trunk/include/asm-generic/cputime.h @@ -40,7 +40,6 @@ typedef u64 cputime64_t; */ #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) -#define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000) /* * Convert cputime to seconds and back. 
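The cputime hunks above (ia64, powerpc and s390 asm/cputime.h, asm-generic/cputime.h and fs/proc/stat.c) all revolve around one conversion: an idle/iowait figure accounted in microseconds has to become a jiffies-based cputime64_t. The sketch below is not part of the patch; it is a user-space illustration in which HZ, the helper names and the sample value are assumptions chosen only for the example. It shows why the call sites above want the 64-bit form of the conversion rather than pushing the microsecond count through 32-bit math first.

	/* cputime_sketch.c - hypothetical illustration, build with: cc -o cputime_sketch cputime_sketch.c */
	#include <stdint.h>
	#include <stdio.h>

	#define HZ 100ULL				/* assumed tick rate */

	/* mirrors nsecs_to_jiffies64((usecs) * 1000): every step stays in 64 bits */
	static uint64_t usecs_to_jiffies64_sketch(uint64_t usecs)
	{
		return (usecs * 1000ULL) / (1000000000ULL / HZ);
	}

	/* the same conversion if the microsecond count is first forced into 32 bits */
	static uint32_t usecs_to_jiffies32_sketch(uint32_t usecs)
	{
		return usecs / (uint32_t)(1000000ULL / HZ);
	}

	int main(void)
	{
		uint64_t idle_us = 2ULL * 60 * 60 * 1000000;	/* ~2 hours of idle time */

		printf("64-bit: %llu jiffies\n",
		       (unsigned long long)usecs_to_jiffies64_sketch(idle_us));
		printf("32-bit: %u jiffies (input silently truncated to %u us)\n",
		       (unsigned)usecs_to_jiffies32_sketch((uint32_t)idle_us),
		       (unsigned)(uint32_t)idle_us);
		return 0;
	}

With the assumed two hours of idle time the truncated 32-bit path wraps and reports roughly 290,000 jiffies instead of the correct 720,000, which is the kind of overflow the usecs_to_cputime64()/nsecs_to_jiffies64() call sites above are there to avoid.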
diff --git a/trunk/include/linux/kvm.h b/trunk/include/linux/kvm.h index 68e67e50d028..c3892fc1d538 100644 --- a/trunk/include/linux/kvm.h +++ b/trunk/include/linux/kvm.h @@ -557,7 +557,6 @@ struct kvm_ppc_pvinfo { #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ #define KVM_CAP_PPC_PAPR 68 #define KVM_CAP_S390_GMAP 71 -#define KVM_CAP_TSC_DEADLINE_TIMER 72 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/trunk/include/linux/lglock.h b/trunk/include/linux/lglock.h index 87f402ccec55..f549056fb20b 100644 --- a/trunk/include/linux/lglock.h +++ b/trunk/include/linux/lglock.h @@ -22,7 +22,6 @@ #include #include #include -#include /* can make br locks by using local lock for read side, global lock for write */ #define br_lock_init(name) name##_lock_init() @@ -73,31 +72,9 @@ #define DEFINE_LGLOCK(name) \ \ - DEFINE_SPINLOCK(name##_cpu_lock); \ - cpumask_t name##_cpus __read_mostly; \ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ DEFINE_LGLOCK_LOCKDEP(name); \ \ - static int \ - name##_lg_cpu_callback(struct notifier_block *nb, \ - unsigned long action, void *hcpu) \ - { \ - switch (action & ~CPU_TASKS_FROZEN) { \ - case CPU_UP_PREPARE: \ - spin_lock(&name##_cpu_lock); \ - cpu_set((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - break; \ - case CPU_UP_CANCELED: case CPU_DEAD: \ - spin_lock(&name##_cpu_lock); \ - cpu_clear((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - } \ - return NOTIFY_OK; \ - } \ - static struct notifier_block name##_lg_cpu_notifier = { \ - .notifier_call = name##_lg_cpu_callback, \ - }; \ void name##_lock_init(void) { \ int i; \ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ @@ -106,11 +83,6 @@ lock = &per_cpu(name##_lock, i); \ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ } \ - register_hotcpu_notifier(&name##_lg_cpu_notifier); \ - get_online_cpus(); \ - for_each_online_cpu(i) \ - cpu_set(i, name##_cpus); \ - put_online_cpus(); \ } \ EXPORT_SYMBOL(name##_lock_init); \ \ @@ -152,9 +124,9 @@ \ void name##_global_lock_online(void) { \ int i; \ - spin_lock(&name##_cpu_lock); \ + preempt_disable(); \ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ + for_each_online_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_lock(lock); \ @@ -165,12 +137,12 @@ void name##_global_unlock_online(void) { \ int i; \ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ + for_each_online_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_unlock(lock); \ } \ - spin_unlock(&name##_cpu_lock); \ + preempt_enable(); \ } \ EXPORT_SYMBOL(name##_global_unlock_online); \ \ diff --git a/trunk/include/net/dst.h b/trunk/include/net/dst.h index 75766b42660e..6faec1a60216 100644 --- a/trunk/include/net/dst.h +++ b/trunk/include/net/dst.h @@ -53,7 +53,6 @@ struct dst_entry { #define DST_NOHASH 0x0008 #define DST_NOCACHE 0x0010 #define DST_NOCOUNT 0x0020 -#define DST_NOPEER 0x0040 short error; short obsolete; diff --git a/trunk/include/net/flow.h b/trunk/include/net/flow.h index 57f15a7f1cdd..a09447749e2d 100644 --- a/trunk/include/net/flow.h +++ b/trunk/include/net/flow.h @@ -207,7 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup( u8 dir, flow_resolve_t resolver, void *ctx); extern void flow_cache_flush(void); -extern void flow_cache_flush_deferred(void); extern atomic_t flow_cache_genid; #endif diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h 
index a15432da27c3..e90e7a9935dd 100644 --- a/trunk/include/net/sctp/structs.h +++ b/trunk/include/net/sctp/structs.h @@ -241,9 +241,6 @@ extern struct sctp_globals { * bits is an indicator of when to send and window update SACK. */ int rwnd_update_shift; - - /* Threshold for autoclose timeout, in seconds. */ - unsigned long max_autoclose; } sctp_globals; #define sctp_rto_initial (sctp_globals.rto_initial) @@ -284,7 +281,6 @@ extern struct sctp_globals { #define sctp_auth_enable (sctp_globals.auth_enable) #define sctp_checksum_disable (sctp_globals.checksum_disable) #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift) -#define sctp_max_autoclose (sctp_globals.max_autoclose) /* SCTP Socket type: UDP or TCP style. */ typedef enum { diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index 32e39371fba6..abb6e0f0c3c3 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -637,14 +637,12 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) /* * Take into account size of receive queue and backlog queue - * Do not take into account this skb truesize, - * to allow even a single big packet to come. */ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) { unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); - return qsize > sk->sk_rcvbuf; + return qsize + skb->truesize > sk->sk_rcvbuf; } /* The per-socket spinlock must be held here. */ diff --git a/trunk/include/trace/events/writeback.h b/trunk/include/trace/events/writeback.h index 99d1d0decf88..b99caa8b780c 100644 --- a/trunk/include/trace/events/writeback.h +++ b/trunk/include/trace/events/writeback.h @@ -21,16 +21,6 @@ {I_REFERENCED, "I_REFERENCED"} \ ) -#define WB_WORK_REASON \ - {WB_REASON_BACKGROUND, "background"}, \ - {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \ - {WB_REASON_SYNC, "sync"}, \ - {WB_REASON_PERIODIC, "periodic"}, \ - {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \ - {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \ - {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \ - {WB_REASON_FORKER_THREAD, "forker_thread"} - struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_work_class, @@ -65,7 +55,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->for_kupdate, __entry->range_cyclic, __entry->for_background, - __print_symbolic(__entry->reason, WB_WORK_REASON) + wb_reason_name[__entry->reason] ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -194,8 +184,7 @@ TRACE_EVENT(writeback_queue_io, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ __entry->moved, - __print_symbolic(__entry->reason, WB_WORK_REASON) - ) + wb_reason_name[__entry->reason]) ); TRACE_EVENT(global_dirty_state, diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index 5f0a3c91fdac..c106d3b3cc64 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -1828,7 +1828,7 @@ static struct page *__read_cache_page(struct address_space *mapping, page = __page_cache_alloc(gfp | __GFP_COLD); if (!page) return ERR_PTR(-ENOMEM); - err = add_to_page_cache_lru(page, mapping, index, gfp); + err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (unlikely(err)) { page_cache_release(page); if (err == -EEXIST) @@ -1925,7 +1925,10 @@ static struct page *wait_on_page_read(struct page *page) * @gfp: the page allocator flags to use if allocating * * This is the same as "read_mapping_page(mapping, index, NULL)", but with - * any new page allocations done using the specified allocation flags. 
+ * any new page allocations done using the specified allocation flags. Note + * that the Radix tree operations will still use GFP_KERNEL, so you can't + * expect to do this atomically or anything like that - but you can pass in + * other page requirements. * * If the page does not get brought uptodate, return -EIO. */ diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index 2316840b337a..73f17c0293c0 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -901,6 +901,7 @@ static int gather_surplus_pages(struct hstate *h, int delta) h->resv_huge_pages += delta; ret = 0; + spin_unlock(&hugetlb_lock); /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { if ((--needed) < 0) @@ -914,7 +915,6 @@ static int gather_surplus_pages(struct hstate *h, int delta) VM_BUG_ON(page_count(page)); enqueue_huge_page(h, page); } - spin_unlock(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ free: diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index c3fdbcb17658..adc395481813 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; - pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; @@ -644,21 +643,13 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (!vma || vma->vm_start > start) return -EFAULT; - if (start > vma->vm_start) - prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); - if (mpol_equal(vma_policy(vma), new_pol)) - continue; - - pgoff = vma->vm_pgoff + - ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, + vma->anon_vma, vma->vm_file, vma->vm_pgoff, new_pol); if (prev) { vma = prev; diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index c1c597e3e198..e0af7237cd92 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) goto encrypt; auth: - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) + if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) return 0; if (!hci_conn_auth(conn, sec_level, auth_type)) diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 17b5b1cd9657..5ea94a1eecf2 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi void *ptr = req->data; int type, olen; unsigned long val; - struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct l2cap_conf_rfc rfc; BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); @@ -2271,16 +2271,6 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) } } - /* Use sane default values in case a misbehaving remote device - * did not send an RFC option. 
- */ - rfc.mode = chan->mode; - rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); - rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); - rfc.max_pdu_size = cpu_to_le16(chan->imtu); - - BT_ERR("Expected RFC option was not found, using defaults"); - done: switch (rfc.mode) { case L2CAP_MODE_ERTM: diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c index 2d28dfe98389..4e32e18211f9 100644 --- a/trunk/net/bluetooth/rfcomm/core.c +++ b/trunk/net/bluetooth/rfcomm/core.c @@ -1146,7 +1146,6 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) if (list_empty(&s->dlcs)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); - rfcomm_session_clear_timer(s); } break; diff --git a/trunk/net/bridge/br_netfilter.c b/trunk/net/bridge/br_netfilter.c index fa8b8f763580..d6ec3720c77e 100644 --- a/trunk/net/bridge/br_netfilter.c +++ b/trunk/net/bridge/br_netfilter.c @@ -114,18 +114,12 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo return NULL; } -static unsigned int fake_mtu(const struct dst_entry *dst) -{ - return dst->dev->mtu; -} - static struct dst_ops fake_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, .cow_metrics = fake_cow_metrics, .neigh_lookup = fake_neigh_lookup, - .mtu = fake_mtu, }; /* @@ -147,7 +141,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_init_metrics(&rt->dst, br_dst_default_metrics, true); - rt->dst.flags = DST_NOXFRM | DST_NOPEER; + rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } diff --git a/trunk/net/core/flow.c b/trunk/net/core/flow.c index e318c7e98042..8ae42de9c79e 100644 --- a/trunk/net/core/flow.c +++ b/trunk/net/core/flow.c @@ -358,18 +358,6 @@ void flow_cache_flush(void) put_online_cpus(); } -static void flow_cache_flush_task(struct work_struct *work) -{ - flow_cache_flush(); -} - -static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task); - -void flow_cache_flush_deferred(void) -{ - schedule_work(&flow_cache_flush_work); -} - static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) { struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); diff --git a/trunk/net/core/net-sysfs.c b/trunk/net/core/net-sysfs.c index 385aefe53648..c71c434a4c05 100644 --- a/trunk/net/core/net-sysfs.c +++ b/trunk/net/core/net-sysfs.c @@ -665,14 +665,11 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, if (count) { int i; - if (count > INT_MAX) - return -EINVAL; - count = roundup_pow_of_two(count); - if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table)) - / sizeof(struct rps_dev_flow)) { + if (count > 1<<30) { /* Enforce a limit to prevent overflow */ return -EINVAL; } + count = roundup_pow_of_two(count); table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count)); if (!table) return -ENOMEM; diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c index b23f174ab84c..4ed7b1d12f5e 100644 --- a/trunk/net/core/sock.c +++ b/trunk/net/core/sock.c @@ -288,7 +288,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { + /* Cast sk->rcvbuf to unsigned... 
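Editor's aside: the store_rps_dev_flow_table_cnt() hunk above trades a fixed 1<<30 cap against a bound derived from the actual structure sizes. A standalone sketch of the size-derived guard, with stand-in structures rather than the kernel's rps_dev_flow definitions: cap the element count at what the size computation can hold before doing the multiplication.

#include <stddef.h>
#include <stdint.h>

struct flow_entry { uint32_t data; };
struct flow_table_hdr { size_t mask; struct flow_entry entries[]; };

/* Returns 0 and fills *out with header + count * elem_size, or -1 if the
 * computation would overflow size_t. */
static int table_bytes(size_t count, size_t *out)
{
	size_t max_count = (SIZE_MAX - sizeof(struct flow_table_hdr)) /
			   sizeof(struct flow_entry);

	if (count > max_count)
		return -1;	/* would overflow the size computation */
	*out = sizeof(struct flow_table_hdr) + count * sizeof(struct flow_entry);
	return 0;
}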
It's pointless, but reduces + number of warnings when compiling with -W --ANK + */ + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= + (unsigned)sk->sk_rcvbuf) { atomic_inc(&sk->sk_drops); trace_sock_rcvqueue_full(sk, skb); return -ENOMEM; diff --git a/trunk/net/ipv4/ipconfig.c b/trunk/net/ipv4/ipconfig.c index 99ec116bef14..0da2afc97f32 100644 --- a/trunk/net/ipv4/ipconfig.c +++ b/trunk/net/ipv4/ipconfig.c @@ -253,10 +253,6 @@ static int __init ic_open_devs(void) } } - /* no point in waiting if we could not bring up at least one device */ - if (!ic_first_dev) - goto have_carrier; - /* wait for a carrier on at least one device */ start = jiffies; while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 94cdbc55ca7e..46af62363b8c 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -91,7 +91,6 @@ #include #include #include -#include #include #include #include @@ -121,7 +120,6 @@ static int ip_rt_max_size; static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; -static int ip_rt_gc_interval __read_mostly = 60 * HZ; static int ip_rt_gc_min_interval __read_mostly = HZ / 2; static int ip_rt_redirect_number __read_mostly = 9; static int ip_rt_redirect_load __read_mostly = HZ / 50; @@ -135,9 +133,6 @@ static int ip_rt_min_advmss __read_mostly = 256; static int rt_chain_length_max __read_mostly = 20; static int redirect_genid; -static struct delayed_work expires_work; -static unsigned long expires_ljiffies; - /* * Interface to generic destination cache. */ @@ -835,97 +830,6 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth) return ONE; } -static void rt_check_expire(void) -{ - static unsigned int rover; - unsigned int i = rover, goal; - struct rtable *rth; - struct rtable __rcu **rthp; - unsigned long samples = 0; - unsigned long sum = 0, sum2 = 0; - unsigned long delta; - u64 mult; - - delta = jiffies - expires_ljiffies; - expires_ljiffies = jiffies; - mult = ((u64)delta) << rt_hash_log; - if (ip_rt_gc_timeout > 1) - do_div(mult, ip_rt_gc_timeout); - goal = (unsigned int)mult; - if (goal > rt_hash_mask) - goal = rt_hash_mask + 1; - for (; goal > 0; goal--) { - unsigned long tmo = ip_rt_gc_timeout; - unsigned long length; - - i = (i + 1) & rt_hash_mask; - rthp = &rt_hash_table[i].chain; - - if (need_resched()) - cond_resched(); - - samples++; - - if (rcu_dereference_raw(*rthp) == NULL) - continue; - length = 0; - spin_lock_bh(rt_hash_lock_addr(i)); - while ((rth = rcu_dereference_protected(*rthp, - lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) { - prefetch(rth->dst.rt_next); - if (rt_is_expired(rth)) { - *rthp = rth->dst.rt_next; - rt_free(rth); - continue; - } - if (rth->dst.expires) { - /* Entry is expired even if it is in use */ - if (time_before_eq(jiffies, rth->dst.expires)) { -nofree: - tmo >>= 1; - rthp = &rth->dst.rt_next; - /* - * We only count entries on - * a chain with equal hash inputs once - * so that entries for different QOS - * levels, and other non-hash input - * attributes don't unfairly skew - * the length computation - */ - length += has_noalias(rt_hash_table[i].chain, rth); - continue; - } - } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) - goto nofree; - - /* Cleanup aged off entries. 
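Editor's aside: the sock_queue_rcv_skb() hunk above carries the old "--ANK" comment about casting sk->sk_rcvbuf to unsigned. A small standalone illustration (names are placeholders): because the left-hand side is unsigned, C's usual arithmetic conversions already perform the comparison in unsigned arithmetic, so the explicit cast mainly documents intent and quiets -Wsign-compare.

#include <stdbool.h>

static bool over_rcvbuf(unsigned int rmem_alloc, unsigned int truesize,
			int rcvbuf)
{
	/* Without the cast, rcvbuf is converted to unsigned implicitly;
	 * spelling it out makes the intent visible to the reader and the
	 * compiler's warning pass alike. */
	return rmem_alloc + truesize >= (unsigned int)rcvbuf;
}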
*/ - *rthp = rth->dst.rt_next; - rt_free(rth); - } - spin_unlock_bh(rt_hash_lock_addr(i)); - sum += length; - sum2 += length*length; - } - if (samples) { - unsigned long avg = sum / samples; - unsigned long sd = int_sqrt(sum2 / samples - avg*avg); - rt_chain_length_max = max_t(unsigned long, - ip_rt_gc_elasticity, - (avg + 4*sd) >> FRACT_BITS); - } - rover = i; -} - -/* - * rt_worker_func() is run in process context. - * we call rt_check_expire() to scan part of the hash table - */ -static void rt_worker_func(struct work_struct *work) -{ - rt_check_expire(); - schedule_delayed_work(&expires_work, ip_rt_gc_interval); -} - /* * Perturbation of rt_genid by a small quantity [1..256] * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() @@ -1367,7 +1271,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) { struct rtable *rt = (struct rtable *) dst; - if (rt && !(rt->dst.flags & DST_NOPEER)) { + if (rt) { if (rt->peer == NULL) rt_bind_peer(rt, rt->rt_dst, 1); @@ -1378,7 +1282,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) iph->id = htons(inet_getid(rt->peer, more)); return; } - } else if (!rt) + } else printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", __builtin_return_address(0)); @@ -3274,13 +3178,6 @@ static ctl_table ipv4_route_table[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - { - .procname = "gc_interval", - .data = &ip_rt_gc_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, { .procname = "redirect_load", .data = &ip_rt_redirect_load, @@ -3491,11 +3388,6 @@ int __init ip_rt_init(void) devinet_init(); ip_fib_init(); - INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func); - expires_ljiffies = jiffies; - schedule_delayed_work(&expires_work, - net_random() % ip_rt_gc_interval + ip_rt_gc_interval); - if (ip_rt_proc_init()) printk(KERN_ERR "Unable to create route proc files\n"); #ifdef CONFIG_XFRM diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c index ec562713db9b..84d0bd5cac93 100644 --- a/trunk/net/ipv6/ip6_output.c +++ b/trunk/net/ipv6/ip6_output.c @@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) static atomic_t ipv6_fragmentation_id; int old, new; - if (rt && !(rt->dst.flags & DST_NOPEER)) { + if (rt) { struct inet_peer *peer; if (!rt->rt6i_peer) diff --git a/trunk/net/llc/af_llc.c b/trunk/net/llc/af_llc.c index a18e6c3d36e3..dfd3a648a551 100644 --- a/trunk/net/llc/af_llc.c +++ b/trunk/net/llc/af_llc.c @@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, copied += used; len -= used; - /* For non stream protcols we get one packet per recvmsg call */ - if (sk->sk_type != SOCK_STREAM) - goto copy_uaddr; - if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, 0); *seq = 0; } + /* For non stream protcols we get one packet per recvmsg call */ + if (sk->sk_type != SOCK_STREAM) + goto copy_uaddr; + /* Partial read */ if (used + offset < skb->len) continue; @@ -857,12 +857,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); - - if (!(flags & MSG_PEEK)) { - sk_eat_skb(sk, skb, 0); - *seq = 0; - } - goto out; } diff --git a/trunk/net/netfilter/xt_connbytes.c b/trunk/net/netfilter/xt_connbytes.c index 9ddf1c3bfb39..5b138506690e 100644 --- a/trunk/net/netfilter/xt_connbytes.c +++ b/trunk/net/netfilter/xt_connbytes.c @@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par) 
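Editor's aside: the removed rt_check_expire() above ends by turning a running sum and sum of squares of hash-chain lengths into a mean and standard deviation, then using mean + 4*sd (floored at the elasticity setting) as the new rt_chain_length_max. A standalone sketch of that calculation; the kernel version additionally keeps lengths in fixed point and shifts by FRACT_BITS, which is omitted here, and int_sqrt() is replaced by a naive loop.

#include <stdio.h>

/* Naive integer square root; fine for the small values used here. */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;
	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static unsigned long chain_length_max(unsigned long sum, unsigned long sum2,
				      unsigned long samples,
				      unsigned long elasticity)
{
	unsigned long avg = sum / samples;
	unsigned long sd = isqrt(sum2 / samples - avg * avg);
	unsigned long limit = avg + 4 * sd;

	return limit > elasticity ? limit : elasticity;
}

int main(void)
{
	/* 8 chains with lengths 1,2,1,3,2,1,2,20 -> sum=32, sum2=424 -> 28 */
	printf("%lu\n", chain_length_max(32, 424, 8, 8));
	return 0;
}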
break; } - if (sinfo->count.to >= sinfo->count.from) + if (sinfo->count.to) return what <= sinfo->count.to && what >= sinfo->count.from; - else /* inverted */ - return what < sinfo->count.to || what > sinfo->count.from; + else + return what >= sinfo->count.from; } static int connbytes_mt_check(const struct xt_mtchk_param *par) diff --git a/trunk/net/nfc/nci/core.c b/trunk/net/nfc/nci/core.c index ea66034499ce..3925c6578767 100644 --- a/trunk/net/nfc/nci/core.c +++ b/trunk/net/nfc/nci/core.c @@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev, __u32 timeout) { int rc = 0; - long completion_rc; + unsigned long completion_rc; ndev->req_status = NCI_REQ_PEND; diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index 3891702b81df..82a6f34d39d0 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -1630,7 +1630,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, if (snaplen > res) snaplen = res; - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= + (unsigned)sk->sk_rcvbuf) goto drop_n_acct; if (skb_shared(skb)) { @@ -1761,7 +1762,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && - atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { + atomic_read(&sk->sk_rmem_alloc) + skb->truesize + < (unsigned)sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { diff --git a/trunk/net/sched/sch_mqprio.c b/trunk/net/sched/sch_mqprio.c index 28de43092330..f88256cbacbf 100644 --- a/trunk/net/sched/sch_mqprio.c +++ b/trunk/net/sched/sch_mqprio.c @@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) if (!netif_is_multiqueue(dev)) return -EOPNOTSUPP; - if (!opt || nla_len(opt) < sizeof(*qopt)) + if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); diff --git a/trunk/net/sctp/associola.c b/trunk/net/sctp/associola.c index acd2edbc073e..152b5b3c3fff 100644 --- a/trunk/net/sctp/associola.c +++ b/trunk/net/sctp/associola.c @@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = - min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ; + (unsigned long)sp->autoclose * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) diff --git a/trunk/net/sctp/output.c b/trunk/net/sctp/output.c index 817174eb5f41..08b3cead6503 100644 --- a/trunk/net/sctp/output.c +++ b/trunk/net/sctp/output.c @@ -697,7 +697,13 @@ static void sctp_packet_append_data(struct sctp_packet *packet, /* Keep track of how many bytes are in flight to the receiver. */ asoc->outqueue.outstanding_bytes += datasize; - /* Update our view of the receiver's rwnd. */ + /* Update our view of the receiver's rwnd. Include sk_buff overhead + * while updating peer.rwnd so that it reduces the chances of a + * receiver running out of receive buffer space even when receive + * window is still open. This can happen when a sender is sending + * sending small messages. 
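Editor's aside: the xt_connbytes hunk above changes how the from/to pair is interpreted. A standalone sketch of the richer variant, with plain integer parameters instead of the xt_connbytes_info structure: to >= from is an ordinary inclusive range, while to < from is read as an inverted range that matches values outside it.

#include <stdbool.h>
#include <stdint.h>

static bool connbytes_match(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)				/* normal inclusive range */
		return what >= from && what <= to;
	return what > from || what < to;	/* inverted range */
}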
+ */ + datasize += sizeof(struct sk_buff); if (datasize < rwnd) rwnd -= datasize; else diff --git a/trunk/net/sctp/outqueue.c b/trunk/net/sctp/outqueue.c index cfeb1d4a1ee6..14c2b06028ff 100644 --- a/trunk/net/sctp/outqueue.c +++ b/trunk/net/sctp/outqueue.c @@ -411,7 +411,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, chunk->transport->flight_size -= sctp_data_size(chunk); q->outstanding_bytes -= sctp_data_size(chunk); - q->asoc->peer.rwnd += sctp_data_size(chunk); + q->asoc->peer.rwnd += (sctp_data_size(chunk) + + sizeof(struct sk_buff)); } continue; } @@ -431,7 +432,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, * (Section 7.2.4)), add the data size of those * chunks to the rwnd. */ - q->asoc->peer.rwnd += sctp_data_size(chunk); + q->asoc->peer.rwnd += (sctp_data_size(chunk) + + sizeof(struct sk_buff)); q->outstanding_bytes -= sctp_data_size(chunk); if (chunk->transport) transport->flight_size -= sctp_data_size(chunk); diff --git a/trunk/net/sctp/protocol.c b/trunk/net/sctp/protocol.c index 6f6ad8686833..61b9fca5a173 100644 --- a/trunk/net/sctp/protocol.c +++ b/trunk/net/sctp/protocol.c @@ -1285,9 +1285,6 @@ SCTP_STATIC __init int sctp_init(void) sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; - /* Initialize maximum autoclose timeout. */ - sctp_max_autoclose = INT_MAX / HZ; - /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 54a7cd2fdd7a..13bf5fcdbff1 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -2200,6 +2200,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, return -EINVAL; if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; + /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ + sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ); return 0; } diff --git a/trunk/net/sctp/sysctl.c b/trunk/net/sctp/sysctl.c index 60ffbd067ff7..6b3952961b85 100644 --- a/trunk/net/sctp/sysctl.c +++ b/trunk/net/sctp/sysctl.c @@ -53,10 +53,6 @@ static int sack_timer_min = 1; static int sack_timer_max = 500; static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ static int rwnd_scale_max = 16; -static unsigned long max_autoclose_min = 0; -static unsigned long max_autoclose_max = - (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX) - ? 
UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ; extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; @@ -262,15 +258,6 @@ static ctl_table sctp_table[] = { .extra1 = &one, .extra2 = &rwnd_scale_max, }, - { - .procname = "max_autoclose", - .data = &sctp_max_autoclose, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, - .extra1 = &max_autoclose_min, - .extra2 = &max_autoclose_max, - }, { /* sentinel */ } }; diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index 9049a5caeb25..2118d6446630 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -2276,6 +2276,8 @@ static void __xfrm_garbage_collect(struct net *net) { struct dst_entry *head, *next; + flow_cache_flush(); + spin_lock_bh(&xfrm_policy_sk_bundle_lock); head = xfrm_policy_sk_bundles; xfrm_policy_sk_bundles = NULL; @@ -2288,18 +2290,6 @@ static void __xfrm_garbage_collect(struct net *net) } } -static void xfrm_garbage_collect(struct net *net) -{ - flow_cache_flush(); - __xfrm_garbage_collect(net); -} - -static void xfrm_garbage_collect_deferred(struct net *net) -{ - flow_cache_flush_deferred(); - __xfrm_garbage_collect(net); -} - static void xfrm_init_pmtu(struct dst_entry *dst) { do { @@ -2432,7 +2422,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(afinfo->garbage_collect == NULL)) - afinfo->garbage_collect = xfrm_garbage_collect_deferred; + afinfo->garbage_collect = __xfrm_garbage_collect; xfrm_policy_afinfo[afinfo->family] = afinfo; } write_unlock_bh(&xfrm_policy_afinfo_lock); @@ -2526,7 +2516,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void switch (event) { case NETDEV_DOWN: - xfrm_garbage_collect(dev_net(dev)); + __xfrm_garbage_collect(dev_net(dev)); } return NOTIFY_DONE; } diff --git a/trunk/scripts/kconfig/Makefile b/trunk/scripts/kconfig/Makefile index 914833d99b06..ba573fe7c74d 100644 --- a/trunk/scripts/kconfig/Makefile +++ b/trunk/scripts/kconfig/Makefile @@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h --directory=$(srctree) --directory=$(objtree) \ --output $(obj)/config.pot $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot - $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \ - $(srctree)/arch/*/um/Kconfig`; \ + $(Q)ln -fs Kconfig.x86 arch/um/Kconfig + $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \ do \ echo " GEN $$i"; \ $(obj)/kxgettext $$i \ @@ -69,6 +69,7 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h done ) $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \ --output $(obj)/linux.pot + $(Q)rm -f $(srctree)/arch/um/Kconfig $(Q)rm -f $(obj)/config.pot PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig diff --git a/trunk/sound/atmel/ac97c.c b/trunk/sound/atmel/ac97c.c index 73516f69ac7c..6e5addeb236b 100644 --- a/trunk/sound/atmel/ac97c.c +++ b/trunk/sound/atmel/ac97c.c @@ -899,10 +899,6 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip) /* AC97 v2.2 specifications says minimum 1 us. 
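Editor's aside: the xfrm hunks above (together with the net/core/flow.c hunk earlier in this patch) add or remove a deferred flush helper because flow_cache_flush() can sleep while the dst garbage collector may be invoked from atomic context. The removed helpers are reassembled here for reference, with the include lines supplied by the editor and not verified against any particular tree.

#include <linux/workqueue.h>
#include <net/flow.h>

static void flow_cache_flush_task(struct work_struct *work)
{
	/* Runs in process context, so the sleeping flush is safe here. */
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	/* Callers in atomic context punt the flush to the shared workqueue. */
	schedule_work(&flow_cache_flush_work);
}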
*/ udelay(2); gpio_set_value(chip->reset_pin, 1); - } else { - ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA); - udelay(2); - ac97c_writel(chip, MR, AC97C_MR_ENA); } } diff --git a/trunk/virt/kvm/assigned-dev.c b/trunk/virt/kvm/assigned-dev.c index 758e3b36d4cf..3ad0925d23a9 100644 --- a/trunk/virt/kvm/assigned-dev.c +++ b/trunk/virt/kvm/assigned-dev.c @@ -17,8 +17,6 @@ #include #include #include -#include -#include #include "irq.h" static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, @@ -482,76 +480,12 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, return r; } -/* - * We want to test whether the caller has been granted permissions to - * use this device. To be able to configure and control the device, - * the user needs access to PCI configuration space and BAR resources. - * These are accessed through PCI sysfs. PCI config space is often - * passed to the process calling this ioctl via file descriptor, so we - * can't rely on access to that file. We can check for permissions - * on each of the BAR resource files, which is a pretty clear - * indicator that the user has been granted access to the device. - */ -static int probe_sysfs_permissions(struct pci_dev *dev) -{ -#ifdef CONFIG_SYSFS - int i; - bool bar_found = false; - - for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) { - char *kpath, *syspath; - struct path path; - struct inode *inode; - int r; - - if (!pci_resource_len(dev, i)) - continue; - - kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); - if (!kpath) - return -ENOMEM; - - /* Per sysfs-rules, sysfs is always at /sys */ - syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i); - kfree(kpath); - if (!syspath) - return -ENOMEM; - - r = kern_path(syspath, LOOKUP_FOLLOW, &path); - kfree(syspath); - if (r) - return r; - - inode = path.dentry->d_inode; - - r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS); - path_put(&path); - if (r) - return r; - - bar_found = true; - } - - /* If no resources, probably something special */ - if (!bar_found) - return -EPERM; - - return 0; -#else - return -EINVAL; /* No way to control the device without sysfs */ -#endif -} - static int kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev) { int r = 0, idx; struct kvm_assigned_dev_kernel *match; struct pci_dev *dev; - u8 header_type; - - if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) - return -EINVAL; mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); @@ -579,18 +513,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, r = -EINVAL; goto out_free; } - - /* Don't allow bridges to be assigned */ - pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) { - r = -EPERM; - goto out_put; - } - - r = probe_sysfs_permissions(dev); - if (r) - goto out_put; - if (pci_enable_device(dev)) { printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); r = -EBUSY; @@ -622,14 +544,16 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, list_add(&match->list, &kvm->arch.assigned_dev_head); - if (!kvm->arch.iommu_domain) { - r = kvm_iommu_map_guest(kvm); + if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { + if (!kvm->arch.iommu_domain) { + r = kvm_iommu_map_guest(kvm); + if (r) + goto out_list_del; + } + r = kvm_assign_device(kvm, match); if (r) goto out_list_del; } - r = kvm_assign_device(kvm, match); - if (r) - goto out_list_del; out: srcu_read_unlock(&kvm->srcu, idx); @@ -669,7 +593,8 @@ 
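Editor's aside: the removed kvm_vm_ioctl_assign_device() code above rejects PCI bridges by inspecting the header-type register. A standalone sketch of that test with illustrative constants rather than the kernel's pci_regs.h macros: the low bits of the header-type byte (config space offset 0x0e) select the layout, where 0 means a normal type-0 endpoint, and bit 7 only flags a multi-function device, so it is masked off before the comparison.

#include <stdbool.h>
#include <stdint.h>

#define HDR_TYPE_LAYOUT_MASK	0x7f	/* layout bits; bit 7 = multi-function */
#define HDR_TYPE_NORMAL		0x00	/* type-0 (non-bridge) endpoint */

static bool assignable_header_type(uint8_t header_type)
{
	return (header_type & HDR_TYPE_LAYOUT_MASK) == HDR_TYPE_NORMAL;
}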
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, goto out; } - kvm_deassign_device(kvm, match); + if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) + kvm_deassign_device(kvm, match); kvm_free_assigned_device(kvm, match);
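Editor's aside: the deassign hunk above makes detaching from the IOMMU domain conditional on how the device was assigned in the first place. A toy sketch of that flag check, with placeholder names rather than the KVM device-assignment structures: only devices assigned with the IOMMU flag have an attachment to undo before their state is freed.

#include <stdbool.h>

#define DEV_FLAG_IOMMU	(1u << 0)	/* stand-in for KVM_DEV_ASSIGN_ENABLE_IOMMU */

struct toy_dev {
	unsigned int flags;
	bool in_iommu_domain;
};

static void toy_deassign(struct toy_dev *dev)
{
	if (dev->flags & DEV_FLAG_IOMMU)
		dev->in_iommu_domain = false;	/* detach from the IOMMU domain */
	/* ... then release interrupts and free the per-device state ... */
}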