From fa9d14a6ed336d14454d036e5a776b8cabfdf368 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 29 Dec 2011 20:19:42 -0500 Subject: [PATCH] --- yaml --- r: 279132 b: refs/heads/master c: 358a0d1c9edcf6ff041776d65cdc2bc59887ab9c h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/Documentation/virtual/kvm/api.txt | 16 ---- trunk/MAINTAINERS | 2 +- trunk/Makefile | 2 +- trunk/arch/arm/oprofile/common.c | 2 +- trunk/arch/ia64/include/asm/cputime.h | 1 - trunk/arch/powerpc/include/asm/cputime.h | 2 - trunk/arch/powerpc/include/asm/kvm_book3s.h | 33 +++++++ .../arch/powerpc/include/asm/kvm_book3s_64.h | 33 ------- trunk/arch/powerpc/kvm/book3s_hv.c | 2 +- trunk/arch/powerpc/kvm/book3s_pr.c | 2 - trunk/arch/powerpc/kvm/e500.c | 1 - trunk/arch/s390/include/asm/cputime.h | 2 - trunk/arch/sh/oprofile/common.c | 4 +- trunk/arch/sparc/kernel/pci_sun4v.c | 4 +- trunk/arch/x86/kernel/cpu/perf_event_intel.c | 2 +- trunk/arch/x86/kvm/i8254.c | 10 +- trunk/arch/x86/kvm/x86.c | 19 ++-- trunk/block/blk-map.c | 2 +- trunk/block/blk-tag.c | 13 ++- trunk/block/cfq-iosched.c | 12 --- .../gpu/drm/i915/i915_gem_execbuffer.c | 4 +- trunk/drivers/gpu/drm/i915/intel_display.c | 8 +- trunk/drivers/gpu/drm/radeon/evergreen.c | 12 --- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6 +- trunk/drivers/iommu/iommu.c | 2 +- trunk/drivers/md/bitmap.c | 5 +- trunk/drivers/md/linear.c | 1 - trunk/drivers/md/md.c | 3 +- trunk/drivers/md/raid5.c | 14 +-- trunk/drivers/media/video/gspca/gspca.c | 4 +- trunk/drivers/media/video/omap3isp/ispccdc.c | 2 +- trunk/drivers/media/video/omap3isp/ispstat.c | 2 +- trunk/drivers/net/ethernet/mellanox/mlx4/fw.c | 6 +- .../drivers/net/ethernet/mellanox/mlx4/main.c | 2 +- trunk/drivers/net/virtio_net.c | 21 ++--- trunk/fs/btrfs/async-thread.c | 3 +- trunk/fs/btrfs/inode.c | 9 +- trunk/fs/fs-writeback.c | 11 +++ trunk/fs/locks.c | 11 ++- trunk/fs/proc/stat.c | 4 +- trunk/fs/xfs/xfs_super.c | 30 +++++- trunk/fs/xfs/xfs_sync.c | 36 ------- trunk/fs/xfs/xfs_sync.h | 2 - trunk/include/asm-generic/cputime.h | 1 - trunk/include/linux/kvm.h | 1 - trunk/include/linux/lglock.h | 36 +------ trunk/include/trace/events/writeback.h | 15 +-- trunk/mm/hugetlb.c | 2 +- trunk/mm/mempolicy.c | 11 +-- trunk/net/ipv6/route.c | 28 ++---- trunk/net/netfilter/Kconfig | 2 +- trunk/net/netfilter/nf_conntrack_netlink.c | 18 +--- trunk/net/packet/af_packet.c | 6 +- trunk/net/sched/sch_netem.c | 7 +- trunk/net/sched/sch_tbf.c | 1 - trunk/net/tipc/bcast.c | 2 +- trunk/net/tipc/bearer.c | 12 +-- trunk/net/tipc/bearer.h | 10 +- trunk/net/tipc/eth_media.c | 4 +- trunk/net/tipc/link.c | 2 +- trunk/scripts/kconfig/Makefile | 5 +- trunk/virt/kvm/assigned-dev.c | 93 ++----------------- 63 files changed, 206 insertions(+), 414 deletions(-) diff --git a/[refs] b/[refs] index 21a4f51a7070..5ac6b248fa42 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 7f8e3234c5f7fbdb06be050c8a1907e9c36d7c61 +refs/heads/master: 358a0d1c9edcf6ff041776d65cdc2bc59887ab9c diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt index e2a4b5287361..7945b0bd35e2 100644 --- a/trunk/Documentation/virtual/kvm/api.txt +++ b/trunk/Documentation/virtual/kvm/api.txt @@ -1100,15 +1100,6 @@ emulate them efficiently. 
The fields in each entry are defined as follows: eax, ebx, ecx, edx: the values returned by the cpuid instruction for this function/index combination -The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned -as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC -support. Instead it is reported via - - ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER) - -if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the -feature in userspace, then you can enable the feature for KVM_SET_CPUID2. - 4.47 KVM_PPC_GET_PVINFO Capability: KVM_CAP_PPC_GET_PVINFO @@ -1160,13 +1151,6 @@ following flags are specified: /* Depends on KVM_CAP_IOMMU */ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) -The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure -isolation of the device. Usages not specifying this flag are deprecated. - -Only PCI header type 0 devices with PCI BAR resources are supported by -device assignment. The user requesting this ioctl must have read/write -access to the PCI sysfs resource files associated with the device. - 4.49 KVM_DEASSIGN_PCI_DEVICE Capability: KVM_CAP_DEVICE_DEASSIGNMENT diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index dbf3d94b1ded..aace41763860 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2700,7 +2700,7 @@ FIREWIRE SUBSYSTEM M: Stefan Richter L: linux1394-devel@lists.sourceforge.net W: http://ieee1394.wiki.kernel.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git S: Maintained F: drivers/firewire/ F: include/linux/firewire*.h diff --git a/trunk/Makefile b/trunk/Makefile index ea51081812f3..a43733df3978 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc6 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/trunk/arch/arm/oprofile/common.c b/trunk/arch/arm/oprofile/common.c index 4e0a371630b3..c074e66ad224 100644 --- a/trunk/arch/arm/oprofile/common.c +++ b/trunk/arch/arm/oprofile/common.c @@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); } diff --git a/trunk/arch/ia64/include/asm/cputime.h b/trunk/arch/ia64/include/asm/cputime.h index 5a274af31b2b..6073b187528a 100644 --- a/trunk/arch/ia64/include/asm/cputime.h +++ b/trunk/arch/ia64/include/asm/cputime.h @@ -60,7 +60,6 @@ typedef u64 cputime64_t; */ #define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) #define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) -#define usecs_to_cputime64(__usecs) usecs_to_cputime(__usecs) /* * Convert cputime <-> seconds diff --git a/trunk/arch/powerpc/include/asm/cputime.h b/trunk/arch/powerpc/include/asm/cputime.h index 98b7c4b49c9d..1cf20bdfbeca 100644 --- a/trunk/arch/powerpc/include/asm/cputime.h +++ b/trunk/arch/powerpc/include/asm/cputime.h @@ -150,8 +150,6 @@ static inline cputime_t usecs_to_cputime(const unsigned long us) return ct; } -#define usecs_to_cputime64(us) usecs_to_cputime(us) - /* * Convert cputime <-> seconds */ diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s.h b/trunk/arch/powerpc/include/asm/kvm_book3s.h index 69c7377d2071..d4df013ad779 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s.h @@ -381,6 +381,39 @@ static inline bool 
kvmppc_critical_section(struct kvm_vcpu *vcpu) } #endif +static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, + unsigned long pte_index) +{ + unsigned long rb, va_low; + + rb = (v & ~0x7fUL) << 16; /* AVA field */ + va_low = pte_index >> 3; + if (v & HPTE_V_SECONDARY) + va_low = ~va_low; + /* xor vsid from AVA */ + if (!(v & HPTE_V_1TB_SEG)) + va_low ^= v >> 12; + else + va_low ^= v >> 24; + va_low &= 0x7ff; + if (v & HPTE_V_LARGE) { + rb |= 1; /* L field */ + if (cpu_has_feature(CPU_FTR_ARCH_206) && + (r & 0xff000)) { + /* non-16MB large page, must be 64k */ + /* (masks depend on page size) */ + rb |= 0x1000; /* page encoding in LP field */ + rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ + rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ + } + } else { + /* 4kB page */ + rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ + } + rb |= (v >> 54) & 0x300; /* B field */ + return rb; +} + /* Magic register values loaded into r3 and r4 before the 'sc' assembly * instruction for the OSI hypercalls */ #define OSI_SC_MAGIC_R3 0x113724FA diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h index d0ac94f98f9e..e43fe42b9875 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h @@ -29,37 +29,4 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) #define SPAPR_TCE_SHIFT 12 -static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, - unsigned long pte_index) -{ - unsigned long rb, va_low; - - rb = (v & ~0x7fUL) << 16; /* AVA field */ - va_low = pte_index >> 3; - if (v & HPTE_V_SECONDARY) - va_low = ~va_low; - /* xor vsid from AVA */ - if (!(v & HPTE_V_1TB_SEG)) - va_low ^= v >> 12; - else - va_low ^= v >> 24; - va_low &= 0x7ff; - if (v & HPTE_V_LARGE) { - rb |= 1; /* L field */ - if (cpu_has_feature(CPU_FTR_ARCH_206) && - (r & 0xff000)) { - /* non-16MB large page, must be 64k */ - /* (masks depend on page size) */ - rb |= 0x1000; /* page encoding in LP field */ - rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ - rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ - } - } else { - /* 4kB page */ - rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ - } - rb |= (v >> 54) & 0x300; /* B field */ - return rb; -} - #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/trunk/arch/powerpc/kvm/book3s_hv.c b/trunk/arch/powerpc/kvm/book3s_hv.c index 336983da9e72..0cb137a9b038 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv.c +++ b/trunk/arch/powerpc/kvm/book3s_hv.c @@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) tpaca->kvm_hstate.napping = 0; vcpu->cpu = vc->pcpu; smp_wmb(); -#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) +#ifdef CONFIG_PPC_ICP_NATIVE if (vcpu->arch.ptid) { tpaca->cpu_start = 0x80; wmb(); diff --git a/trunk/arch/powerpc/kvm/book3s_pr.c b/trunk/arch/powerpc/kvm/book3s_pr.c index e2cfb9e1e20e..3c791e1eb675 100644 --- a/trunk/arch/powerpc/kvm/book3s_pr.c +++ b/trunk/arch/powerpc/kvm/book3s_pr.c @@ -658,12 +658,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; -#ifdef CONFIG_KVM_BOOK3S_64_PR if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; } -#endif run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { diff --git a/trunk/arch/powerpc/kvm/e500.c b/trunk/arch/powerpc/kvm/e500.c index 8c0d45a6faf7..26d20903f2bc 100644 --- 
a/trunk/arch/powerpc/kvm/e500.c +++ b/trunk/arch/powerpc/kvm/e500.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/s390/include/asm/cputime.h b/trunk/arch/s390/include/asm/cputime.h index b9acaaa175d8..081434878296 100644 --- a/trunk/arch/s390/include/asm/cputime.h +++ b/trunk/arch/s390/include/asm/cputime.h @@ -87,8 +87,6 @@ usecs_to_cputime(const unsigned int m) return (cputime_t) m * 4096; } -#define usecs_to_cputime64(m) usecs_to_cputime(m) - /* * Convert cputime to milliseconds and back. */ diff --git a/trunk/arch/sh/oprofile/common.c b/trunk/arch/sh/oprofile/common.c index e4dd5d5a1115..b4c2d2b946dd 100644 --- a/trunk/arch/sh/oprofile/common.c +++ b/trunk/arch/sh/oprofile/common.c @@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); kfree(sh_pmu_op_name); @@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = sh_backtrace; return -ENODEV; } -void oprofile_arch_exit(void) {} +void __exit oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/trunk/arch/sparc/kernel/pci_sun4v.c b/trunk/arch/sparc/kernel/pci_sun4v.c index af5755d20fbe..b272cda35a01 100644 --- a/trunk/arch/sparc/kernel/pci_sun4v.c +++ b/trunk/arch/sparc/kernel/pci_sun4v.c @@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, if (!irq) return -ENOMEM; - if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) - return -EINVAL; if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) return -EINVAL; + if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) + return -EINVAL; return irq; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 121f1be4da19..8d601b18bf9f 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -1169,7 +1169,7 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, */ c = &unconstrained; } else if (intel_try_alt_er(event, orig_idx)) { - raw_spin_unlock_irqrestore(&era->lock, flags); + raw_spin_unlock(&era->lock); goto again; } raw_spin_unlock_irqrestore(&era->lock, flags); diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c index 405f2620392f..76e3f1cd0369 100644 --- a/trunk/arch/x86/kvm/i8254.c +++ b/trunk/arch/x86/kvm/i8254.c @@ -338,15 +338,11 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) +static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) { - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; struct kvm_timer *pt = &ps->pit_timer; s64 interval; - if (!irqchip_in_kernel(kvm)) - return; - interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); @@ -398,13 +394,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) /* FIXME: enhance mode 4 precision */ case 4: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { - create_pit_timer(kvm, val, 0); + create_pit_timer(ps, val, 0); } break; case 2: case 3: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ - create_pit_timer(kvm, val, 1); + create_pit_timer(ps, val, 1); } break; default: diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 
4c938da2ba00..c38efd7b792e 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -602,6 +602,7 @@ static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct kvm_lapic *apic = vcpu->arch.apic; + u32 timer_mode_mask; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) @@ -614,12 +615,15 @@ static void update_cpuid(struct kvm_vcpu *vcpu) best->ecx |= bit(X86_FEATURE_OSXSAVE); } - if (apic) { - if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER)) - apic->lapic_timer.timer_mode_mask = 3 << 17; - else - apic->lapic_timer.timer_mode_mask = 1 << 17; - } + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + best->function == 0x1) { + best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER); + timer_mode_mask = 3 << 17; + } else + timer_mode_mask = 1 << 17; + + if (apic) + apic->lapic_timer.timer_mode_mask = timer_mode_mask; } int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -2131,9 +2135,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; - case KVM_CAP_TSC_DEADLINE_TIMER: - r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); - break; default: r = 0; break; diff --git a/trunk/block/blk-map.c b/trunk/block/blk-map.c index 623e1cd4cffe..164cd0059706 100644 --- a/trunk/block/blk-map.c +++ b/trunk/block/blk-map.c @@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (IS_ERR(bio)) return PTR_ERR(bio); - if (!reading) + if (rq_data_dir(rq) == WRITE) bio->bi_rw |= REQ_WRITE; if (do_copy) diff --git a/trunk/block/blk-tag.c b/trunk/block/blk-tag.c index 4af6f5cc1167..e74d6d13838f 100644 --- a/trunk/block/blk-tag.c +++ b/trunk/block/blk-tag.c @@ -282,9 +282,18 @@ EXPORT_SYMBOL(blk_queue_resize_tags); void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; - unsigned tag = rq->tag; /* negative tags invalid */ + int tag = rq->tag; - BUG_ON(tag >= bqt->real_max_depth); + BUG_ON(tag == -1); + + if (unlikely(tag >= bqt->max_depth)) { + /* + * This can happen after tag depth has been reduced. + * But tag shouldn't be larger than real_max_depth. + */ + WARN_ON(tag >= bqt->real_max_depth); + return; + } list_del_init(&rq->queuelist); rq->cmd_flags &= ~REQ_QUEUED; diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3548705b04e4..4c12869fcf77 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1655,8 +1655,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { struct cfq_queue *cfqq = RQ_CFQQ(rq); - struct cfq_data *cfqd = q->elevator->elevator_data; - /* * reposition in fifo if next is older than rq */ @@ -1671,16 +1669,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, cfq_remove_request(next); cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next), rq_is_sync(next)); - - cfqq = RQ_CFQQ(next); - /* - * all requests of this queue are merged to other queues, delete it - * from the service tree. 
If it's the active_queue, - * cfq_dispatch_requests() will choose to expire it or do idle - */ - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && - cfqq != cfqd->active_queue) - cfq_del_cfqq_rr(cfqd, cfqq); } static int cfq_allow_merge(struct request_queue *q, struct request *rq, diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b9da8900ae4e..c681dc149d2a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -756,9 +756,9 @@ intel_enable_semaphores(struct drm_device *dev) if (i915_semaphores >= 0) return i915_semaphores; - /* Disable semaphores on SNB */ + /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6) - return 0; + return !intel_iommu_enabled; return 1; } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index daa5743ccbd6..d809b038ca88 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -7922,11 +7922,13 @@ static bool intel_enable_rc6(struct drm_device *dev) return 0; /* - * Disable rc6 on Sandybridge + * Enable rc6 on Sandybridge if DMA remapping is disabled */ if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); - return 0; + DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", + intel_iommu_enabled ? "true" : "false", + !intel_iommu_enabled ? "en" : "dis"); + return !intel_iommu_enabled; } DRM_DEBUG_DRIVER("RC6 enabled\n"); return 1; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 92c9628c572d..5e00d1670aa9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -3276,18 +3276,6 @@ int evergreen_init(struct radeon_device *rdev) rdev->accel_working = false; } } - - /* Don't start up if the MC ucode is missing on BTC parts. - * The default clocks and voltages before the MC ucode - * is loaded are not suffient for advanced operations. - */ - if (ASIC_IS_DCE5(rdev)) { - if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { - DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; - } - } - return 0; } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f94b33ae2215..8aa1dbb45c67 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1093,6 +1093,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; struct ttm_base_object *user_obj; + u64 required_size; int ret; /** @@ -1101,9 +1102,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, * requested framebuffer. 
*/ - if (!vmw_kms_validate_mode_vram(dev_priv, - mode_cmd->pitch, - mode_cmd->height)) { + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/iommu/iommu.c b/trunk/drivers/iommu/iommu.c index 5b5fa5cdaa31..2fb2963df553 100644 --- a/trunk/drivers/iommu/iommu.c +++ b/trunk/drivers/iommu/iommu.c @@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) if (bus == NULL || bus->iommu_ops == NULL) return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); + domain = kmalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; diff --git a/trunk/drivers/md/bitmap.c b/trunk/drivers/md/bitmap.c index 6d03774b176e..b6907118283a 100644 --- a/trunk/drivers/md/bitmap.c +++ b/trunk/drivers/md/bitmap.c @@ -1393,6 +1393,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto atomic_read(&bitmap->behind_writes), bitmap->mddev->bitmap_info.max_write_behind); } + if (bitmap->mddev->degraded) + /* Never clear bits or update events_cleared when degraded */ + success = 0; while (sectors) { sector_t blocks; @@ -1406,7 +1409,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto return; } - if (success && !bitmap->mddev->degraded && + if (success && bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; diff --git a/trunk/drivers/md/linear.c b/trunk/drivers/md/linear.c index 627456542fb3..c3273efd08cb 100644 --- a/trunk/drivers/md/linear.c +++ b/trunk/drivers/md/linear.c @@ -230,7 +230,6 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) return -EINVAL; rdev->raid_disk = rdev->saved_raid_disk; - rdev->saved_raid_disk = -1; newconf = linear_conf(mddev,mddev->raid_disks+1); diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index f47f1f8ac44b..ee981737edfc 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -7360,7 +7360,8 @@ static int remove_and_add_spares(struct mddev *mddev) spares++; md_new_event(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); - } + } else + break; } } } diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index 858fdbb7eb07..31670f8d6b65 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -3065,17 +3065,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } } else if (test_bit(In_sync, &rdev->flags)) set_bit(R5_Insync, &dev->flags); - else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) + else { /* in sync if before recovery_offset */ - set_bit(R5_Insync, &dev->flags); - else if (test_bit(R5_UPTODATE, &dev->flags) && - test_bit(R5_Expanded, &dev->flags)) - /* If we've reshaped into here, we assume it is Insync. - * We will shortly update recovery_offset to make - * it official. 
- */ - set_bit(R5_Insync, &dev->flags); - + if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) + set_bit(R5_Insync, &dev->flags); + } if (rdev && test_bit(R5_WriteError, &dev->flags)) { clear_bit(R5_Insync, &dev->flags); if (!test_bit(Faulty, &rdev->flags)) { diff --git a/trunk/drivers/media/video/gspca/gspca.c b/trunk/drivers/media/video/gspca/gspca.c index 512f32ff446a..881e04c7ffe6 100644 --- a/trunk/drivers/media/video/gspca/gspca.c +++ b/trunk/drivers/media/video/gspca/gspca.c @@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) gspca_dev->usb_err = 0; /* do the specific subdriver stuff before endpoint selection */ - intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); - gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0; + gspca_dev->alt = 0; if (gspca_dev->sd_desc->isoc_init) { ret = gspca_dev->sd_desc->isoc_init(gspca_dev); if (ret < 0) goto unlock; } + intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK : USB_ENDPOINT_XFER_ISOC; diff --git a/trunk/drivers/media/video/omap3isp/ispccdc.c b/trunk/drivers/media/video/omap3isp/ispccdc.c index 54a4a3f22e2e..b0b0fa5a3572 100644 --- a/trunk/drivers/media/video/omap3isp/ispccdc.c +++ b/trunk/drivers/media/video/omap3isp/ispccdc.c @@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc) { struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->video_out.video.entity); - struct video_device *vdev = ccdc->subdev.devnode; + struct video_device *vdev = &ccdc->subdev.devnode; struct v4l2_event event; memset(&event, 0, sizeof(event)); diff --git a/trunk/drivers/media/video/omap3isp/ispstat.c b/trunk/drivers/media/video/omap3isp/ispstat.c index bc0b2c7349b9..68d539456c55 100644 --- a/trunk/drivers/media/video/omap3isp/ispstat.c +++ b/trunk/drivers/media/video/omap3isp/ispstat.c @@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) static void isp_stat_queue_event(struct ispstat *stat, int err) { - struct video_device *vdev = stat->subdev.devnode; + struct video_device *vdev = &stat->subdev.devnode; struct v4l2_event event; struct omap3isp_stat_event_status *status = (void *)event.u.data; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c index 8bcc66f2c2a3..e0639ebebe5e 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -657,8 +657,6 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, u8 port_type; int err; -#define MLX4_VF_PORT_ETH_ONLY_MASK 0xE6 - err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); @@ -673,8 +671,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_GET(port_type, outbox->buf, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - /* Allow only Eth port, no link sensing allowed */ - port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; + /* disable ib */ + port_type &= 0xFE; /* check eth is enabled for this port */ if (!(port_type & 2)) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c index 6bb62c580e2d..1209934844c4 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c @@ -332,7 +332,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) * and perform sense_port FW command to try and set the correct * port type from beginning */ - if 
(mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { + if (mlx4_priv(dev)->sense.sense_allowed && dev->caps.default_sense[i]) { enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; mlx4_SENSE_PORT(dev, i, &sensed_port); diff --git a/trunk/drivers/net/virtio_net.c b/trunk/drivers/net/virtio_net.c index 2055386eda58..d1c3dce15dc2 100644 --- a/trunk/drivers/net/virtio_net.c +++ b/trunk/drivers/net/virtio_net.c @@ -440,13 +440,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) return err; } -/* - * Returns false if we couldn't fill entirely (OOM). - * - * Normally run in the receive path, but can also be run from ndo_open - * before we're receiving packets, or from refill_work which is - * careful to disable receiving (using napi_disable). - */ +/* Returns false if we couldn't fill entirely (OOM). */ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) { int err; @@ -508,7 +502,7 @@ static void refill_work(struct work_struct *work) /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) - queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2); + schedule_delayed_work(&vi->refill, HZ/2); } static int virtnet_poll(struct napi_struct *napi, int budget) @@ -527,7 +521,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (vi->num < vi->max / 2) { if (!try_fill_recv(vi, GFP_ATOMIC)) - queue_delayed_work(system_nrt_wq, &vi->refill, 0); + schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ @@ -727,10 +721,6 @@ static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - /* Make sure we have some buffers: if oom use wq. */ - if (!try_fill_recv(vi, GFP_KERNEL)) - queue_delayed_work(system_nrt_wq, &vi->refill, 0); - virtnet_napi_enable(vi); return 0; } @@ -784,8 +774,6 @@ static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - /* Make sure refill_work doesn't re-enable napi! */ - cancel_delayed_work_sync(&vi->refill); napi_disable(&vi->napi); return 0; @@ -1112,6 +1100,7 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); + cancel_delayed_work_sync(&vi->refill); free_vqs: vdev->config->del_vqs(vdev); free_stats: @@ -1150,7 +1139,9 @@ static void __devexit virtnet_remove(struct virtio_device *vdev) /* Stop all the virtqueues. */ vdev->config->reset(vdev); + unregister_netdev(vi->dev); + cancel_delayed_work_sync(&vi->refill); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); diff --git a/trunk/fs/btrfs/async-thread.c b/trunk/fs/btrfs/async-thread.c index 0b394580d860..cb97174e2366 100644 --- a/trunk/fs/btrfs/async-thread.c +++ b/trunk/fs/btrfs/async-thread.c @@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) struct list_head *fallback; int ret; - spin_lock_irqsave(&workers->lock, flags); again: + spin_lock_irqsave(&workers->lock, flags); worker = next_worker(workers); if (!worker) { @@ -579,7 +579,6 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) spin_unlock_irqrestore(&workers->lock, flags); /* we're below the limit, start another worker */ ret = __btrfs_start_workers(workers); - spin_lock_irqsave(&workers->lock, flags); if (ret) goto fallback; goto again; diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index fd1a06df5bc6..0a6b928813a4 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -4590,6 +4590,10 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans, int err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, dentry->d_name.len, backref, index); + if (!err) { + d_instantiate(dentry, inode); + return 0; + } if (err > 0) err = -EEXIST; return err; @@ -4651,7 +4655,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, else { init_special_inode(inode, inode->i_mode, rdev); btrfs_update_inode(trans, root, inode); - d_instantiate(dentry, inode); } out_unlock: nr = trans->blocks_used; @@ -4719,7 +4722,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_mapping->a_ops = &btrfs_aops; inode->i_mapping->backing_dev_info = &root->fs_info->bdi; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; - d_instantiate(dentry, inode); } out_unlock: nr = trans->blocks_used; @@ -4777,7 +4779,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *parent = dentry->d_parent; err = btrfs_update_inode(trans, root, inode); BUG_ON(err); - d_instantiate(dentry, inode); btrfs_log_new_name(trans, inode, NULL, parent); } @@ -7244,8 +7245,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, drop_inode = 1; out_unlock: - if (!err) - d_instantiate(dentry, inode); nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); if (drop_inode) { diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c index 517f211a3bd4..ac86f8b3e3cb 100644 --- a/trunk/fs/fs-writeback.c +++ b/trunk/fs/fs-writeback.c @@ -47,6 +47,17 @@ struct wb_writeback_work { struct completion *done; /* set if the caller waits */ }; +const char *wb_reason_name[] = { + [WB_REASON_BACKGROUND] = "background", + [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages", + [WB_REASON_SYNC] = "sync", + [WB_REASON_PERIODIC] = "periodic", + [WB_REASON_LAPTOP_TIMER] = "laptop_timer", + [WB_REASON_FREE_MORE_MEM] = "free_more_memory", + [WB_REASON_FS_FREE_SPACE] = "fs_free_space", + [WB_REASON_FORKER_THREAD] = "forker_thread" +}; + /* * Include the creation of the trace points after defining the * wb_writeback_work structure so that the definition remains local to this diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 637694bf3a03..3b0d05dcd7c1 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -1205,8 +1205,6 @@ int __break_lease(struct inode *inode, unsigned int mode) int want_write = (mode & O_ACCMODE) != O_RDONLY; new_fl = lease_alloc(NULL, want_write ? 
F_WRLCK : F_RDLCK); - if (IS_ERR(new_fl)) - return PTR_ERR(new_fl); lock_flocks(); @@ -1223,6 +1221,12 @@ int __break_lease(struct inode *inode, unsigned int mode) if (fl->fl_owner == current->files) i_have_this_lease = 1; + if (IS_ERR(new_fl) && !i_have_this_lease + && ((mode & O_NONBLOCK) == 0)) { + error = PTR_ERR(new_fl); + goto out; + } + break_time = 0; if (lease_break_time > 0) { break_time = jiffies + lease_break_time * HZ; @@ -1280,7 +1284,8 @@ int __break_lease(struct inode *inode, unsigned int mode) out: unlock_flocks(); - locks_free_lock(new_fl); + if (!IS_ERR(new_fl)) + locks_free_lock(new_fl); return error; } diff --git a/trunk/fs/proc/stat.c b/trunk/fs/proc/stat.c index 0855e6f20391..2a30d67dd6b8 100644 --- a/trunk/fs/proc/stat.c +++ b/trunk/fs/proc/stat.c @@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu) idle = kstat_cpu(cpu).cpustat.idle; idle = cputime64_add(idle, arch_idle_time(cpu)); } else - idle = usecs_to_cputime64(idle_time); + idle = nsecs_to_jiffies64(1000 * idle_time); return idle; } @@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu) /* !NO_HZ so we can rely on cpustat.iowait */ iowait = kstat_cpu(cpu).cpustat.iowait; else - iowait = usecs_to_cputime64(iowait_time); + iowait = nsecs_to_jiffies64(1000 * iowait_time); return iowait; } diff --git a/trunk/fs/xfs/xfs_super.c b/trunk/fs/xfs/xfs_super.c index 8a899496fd5f..3eca58f51ae9 100644 --- a/trunk/fs/xfs/xfs_super.c +++ b/trunk/fs/xfs/xfs_super.c @@ -868,6 +868,27 @@ xfs_fs_dirty_inode( XFS_I(inode)->i_update_core = 1; } +STATIC int +xfs_log_inode( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return xfs_trans_commit(tp, 0); +} + STATIC int xfs_fs_write_inode( struct inode *inode, @@ -881,8 +902,10 @@ xfs_fs_write_inode( if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); + if (!ip->i_update_core) + return 0; - if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { + if (wbc->sync_mode == WB_SYNC_ALL) { /* * Make sure the inode has made it it into the log. Instead * of forcing it all the way to stable storage using a @@ -890,14 +913,11 @@ xfs_fs_write_inode( * ->sync_fs call do that for thus, which reduces the number * of synchronous log forces dramatically. */ - error = xfs_log_dirty_inode(ip, NULL, 0); + error = xfs_log_inode(ip); if (error) goto out; return 0; } else { - if (!ip->i_update_core) - return 0; - /* * We make this non-blocking if the inode is contended, return * EAGAIN to indicate to the caller that they did not succeed. 
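The __break_lease() hunk above defers the IS_ERR() check on the lock returned by lease_alloc(), so an allocation failure only matters once it is known the allocated lock is actually needed. For readers unfamiliar with the idiom it relies on, here is a minimal userspace sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() encoding, reimplemented from the well-known kernel pattern rather than taken from this patch; lease_alloc_demo() is a stand-in, not the kernel function.

/* Minimal userspace re-implementation of the kernel's ERR_PTR encoding:
 * error codes occupy the top MAX_ERRNO values of the pointer range, so a
 * single pointer can carry either a valid object or a negative errno.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

/* Stand-in for lease_alloc(): returns an object or an encoded error. */
static void *lease_alloc_demo(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *fl = lease_alloc_demo(1);

	if (IS_ERR(fl))		/* allocation failed, pointer carries -ENOMEM */
		printf("lease_alloc failed: %ld\n", PTR_ERR(fl));
	else
		free(fl);
	return 0;
}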
diff --git a/trunk/fs/xfs/xfs_sync.c b/trunk/fs/xfs/xfs_sync.c index f0994aedcd15..be5c51d8f757 100644 --- a/trunk/fs/xfs/xfs_sync.c +++ b/trunk/fs/xfs/xfs_sync.c @@ -336,32 +336,6 @@ xfs_sync_fsdata( return error; } -int -xfs_log_dirty_inode( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - if (!ip->i_update_core) - return 0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp, 0); -} - /* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute. This first phase is syncing the data before we @@ -385,16 +359,6 @@ xfs_quiesce_data( { int error, error2 = 0; - /* - * Log all pending size and timestamp updates. The vfs writeback - * code is supposed to do this, but due to its overagressive - * livelock detection it will skip inodes where appending writes - * were written out in the first non-blocking sync phase if their - * completion took long enough that it happened after taking the - * timestamp for the cut-off in the blocking phase. - */ - xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); - xfs_qm_sync(mp, SYNC_TRYLOCK); xfs_qm_sync(mp, SYNC_WAIT); diff --git a/trunk/fs/xfs/xfs_sync.h b/trunk/fs/xfs/xfs_sync.h index fa965479d788..941202e7ac6e 100644 --- a/trunk/fs/xfs/xfs_sync.h +++ b/trunk/fs/xfs/xfs_sync.h @@ -34,8 +34,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); -int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags); - int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); int xfs_reclaim_inodes_count(struct xfs_mount *mp); void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); diff --git a/trunk/include/asm-generic/cputime.h b/trunk/include/asm-generic/cputime.h index 12a1764f612b..62ce6823c0f2 100644 --- a/trunk/include/asm-generic/cputime.h +++ b/trunk/include/asm-generic/cputime.h @@ -40,7 +40,6 @@ typedef u64 cputime64_t; */ #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) -#define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000) /* * Convert cputime to seconds and back. 
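The generic usecs_to_cputime64() helper removed just above was only nsecs_to_jiffies64() applied after scaling microseconds to nanoseconds, which is why the /proc/stat hunk earlier open-codes nsecs_to_jiffies64(1000 * idle_time). A rough standalone illustration of that arithmetic follows; the HZ value, the simplified rounding, and the usecs_to_jiffies64_demo() name are assumptions for the sketch, not taken from the patch.

/* Approximate userspace model of the removed usecs_to_cputime64():
 * scale microseconds to nanoseconds, then divide by the tick length.
 * The kernel's nsecs_to_jiffies64() handles rounding and non-integer
 * NSEC_PER_SEC/HZ ratios more carefully than this sketch does.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ            100ULL           /* assumed tick rate */
#define NSEC_PER_SEC  1000000000ULL
#define TICK_NSEC     (NSEC_PER_SEC / HZ)

static uint64_t usecs_to_jiffies64_demo(uint64_t usecs)
{
	uint64_t nsecs = usecs * 1000ULL;   /* usecs -> nsecs, as in the hunk */
	return nsecs / TICK_NSEC;           /* nsecs -> jiffies */
}

int main(void)
{
	/* 1.5 s of idle time reported in microseconds -> 150 jiffies at HZ=100 */
	printf("%llu\n", (unsigned long long)usecs_to_jiffies64_demo(1500000ULL));
	return 0;
}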
diff --git a/trunk/include/linux/kvm.h b/trunk/include/linux/kvm.h index 68e67e50d028..c3892fc1d538 100644 --- a/trunk/include/linux/kvm.h +++ b/trunk/include/linux/kvm.h @@ -557,7 +557,6 @@ struct kvm_ppc_pvinfo { #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ #define KVM_CAP_PPC_PAPR 68 #define KVM_CAP_S390_GMAP 71 -#define KVM_CAP_TSC_DEADLINE_TIMER 72 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/trunk/include/linux/lglock.h b/trunk/include/linux/lglock.h index 87f402ccec55..f549056fb20b 100644 --- a/trunk/include/linux/lglock.h +++ b/trunk/include/linux/lglock.h @@ -22,7 +22,6 @@ #include #include #include -#include /* can make br locks by using local lock for read side, global lock for write */ #define br_lock_init(name) name##_lock_init() @@ -73,31 +72,9 @@ #define DEFINE_LGLOCK(name) \ \ - DEFINE_SPINLOCK(name##_cpu_lock); \ - cpumask_t name##_cpus __read_mostly; \ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ DEFINE_LGLOCK_LOCKDEP(name); \ \ - static int \ - name##_lg_cpu_callback(struct notifier_block *nb, \ - unsigned long action, void *hcpu) \ - { \ - switch (action & ~CPU_TASKS_FROZEN) { \ - case CPU_UP_PREPARE: \ - spin_lock(&name##_cpu_lock); \ - cpu_set((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - break; \ - case CPU_UP_CANCELED: case CPU_DEAD: \ - spin_lock(&name##_cpu_lock); \ - cpu_clear((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - } \ - return NOTIFY_OK; \ - } \ - static struct notifier_block name##_lg_cpu_notifier = { \ - .notifier_call = name##_lg_cpu_callback, \ - }; \ void name##_lock_init(void) { \ int i; \ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ @@ -106,11 +83,6 @@ lock = &per_cpu(name##_lock, i); \ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ } \ - register_hotcpu_notifier(&name##_lg_cpu_notifier); \ - get_online_cpus(); \ - for_each_online_cpu(i) \ - cpu_set(i, name##_cpus); \ - put_online_cpus(); \ } \ EXPORT_SYMBOL(name##_lock_init); \ \ @@ -152,9 +124,9 @@ \ void name##_global_lock_online(void) { \ int i; \ - spin_lock(&name##_cpu_lock); \ + preempt_disable(); \ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ + for_each_online_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_lock(lock); \ @@ -165,12 +137,12 @@ void name##_global_unlock_online(void) { \ int i; \ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ + for_each_online_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_unlock(lock); \ } \ - spin_unlock(&name##_cpu_lock); \ + preempt_enable(); \ } \ EXPORT_SYMBOL(name##_global_unlock_online); \ \ diff --git a/trunk/include/trace/events/writeback.h b/trunk/include/trace/events/writeback.h index 99d1d0decf88..b99caa8b780c 100644 --- a/trunk/include/trace/events/writeback.h +++ b/trunk/include/trace/events/writeback.h @@ -21,16 +21,6 @@ {I_REFERENCED, "I_REFERENCED"} \ ) -#define WB_WORK_REASON \ - {WB_REASON_BACKGROUND, "background"}, \ - {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \ - {WB_REASON_SYNC, "sync"}, \ - {WB_REASON_PERIODIC, "periodic"}, \ - {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \ - {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \ - {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \ - {WB_REASON_FORKER_THREAD, "forker_thread"} - struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_work_class, @@ -65,7 +55,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->for_kupdate, 
__entry->range_cyclic, __entry->for_background, - __print_symbolic(__entry->reason, WB_WORK_REASON) + wb_reason_name[__entry->reason] ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -194,8 +184,7 @@ TRACE_EVENT(writeback_queue_io, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ __entry->moved, - __print_symbolic(__entry->reason, WB_WORK_REASON) - ) + wb_reason_name[__entry->reason]) ); TRACE_EVENT(global_dirty_state, diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index 2316840b337a..73f17c0293c0 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -901,6 +901,7 @@ static int gather_surplus_pages(struct hstate *h, int delta) h->resv_huge_pages += delta; ret = 0; + spin_unlock(&hugetlb_lock); /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { if ((--needed) < 0) @@ -914,7 +915,6 @@ static int gather_surplus_pages(struct hstate *h, int delta) VM_BUG_ON(page_count(page)); enqueue_huge_page(h, page); } - spin_unlock(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ free: diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index c3fdbcb17658..adc395481813 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; - pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; @@ -644,21 +643,13 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (!vma || vma->vm_start > start) return -EFAULT; - if (start > vma->vm_start) - prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); - if (mpol_equal(vma_policy(vma), new_pol)) - continue; - - pgoff = vma->vm_pgoff + - ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, + vma->anon_vma, vma->vm_file, vma->vm_pgoff, new_pol); if (prev) { vma = prev; diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 4a62c47599b4..0940729d2f91 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -129,14 +129,11 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const voi return neigh_create(&nd_tbl, daddr, dst->dev); } -static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev) +static int rt6_bind_neighbour(struct rt6_info *rt) { - struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway); - if (!n) { - n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev); - if (IS_ERR(n)) - return PTR_ERR(n); - } + struct neighbour *n = ip6_neigh_lookup(&rt->dst, &rt->rt6i_gateway); + if (IS_ERR(n)) + return PTR_ERR(n); dst_set_neighbour(&rt->dst, n); return 0; @@ -749,7 +746,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, #endif retry: - if (rt6_bind_neighbour(rt, rt->dst.dev)) { + if (rt6_bind_neighbour(rt)) { struct net *net = dev_net(rt->dst.dev); int saved_rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; @@ -1400,7 +1397,7 @@ int ip6_route_add(struct fib6_config *cfg) rt->rt6i_prefsrc.plen = 0; if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) { - err = rt6_bind_neighbour(rt, dev); + err = rt6_bind_neighbour(rt); if (err) goto out; } @@ -2087,7 +2084,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->rt6i_flags |= 
RTF_ANYCAST; else rt->rt6i_flags |= RTF_LOCAL; - err = rt6_bind_neighbour(rt, rt->dst.dev); + err = rt6_bind_neighbour(rt); if (err) { dst_free(&rt->dst); return ERR_PTR(err); @@ -2363,13 +2360,11 @@ static int rt6_fill_node(struct net *net, int iif, int type, u32 pid, u32 seq, int prefix, int nowait, unsigned int flags) { - const struct inet_peer *peer; struct rtmsg *rtm; struct nlmsghdr *nlh; long expires; u32 table; struct neighbour *n; - u32 ts, tsage; if (prefix) { /* user wants prefix routes only */ if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { @@ -2476,14 +2471,7 @@ static int rt6_fill_node(struct net *net, else expires = INT_MAX; - peer = rt->rt6i_peer; - ts = tsage = 0; - if (peer && peer->tcp_ts_stamp) { - ts = peer->tcp_ts; - tsage = get_seconds() - peer->tcp_ts_stamp; - } - - if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage, + if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, rt->dst.error) < 0) goto nla_put_failure; diff --git a/trunk/net/netfilter/Kconfig b/trunk/net/netfilter/Kconfig index f8ac4ef0b794..f6275a0f1ea7 100644 --- a/trunk/net/netfilter/Kconfig +++ b/trunk/net/netfilter/Kconfig @@ -908,7 +908,7 @@ config NETFILTER_XT_MATCH_MULTIPORT config NETFILTER_XT_MATCH_NFACCT tristate '"nfacct" match support' - depends on NETFILTER_ADVANCED + default m if NETFILTER_ADVANCED=n select NETFILTER_NETLINK_ACCT help This option allows you to use the extended accounting through diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index bb10c077a01a..85033344aed2 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -1367,15 +1367,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); - spin_unlock_bh(&nf_conntrack_lock); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { - spin_lock_bh(&nf_conntrack_lock); err = -EOPNOTSUPP; goto err1; } - spin_lock_bh(&nf_conntrack_lock); rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), @@ -1883,30 +1880,25 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, err = -ENOMEM; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb2 == NULL) { - nf_ct_expect_put(exp); + if (skb2 == NULL) goto out; - } rcu_read_lock(); err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp); rcu_read_unlock(); - nf_ct_expect_put(exp); if (err <= 0) goto free; - err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); - if (err < 0) - goto out; + nf_ct_expect_put(exp); - return 0; + return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); free: kfree_skb(skb2); out: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + nf_ct_expect_put(exp); + return err; } static int diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index 2dbb32b988c4..e56ca75e3f43 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -2453,12 +2453,8 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc { struct packet_sock *po = pkt_sk(sk); - if (po->fanout) { - if (dev) - dev_put(dev); - + if (po->fanout) return -EINVAL; - } lock_sock(sk); diff --git a/trunk/net/sched/sch_netem.c b/trunk/net/sched/sch_netem.c index a92c1b3dab83..ffcaa5975947 100644 --- a/trunk/net/sched/sch_netem.c +++ b/trunk/net/sched/sch_netem.c @@ -532,7 +532,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) return -EINVAL; s = sizeof(struct disttable) + n * sizeof(s16); - d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); + d = kmalloc(s, GFP_KERNEL); if (!d) d = vmalloc(s); if (!d) @@ -545,10 +545,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); - swap(q->delay_dist, d); + dist_free(q->delay_dist); + q->delay_dist = d; spin_unlock_bh(root_lock); - - dist_free(d); return 0; } diff --git a/trunk/net/sched/sch_tbf.c b/trunk/net/sched/sch_tbf.c index b8e156319d7b..1dcfb5223a86 100644 --- a/trunk/net/sched/sch_tbf.c +++ b/trunk/net/sched/sch_tbf.c @@ -346,7 +346,6 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) struct nlattr *nest; struct tc_tbf_qopt opt; - sch->qstats.backlog = q->qdisc->qstats.backlog; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; diff --git a/trunk/net/tipc/bcast.c b/trunk/net/tipc/bcast.c index 048b7a3e848e..b6afe7356a86 100644 --- a/trunk/net/tipc/bcast.c +++ b/trunk/net/tipc/bcast.c @@ -76,7 +76,7 @@ struct bcbearer_pair { struct bcbearer { struct tipc_bearer bearer; - struct media media; + struct tipc_media media; struct bcbearer_pair bpairs[MAX_BEARERS]; struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; struct tipc_node_map remains; diff --git a/trunk/net/tipc/bearer.c b/trunk/net/tipc/bearer.c index b40e98adfd7f..cddec3d9ee17 100644 --- a/trunk/net/tipc/bearer.c +++ b/trunk/net/tipc/bearer.c @@ -41,7 +41,7 @@ #define MAX_ADDR_STR 32 -static struct media *media_list[MAX_MEDIA]; +static struct tipc_media *media_list[MAX_MEDIA]; static u32 media_count; struct tipc_bearer tipc_bearers[MAX_BEARERS]; @@ -68,7 +68,7 @@ static int media_name_valid(const char *name) * tipc_media_find - locates specified media object by name */ -struct media *tipc_media_find(const char *name) +struct tipc_media *tipc_media_find(const char *name) { u32 i; @@ -83,7 +83,7 @@ struct media *tipc_media_find(const char *name) * media_find_id - locates specified media object by type identifier */ -static struct media *media_find_id(u8 type) +static struct tipc_media *media_find_id(u8 type) { u32 i; @@ -100,7 +100,7 @@ static struct media *media_find_id(u8 type) * Bearers for this media type must be activated separately at a later stage. 
*/ -int tipc_register_media(struct media *m_ptr) +int tipc_register_media(struct tipc_media *m_ptr) { int res = -EINVAL; @@ -138,7 +138,7 @@ int tipc_register_media(struct media *m_ptr) void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) { char addr_str[MAX_ADDR_STR]; - struct media *m_ptr; + struct tipc_media *m_ptr; m_ptr = media_find_id(a->media_id); @@ -425,7 +425,7 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr) int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) { struct tipc_bearer *b_ptr; - struct media *m_ptr; + struct tipc_media *m_ptr; struct bearer_name b_name; char addr_string[16]; u32 bearer_id; diff --git a/trunk/net/tipc/bearer.h b/trunk/net/tipc/bearer.h index cfe77c4b20f9..af01ca23a6d7 100644 --- a/trunk/net/tipc/bearer.h +++ b/trunk/net/tipc/bearer.h @@ -74,7 +74,7 @@ struct tipc_media_addr { struct tipc_bearer; /** - * struct media - TIPC media information available to internal users + * struct tipc_media - TIPC media information available to internal users * @send_msg: routine which handles buffer transmission * @enable_bearer: routine which enables a bearer * @disable_bearer: routine which disables a bearer @@ -90,7 +90,7 @@ struct tipc_bearer; * @name: media name */ -struct media { +struct tipc_media { int (*send_msg)(struct sk_buff *buf, struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); @@ -139,7 +139,7 @@ struct tipc_bearer { struct tipc_media_addr addr; /* initalized by media */ char name[TIPC_MAX_BEARER_NAME]; spinlock_t lock; - struct media *media; + struct tipc_media *media; u32 priority; u32 window; u32 tolerance; @@ -164,7 +164,7 @@ extern struct tipc_bearer tipc_bearers[]; /* * TIPC routines available to supported media types */ -int tipc_register_media(struct media *m_ptr); +int tipc_register_media(struct tipc_media *m_ptr); void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr); @@ -191,7 +191,7 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr); struct tipc_bearer *tipc_bearer_find(const char *name); struct tipc_bearer *tipc_bearer_find_interface(const char *if_name); -struct media *tipc_media_find(const char *name); +struct tipc_media *tipc_media_find(const char *name); int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr); int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr); void tipc_bearer_stop(void); diff --git a/trunk/net/tipc/eth_media.c b/trunk/net/tipc/eth_media.c index cd0a4b8b0ecf..527e3f0e165d 100644 --- a/trunk/net/tipc/eth_media.c +++ b/trunk/net/tipc/eth_media.c @@ -56,7 +56,7 @@ struct eth_bearer { struct work_struct cleanup; }; -static struct media eth_media_info; +static struct tipc_media eth_media_info; static struct eth_bearer eth_bearers[MAX_ETH_BEARERS]; static int eth_started; static struct notifier_block notifier; @@ -340,7 +340,7 @@ static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) * Ethernet media registration info */ -static struct media eth_media_info = { +static struct tipc_media eth_media_info = { .send_msg = send_msg, .enable_bearer = enable_bearer, .disable_bearer = disable_bearer, diff --git a/trunk/net/tipc/link.c b/trunk/net/tipc/link.c index 853b286dd08c..515dfe4fd4ce 100644 --- a/trunk/net/tipc/link.c +++ b/trunk/net/tipc/link.c @@ -2793,7 +2793,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) struct tipc_node *node; struct link *l_ptr; struct 
tipc_bearer *b_ptr; - struct media *m_ptr; + struct tipc_media *m_ptr; l_ptr = link_find_link(name, &node); if (l_ptr) { diff --git a/trunk/scripts/kconfig/Makefile b/trunk/scripts/kconfig/Makefile index 914833d99b06..ba573fe7c74d 100644 --- a/trunk/scripts/kconfig/Makefile +++ b/trunk/scripts/kconfig/Makefile @@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h --directory=$(srctree) --directory=$(objtree) \ --output $(obj)/config.pot $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot - $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \ - $(srctree)/arch/*/um/Kconfig`; \ + $(Q)ln -fs Kconfig.x86 arch/um/Kconfig + $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \ do \ echo " GEN $$i"; \ $(obj)/kxgettext $$i \ @@ -69,6 +69,7 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h done ) $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \ --output $(obj)/linux.pot + $(Q)rm -f $(srctree)/arch/um/Kconfig $(Q)rm -f $(obj)/config.pot PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig diff --git a/trunk/virt/kvm/assigned-dev.c b/trunk/virt/kvm/assigned-dev.c index 758e3b36d4cf..3ad0925d23a9 100644 --- a/trunk/virt/kvm/assigned-dev.c +++ b/trunk/virt/kvm/assigned-dev.c @@ -17,8 +17,6 @@ #include #include #include -#include -#include #include "irq.h" static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, @@ -482,76 +480,12 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, return r; } -/* - * We want to test whether the caller has been granted permissions to - * use this device. To be able to configure and control the device, - * the user needs access to PCI configuration space and BAR resources. - * These are accessed through PCI sysfs. PCI config space is often - * passed to the process calling this ioctl via file descriptor, so we - * can't rely on access to that file. We can check for permissions - * on each of the BAR resource files, which is a pretty clear - * indicator that the user has been granted access to the device. 
- */ -static int probe_sysfs_permissions(struct pci_dev *dev) -{ -#ifdef CONFIG_SYSFS - int i; - bool bar_found = false; - - for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) { - char *kpath, *syspath; - struct path path; - struct inode *inode; - int r; - - if (!pci_resource_len(dev, i)) - continue; - - kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); - if (!kpath) - return -ENOMEM; - - /* Per sysfs-rules, sysfs is always at /sys */ - syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i); - kfree(kpath); - if (!syspath) - return -ENOMEM; - - r = kern_path(syspath, LOOKUP_FOLLOW, &path); - kfree(syspath); - if (r) - return r; - - inode = path.dentry->d_inode; - - r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS); - path_put(&path); - if (r) - return r; - - bar_found = true; - } - - /* If no resources, probably something special */ - if (!bar_found) - return -EPERM; - - return 0; -#else - return -EINVAL; /* No way to control the device without sysfs */ -#endif -} - static int kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev) { int r = 0, idx; struct kvm_assigned_dev_kernel *match; struct pci_dev *dev; - u8 header_type; - - if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) - return -EINVAL; mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); @@ -579,18 +513,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, r = -EINVAL; goto out_free; } - - /* Don't allow bridges to be assigned */ - pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) { - r = -EPERM; - goto out_put; - } - - r = probe_sysfs_permissions(dev); - if (r) - goto out_put; - if (pci_enable_device(dev)) { printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); r = -EBUSY; @@ -622,14 +544,16 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, list_add(&match->list, &kvm->arch.assigned_dev_head); - if (!kvm->arch.iommu_domain) { - r = kvm_iommu_map_guest(kvm); + if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { + if (!kvm->arch.iommu_domain) { + r = kvm_iommu_map_guest(kvm); + if (r) + goto out_list_del; + } + r = kvm_assign_device(kvm, match); if (r) goto out_list_del; } - r = kvm_assign_device(kvm, match); - if (r) - goto out_list_del; out: srcu_read_unlock(&kvm->srcu, idx); @@ -669,7 +593,8 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, goto out; } - kvm_deassign_device(kvm, match); + if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) + kvm_deassign_device(kvm, match); kvm_free_assigned_device(kvm, match);
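The api.txt, kvm.h and x86.c hunks above drop the KVM_CAP_TSC_DEADLINE_TIMER capability that userspace was told to probe with KVM_CHECK_EXTENSION. For context, a minimal sketch of such a capability probe against /dev/kvm; the fallback #define mirrors the value removed from kvm.h in this patch, and error handling is trimmed.

/* Probe a KVM capability from userspace via KVM_CHECK_EXTENSION on /dev/kvm.
 * KVM_CHECK_EXTENSION returns 0 if the capability is absent, >0 otherwise.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_TSC_DEADLINE_TIMER
#define KVM_CAP_TSC_DEADLINE_TIMER 72	/* value removed from kvm.h above */
#endif

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER);
	printf("KVM_CAP_TSC_DEADLINE_TIMER: %s\n",
	       ret > 0 ? "supported" : "not supported");

	close(kvm);
	return 0;
}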