diff --git a/[refs] b/[refs] index df32cc74eeb6..2af02459ff20 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 3b87487ac5008072f138953b07505a7e3493327f +refs/heads/master: baaf939db44d08ef910558ff5eb16d123df3f621 diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt index e2a4b5287361..7945b0bd35e2 100644 --- a/trunk/Documentation/virtual/kvm/api.txt +++ b/trunk/Documentation/virtual/kvm/api.txt @@ -1100,15 +1100,6 @@ emulate them efficiently. The fields in each entry are defined as follows: eax, ebx, ecx, edx: the values returned by the cpuid instruction for this function/index combination -The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned -as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC -support. Instead it is reported via - - ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER) - -if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the -feature in userspace, then you can enable the feature for KVM_SET_CPUID2. - 4.47 KVM_PPC_GET_PVINFO Capability: KVM_CAP_PPC_GET_PVINFO @@ -1160,13 +1151,6 @@ following flags are specified: /* Depends on KVM_CAP_IOMMU */ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) -The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure -isolation of the device. Usages not specifying this flag are deprecated. - -Only PCI header type 0 devices with PCI BAR resources are supported by -device assignment. The user requesting this ioctl must have read/write -access to the PCI sysfs resource files associated with the device. - 4.49 KVM_DEASSIGN_PCI_DEVICE Capability: KVM_CAP_DEVICE_DEASSIGNMENT diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 0e7a80aefa0c..6afba60c3904 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2700,7 +2700,7 @@ FIREWIRE SUBSYSTEM M: Stefan Richter L: linux1394-devel@lists.sourceforge.net W: http://ieee1394.wiki.kernel.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git S: Maintained F: drivers/firewire/ F: include/linux/firewire*.h diff --git a/trunk/arch/arm/oprofile/common.c b/trunk/arch/arm/oprofile/common.c index 4e0a371630b3..c074e66ad224 100644 --- a/trunk/arch/arm/oprofile/common.c +++ b/trunk/arch/arm/oprofile/common.c @@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); } diff --git a/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h index dac4760c0f0a..95509d8eb140 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h +++ b/trunk/arch/arm/plat-samsung/include/plat/cpu-freq-core.h @@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls, extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void); extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void); -extern void s3c2410_iotiming_debugfs(struct seq_file *seq, - struct s3c_cpufreq_config *cfg, - union s3c_iobank *iob); - -extern void s3c2412_iotiming_debugfs(struct seq_file *seq, - struct s3c_cpufreq_config *cfg, - union s3c_iobank *iob); - #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS #define s3c_cpufreq_debugfs_call(x) x #else @@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg); extern void s3c2410_set_fvco(struct 
s3c_cpufreq_config *cfg); #ifdef CONFIG_S3C2410_IOTIMING +extern void s3c2410_iotiming_debugfs(struct seq_file *seq, + struct s3c_cpufreq_config *cfg, + union s3c_iobank *iob); + extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot); @@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg, extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot); #else +#define s3c2410_iotiming_debugfs NULL #define s3c2410_iotiming_calc NULL #define s3c2410_iotiming_get NULL #define s3c2410_iotiming_set NULL @@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, /* S3C2412 compatible routines */ -extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, - struct s3c_iotimings *timings); +#ifdef CONFIG_S3C2412_IOTIMING +extern void s3c2412_iotiming_debugfs(struct seq_file *seq, + struct s3c_cpufreq_config *cfg, + union s3c_iobank *iob); extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *timings); @@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg, extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot); +#else +#define s3c2412_iotiming_debugfs NULL +#define s3c2412_iotiming_calc NULL +#define s3c2412_iotiming_get NULL +#define s3c2412_iotiming_set NULL +#endif /* CONFIG_S3C2412_IOTIMING */ #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG #define s3c_freq_dbg(x...) printk(KERN_INFO x) diff --git a/trunk/arch/ia64/include/asm/cputime.h b/trunk/arch/ia64/include/asm/cputime.h index 5a274af31b2b..6073b187528a 100644 --- a/trunk/arch/ia64/include/asm/cputime.h +++ b/trunk/arch/ia64/include/asm/cputime.h @@ -60,7 +60,6 @@ typedef u64 cputime64_t; */ #define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) #define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) -#define usecs_to_cputime64(__usecs) usecs_to_cputime(__usecs) /* * Convert cputime <-> seconds diff --git a/trunk/arch/powerpc/include/asm/cputime.h b/trunk/arch/powerpc/include/asm/cputime.h index 98b7c4b49c9d..1cf20bdfbeca 100644 --- a/trunk/arch/powerpc/include/asm/cputime.h +++ b/trunk/arch/powerpc/include/asm/cputime.h @@ -150,8 +150,6 @@ static inline cputime_t usecs_to_cputime(const unsigned long us) return ct; } -#define usecs_to_cputime64(us) usecs_to_cputime(us) - /* * Convert cputime <-> seconds */ diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s.h b/trunk/arch/powerpc/include/asm/kvm_book3s.h index 69c7377d2071..d4df013ad779 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s.h @@ -381,6 +381,39 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) } #endif +static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, + unsigned long pte_index) +{ + unsigned long rb, va_low; + + rb = (v & ~0x7fUL) << 16; /* AVA field */ + va_low = pte_index >> 3; + if (v & HPTE_V_SECONDARY) + va_low = ~va_low; + /* xor vsid from AVA */ + if (!(v & HPTE_V_1TB_SEG)) + va_low ^= v >> 12; + else + va_low ^= v >> 24; + va_low &= 0x7ff; + if (v & HPTE_V_LARGE) { + rb |= 1; /* L field */ + if (cpu_has_feature(CPU_FTR_ARCH_206) && + (r & 0xff000)) { + /* non-16MB large page, must be 64k */ + /* (masks depend on page size) */ + rb |= 0x1000; /* page encoding in LP field */ + rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ + rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ + } + } else { + /* 4kB page */ + rb |= (va_low & 
0x7ff) << 12; /* remaining 11b of VA */ + } + rb |= (v >> 54) & 0x300; /* B field */ + return rb; +} + /* Magic register values loaded into r3 and r4 before the 'sc' assembly * instruction for the OSI hypercalls */ #define OSI_SC_MAGIC_R3 0x113724FA diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h index d0ac94f98f9e..e43fe42b9875 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s_64.h @@ -29,37 +29,4 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) #define SPAPR_TCE_SHIFT 12 -static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, - unsigned long pte_index) -{ - unsigned long rb, va_low; - - rb = (v & ~0x7fUL) << 16; /* AVA field */ - va_low = pte_index >> 3; - if (v & HPTE_V_SECONDARY) - va_low = ~va_low; - /* xor vsid from AVA */ - if (!(v & HPTE_V_1TB_SEG)) - va_low ^= v >> 12; - else - va_low ^= v >> 24; - va_low &= 0x7ff; - if (v & HPTE_V_LARGE) { - rb |= 1; /* L field */ - if (cpu_has_feature(CPU_FTR_ARCH_206) && - (r & 0xff000)) { - /* non-16MB large page, must be 64k */ - /* (masks depend on page size) */ - rb |= 0x1000; /* page encoding in LP field */ - rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ - rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ - } - } else { - /* 4kB page */ - rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ - } - rb |= (v >> 54) & 0x300; /* B field */ - return rb; -} - #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/trunk/arch/powerpc/kvm/book3s_hv.c b/trunk/arch/powerpc/kvm/book3s_hv.c index 336983da9e72..0cb137a9b038 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv.c +++ b/trunk/arch/powerpc/kvm/book3s_hv.c @@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) tpaca->kvm_hstate.napping = 0; vcpu->cpu = vc->pcpu; smp_wmb(); -#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) +#ifdef CONFIG_PPC_ICP_NATIVE if (vcpu->arch.ptid) { tpaca->cpu_start = 0x80; wmb(); diff --git a/trunk/arch/powerpc/kvm/book3s_pr.c b/trunk/arch/powerpc/kvm/book3s_pr.c index e2cfb9e1e20e..3c791e1eb675 100644 --- a/trunk/arch/powerpc/kvm/book3s_pr.c +++ b/trunk/arch/powerpc/kvm/book3s_pr.c @@ -658,12 +658,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; -#ifdef CONFIG_KVM_BOOK3S_64_PR if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; } -#endif run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { diff --git a/trunk/arch/powerpc/kvm/e500.c b/trunk/arch/powerpc/kvm/e500.c index 8c0d45a6faf7..26d20903f2bc 100644 --- a/trunk/arch/powerpc/kvm/e500.c +++ b/trunk/arch/powerpc/kvm/e500.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/s390/include/asm/cputime.h b/trunk/arch/s390/include/asm/cputime.h index b9acaaa175d8..081434878296 100644 --- a/trunk/arch/s390/include/asm/cputime.h +++ b/trunk/arch/s390/include/asm/cputime.h @@ -87,8 +87,6 @@ usecs_to_cputime(const unsigned int m) return (cputime_t) m * 4096; } -#define usecs_to_cputime64(m) usecs_to_cputime(m) - /* * Convert cputime to milliseconds and back. 
*/ diff --git a/trunk/arch/sh/oprofile/common.c b/trunk/arch/sh/oprofile/common.c index e4dd5d5a1115..b4c2d2b946dd 100644 --- a/trunk/arch/sh/oprofile/common.c +++ b/trunk/arch/sh/oprofile/common.c @@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); kfree(sh_pmu_op_name); @@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = sh_backtrace; return -ENODEV; } -void oprofile_arch_exit(void) {} +void __exit oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 121f1be4da19..8d601b18bf9f 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -1169,7 +1169,7 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, */ c = &unconstrained; } else if (intel_try_alt_er(event, orig_idx)) { - raw_spin_unlock_irqrestore(&era->lock, flags); + raw_spin_unlock(&era->lock); goto again; } raw_spin_unlock_irqrestore(&era->lock, flags); diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c index 405f2620392f..76e3f1cd0369 100644 --- a/trunk/arch/x86/kvm/i8254.c +++ b/trunk/arch/x86/kvm/i8254.c @@ -338,15 +338,11 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) +static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) { - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; struct kvm_timer *pt = &ps->pit_timer; s64 interval; - if (!irqchip_in_kernel(kvm)) - return; - interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); @@ -398,13 +394,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) /* FIXME: enhance mode 4 precision */ case 4: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { - create_pit_timer(kvm, val, 0); + create_pit_timer(ps, val, 0); } break; case 2: case 3: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ - create_pit_timer(kvm, val, 1); + create_pit_timer(ps, val, 1); } break; default: diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 4c938da2ba00..c38efd7b792e 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -602,6 +602,7 @@ static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct kvm_lapic *apic = vcpu->arch.apic; + u32 timer_mode_mask; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) @@ -614,12 +615,15 @@ static void update_cpuid(struct kvm_vcpu *vcpu) best->ecx |= bit(X86_FEATURE_OSXSAVE); } - if (apic) { - if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER)) - apic->lapic_timer.timer_mode_mask = 3 << 17; - else - apic->lapic_timer.timer_mode_mask = 1 << 17; - } + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + best->function == 0x1) { + best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER); + timer_mode_mask = 3 << 17; + } else + timer_mode_mask = 1 << 17; + + if (apic) + apic->lapic_timer.timer_mode_mask = timer_mode_mask; } int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) @@ -2131,9 +2135,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; - case KVM_CAP_TSC_DEADLINE_TIMER: - r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); - break; 
default: r = 0; break; diff --git a/trunk/block/blk-map.c b/trunk/block/blk-map.c index 623e1cd4cffe..164cd0059706 100644 --- a/trunk/block/blk-map.c +++ b/trunk/block/blk-map.c @@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (IS_ERR(bio)) return PTR_ERR(bio); - if (!reading) + if (rq_data_dir(rq) == WRITE) bio->bi_rw |= REQ_WRITE; if (do_copy) diff --git a/trunk/block/blk-tag.c b/trunk/block/blk-tag.c index 4af6f5cc1167..e74d6d13838f 100644 --- a/trunk/block/blk-tag.c +++ b/trunk/block/blk-tag.c @@ -282,9 +282,18 @@ EXPORT_SYMBOL(blk_queue_resize_tags); void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; - unsigned tag = rq->tag; /* negative tags invalid */ + int tag = rq->tag; - BUG_ON(tag >= bqt->real_max_depth); + BUG_ON(tag == -1); + + if (unlikely(tag >= bqt->max_depth)) { + /* + * This can happen after tag depth has been reduced. + * But tag shouldn't be larger than real_max_depth. + */ + WARN_ON(tag >= bqt->real_max_depth); + return; + } list_del_init(&rq->queuelist); rq->cmd_flags &= ~REQ_QUEUED; diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3548705b04e4..4c12869fcf77 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1655,8 +1655,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { struct cfq_queue *cfqq = RQ_CFQQ(rq); - struct cfq_data *cfqd = q->elevator->elevator_data; - /* * reposition in fifo if next is older than rq */ @@ -1671,16 +1669,6 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, cfq_remove_request(next); cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next), rq_is_sync(next)); - - cfqq = RQ_CFQQ(next); - /* - * all requests of this queue are merged to other queues, delete it - * from the service tree. If it's the active_queue, - * cfq_dispatch_requests() will choose to expire it or do idle - */ - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && - cfqq != cfqd->active_queue) - cfq_del_cfqq_rr(cfqd, cfqq); } static int cfq_allow_merge(struct request_queue *q, struct request *rq, diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b9da8900ae4e..c681dc149d2a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -756,9 +756,9 @@ intel_enable_semaphores(struct drm_device *dev) if (i915_semaphores >= 0) return i915_semaphores; - /* Disable semaphores on SNB */ + /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6) - return 0; + return !intel_iommu_enabled; return 1; } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index daa5743ccbd6..d809b038ca88 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -7922,11 +7922,13 @@ static bool intel_enable_rc6(struct drm_device *dev) return 0; /* - * Disable rc6 on Sandybridge + * Enable rc6 on Sandybridge if DMA remapping is disabled */ if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); - return 0; + DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", + intel_iommu_enabled ? "true" : "false", + !intel_iommu_enabled ? 
"en" : "dis"); + return !intel_iommu_enabled; } DRM_DEBUG_DRIVER("RC6 enabled\n"); return 1; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 92c9628c572d..5e00d1670aa9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -3276,18 +3276,6 @@ int evergreen_init(struct radeon_device *rdev) rdev->accel_working = false; } } - - /* Don't start up if the MC ucode is missing on BTC parts. - * The default clocks and voltages before the MC ucode - * is loaded are not suffient for advanced operations. - */ - if (ASIC_IS_DCE5(rdev)) { - if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { - DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; - } - } - return 0; } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f94b33ae2215..8aa1dbb45c67 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1093,6 +1093,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; struct ttm_base_object *user_obj; + u64 required_size; int ret; /** @@ -1101,9 +1102,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, * requested framebuffer. */ - if (!vmw_kms_validate_mode_vram(dev_priv, - mode_cmd->pitch, - mode_cmd->height)) { + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/iommu/iommu.c b/trunk/drivers/iommu/iommu.c index 5b5fa5cdaa31..2fb2963df553 100644 --- a/trunk/drivers/iommu/iommu.c +++ b/trunk/drivers/iommu/iommu.c @@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) if (bus == NULL || bus->iommu_ops == NULL) return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); + domain = kmalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; diff --git a/trunk/drivers/media/video/gspca/gspca.c b/trunk/drivers/media/video/gspca/gspca.c index 512f32ff446a..881e04c7ffe6 100644 --- a/trunk/drivers/media/video/gspca/gspca.c +++ b/trunk/drivers/media/video/gspca/gspca.c @@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) gspca_dev->usb_err = 0; /* do the specific subdriver stuff before endpoint selection */ - intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); - gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0; + gspca_dev->alt = 0; if (gspca_dev->sd_desc->isoc_init) { ret = gspca_dev->sd_desc->isoc_init(gspca_dev); if (ret < 0) goto unlock; } + intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); xfer = gspca_dev->cam.bulk ? 
USB_ENDPOINT_XFER_BULK : USB_ENDPOINT_XFER_ISOC; diff --git a/trunk/drivers/watchdog/coh901327_wdt.c b/trunk/drivers/watchdog/coh901327_wdt.c index 5b89f7d6cd0f..03f449a430d2 100644 --- a/trunk/drivers/watchdog/coh901327_wdt.c +++ b/trunk/drivers/watchdog/coh901327_wdt.c @@ -76,6 +76,8 @@ static int irq; static void __iomem *virtbase; static unsigned long coh901327_users; static unsigned long boot_status; +static u16 wdogenablestore; +static u16 irqmaskstore; static struct device *parent; /* @@ -459,10 +461,6 @@ static int __init coh901327_probe(struct platform_device *pdev) } #ifdef CONFIG_PM - -static u16 wdogenablestore; -static u16 irqmaskstore; - static int coh901327_suspend(struct platform_device *pdev, pm_message_t state) { irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U; diff --git a/trunk/drivers/watchdog/hpwdt.c b/trunk/drivers/watchdog/hpwdt.c index 8464ea1c36a1..3774c9b8dac9 100644 --- a/trunk/drivers/watchdog/hpwdt.c +++ b/trunk/drivers/watchdog/hpwdt.c @@ -231,7 +231,6 @@ static int __devinit cru_detect(unsigned long map_entry, cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; - set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE)); asminline_call(&cmn_regs, bios32_entrypoint); if (cmn_regs.u1.ral != 0) { @@ -249,10 +248,8 @@ static int __devinit cru_detect(unsigned long map_entry, if ((physical_bios_base + physical_bios_offset)) { cru_rom_addr = ioremap(cru_physical_address, cru_length); - if (cru_rom_addr) { - set_memory_x((unsigned long)cru_rom_addr, cru_length); + if (cru_rom_addr) retval = 0; - } } printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n", diff --git a/trunk/drivers/watchdog/iTCO_wdt.c b/trunk/drivers/watchdog/iTCO_wdt.c index 99796c5d913d..ba6ad662635a 100644 --- a/trunk/drivers/watchdog/iTCO_wdt.c +++ b/trunk/drivers/watchdog/iTCO_wdt.c @@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -static int turn_SMI_watchdog_clear_off = 1; +static int turn_SMI_watchdog_clear_off = 0; module_param(turn_SMI_watchdog_clear_off, int, 0); MODULE_PARM_DESC(turn_SMI_watchdog_clear_off, - "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)"); + "Turn off SMI clearing watchdog (default=0)"); /* * Some TCO specific functions @@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, ret = -EIO; goto out_unmap; } - if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) { + if (turn_SMI_watchdog_clear_off) { /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ val32 = inl(SMI_EN); val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ diff --git a/trunk/drivers/watchdog/sp805_wdt.c b/trunk/drivers/watchdog/sp805_wdt.c index bfaf9bb1ee0d..cc2cfbe33b30 100644 --- a/trunk/drivers/watchdog/sp805_wdt.c +++ b/trunk/drivers/watchdog/sp805_wdt.c @@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev) return 0; } -static struct amba_id sp805_wdt_ids[] = { +static struct amba_id sp805_wdt_ids[] __initdata = { { .id = 0x00141805, .mask = 0x00ffffff, diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 637694bf3a03..3b0d05dcd7c1 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -1205,8 +1205,6 @@ int __break_lease(struct inode *inode, unsigned int mode) int want_write = (mode & O_ACCMODE) != O_RDONLY; new_fl = lease_alloc(NULL, want_write ? 
F_WRLCK : F_RDLCK); - if (IS_ERR(new_fl)) - return PTR_ERR(new_fl); lock_flocks(); @@ -1223,6 +1221,12 @@ int __break_lease(struct inode *inode, unsigned int mode) if (fl->fl_owner == current->files) i_have_this_lease = 1; + if (IS_ERR(new_fl) && !i_have_this_lease + && ((mode & O_NONBLOCK) == 0)) { + error = PTR_ERR(new_fl); + goto out; + } + break_time = 0; if (lease_break_time > 0) { break_time = jiffies + lease_break_time * HZ; @@ -1280,7 +1284,8 @@ int __break_lease(struct inode *inode, unsigned int mode) out: unlock_flocks(); - locks_free_lock(new_fl); + if (!IS_ERR(new_fl)) + locks_free_lock(new_fl); return error; } diff --git a/trunk/fs/proc/stat.c b/trunk/fs/proc/stat.c index 0855e6f20391..2a30d67dd6b8 100644 --- a/trunk/fs/proc/stat.c +++ b/trunk/fs/proc/stat.c @@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu) idle = kstat_cpu(cpu).cpustat.idle; idle = cputime64_add(idle, arch_idle_time(cpu)); } else - idle = usecs_to_cputime64(idle_time); + idle = nsecs_to_jiffies64(1000 * idle_time); return idle; } @@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu) /* !NO_HZ so we can rely on cpustat.iowait */ iowait = kstat_cpu(cpu).cpustat.iowait; else - iowait = usecs_to_cputime64(iowait_time); + iowait = nsecs_to_jiffies64(1000 * iowait_time); return iowait; } diff --git a/trunk/fs/xfs/xfs_super.c b/trunk/fs/xfs/xfs_super.c index 8a899496fd5f..3eca58f51ae9 100644 --- a/trunk/fs/xfs/xfs_super.c +++ b/trunk/fs/xfs/xfs_super.c @@ -868,6 +868,27 @@ xfs_fs_dirty_inode( XFS_I(inode)->i_update_core = 1; } +STATIC int +xfs_log_inode( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return xfs_trans_commit(tp, 0); +} + STATIC int xfs_fs_write_inode( struct inode *inode, @@ -881,8 +902,10 @@ xfs_fs_write_inode( if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); + if (!ip->i_update_core) + return 0; - if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { + if (wbc->sync_mode == WB_SYNC_ALL) { /* * Make sure the inode has made it it into the log. Instead * of forcing it all the way to stable storage using a @@ -890,14 +913,11 @@ xfs_fs_write_inode( * ->sync_fs call do that for thus, which reduces the number * of synchronous log forces dramatically. */ - error = xfs_log_dirty_inode(ip, NULL, 0); + error = xfs_log_inode(ip); if (error) goto out; return 0; } else { - if (!ip->i_update_core) - return 0; - /* * We make this non-blocking if the inode is contended, return * EAGAIN to indicate to the caller that they did not succeed. 
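Note on the xfs_super.c hunk above: the xfs_log_inode() helper reintroduced there is the usual XFS "log only the inode core" transaction. A commented restatement of exactly that helper (same calls, same error handling, no identifiers beyond what the hunk itself uses), for readers skimming the diff:

    STATIC int
    xfs_log_inode(struct xfs_inode *ip)
    {
    	struct xfs_mount	*mp = ip->i_mount;
    	struct xfs_trans	*tp;
    	int			error;

    	/* allocate a timestamp/fsync-class transaction ... */
    	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
    	/* ... and reserve log space for it */
    	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
    	if (error) {
    		/* no reservation: back the transaction out */
    		xfs_trans_cancel(tp, 0);
    		return error;
    	}

    	/* take the inode lock and attach the inode to the transaction */
    	xfs_ilock(ip, XFS_ILOCK_EXCL);
    	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
    	/* dirty only the in-core inode fields in the log ... */
    	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
    	/* ... and commit; xfs_fs_write_inode() calls this for WB_SYNC_ALL */
    	return xfs_trans_commit(tp, 0);
    }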
diff --git a/trunk/fs/xfs/xfs_sync.c b/trunk/fs/xfs/xfs_sync.c index f0994aedcd15..be5c51d8f757 100644 --- a/trunk/fs/xfs/xfs_sync.c +++ b/trunk/fs/xfs/xfs_sync.c @@ -336,32 +336,6 @@ xfs_sync_fsdata( return error; } -int -xfs_log_dirty_inode( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - if (!ip->i_update_core) - return 0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp, 0); -} - /* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute. This first phase is syncing the data before we @@ -385,16 +359,6 @@ xfs_quiesce_data( { int error, error2 = 0; - /* - * Log all pending size and timestamp updates. The vfs writeback - * code is supposed to do this, but due to its overagressive - * livelock detection it will skip inodes where appending writes - * were written out in the first non-blocking sync phase if their - * completion took long enough that it happened after taking the - * timestamp for the cut-off in the blocking phase. - */ - xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); - xfs_qm_sync(mp, SYNC_TRYLOCK); xfs_qm_sync(mp, SYNC_WAIT); diff --git a/trunk/fs/xfs/xfs_sync.h b/trunk/fs/xfs/xfs_sync.h index fa965479d788..941202e7ac6e 100644 --- a/trunk/fs/xfs/xfs_sync.h +++ b/trunk/fs/xfs/xfs_sync.h @@ -34,8 +34,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); -int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags); - int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); int xfs_reclaim_inodes_count(struct xfs_mount *mp); void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); diff --git a/trunk/include/asm-generic/cputime.h b/trunk/include/asm-generic/cputime.h index 12a1764f612b..62ce6823c0f2 100644 --- a/trunk/include/asm-generic/cputime.h +++ b/trunk/include/asm-generic/cputime.h @@ -40,7 +40,6 @@ typedef u64 cputime64_t; */ #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) -#define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000) /* * Convert cputime to seconds and back. diff --git a/trunk/include/linux/kvm.h b/trunk/include/linux/kvm.h index 68e67e50d028..c3892fc1d538 100644 --- a/trunk/include/linux/kvm.h +++ b/trunk/include/linux/kvm.h @@ -557,7 +557,6 @@ struct kvm_ppc_pvinfo { #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ #define KVM_CAP_PPC_PAPR 68 #define KVM_CAP_S390_GMAP 71 -#define KVM_CAP_TSC_DEADLINE_TIMER 72 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/trunk/kernel/time/clockevents.c b/trunk/kernel/time/clockevents.c index 1ecd6ba36d6c..c4eb71c8b2ea 100644 --- a/trunk/kernel/time/clockevents.c +++ b/trunk/kernel/time/clockevents.c @@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old, * released list and do a notify add later. 
*/ if (old) { + old->event_handler = clockevents_handle_noop; clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); list_del(&old->list); list_add(&old->list, &clockevents_released); diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index 2316840b337a..73f17c0293c0 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -901,6 +901,7 @@ static int gather_surplus_pages(struct hstate *h, int delta) h->resv_huge_pages += delta; ret = 0; + spin_unlock(&hugetlb_lock); /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { if ((--needed) < 0) @@ -914,7 +915,6 @@ static int gather_surplus_pages(struct hstate *h, int delta) VM_BUG_ON(page_count(page)); enqueue_huge_page(h, page); } - spin_unlock(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ free: diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index c3fdbcb17658..adc395481813 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; - pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; @@ -644,21 +643,13 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (!vma || vma->vm_start > start) return -EFAULT; - if (start > vma->vm_start) - prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); - if (mpol_equal(vma_policy(vma), new_pol)) - continue; - - pgoff = vma->vm_pgoff + - ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, + vma->anon_vma, vma->vm_file, vma->vm_pgoff, new_pol); if (prev) { vma = prev; diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index b6977776d715..ef21b221f036 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -1358,15 +1358,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); - spin_unlock_bh(&nf_conntrack_lock); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { - spin_lock_bh(&nf_conntrack_lock); err = -EOPNOTSUPP; goto err1; } - spin_lock_bh(&nf_conntrack_lock); rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), @@ -1872,30 +1869,25 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, err = -ENOMEM; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb2 == NULL) { - nf_ct_expect_put(exp); + if (skb2 == NULL) goto out; - } rcu_read_lock(); err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp); rcu_read_unlock(); - nf_ct_expect_put(exp); if (err <= 0) goto free; - err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); - if (err < 0) - goto out; + nf_ct_expect_put(exp); - return 0; + return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); free: kfree_skb(skb2); out: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + nf_ct_expect_put(exp); + return err; } static int diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index d9d4970b9b07..3891702b81df 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -2448,12 +2448,8 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc { struct packet_sock *po = pkt_sk(sk); - if (po->fanout) { - if (dev) - dev_put(dev); - + if (po->fanout) return -EINVAL; - } lock_sock(sk); diff --git a/trunk/net/sched/sch_netem.c b/trunk/net/sched/sch_netem.c index a4ab207cdc59..eb3b9a86c6ed 100644 --- a/trunk/net/sched/sch_netem.c +++ b/trunk/net/sched/sch_netem.c @@ -488,7 +488,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) return -EINVAL; s = sizeof(struct disttable) + n * sizeof(s16); - d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); + d = kmalloc(s, GFP_KERNEL); if (!d) d = vmalloc(s); if (!d) @@ -501,10 +501,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); - swap(q->delay_dist, d); + dist_free(q->delay_dist); + q->delay_dist = d; spin_unlock_bh(root_lock); - - dist_free(d); return 0; } diff --git a/trunk/virt/kvm/assigned-dev.c b/trunk/virt/kvm/assigned-dev.c index 758e3b36d4cf..3ad0925d23a9 100644 --- a/trunk/virt/kvm/assigned-dev.c +++ b/trunk/virt/kvm/assigned-dev.c @@ -17,8 +17,6 @@ #include #include #include -#include -#include #include "irq.h" static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, @@ -482,76 +480,12 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, return r; } -/* - * We want to test whether the caller has been granted permissions to - * use this device. To be able to configure and control the device, - * the user needs access to PCI configuration space and BAR resources. - * These are accessed through PCI sysfs. PCI config space is often - * passed to the process calling this ioctl via file descriptor, so we - * can't rely on access to that file. We can check for permissions - * on each of the BAR resource files, which is a pretty clear - * indicator that the user has been granted access to the device. 
- */ -static int probe_sysfs_permissions(struct pci_dev *dev) -{ -#ifdef CONFIG_SYSFS - int i; - bool bar_found = false; - - for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) { - char *kpath, *syspath; - struct path path; - struct inode *inode; - int r; - - if (!pci_resource_len(dev, i)) - continue; - - kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); - if (!kpath) - return -ENOMEM; - - /* Per sysfs-rules, sysfs is always at /sys */ - syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i); - kfree(kpath); - if (!syspath) - return -ENOMEM; - - r = kern_path(syspath, LOOKUP_FOLLOW, &path); - kfree(syspath); - if (r) - return r; - - inode = path.dentry->d_inode; - - r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS); - path_put(&path); - if (r) - return r; - - bar_found = true; - } - - /* If no resources, probably something special */ - if (!bar_found) - return -EPERM; - - return 0; -#else - return -EINVAL; /* No way to control the device without sysfs */ -#endif -} - static int kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_pci_dev *assigned_dev) { int r = 0, idx; struct kvm_assigned_dev_kernel *match; struct pci_dev *dev; - u8 header_type; - - if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) - return -EINVAL; mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); @@ -579,18 +513,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, r = -EINVAL; goto out_free; } - - /* Don't allow bridges to be assigned */ - pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) { - r = -EPERM; - goto out_put; - } - - r = probe_sysfs_permissions(dev); - if (r) - goto out_put; - if (pci_enable_device(dev)) { printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); r = -EBUSY; @@ -622,14 +544,16 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, list_add(&match->list, &kvm->arch.assigned_dev_head); - if (!kvm->arch.iommu_domain) { - r = kvm_iommu_map_guest(kvm); + if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { + if (!kvm->arch.iommu_domain) { + r = kvm_iommu_map_guest(kvm); + if (r) + goto out_list_del; + } + r = kvm_assign_device(kvm, match); if (r) goto out_list_del; } - r = kvm_assign_device(kvm, match); - if (r) - goto out_list_del; out: srcu_read_unlock(&kvm->srcu, idx); @@ -669,7 +593,8 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, goto out; } - kvm_deassign_device(kvm, match); + if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) + kvm_deassign_device(kvm, match); kvm_free_assigned_device(kvm, match);
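For context on the assigned-device hunks above: with KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0, see the api.txt hunk at the top of this diff) no longer mandatory, kvm_vm_ioctl_assign_device() maps the guest through the IOMMU only when userspace asks for it. A rough userspace sketch of requesting assignment with that flag set — the struct layout and ioctl name are taken from include/linux/kvm.h of this kernel generation, and the bus/devfn values are placeholders, not from this patch:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vm_fd: a VM file descriptor obtained from KVM_CREATE_VM */
    static int assign_pci_device(int vm_fd)
    {
    	struct kvm_assigned_pci_dev dev = {
    		.assigned_dev_id = 1,			/* caller-chosen handle */
    		.busnr		 = 0x01,		/* PCI bus 0x01 */
    		.devfn		 = (0x00 << 3) | 0x0,	/* slot 0, function 0 */
    		.flags		 = KVM_DEV_ASSIGN_ENABLE_IOMMU,
    	};

    	/* with the hunk above, kvm_iommu_map_guest()/kvm_assign_device()
    	 * are invoked only because this flag is set */
    	return ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
    }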