diff --git a/[refs] b/[refs] index d0caf46a9b1e..a18b304199e7 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 909ccdb4cf9b2c3bfb803392e93c6195d2e68799 +refs/heads/master: 1c010ff8912cbc08d80e865aab9c32b6b00c527d diff --git a/trunk/Documentation/ABI/testing/ima_policy b/trunk/Documentation/ABI/testing/ima_policy index 6cd6daefaaed..6434f0df012e 100644 --- a/trunk/Documentation/ABI/testing/ima_policy +++ b/trunk/Documentation/ABI/testing/ima_policy @@ -20,7 +20,7 @@ Description: lsm: [[subj_user=] [subj_role=] [subj_type=] [obj_user=] [obj_role=] [obj_type=]] - base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK] + base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION] mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] fsmagic:= hex value uid:= decimal value @@ -40,11 +40,11 @@ Description: measure func=BPRM_CHECK measure func=FILE_MMAP mask=MAY_EXEC - measure func=FILE_CHECK mask=MAY_READ uid=0 + measure func=INODE_PERM mask=MAY_READ uid=0 The default policy measures all executables in bprm_check, all files mmapped executable in file_mmap, and all files - open for read by root in do_filp_open. + open for read by root in inode_permission. Examples of LSM specific definitions: @@ -54,8 +54,8 @@ Description: dont_measure obj_type=var_log_t dont_measure obj_type=auditd_log_t - measure subj_user=system_u func=FILE_CHECK mask=MAY_READ - measure subj_role=system_r func=FILE_CHECK mask=MAY_READ + measure subj_user=system_u func=INODE_PERM mask=MAY_READ + measure subj_role=system_r func=INODE_PERM mask=MAY_READ Smack: - measure subj_user=_ func=FILE_CHECK mask=MAY_READ + measure subj_user=_ func=INODE_PERM mask=MAY_READ diff --git a/trunk/Documentation/cpu-freq/governors.txt b/trunk/Documentation/cpu-freq/governors.txt index 737988fca64d..aed082f49d09 100644 --- a/trunk/Documentation/cpu-freq/governors.txt +++ b/trunk/Documentation/cpu-freq/governors.txt @@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT. up_threshold: defines what the average CPU usage between the samplings of 'sampling_rate' needs to be for the kernel to make a decision on whether it should increase the frequency. For example when it is set -to its default value of '95' it means that between the checking -intervals the CPU needs to be on average more than 95% in use to then +to its default value of '80' it means that between the checking +intervals the CPU needs to be on average more than 80% in use to then decide that the CPU frequency needs to be increased. ignore_nice_load: this parameter takes a value of '0' or '1'. 
When diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 602022d2c7a5..03f38c18f323 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3836,7 +3836,6 @@ NETWORKING DRIVERS L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git S: Odd Fixes F: drivers/net/ F: include/linux/if_* diff --git a/trunk/Makefile b/trunk/Makefile index f8e02e9491d0..394aec712c7d 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 33 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc6 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* diff --git a/trunk/arch/avr32/mach-at32ap/at32ap700x.c b/trunk/arch/avr32/mach-at32ap/at32ap700x.c index b13d1879e51b..1aa1ea5e9212 100644 --- a/trunk/arch/avr32/mach-at32ap/at32ap700x.c +++ b/trunk/arch/avr32/mach-at32ap/at32ap700x.c @@ -1325,7 +1325,7 @@ struct platform_device *__init at32_add_device_mci(unsigned int id, struct mci_platform_data *data) { struct platform_device *pdev; - struct mci_dma_data *slave; + struct mci_dma_slave *slave; u32 pioa_mask; u32 piob_mask; @@ -1344,9 +1344,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) ARRAY_SIZE(atmel_mci0_resource))) goto fail; - slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL); - if (!slave) - goto fail; + slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL); slave->sdata.dma_dev = &dw_dmac0_device.dev; slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; @@ -1359,7 +1357,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) if (platform_device_add_data(pdev, data, sizeof(struct mci_platform_data))) - goto fail_free; + goto fail; /* CLK line is common to both slots */ pioa_mask = 1 << 10; @@ -1383,7 +1381,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) /* Slot is unused */ break; default: - goto fail_free; + goto fail; } select_peripheral(PIOA, pioa_mask, PERIPH_A, 0); @@ -1410,7 +1408,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) break; default: if (!data->slot[0].bus_width) - goto fail_free; + goto fail; data->slot[1].bus_width = 0; break; @@ -1421,10 +1419,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) platform_device_add(pdev); return pdev; -fail_free: - kfree(slave); fail: data->dma_slave = NULL; + kfree(slave); platform_device_put(pdev); return NULL; } diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c index bb8c4b9ccb80..5372b24ad049 100644 --- a/trunk/arch/microblaze/kernel/setup.c +++ b/trunk/arch/microblaze/kernel/setup.c @@ -54,7 +54,6 @@ void __init setup_arch(char **cmdline_p) microblaze_cache_init(); - invalidate_dcache(); enable_dcache(); invalidate_icache(); diff --git a/trunk/arch/powerpc/mm/tlb_hash64.c b/trunk/arch/powerpc/mm/tlb_hash64.c index 1ec06576f619..282d9306361f 100644 --- a/trunk/arch/powerpc/mm/tlb_hash64.c +++ b/trunk/arch/powerpc/mm/tlb_hash64.c @@ -63,21 +63,15 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, if (huge) { #ifdef CONFIG_HUGETLB_PAGE psize = get_slice_psize(mm, addr); - /* Mask the address for the correct page size */ - addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); #else BUG(); psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ #endif - } else { + } else psize = pte_pagesize_index(mm, addr, pte); - /* Mask the address for the standard page size. 
If we - * have a 64k page kernel, but the hardware does not - * support 64k pages, this might be different from the - * hardware page size encoded in the slice table. */ - addr &= PAGE_MASK; - } + /* Mask the address for the correct page size */ + addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); /* Build full vaddr */ if (!is_kernel_addr(addr)) { diff --git a/trunk/arch/powerpc/platforms/pseries/xics.c b/trunk/arch/powerpc/platforms/pseries/xics.c index f5f79196721c..1ee66db003be 100644 --- a/trunk/arch/powerpc/platforms/pseries/xics.c +++ b/trunk/arch/powerpc/platforms/pseries/xics.c @@ -784,13 +784,9 @@ static void xics_set_cpu_priority(unsigned char cppr) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - /* - * we only really want to set the priority when there's - * just one cppr value on the stack - */ - WARN_ON(os_cppr->index != 0); + BUG_ON(os_cppr->index != 0); - os_cppr->stack[0] = cppr; + os_cppr->stack[os_cppr->index] = cppr; if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_cppr_info(cppr); @@ -825,14 +821,8 @@ void xics_setup_cpu(void) void xics_teardown_cpu(void) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); int cpu = smp_processor_id(); - /* - * we have to reset the cppr index to 0 because we're - * not going to return from the IPI - */ - os_cppr->index = 0; xics_set_cpu_priority(0); /* Clear any pending IPI request */ diff --git a/trunk/arch/s390/include/asm/lowcore.h b/trunk/arch/s390/include/asm/lowcore.h index c25dfac7dd76..f2ef4b619ce1 100644 --- a/trunk/arch/s390/include/asm/lowcore.h +++ b/trunk/arch/s390/include/asm/lowcore.h @@ -293,12 +293,12 @@ struct _lowcore __u64 clock_comparator; /* 0x02d0 */ __u32 machine_flags; /* 0x02d8 */ __u32 ftrace_func; /* 0x02dc */ - __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */ + __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ - __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */ + __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ /* * 0xe00 contains the address of the IPL Parameter Information diff --git a/trunk/arch/sh/kernel/cpu/sh3/entry.S b/trunk/arch/sh/kernel/cpu/sh3/entry.S index f6a389c996cb..3f7e2a22c7c2 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/entry.S +++ b/trunk/arch/sh/kernel/cpu/sh3/entry.S @@ -132,6 +132,7 @@ ENTRY(tlb_protection_violation_store) mov #1, r5 call_handle_tlbmiss: + setup_frame_reg mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 @@ -364,8 +365,6 @@ handle_exception: mov.l @k2, k2 ! read out vector and keep in k2 handle_exception_special: - setup_frame_reg - ! Setup return address and jump to exception handler mov.l 7f, r9 ! fetch return address stc r2_bank, r0 ! k2 (vector) diff --git a/trunk/arch/sh/kernel/dwarf.c b/trunk/arch/sh/kernel/dwarf.c index e51168064e56..88d28ec3780a 100644 --- a/trunk/arch/sh/kernel/dwarf.c +++ b/trunk/arch/sh/kernel/dwarf.c @@ -540,8 +540,6 @@ void dwarf_free_frame(struct dwarf_frame *frame) mempool_free(frame, dwarf_frame_pool); } -extern void ret_from_irq(void); - /** * dwarf_unwind_stack - unwind the stack * @@ -680,24 +678,6 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, addr = frame->cfa + reg->addr; frame->return_addr = __raw_readl(addr); - /* - * Ah, the joys of unwinding through interrupts. - * - * Interrupts are tricky - the DWARF info needs to be _really_ - * accurate and unfortunately I'm seeing a lot of bogus DWARF - * info. For example, I've seen interrupts occur in epilogues - * just after the frame pointer (r14) had been restored. 
The - * problem was that the DWARF info claimed that the CFA could be - * reached by using the value of the frame pointer before it was - * restored. - * - * So until the compiler can be trusted to produce reliable - * DWARF info when it really matters, let's stop unwinding once - * we've calculated the function that was interrupted. - */ - if (prev && prev->pc == (unsigned long)ret_from_irq) - frame->return_addr = 0; - return frame; bail: diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S index 2b15ae60c3a0..f0abd58c3a69 100644 --- a/trunk/arch/sh/kernel/entry-common.S +++ b/trunk/arch/sh/kernel/entry-common.S @@ -70,14 +70,8 @@ ret_from_exception: CFI_STARTPROC simple CFI_DEF_CFA r14, 0 CFI_REL_OFFSET 17, 64 - CFI_REL_OFFSET 15, 60 + CFI_REL_OFFSET 15, 0 CFI_REL_OFFSET 14, 56 - CFI_REL_OFFSET 13, 52 - CFI_REL_OFFSET 12, 48 - CFI_REL_OFFSET 11, 44 - CFI_REL_OFFSET 10, 40 - CFI_REL_OFFSET 9, 36 - CFI_REL_OFFSET 8, 32 preempt_stop() ENTRY(ret_from_irq) ! diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 6e44519960c8..f125e5c551c0 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1356,7 +1356,6 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) kfree(data->powernow_table); kfree(data); - per_cpu(powernow_data, pol->cpu) = NULL; return 0; } @@ -1376,7 +1375,7 @@ static unsigned int powernowk8_get(unsigned int cpu) int err; if (!data) - return 0; + return -EINVAL; smp_call_function_single(cpu, query_values_on_cpu, &err, true); if (err) diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c index 15578f180e59..296aba49472a 100644 --- a/trunk/arch/x86/kvm/i8254.c +++ b/trunk/arch/x86/kvm/i8254.c @@ -467,9 +467,6 @@ static int pit_ioport_read(struct kvm_io_device *this, return -EOPNOTSUPP; addr &= KVM_PIT_CHANNEL_MASK; - if (addr == 3) - return 0; - s = &pit_state->channels[addr]; mutex_lock(&pit_state->lock); diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index a1e1bc9d412d..1ddcad452add 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { static int version; struct pvclock_wall_clock wc; - struct timespec boot; + struct timespec now, sys, boot; if (!wall_clock) return; @@ -685,7 +685,9 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. */ - getboottime(&boot); + now = current_kernel_time(); + ktime_get_ts(&sys); + boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys)); wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; @@ -760,7 +762,6 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) local_irq_save(flags); kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); ktime_get_ts(&ts); - monotonic_to_bootbased(&ts); local_irq_restore(flags); /* With all the info we got, fill in the values */ diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index b34390347c16..b8bea100a160 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -2868,21 +2868,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) }, .driver_data = "F.23", /* cutoff BIOS version */ }, - /* - * Acer eMachines G725 has the same problem. BIOS - * V1.03 is known to be broken. V3.04 is known to - * work. 
Inbetween, there are V1.06, V2.06 and V3.03 - * that we don't have much idea about. For now, - * blacklist anything older than V3.04. - */ - { - .ident = "G725", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), - DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), - }, - .driver_data = "V3.04", /* cutoff BIOS version */ - }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index d096fbcbc771..f4ea5a8c325b 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * write indication (used for PIO/DMA setup), result TF is * copied back and we don't whine too much about its failure. */ - tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; if (scmd->sc_data_direction == DMA_TO_DEVICE) tf->flags |= ATA_TFLAG_WRITE; diff --git a/trunk/drivers/ata/libata-sff.c b/trunk/drivers/ata/libata-sff.c index 730ef3c384ca..741065c9da67 100644 --- a/trunk/drivers/ata/libata-sff.c +++ b/trunk/drivers/ata/libata-sff.c @@ -893,9 +893,6 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } - if (!do_write) - flush_dcache_page(page); - qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; diff --git a/trunk/drivers/bluetooth/btmrvl_sdio.c b/trunk/drivers/bluetooth/btmrvl_sdio.c index 57d965b7f521..f36defa37764 100644 --- a/trunk/drivers/bluetooth/btmrvl_sdio.c +++ b/trunk/drivers/bluetooth/btmrvl_sdio.c @@ -808,7 +808,6 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, exit: sdio_release_host(card->func); - kfree(tmpbuf); return ret; } diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c index dcb9083ecde0..c6f3b48be9dd 100644 --- a/trunk/drivers/char/tty_io.c +++ b/trunk/drivers/char/tty_io.c @@ -1951,10 +1951,8 @@ static int tty_fasync(int fd, struct file *filp, int on) pid = task_pid(current); type = PIDTYPE_PID; } - get_pid(pid); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); retval = __f_setown(filp, pid, type, 0); - put_pid(pid); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (retval) goto out; } else { diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c index bd444dc93cf2..4b34ade2332b 100644 --- a/trunk/drivers/cpufreq/cpufreq_ondemand.c +++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c @@ -554,9 +554,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential); - if (freq_next < policy->min) - freq_next = policy->min; - if (!dbs_tuners_ins.powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); diff --git a/trunk/drivers/gpu/drm/ati_pcigart.c b/trunk/drivers/gpu/drm/ati_pcigart.c index 17be051b7aa3..a1fce68e3bbe 100644 --- a/trunk/drivers/gpu/drm/ati_pcigart.c +++ b/trunk/drivers/gpu/drm/ati_pcigart.c @@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { DRM_ERROR("fail to set dma mask to 0x%Lx\n", - (unsigned long long)gart_info->table_mask); + gart_info->table_mask); ret = 1; goto done; } diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index ecac882e1d54..46d88965852a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -120,7 
+120,7 @@ const static struct intel_device_info intel_gm45_info = { const static struct intel_device_info intel_pineview_info = { .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .need_gfx_hws = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index b4c8c0230689..dda787aafcc6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -3564,9 +3564,6 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t reloc_count = 0, i; int ret = 0; - if (relocs == NULL) - return 0; - for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; int unwritten; @@ -3656,7 +3653,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs = NULL; + struct drm_i915_gem_relocation_entry *relocs; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains, reloc_index; @@ -3725,8 +3722,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3735,8 +3730,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (obj_priv->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", object_list[i]); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3933,7 +3926,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); -pre_mutex_err: /* Copy the updated relocations out regardless of current error * state. Failure to update the relocs would mean that the next * time userland calls execbuf, it would do so with presumed offset @@ -3948,6 +3940,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = ret2; } +pre_mutex_err: drm_free_large(object_list); kfree(cliprects); diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 50ddf4a95c5e..89a071a3e6fb 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -309,22 +309,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_GSE) ironlake_opregion_gse_intr(dev); - if (de_iir & DE_PLANEA_FLIP_DONE) - intel_prepare_page_flip(dev, 0); - - if (de_iir & DE_PLANEB_FLIP_DONE) - intel_prepare_page_flip(dev, 1); - - if (de_iir & DE_PIPEA_VBLANK) { - drm_handle_vblank(dev, 0); - intel_finish_page_flip(dev, 0); - } - - if (de_iir & DE_PIPEB_VBLANK) { - drm_handle_vblank(dev, 1); - intel_finish_page_flip(dev, 1); - } - /* check event from PCH */ if ((de_iir & DE_PCH_EVENT) && (pch_iir & SDE_HOTPLUG_MASK)) { @@ -860,11 +844,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_IRONLAKE(dev)) - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else if (IS_I965G(dev)) + return 0; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -882,14 +866,13 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_IRONLAKE(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else - i915_disable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE | - PIPE_START_VBLANK_INTERRUPT_ENABLE); + return; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -1032,14 +1015,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; u32 render_mask = GT_USER_INTERRUPT; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; + dev_priv->de_irq_enable_reg = display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 79dd4026586f..ddefc871edfe 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -157,9 +157,6 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 12775df1bbfd..45da78ef4a92 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -1638,7 +1638,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); - drm_vblank_off(dev, pipe); /* Disable display plane */ temp = I915_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { @@ -2520,10 +2519,6 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, sr_entries = roundup(sr_entries / cacheline_size, 1); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -2567,10 +2562,6 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm = 1; srwm &= 0x3f; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, 
C: 8, SR %d\n", @@ -2639,10 +2630,6 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, if (srwm < 0) srwm = 1; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -3997,12 +3984,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { - if (work && !work->pending) { - obj_priv = work->obj->driver_private; - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", - obj_priv, - atomic_read(&obj_priv->pending_flip)); - } spin_unlock_irqrestore(&dev->event_lock, flags); return; } @@ -4024,10 +4005,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev->event_lock, flags); obj_priv = work->obj->driver_private; - - /* Initial scanout buffer will have a 0 pending flip count */ - if ((atomic_read(&obj_priv->pending_flip) == 0) || - atomic_dec_and_test(&obj_priv->pending_flip)) + if (atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); } @@ -4040,11 +4018,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) { + if (intel_crtc->unpin_work) intel_crtc->unpin_work->pending = 1; - } else { - DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); - } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -4078,7 +4053,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); mutex_unlock(&dev->struct_mutex); @@ -4092,10 +4066,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ret = intel_pin_and_fence_fb_obj(dev, obj); if (ret != 0) { - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", - obj->driver_private); kfree(work); - intel_crtc->unpin_work = NULL; mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index b1d0acbae4e4..aa74e59bec61 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { { .ident = "Samsung SX20S", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), + DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BOARD_NAME, "SX20S"), }, }, @@ -622,13 +622,6 @@ static const struct dmi_system_id bad_lid_status[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), }, }, - { - .ident = "Aspire 1810T", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), - }, - }, { .ident = "PC-81005", .matches = { @@ -650,7 +643,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect { enum drm_connector_status status = connector_status_connected; - if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) + if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) status = connector_status_disconnected; return status; diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c 
b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 82678d30ab06..eaacfd0920df 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -2345,14 +2345,6 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) connector->connector_type = DRM_MODE_CONNECTOR_VGA; intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); - } else if (flags & SDVO_OUTPUT_CVBS0) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; - encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index c0d4650cdb79..11c9a3fe6810 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -354,17 +354,11 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } -/* Who ever call radeon_fence_emit should call ring_lock and ask - * for enough space (today caller are ib schedule and buffer move) */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* We have to make sure that caches are flushed before - * CPU might read something from VRAM. */ - radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); - radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); + /* Who ever call radeon_fence_emit should call ring_lock and ask + * for enough space (today caller are ib schedule and buffer move) */ /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 16) | (1 << 17)); @@ -3375,6 +3369,7 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { + r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -3486,12 +3481,13 @@ int r100_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index 43b55a030b4d..0051d11b907c 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -506,14 +506,11 @@ void r300_vram_info(struct radeon_device *rdev) /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; - tmp = RREG32(RADEON_MEM_CNTL); - tmp &= R300_MEM_NUM_CHANNELS_MASK; - switch (tmp) { - case 0: rdev->mc.vram_width = 64; break; - case 1: rdev->mc.vram_width = 128; break; - case 2: rdev->mc.vram_width = 256; break; - default: rdev->mc.vram_width = 128; break; + if (tmp & R300_MEM_NUM_CHANNELS_MASK) { + rdev->mc.vram_width = 128; + } else { + rdev->mc.vram_width = 64; } r100_vram_init_sizes(rdev); @@ -1330,6 +1327,7 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { + r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); 
r100_ib_fini(rdev); @@ -1420,15 +1418,15 @@ int r300_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r420.c b/trunk/drivers/gpu/drm/radeon/r420.c index d9373246c97f..4526faaacca8 100644 --- a/trunk/drivers/gpu/drm/radeon/r420.c +++ b/trunk/drivers/gpu/drm/radeon/r420.c @@ -389,15 +389,16 @@ int r420_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r420_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r520.c b/trunk/drivers/gpu/drm/radeon/r520.c index ddf5731eba0d..9a189072f2b9 100644 --- a/trunk/drivers/gpu/drm/radeon/r520.c +++ b/trunk/drivers/gpu/drm/radeon/r520.c @@ -294,12 +294,13 @@ int r520_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index a1198d99cdf9..1b6d0001b20e 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -1654,12 +1654,6 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.align_mask = 16 - 1; } -void r600_cp_fini(struct radeon_device *rdev) -{ - r600_cp_stop(rdev); - radeon_ring_fini(rdev); -} - /* * GPU scratch registers helpers function. 
@@ -1867,12 +1861,6 @@ int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -2057,15 +2045,19 @@ int r600_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } rdev->accel_working = true; r = r600_startup(rdev); if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + r600_suspend(rdev); r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -2091,17 +2083,20 @@ int r600_init(struct radeon_device *rdev) void r600_fini(struct radeon_device *rdev) { + /* Suspend operations */ + r600_suspend(rdev); + r600_audio_fini(rdev); r600_blit_fini(rdev); - r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); - radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); + radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); @@ -2905,18 +2900,3 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } - -/** - * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl - * rdev: radeon device structure - * bo: buffer object struct which userspace is waiting for idle - * - * Some R6XX/R7XX doesn't seems to take into account HDP flush performed - * through ring buffer, this leads to corruption in rendering, see - * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we - * directly perform HDP flush by writing register through MMIO. - */ -void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) -{ - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); -} diff --git a/trunk/drivers/gpu/drm/radeon/r600_audio.c b/trunk/drivers/gpu/drm/radeon/r600_audio.c index b1c1d3433454..99e2c3891a7d 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_audio.c +++ b/trunk/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) + return rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index f57480ba1355..2d5f2bfa7201 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -661,13 +661,6 @@ struct radeon_asic { void (*hpd_fini)(struct radeon_device *rdev); bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); - /* ioctl hw specific callback. Some hw might want to perform special - * operation on specific ioctl. For instance on wait idle some hw - * might want to perform and HDP flush through MMIO as it seems that - * some R6XX/R7XX hw doesn't take HDP flush into account if programmed - * through ring. 
- */ - void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); }; /* @@ -1150,7 +1143,6 @@ extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); -extern void r600_cp_fini(struct radeon_device *rdev); extern int r600_count_pipe_bits(uint32_t val); extern int r600_gart_clear_page(struct radeon_device *rdev, int i); extern int r600_mc_wait_for_idle(struct radeon_device *rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index 05ee1aeac3fd..f2fbd2e4e9df 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -117,7 +117,6 @@ static struct radeon_asic r100_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -177,7 +176,6 @@ static struct radeon_asic r300_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; /* @@ -221,7 +219,6 @@ static struct radeon_asic r420_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -270,7 +267,6 @@ static struct radeon_asic rs400_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -327,7 +323,6 @@ static struct radeon_asic rs600_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -375,7 +370,6 @@ static struct radeon_asic rs690_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -427,7 +421,6 @@ static struct radeon_asic rv515_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -470,7 +463,6 @@ static struct radeon_asic r520_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; /* @@ -512,7 +504,6 @@ void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); -extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); static struct radeon_asic r600_asic = { .init = &r600_init, @@ -547,7 +538,6 @@ static struct radeon_asic r600_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, }; /* @@ -592,7 +582,6 @@ static struct radeon_asic rv770_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, }; #endif diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index e7b19440102e..579c8920e081 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -971,7 +971,8 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder lvds->native_mode.vdisplay); 
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); - lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); + if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) + lvds->panel_vcc_delay = 2000; lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index 2d8e5a70f284..55266416fa47 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->dac_load_detect = false; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, - radeon_connector->dac_load_detect); + 1); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c index db8e9a355a01..0e1325e18534 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c @@ -308,9 +308,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; r = radeon_bo_wait(robj, NULL, false); - /* callback hw specific functions if any */ - if (robj->rdev->asic->ioctl_wait_idle) - robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c index 287fcebfb4e6..9f5418983e2a 100644 --- a/trunk/drivers/gpu/drm/radeon/rs400.c +++ b/trunk/drivers/gpu/drm/radeon/rs400.c @@ -223,31 +223,15 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } -int rs400_mc_wait_for_idle(struct radeon_device *rdev) -{ - unsigned i; - uint32_t tmp; - - for (i = 0; i < rdev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(0x0150); - if (tmp & (1 << 2)) { - return 0; - } - DRM_UDELAY(1); - } - return -1; -} - void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ r100_hdp_reset(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); - if (rs400_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "rs400: Failed to wait MC idle while " - "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); + if (r300_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. 
Bad things might happen.\n"); } } @@ -386,8 +370,8 @@ void rs400_mc_program(struct radeon_device *rdev) r100_mc_stop(rdev, &save); /* Wait for mc idle */ - if (rs400_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); + if (r300_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); WREG32(R_000148_MC_FB_LOCATION, S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); @@ -464,6 +448,7 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { + rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -542,6 +527,7 @@ int rs400_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index c3818562a13e..d5255751e7b3 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -610,6 +610,7 @@ int rs600_suspend(struct radeon_device *rdev) void rs600_fini(struct radeon_device *rdev) { + rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -688,6 +689,7 @@ int rs600_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c index 06e2771aee5a..cd31da913771 100644 --- a/trunk/drivers/gpu/drm/radeon/rs690.c +++ b/trunk/drivers/gpu/drm/radeon/rs690.c @@ -676,6 +676,7 @@ int rs690_suspend(struct radeon_device *rdev) void rs690_fini(struct radeon_device *rdev) { + rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -755,6 +756,7 @@ int rs690_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index 0e1e6b8632b8..62756717b044 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -537,6 +537,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -614,12 +615,13 @@ int rv515_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 5943d561fd1e..afd9e8213c29 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -887,12 +887,6 @@ static int rv770_startup(struct radeon_device *rdev) return r; } rv770_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - 
rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1061,15 +1055,19 @@ int rv770_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } rdev->accel_working = true; r = rv770_startup(rdev); if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + rv770_suspend(rdev); r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -1091,11 +1089,13 @@ int rv770_init(struct radeon_device *rdev) void rv770_fini(struct radeon_device *rdev) { + rv770_suspend(rdev); + r600_blit_fini(rdev); - r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/trunk/drivers/hwmon/adt7462.c b/trunk/drivers/hwmon/adt7462.c index b8156b4893bb..a31e77c776ae 100644 --- a/trunk/drivers/hwmon/adt7462.c +++ b/trunk/drivers/hwmon/adt7462.c @@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; * * Some, but not all, of these voltages have low/high limits. */ -#define ADT7462_VOLT_COUNT 13 +#define ADT7462_VOLT_COUNT 12 #define ADT7462_VENDOR 0x41 #define ADT7462_DEVICE 0x62 diff --git a/trunk/drivers/hwmon/lm78.c b/trunk/drivers/hwmon/lm78.c index 72ff2c4e757d..cadcbd90ff3b 100644 --- a/trunk/drivers/hwmon/lm78.c +++ b/trunk/drivers/hwmon/lm78.c @@ -851,16 +851,17 @@ static struct lm78_data *lm78_update_device(struct device *dev) static int __init lm78_isa_found(unsigned short address) { int val, save, found = 0; - int port; - - /* Some boards declare base+0 to base+7 as a PNP device, some base+4 - * to base+7 and some base+5 to base+6. So we better request each port - * individually for the probing phase. */ - for (port = address; port < address + LM78_EXTENT; port++) { - if (!request_region(port, 1, "lm78")) { - pr_debug("lm78: Failed to request port 0x%x\n", port); - goto release; - } + + /* We have to request the region in two parts because some + boards declare base+4 to base+7 as a PNP device */ + if (!request_region(address, 4, "lm78")) { + pr_debug("lm78: Failed to request low part of region\n"); + return 0; + } + if (!request_region(address + 4, 4, "lm78")) { + pr_debug("lm78: Failed to request high part of region\n"); + release_region(address, 4); + return 0; } #define REALLY_SLOW_IO @@ -924,8 +925,8 @@ static int __init lm78_isa_found(unsigned short address) val & 0x80 ? "LM79" : "LM78", (int)address); release: - for (port--; port >= address; port--) - release_region(port, 1); + release_region(address + 4, 4); + release_region(address, 4); return found; } diff --git a/trunk/drivers/hwmon/w83781d.c b/trunk/drivers/hwmon/w83781d.c index 32d4adee73db..05f9225b6f94 100644 --- a/trunk/drivers/hwmon/w83781d.c +++ b/trunk/drivers/hwmon/w83781d.c @@ -1793,17 +1793,17 @@ static int __init w83781d_isa_found(unsigned short address) { int val, save, found = 0; - int port; - - /* Some boards declare base+0 to base+7 as a PNP device, some base+4 - * to base+7 and some base+5 to base+6. 
So we better request each port - * individually for the probing phase. */ - for (port = address; port < address + W83781D_EXTENT; port++) { - if (!request_region(port, 1, "w83781d")) { - pr_debug("w83781d: Failed to request port 0x%x\n", - port); - goto release; - } + + /* We have to request the region in two parts because some + boards declare base+4 to base+7 as a PNP device */ + if (!request_region(address, 4, "w83781d")) { + pr_debug("w83781d: Failed to request low part of region\n"); + return 0; + } + if (!request_region(address + 4, 4, "w83781d")) { + pr_debug("w83781d: Failed to request high part of region\n"); + release_region(address, 4); + return 0; } #define REALLY_SLOW_IO @@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) val == 0x30 ? "W83782D" : "W83781D", (int)address); release: - for (port--; port >= address; port--) - release_region(port, 1); + release_region(address + 4, 4); + release_region(address, 4); return found; } diff --git a/trunk/drivers/i2c/busses/i2c-tiny-usb.c b/trunk/drivers/i2c/busses/i2c-tiny-usb.c index b1c050ff311d..e29b6d5ba8ef 100644 --- a/trunk/drivers/i2c/busses/i2c-tiny-usb.c +++ b/trunk/drivers/i2c/busses/i2c-tiny-usb.c @@ -13,6 +13,7 @@ #include #include #include +#include /* include interfaces to usb layer */ #include @@ -31,8 +32,8 @@ #define CMD_I2C_IO_END (1<<1) /* i2c bit delay, default is 10us -> 100kHz */ -static int delay = 10; -module_param(delay, int, 0); +static unsigned short delay = 10; +module_param(delay, ushort, 0); MODULE_PARM_DESC(delay, "bit delay in microseconds, " "e.g. 10 for 100kHz (default is 100kHz)"); @@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) static u32 usb_func(struct i2c_adapter *adapter) { - u32 func; + __le32 func; /* get functionality from adapter */ if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != @@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter) return 0; } - return func; + return le32_to_cpu(func); } /* This is the actual algorithm we define */ @@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, "i2c-tiny-usb at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); - if (usb_write(&dev->adapter, CMD_SET_DELAY, - cpu_to_le16(delay), 0, NULL, 0) != 0) { + if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { dev_err(&dev->adapter.dev, "failure setting delay to %dus\n", delay); retval = -EIO; diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index a20a71e5efd3..dd3dfe42d5a9 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -4075,10 +4075,8 @@ static void mddev_delayed_delete(struct work_struct *ws) { mddev_t *mddev = container_of(ws, mddev_t, del_work); - if (mddev->private) { + if (mddev->private == &md_redundancy_group) { sysfs_remove_group(&mddev->kobj, &md_redundancy_group); - if (mddev->private != (void*)1) - sysfs_remove_group(&mddev->kobj, mddev->private); if (mddev->sysfs_action) sysfs_put(mddev->sysfs_action); mddev->sysfs_action = NULL; @@ -4289,7 +4287,10 @@ static int do_md_run(mddev_t * mddev) sysfs_notify_dirent(rdev->sysfs_state); } + md_probe(mddev->unit, NULL, NULL); disk = mddev->gendisk; + if (!disk) + return -ENOMEM; spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); @@ -4529,8 +4530,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->queue->unplug_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; module_put(mddev->pers->owner); - if 
(mddev->pers->sync_request && mddev->private == NULL) - mddev->private = (void*)1; + if (mddev->pers->sync_request) + mddev->private = &md_redundancy_group; mddev->pers = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent(mddev->sysfs_state); @@ -4577,6 +4578,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) } mddev->bitmap_info.offset = 0; + /* make sure all md_delayed_delete calls have finished */ + flush_scheduled_work(); + export_array(mddev); mddev->array_sectors = 0; diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index ceb24afdc147..e84204eb12df 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -5136,8 +5136,9 @@ static int stop(mddev_t *mddev) mddev->thread = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); free_conf(conf); - mddev->private = &raid5_attrs_group; + mddev->private = NULL; return 0; } @@ -5463,11 +5464,11 @@ static int raid5_start_reshape(mddev_t *mddev) !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { char nm[20]; - if (rdev->raid_disk >= conf->previous_raid_disks) { + if (rdev->raid_disk >= conf->previous_raid_disks) set_bit(In_sync, &rdev->flags); - added_devices++; - } else + else rdev->recovery_offset = 0; + added_devices++; sprintf(nm, "rd%d", rdev->raid_disk); if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) @@ -5479,12 +5480,9 @@ static int raid5_start_reshape(mddev_t *mddev) break; } - /* When a reshape changes the number of devices, ->degraded - * is measured against the large of the pre and post number of - * devices.*/ if (mddev->delta_disks > 0) { spin_lock_irqsave(&conf->device_lock, flags); - mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) + mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; spin_unlock_irqrestore(&conf->device_lock, flags); } diff --git a/trunk/drivers/media/dvb/dvb-core/dmxdev.c b/trunk/drivers/media/dvb/dvb-core/dmxdev.c index 9ddc57909d49..c37790ad92d0 100644 --- a/trunk/drivers/media/dvb/dvb-core/dmxdev.c +++ b/trunk/drivers/media/dvb/dvb-core/dmxdev.c @@ -761,6 +761,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file) dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); dmxdevfilter->type = DMXDEV_TYPE_NONE; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); + INIT_LIST_HEAD(&dmxdevfilter->feed.ts); init_timer(&dmxdevfilter->timer); dvbdev->users++; @@ -886,7 +887,6 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, dmxdevfilter->type = DMXDEV_TYPE_PES; memcpy(&dmxdevfilter->params, params, sizeof(struct dmx_pes_filter_params)); - INIT_LIST_HEAD(&dmxdevfilter->feed.ts); dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_demux.c b/trunk/drivers/media/dvb/dvb-core/dvb_demux.c index 67f189b7aa1f..b78cfb7d1897 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/trunk/drivers/media/dvb/dvb-core/dvb_demux.c @@ -426,7 +426,16 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; }; - if (demux->cnt_storage) { + if (dvb_demux_tscheck) { + if (!demux->cnt_storage) + demux->cnt_storage = vmalloc(MAX_PID + 1); + + if (!demux->cnt_storage) { + printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. 
Disabling it\n"); + dvb_demux_tscheck = 0; + goto no_dvb_demux_tscheck; + } + /* check pkt counter */ if (pid < MAX_PID) { if (buf[1] & 0x80) @@ -445,6 +454,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; /* end check */ }; +no_dvb_demux_tscheck: list_for_each_entry(feed, &demux->feed_list, list_head) { if ((feed->pid != pid) && (feed->pid != 0x2000)) @@ -1236,7 +1246,6 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); if (!dvbdemux->feed) { vfree(dvbdemux->filter); - dvbdemux->filter = NULL; return -ENOMEM; } for (i = 0; i < dvbdemux->filternum; i++) { @@ -1248,13 +1257,6 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed[i].index = i; } - if (dvb_demux_tscheck) { - dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); - - if (!dvbdemux->cnt_storage) - printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); - } - INIT_LIST_HEAD(&dvbdemux->frontend_list); for (i = 0; i < DMX_TS_PES_OTHER; i++) { diff --git a/trunk/drivers/net/ax88796.c b/trunk/drivers/net/ax88796.c index 1dd4403247ca..62d9c9cc5671 100644 --- a/trunk/drivers/net/ax88796.c +++ b/trunk/drivers/net/ax88796.c @@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev) size = (res->end - res->start) + 1; ax->mem2 = request_mem_region(res->start, size, pdev->name); - if (ax->mem2 == NULL) { + if (ax->mem == NULL) { dev_err(&pdev->dev, "cannot reserve registers\n"); ret = -ENXIO; goto exit_mem1; diff --git a/trunk/drivers/net/cxgb3/sge.c b/trunk/drivers/net/cxgb3/sge.c index 318a018ca7c5..bdbd14727e4b 100644 --- a/trunk/drivers/net/cxgb3/sge.c +++ b/trunk/drivers/net/cxgb3/sge.c @@ -2079,7 +2079,6 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len, int complete) { struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; - struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl; struct skb_frag_struct *rx_frag; @@ -2117,19 +2116,12 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, if (!nr_frags) { offset = 2 + sizeof(struct cpl_rx_pkt); - cpl = qs->lro_va = sd->pg_chunk.va + 2; - - if ((pi->rx_offload & T3_RX_CSUM) && - cpl->csum_valid && cpl->csum == htons(0xffff)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; - } else - skb->ip_summed = CHECKSUM_NONE; - } else - cpl = qs->lro_va; - + qs->lro_va = sd->pg_chunk.va + 2; + } len -= offset; + prefetch(qs->lro_va); + rx_frag += nr_frags; rx_frag->page = sd->pg_chunk.page; rx_frag->page_offset = sd->pg_chunk.offset + offset; @@ -2144,8 +2136,12 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, return; skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); + skb->ip_summed = CHECKSUM_UNNECESSARY; + cpl = qs->lro_va; if (unlikely(cpl->vlan_valid)) { + struct net_device *dev = qs->netdev; + struct port_info *pi = netdev_priv(dev); struct vlan_group *grp = pi->vlan_grp; if (likely(grp != NULL)) { diff --git a/trunk/drivers/net/igb/igb_main.c b/trunk/drivers/net/igb/igb_main.c index c881347cb26d..997124d2992a 100644 --- a/trunk/drivers/net/igb/igb_main.c +++ b/trunk/drivers/net/igb/igb_main.c @@ -421,8 +421,6 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; if (tx_queue > IGB_N0_QUEUE) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; - if (!adapter->msix_entries && msix_vector == 0) - msixbm |= E1000_EIMS_OTHER; 
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); q_vector->eims_value = msixbm; break; @@ -879,6 +877,7 @@ static int igb_request_irq(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; int err = 0; if (adapter->msix_entries) { @@ -910,7 +909,20 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); } else { - igb_assign_vector(adapter->q_vector[0], 0); + switch (hw->mac.type) { + case e1000_82575: + wr32(E1000_MSIXBM(0), + (E1000_EICR_RX_QUEUE0 | + E1000_EICR_TX_QUEUE0 | + E1000_EIMS_OTHER)); + break; + case e1000_82580: + case e1000_82576: + wr32(E1000_IVAR0, E1000_IVAR_VALID); + break; + default: + break; + } } if (adapter->flags & IGB_FLAG_HAS_MSI) { @@ -1128,8 +1140,6 @@ int igb_up(struct igb_adapter *adapter) } if (adapter->msix_entries) igb_configure_msix(adapter); - else - igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. */ rd32(E1000_ICR); diff --git a/trunk/drivers/net/ixgbe/ixgbe_main.c b/trunk/drivers/net/ixgbe/ixgbe_main.c index 7b7c8486c0bf..b5f64ad67975 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ixgbe/ixgbe_main.c @@ -5179,7 +5179,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); } - return 0; + return count; } static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, @@ -5329,11 +5329,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) struct ixgbe_adapter *adapter = netdev_priv(dev); int txq = smp_processor_id(); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - while (unlikely(txq >= dev->real_num_tx_queues)) - txq -= dev->real_num_tx_queues; + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) return txq; - } #ifdef IXGBE_FCOE if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && diff --git a/trunk/drivers/net/netxen/netxen_nic_main.c b/trunk/drivers/net/netxen/netxen_nic_main.c index 24279e6e55f5..9f9d6081959b 100644 --- a/trunk/drivers/net/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/netxen/netxen_nic_main.c @@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) netif_wake_queue(adapter->netdev); clear_bit(__NX_RESETTING, &adapter->state); - return; + } else { clear_bit(__NX_RESETTING, &adapter->state); if (!netxen_nic_reset_context(adapter)) { @@ -2240,9 +2240,7 @@ netxen_detach_work(struct work_struct *work) netxen_nic_down(adapter, netdev); - rtnl_lock(); netxen_nic_detach(adapter); - rtnl_unlock(); status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index 67249c3c9f50..d760650c5c04 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -1025,8 +1025,11 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) { struct sky2_tx_le *le = sky2->tx_le + *slot; + struct tx_ring_info *re = sky2->tx_ring + *slot; *slot = RING_NEXT(*slot, sky2->tx_ring_size); + re->flags = 0; + re->skb = NULL; le->ctrl = 0; return le; } @@ -1619,7 +1622,8 @@ static unsigned tx_le_req(const struct sk_buff *skb) return count; } -static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) +static void sky2_tx_unmap(struct pci_dev *pdev, + const struct tx_ring_info *re) { if (re->flags & TX_MAP_SINGLE) pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), @@ 
-1629,7 +1633,6 @@ static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), pci_unmap_len(re, maplen), PCI_DMA_TODEVICE); - re->flags = 0; } /* @@ -1836,7 +1839,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - re->skb = NULL; dev_kfree_skb_any(skb); sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); diff --git a/trunk/drivers/pci/quirks.c b/trunk/drivers/pci/quirks.c index d58b94030ef3..c74694345b6e 100644 --- a/trunk/drivers/pci/quirks.c +++ b/trunk/drivers/pci/quirks.c @@ -338,23 +338,6 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); -/* - * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS - * ver. 1.33 20070103) don't set the correct ISA PCI region header info. - * BAR0 should be 8 bytes; instead, it may be set to something like 8k - * (which conflicts w/ BAR1's memory range). - */ -static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) -{ - if (pci_resource_len(dev, 0) != 8) { - struct resource *res = &dev->resource[0]; - res->end = res->start + 8 - 1; - dev_info(&dev->dev, "CS5536 ISA bridge bug detected " - "(incorrect header); workaround applied.\n"); - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); - static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr, const char *name) { diff --git a/trunk/drivers/s390/cio/qdio_main.c b/trunk/drivers/s390/cio/qdio_main.c index 62b654af9237..999fe80c4051 100644 --- a/trunk/drivers/s390/cio/qdio_main.c +++ b/trunk/drivers/s390/cio/qdio_main.c @@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) qdio_siga_sync_q(q); get_buf_state(q, q->first_to_check, &state, 0); - if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) + if (state == SLSB_P_INPUT_PRIMED) /* more work coming */ return 0; @@ -960,8 +960,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, qdio_handle_activate_check(cdev, intparm, cstat, dstat); break; - case QDIO_IRQ_STATE_STOPPED: - break; default: WARN_ON(1); } diff --git a/trunk/drivers/usb/host/r8a66597-hcd.c b/trunk/drivers/usb/host/r8a66597-hcd.c index bee558aed427..0ceec123ddfd 100644 --- a/trunk/drivers/usb/host/r8a66597-hcd.c +++ b/trunk/drivers/usb/host/r8a66597-hcd.c @@ -35,9 +35,7 @@ #include <linux/usb.h> #include <linux/platform_device.h> #include <linux/io.h> -#include <linux/mm.h> #include <linux/irq.h> -#include <asm/cacheflush.h> #include "../core/hcd.h" #include "r8a66597.h" @@ -822,26 +820,6 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); } -static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, - int status) -__releases(r8a66597->lock) -__acquires(r8a66597->lock) -{ - if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { - void *ptr; - - for (ptr = urb->transfer_buffer; - ptr < urb->transfer_buffer + urb->transfer_buffer_length; - ptr += PAGE_SIZE) - flush_dcache_page(virt_to_page(ptr)); - } - - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); - spin_lock(&r8a66597->lock); -} - /* this function must be called with interrupt disabled */ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, 
u16 address) { @@ -860,9 +838,15 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) list_del(&td->queue); kfree(td); - if (urb) - r8a66597_urb_done(r8a66597, urb, -ENODEV); + if (urb) { + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), + urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, + -ENODEV); + spin_lock(&r8a66597->lock); + } break; } } @@ -1022,8 +1006,6 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, /* this function must be called with interrupt disabled */ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, u16 syssts) -__releases(r8a66597->lock) -__acquires(r8a66597->lock) { if (syssts == SE0) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); @@ -1041,9 +1023,7 @@ __acquires(r8a66597->lock) usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); } - spin_unlock(&r8a66597->lock); usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); - spin_lock(&r8a66597->lock); } /* this function must be called with interrupt disabled */ @@ -1303,7 +1283,10 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) if (usb_pipeisoc(urb->pipe)) urb->start_frame = r8a66597_get_frame(hcd); - r8a66597_urb_done(r8a66597, urb, status); + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(hcd, urb, status); + spin_lock(&r8a66597->lock); } if (restart) { diff --git a/trunk/fs/9p/v9fs.c b/trunk/fs/9p/v9fs.c index 7d6c2139891d..cf62b05e296a 100644 --- a/trunk/fs/9p/v9fs.c +++ b/trunk/fs/9p/v9fs.c @@ -84,7 +84,7 @@ static const match_table_t tokens = { static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) { - char *options, *tmp_options; + char *options; substring_t args[MAX_OPT_ARGS]; char *p; int option = 0; @@ -102,12 +102,9 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) if (!opts) return 0; - tmp_options = kstrdup(opts, GFP_KERNEL); - if (!tmp_options) { - ret = -ENOMEM; + options = kstrdup(opts, GFP_KERNEL); + if (!options) goto fail_option_alloc; - } - options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -162,12 +159,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) break; case Opt_cache: s = match_strdup(&args[0]); - if (!s) { - ret = -ENOMEM; - P9_DPRINTK(P9_DEBUG_ERROR, - "problem allocating copy of cache arg\n"); - goto free_and_return; - } + if (!s) + goto fail_option_alloc; if (strcmp(s, "loose") == 0) v9ses->cache = CACHE_LOOSE; @@ -180,12 +173,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) case Opt_access: s = match_strdup(&args[0]); - if (!s) { - ret = -ENOMEM; - P9_DPRINTK(P9_DEBUG_ERROR, - "problem allocating copy of access arg\n"); - goto free_and_return; - } + if (!s) + goto fail_option_alloc; v9ses->flags &= ~V9FS_ACCESS_MASK; if (strcmp(s, "user") == 0) @@ -205,11 +194,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) continue; } } + kfree(options); + return ret; -free_and_return: - kfree(tmp_options); fail_option_alloc: - return ret; + P9_DPRINTK(P9_DEBUG_ERROR, + "failed to allocate copy of option argument\n"); + return -ENOMEM; } /** diff --git a/trunk/fs/9p/v9fs_vfs.h b/trunk/fs/9p/v9fs_vfs.h index ed835836e0dc..3a7560e35865 100644 --- a/trunk/fs/9p/v9fs_vfs.h +++ b/trunk/fs/9p/v9fs_vfs.h @@ -60,4 +60,3 @@ void v9fs_dentry_release(struct dentry *); int v9fs_uflags2omode(int uflags, int extended); ssize_t 
v9fs_file_readn(struct file *, char *, char __user *, u32, u64); -void v9fs_blank_wstat(struct p9_wstat *wstat); diff --git a/trunk/fs/9p/vfs_file.c b/trunk/fs/9p/vfs_file.c index 74a0461a9ac0..3902bf43a088 100644 --- a/trunk/fs/9p/vfs_file.c +++ b/trunk/fs/9p/vfs_file.c @@ -257,23 +257,6 @@ v9fs_file_write(struct file *filp, const char __user * data, return total; } -static int v9fs_file_fsync(struct file *filp, struct dentry *dentry, - int datasync) -{ - struct p9_fid *fid; - struct p9_wstat wstat; - int retval; - - P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp, - dentry, datasync); - - fid = filp->private_data; - v9fs_blank_wstat(&wstat); - - retval = p9_client_wstat(fid, &wstat); - return retval; -} - static const struct file_operations v9fs_cached_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ -283,7 +266,6 @@ static const struct file_operations v9fs_cached_file_operations = { .release = v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, - .fsync = v9fs_file_fsync, }; const struct file_operations v9fs_file_operations = { @@ -294,5 +276,4 @@ const struct file_operations v9fs_file_operations = { .release = v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, - .fsync = v9fs_file_fsync, }; diff --git a/trunk/fs/9p/vfs_inode.c b/trunk/fs/9p/vfs_inode.c index a407fa3388c0..9d03d1ebca6f 100644 --- a/trunk/fs/9p/vfs_inode.c +++ b/trunk/fs/9p/vfs_inode.c @@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended) * */ -void +static void v9fs_blank_wstat(struct p9_wstat *wstat) { wstat->type = ~0; diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c index 34ddda888e63..33baf27fac78 100644 --- a/trunk/fs/befs/linuxvfs.c +++ b/trunk/fs/befs/linuxvfs.c @@ -873,7 +873,6 @@ befs_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); unacquire_priv_sbp: - kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index d11d0289f3d2..73d6a735b8f3 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -246,8 +246,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) if (!sb) goto out; if (sb->s_flags & MS_RDONLY) { - sb->s_frozen = SB_FREEZE_TRANS; - up_write(&sb->s_umount); + deactivate_locked_super(sb); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } @@ -308,7 +307,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) BUG_ON(sb->s_bdev != bdev); down_write(&sb->s_umount); if (sb->s_flags & MS_RDONLY) - goto out_unfrozen; + goto out_deactivate; if (sb->s_op->unfreeze_fs) { error = sb->s_op->unfreeze_fs(sb); @@ -322,11 +321,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) } } -out_unfrozen: sb->s_frozen = SB_UNFROZEN; smp_wmb(); wake_up(&sb->s_wait_unfrozen); +out_deactivate: if (sb) deactivate_locked_super(sb); out_unlock: diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 2b59201b955c..87b25543d7d1 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -1982,12 +1982,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_recover_relocation(tree_root); - if (ret < 0) { - printk(KERN_WARNING - "btrfs: failed to recover relocation\n"); - err = -EINVAL; - goto fail_trans_kthread; - } + BUG_ON(ret); } location.objectid = BTRFS_FS_TREE_OBJECTID; diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 559f72489b3b..432a2da4641e 
100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -5402,6 +5402,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, int ret; while (level >= 0) { + if (path->slots[level] >= + btrfs_header_nritems(path->nodes[level])) + break; + ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret > 0) break; @@ -5409,10 +5413,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, if (level == 0) break; - if (path->slots[level] >= - btrfs_header_nritems(path->nodes[level])) - break; - ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index b177ed319612..96577e8bf9fd 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -3165,9 +3165,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, spin_unlock(&tree->buffer_lock); goto free_eb; } + spin_unlock(&tree->buffer_lock); + /* add one reference for the tree */ atomic_inc(&eb->refs); - spin_unlock(&tree->buffer_lock); return eb; free_eb: diff --git a/trunk/fs/btrfs/file.c b/trunk/fs/btrfs/file.c index 9d0809629967..c02033596f02 100644 --- a/trunk/fs/btrfs/file.c +++ b/trunk/fs/btrfs/file.c @@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) } mutex_lock(&dentry->d_inode->i_mutex); out: - return ret > 0 ? -EIO : ret; + return ret > 0 ? EIO : ret; } static const struct vm_operations_struct btrfs_file_vm_ops = { diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index 4deb280f8969..8cd109972fa6 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -1681,6 +1681,24 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, * before we start the transaction. It limits the amount of btree * reads required while inside the transaction. */ +static noinline void reada_csum(struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_ordered_extent *ordered_extent) +{ + struct btrfs_ordered_sum *sum; + u64 bytenr; + + sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, + list); + bytenr = sum->sums[0].bytenr; + + /* + * we don't care about the results, the point of this search is + * just to get the btree leaves into ram + */ + btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); +} + /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1691,6 +1709,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_path *path; int compressed = 0; int ret; @@ -1698,9 +1717,32 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (!ret) return 0; - ordered_extent = btrfs_lookup_ordered_extent(inode, start); - BUG_ON(!ordered_extent); + /* + * before we join the transaction, try to do some of our IO. + * This will limit the amount of IO that we have to do with + * the transaction running. We're unlikely to need to do any + * IO if the file extents are new, the disk_i_size check + * covers the most common case. 
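The btrfs comment above captures a reusable idea: do the reads you can predict before entering the transaction, so the expensive critical section runs against a warm cache. A rough user-space analogy under POSIX, not btrfs code; the file name, offsets, and the mutex standing in for a transaction are all illustrative:

        #include <fcntl.h>
        #include <pthread.h>
        #include <unistd.h>

        static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;

        static void update_record(int fd, off_t off, size_t len)
        {
                /* 1. Hint the readahead while holding nothing. As with
                 *    reada_csum(), we don't care about a result here; the
                 *    point is only to pull the pages into the cache. */
                posix_fadvise(fd, off, (off_t)len, POSIX_FADV_WILLNEED);

                /* 2. Only now take the contended resource; the read below
                 *    is likely to be satisfied from the page cache. */
                pthread_mutex_lock(&journal_lock);
                char buf[256];
                pread(fd, buf, len < sizeof(buf) ? len : sizeof(buf), off);
                /* ... modify and write back under the lock ... */
                pthread_mutex_unlock(&journal_lock);
        }

        int main(void)
        {
                int fd = open("datafile", O_RDONLY);  /* illustrative path */
                if (fd >= 0) {
                        update_record(fd, 0, 4096);
                        close(fd);
                }
                return 0;
        }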
+ */ + if (start < BTRFS_I(inode)->disk_i_size) { + path = btrfs_alloc_path(); + if (path) { + ret = btrfs_lookup_file_extent(NULL, root, path, + inode->i_ino, + start, 0); + ordered_extent = btrfs_lookup_ordered_extent(inode, + start); + if (!list_empty(&ordered_extent->list)) { + btrfs_release_path(root, path); + reada_csum(root, path, ordered_extent); + } + btrfs_free_path(path); + } + } + if (!ordered_extent) + ordered_extent = btrfs_lookup_ordered_extent(inode, start); + BUG_ON(!ordered_extent); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); @@ -5799,9 +5841,7 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, inode->i_ctime = CURRENT_TIME; BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; if (!(mode & FALLOC_FL_KEEP_SIZE) && - (actual_len > inode->i_size) && - (cur_offset > inode->i_size)) { - + cur_offset > inode->i_size) { if (cur_offset > actual_len) i_size = actual_len; else diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c index ab7ab5318745..ed3e4a2ec2c8 100644 --- a/trunk/fs/btrfs/relocation.c +++ b/trunk/fs/btrfs/relocation.c @@ -3764,8 +3764,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) BTRFS_DATA_RELOC_TREE_OBJECTID); if (IS_ERR(fs_root)) err = PTR_ERR(fs_root); - else - btrfs_orphan_cleanup(fs_root); + btrfs_orphan_cleanup(fs_root); } return err; } diff --git a/trunk/fs/cifs/CHANGES b/trunk/fs/cifs/CHANGES index 49503d2edc7e..7b2600b380d7 100644 --- a/trunk/fs/cifs/CHANGES +++ b/trunk/fs/cifs/CHANGES @@ -1,7 +1,3 @@ -Version 1.62 ------------- -Add sockopt=TCP_NODELAY mount option. - Version 1.61 ------------ Fix append problem to Samba servers (files opened with O_APPEND could diff --git a/trunk/fs/cifs/cifsfs.h b/trunk/fs/cifs/cifsfs.h index 78c1b86d55f6..ac2b24c192f8 100644 --- a/trunk/fs/cifs/cifsfs.h +++ b/trunk/fs/cifs/cifsfs.h @@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.62" +#define CIFS_VERSION "1.61" #endif /* _CIFSFS_H */ diff --git a/trunk/fs/cifs/cifsglob.h b/trunk/fs/cifs/cifsglob.h index ed751bb657db..4b35f7ec0583 100644 --- a/trunk/fs/cifs/cifsglob.h +++ b/trunk/fs/cifs/cifsglob.h @@ -149,7 +149,6 @@ struct TCP_Server_Info { bool svlocal:1; /* local server or remote */ bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ - bool tcp_nodelay; atomic_t inFlight; /* number of requests on the wire to server */ #ifdef CONFIG_CIFS_STATS2 atomic_t inSend; /* requests trying to send */ diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index 2e9e09ca0e30..3bbcaa716b3c 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -98,7 +98,7 @@ struct smb_vol { bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ unsigned int rsize; unsigned int wsize; - bool sockopt_tcp_nodelay:1; + unsigned int sockopt; unsigned short int port; char *prepath; }; @@ -1142,11 +1142,9 @@ cifs_parse_mount_options(char *options, const char *devname, simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "sockopt", 5) == 0) { - if (!value || !*value) { - cERROR(1, ("no socket option specified")); - continue; - } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { - vol->sockopt_tcp_nodelay = 1; + if (value && *value) { + vol->sockopt = + simple_strtoul(value, &value, 
0); } } else if (strnicmp(data, "netbiosname", 4) == 0) { if (!value || !*value || (*value == ' ')) { @@ -1516,7 +1514,6 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; - tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1767,7 +1764,6 @@ static int ipv4_connect(struct TCP_Server_Info *server) { int rc = 0; - int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1849,14 +1845,6 @@ ipv4_connect(struct TCP_Server_Info *server) socket->sk->sk_rcvbuf = 140 * 1024; } - if (server->tcp_nodelay) { - val = 1; - rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - if (rc) - cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); - } - cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); @@ -1928,7 +1916,6 @@ static int ipv6_connect(struct TCP_Server_Info *server) { int rc = 0; - int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -2000,15 +1987,6 @@ ipv6_connect(struct TCP_Server_Info *server) */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; - - if (server->tcp_nodelay) { - val = 1; - rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - if (rc) - cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); - } - server->ssocket = socket; return rc; diff --git a/trunk/fs/cifs/inode.c b/trunk/fs/cifs/inode.c index e3fda978f481..cf18ee765590 100644 --- a/trunk/fs/cifs/inode.c +++ b/trunk/fs/cifs/inode.c @@ -1762,18 +1762,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) CIFS_MOUNT_MAP_SPECIAL_CHR); } - if (!rc) { + if (!rc) rc = inode_setattr(inode, attrs); - - /* force revalidate when any of these times are set since some - of the fs types (eg ext3, fat) do not have fine enough - time granularity to match protocol, and we do not have - a way (yet) to query the server fs's time granularity (and - whether it rounds times down). - */ - if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))) - cifsInode->time = 0; - } out: kfree(args); kfree(full_path); diff --git a/trunk/fs/cifs/readdir.c b/trunk/fs/cifs/readdir.c index c343b14ba2d3..f84062f9a985 100644 --- a/trunk/fs/cifs/readdir.c +++ b/trunk/fs/cifs/readdir.c @@ -77,11 +77,6 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, cFYI(1, ("For %s", name->name)); - if (parent->d_op && parent->d_op->d_hash) - parent->d_op->d_hash(parent, name); - else - name->hash = full_name_hash(name->name, name->len); - dentry = d_lookup(parent, name); if (dentry) { /* FIXME: check for inode number changes? 
*/ @@ -671,11 +666,12 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, min(len, max_len), nlt, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - pqst->len -= nls_nullsize(nlt); } else { pqst->name = filename; pqst->len = len; } + pqst->hash = full_name_hash(pqst->name, pqst->len); +/* cFYI(1, ("filldir on %s",pqst->name)); */ return rc; } diff --git a/trunk/fs/cifs/sess.c b/trunk/fs/cifs/sess.c index aaa9c1c5a5bd..7085a6275c4c 100644 --- a/trunk/fs/cifs/sess.c +++ b/trunk/fs/cifs/sess.c @@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; - } else { + } else { /* 300 should be long enough for any conceivable user name */ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, - MAX_USERNAME_SIZE, nls_cp); + 300, nls_cp); } bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null termination */ @@ -246,10 +246,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* copy user */ if (ses->userName == NULL) { /* BB what about null user mounts - check that we do this BB */ - } else { - strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); + } else { /* 300 should be long enough for any conceivable user name */ + strncpy(bcc_ptr, ses->userName, 300); } - bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); + /* BB improve check for overflow */ + bcc_ptr += strnlen(ses->userName, 300); *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ diff --git a/trunk/fs/fcntl.c b/trunk/fs/fcntl.c index 97e01dc0d95f..5ef953e6f908 100644 --- a/trunk/fs/fcntl.c +++ b/trunk/fs/fcntl.c @@ -199,7 +199,9 @@ static int setfl(int fd, struct file * filp, unsigned long arg) static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, int force) { - write_lock_irq(&filp->f_owner.lock); + unsigned long flags; + + write_lock_irqsave(&filp->f_owner.lock, flags); if (force || !filp->f_owner.pid) { put_pid(filp->f_owner.pid); filp->f_owner.pid = get_pid(pid); @@ -211,7 +213,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, filp->f_owner.euid = cred->euid; } } - write_unlock_irq(&filp->f_owner.lock); + write_unlock_irqrestore(&filp->f_owner.lock, flags); } int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, diff --git a/trunk/fs/file_table.c b/trunk/fs/file_table.c index b98404b54383..69652c5bd5f0 100644 --- a/trunk/fs/file_table.c +++ b/trunk/fs/file_table.c @@ -253,7 +253,6 @@ void __fput(struct file *file) if (file->f_op && file->f_op->release) file->f_op->release(inode, file); security_file_free(file); - ima_file_free(file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) cdev_put(inode->i_cdev); fops_put(file->f_op); diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index d62fdc875f22..94a5e60779f9 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -1736,7 +1736,8 @@ struct file *do_filp_open(int dfd, const char *pathname, if (nd.root.mnt) path_put(&nd.root); if (!IS_ERR(filp)) { - error = ima_file_check(filp, acc_mode); + error = ima_path_check(&filp->f_path, filp->f_mode & + (MAY_READ | MAY_WRITE | MAY_EXEC)); if (error) { fput(filp); filp = ERR_PTR(error); @@ -1796,7 +1797,8 @@ struct file *do_filp_open(int dfd, const char *pathname, } filp = nameidata_to_filp(&nd); if (!IS_ERR(filp)) { - error = ima_file_check(filp, acc_mode); + error = ima_path_check(&filp->f_path, filp->f_mode & + (MAY_READ | MAY_WRITE | MAY_EXEC)); if (error) { fput(filp); filp = ERR_PTR(error); diff 
--git a/trunk/fs/nfsd/export.c b/trunk/fs/nfsd/export.c index a0c4016413f1..c487810a2366 100644 --- a/trunk/fs/nfsd/export.c +++ b/trunk/fs/nfsd/export.c @@ -1316,11 +1316,19 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) { + struct svc_export *exp; u32 fsidv[2]; mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); - return rqst_exp_find(rqstp, FSID_NUM, fsidv); + exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); + /* + * We shouldn't be accepting an nfsv4 request at all if we + * don't have a pseudoexport! + */ + if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT) + exp = ERR_PTR(-ESERVERFAULT); + return exp; } /* diff --git a/trunk/fs/nfsd/vfs.c b/trunk/fs/nfsd/vfs.c index 97d79eff6b7f..c194793b642b 100644 --- a/trunk/fs/nfsd/vfs.c +++ b/trunk/fs/nfsd/vfs.c @@ -752,7 +752,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, flags, current_cred()); if (IS_ERR(*filp)) host_err = PTR_ERR(*filp); - host_err = ima_file_check(*filp, access); out_nfserr: err = nfserrno(host_err); out: @@ -2128,6 +2127,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, */ path.mnt = exp->ex_path.mnt; path.dentry = dentry; + err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); nfsd_out: return err? nfserrno(err) : 0; } diff --git a/trunk/fs/ocfs2/aops.c b/trunk/fs/ocfs2/aops.c index 7e9df11260f4..3dae4a13f6e4 100644 --- a/trunk/fs/ocfs2/aops.c +++ b/trunk/fs/ocfs2/aops.c @@ -599,7 +599,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, return ret; } -/* +/* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. Like the core uses * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from @@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw, ret = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, - nr_segs, + nr_segs, ocfs2_direct_IO_get_blocks, ocfs2_dio_end_io); diff --git a/trunk/fs/ocfs2/buffer_head_io.c b/trunk/fs/ocfs2/buffer_head_io.c index 21c808f752d8..d43d34a1dd31 100644 --- a/trunk/fs/ocfs2/buffer_head_io.c +++ b/trunk/fs/ocfs2/buffer_head_io.c @@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } ocfs2_metadata_cache_io_unlock(ci); - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", + mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", flags); diff --git a/trunk/fs/ocfs2/cluster/heartbeat.c b/trunk/fs/ocfs2/cluster/heartbeat.c index 5c9890006708..eda5b8bcddd5 100644 --- a/trunk/fs/ocfs2/cluster/heartbeat.c +++ b/trunk/fs/ocfs2/cluster/heartbeat.c @@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; -/* Only sets a new threshold if there are no active regions. 
* * No locking or otherwise interesting code is required for reading * o2hb_dead_threshold as it can't change once regions are active and @@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work) mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, - jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); + jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); o2quo_disk_timeout(); } @@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg, "seq %llu last %llu changed %u equal %u\n", slot->ds_node_num, (long long)slot->ds_last_generation, le32_to_cpu(hb_block->hb_cksum), - (unsigned long long)le64_to_cpu(hb_block->hb_seq), + (unsigned long long)le64_to_cpu(hb_block->hb_seq), (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, slot->ds_equal_samples); diff --git a/trunk/fs/ocfs2/cluster/tcp.c b/trunk/fs/ocfs2/cluster/tcp.c index d8d0c65ac03c..334f231a422c 100644 --- a/trunk/fs/ocfs2/cluster/tcp.c +++ b/trunk/fs/ocfs2/cluster/tcp.c @@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - printk(KERN_NOTICE "o2net: no longer connected to " + printk(KERN_INFO "o2net: no longer connected to " SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, if (!was_valid && valid) { o2quo_conn_up(o2net_num_from_nn(nn)); cancel_delayed_work(&nn->nn_connect_expired); - printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", + printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", o2nm_this_node() > sc->sc_node->nd_num ? "connected to" : "accepted connection from", SC_NODEF_ARGS(sc)); @@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc, cond_resched(); continue; } - mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT + mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); o2net_ensure_shutdown(nn, sc, 0); break; @@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data) do_gettimeofday(&now); - printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " + printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); mlog(ML_NOTICE, "here are some times that might help debug the " "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", - sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, + sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, now.tv_sec, (long) now.tv_usec, sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, sc->sc_tv_advance_start.tv_sec, diff --git a/trunk/fs/ocfs2/cluster/tcp_internal.h b/trunk/fs/ocfs2/cluster/tcp_internal.h index 96fa7ebc530c..8d58cfe410b1 100644 --- a/trunk/fs/ocfs2/cluster/tcp_internal.h +++ b/trunk/fs/ocfs2/cluster/tcp_internal.h @@ -32,10 +32,10 @@ * on their number */ #define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) -/* +/* * This version number represents quite a lot, unfortunately. It not * only represents the raw network message protocol on the wire but also - * locking semantics of the file system using the protocol. It should + * locking semantics of the file system using the protocol. It should * be somewhere else, I'm sure, but right now it isn't. 
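The tcp_internal.h comment above makes a point worth drawing out: because the version constant encodes locking semantics as well as the wire format, two nodes refuse to interoperate unless their versions match exactly, which forces semantic changes to be rolled out cluster-wide. A toy version gate in the same spirit; the struct and constant are illustrative, not the o2net wire format:

        #include <stdint.h>
        #include <stdio.h>

        /* bumped for wire-format *or* semantic changes */
        #define MY_PROTOCOL_VERSION 11u

        struct handshake {
                uint32_t protocol_version;
        };

        /* returns 0 if the peer may connect, -1 otherwise */
        static int check_handshake(const struct handshake *hs)
        {
                if (hs->protocol_version != MY_PROTOCOL_VERSION) {
                        fprintf(stderr, "peer speaks %u, we speak %u: refusing\n",
                                (unsigned)hs->protocol_version, MY_PROTOCOL_VERSION);
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                struct handshake good = { MY_PROTOCOL_VERSION };
                struct handshake old = { 10 };
                printf("good peer: %d\n", check_handshake(&good));
                printf("old peer: %d\n", check_handshake(&old));
                return 0;
        }

An exact-match gate is the simplest policy; anything looser would need a negotiated compatibility table.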
* * With version 11, we separate out the filesystem locking portion. The diff --git a/trunk/fs/ocfs2/dlm/dlmapi.h b/trunk/fs/ocfs2/dlm/dlmapi.h index 3cfa114aa391..b5786a787fab 100644 --- a/trunk/fs/ocfs2/dlm/dlmapi.h +++ b/trunk/fs/ocfs2/dlm/dlmapi.h @@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err); mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ } while (0) -#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_UNUSED1 0x01 #define DLM_LKSB_PUT_LVB 0x02 #define DLM_LKSB_GET_LVB 0x04 #define DLM_LKSB_UNUSED2 0x08 diff --git a/trunk/fs/ocfs2/dlm/dlmast.c b/trunk/fs/ocfs2/dlm/dlmast.c index dccc439fa087..01cf8cc3d286 100644 --- a/trunk/fs/ocfs2/dlm/dlmast.c +++ b/trunk/fs/ocfs2/dlm/dlmast.c @@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_lock_put(lock); /* free up the reserved bast that we are cancelling. * guaranteed that this will not be the last reserved - * ast because *both* an ast and a bast were reserved + * ast because *both* an ast and a bast were reserved * to get to this point. the res->spinlock will not be * taken here */ dlm_lockres_release_ast(dlm, res); diff --git a/trunk/fs/ocfs2/dlm/dlmconvert.c b/trunk/fs/ocfs2/dlm/dlmconvert.c index f283bce776b4..ca96bce50e18 100644 --- a/trunk/fs/ocfs2/dlm/dlmconvert.c +++ b/trunk/fs/ocfs2/dlm/dlmconvert.c @@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. */ - dlm_wait_for_node_death(dlm, res->owner, + dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " diff --git a/trunk/fs/ocfs2/dlm/dlmdebug.c b/trunk/fs/ocfs2/dlm/dlmdebug.c index 0cd24cf54396..42b0bad7a612 100644 --- a/trunk/fs/ocfs2/dlm/dlmdebug.c +++ b/trunk/fs/ocfs2/dlm/dlmdebug.c @@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) assert_spin_locked(&res->spinlock); stringify_lockname(res->lockname.name, res->lockname.len, - buf, sizeof(buf)); + buf, sizeof(buf) - 1); printk("lockres: %s, owner=%u, state=%u\n", buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", diff --git a/trunk/fs/ocfs2/dlm/dlmdomain.c b/trunk/fs/ocfs2/dlm/dlmdomain.c index 988c9055fd4e..0334000676d3 100644 --- a/trunk/fs/ocfs2/dlm/dlmdomain.c +++ b/trunk/fs/ocfs2/dlm/dlmdomain.c @@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, } /* Once the dlm ctxt is marked as leaving then we don't want - * to be put in someone's domain map. + * to be put in someone's domain map. * Also, explicitly disallow joining at certain troublesome * times (ie. during recovery). */ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { diff --git a/trunk/fs/ocfs2/dlm/dlmlock.c b/trunk/fs/ocfs2/dlm/dlmlock.c index 733337772671..437698e9465f 100644 --- a/trunk/fs/ocfs2/dlm/dlmlock.c +++ b/trunk/fs/ocfs2/dlm/dlmlock.c @@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); - } else if (dlm_is_recovery_lock(res->lockname.name, + } else if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* special case for the $RECOVERY lock. 
* there will never be an AST delivered to put diff --git a/trunk/fs/ocfs2/dlm/dlmmaster.c b/trunk/fs/ocfs2/dlm/dlmmaster.c index a659606dcb95..03ccf9a7b1f4 100644 --- a/trunk/fs/ocfs2/dlm/dlmmaster.c +++ b/trunk/fs/ocfs2/dlm/dlmmaster.c @@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) struct dlm_master_list_entry *mle; assert_spin_locked(&dlm->spinlock); - + list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { if (node_up) dlm_mle_node_up(dlm, mle, NULL, idx); @@ -833,7 +833,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, __dlm_insert_mle(dlm, mle); /* still holding the dlm spinlock, check the recovery map - * to see if there are any nodes that still need to be + * to see if there are any nodes that still need to be * considered. these will not appear in the mle nodemap * but they might own this lockres. wait on them. */ bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); @@ -883,7 +883,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, msleep(500); } continue; - } + } dlm_kick_recovery_thread(dlm); msleep(1000); @@ -939,8 +939,8 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, res->lockname.name, blocked); if (++tries > 20) { mlog(ML_ERROR, "%s:%.*s: spinning on " - "dlm_wait_for_lock_mastery, blocked=%d\n", - dlm->name, res->lockname.len, + "dlm_wait_for_lock_mastery, blocked=%d\n", + dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); dlm_print_one_mle(mle); @@ -1029,7 +1029,7 @@ static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); b = (mle->type == DLM_MLE_BLOCK); if ((*blocked && !b) || (!*blocked && b)) { - mlog(0, "%s:%.*s: status change: old=%d new=%d\n", + mlog(0, "%s:%.*s: status change: old=%d new=%d\n", dlm->name, res->lockname.len, res->lockname.name, *blocked, b); *blocked = b; @@ -1602,7 +1602,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, } mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", dlm->node_num, res->lockname.len, res->lockname.name); - ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, + ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, DLM_ASSERT_MASTER_MLE_CLEANUP); if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); @@ -1701,7 +1701,7 @@ static int dlm_do_assert_master(struct dlm_ctxt *dlm, if (r & DLM_ASSERT_RESPONSE_REASSERT) { mlog(0, "%.*s: node %u create mles on other " - "nodes and requests a re-assert\n", + "nodes and requests a re-assert\n", namelen, lockname, to); reassert = 1; } @@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); goto done; - } + } } } spin_unlock(&dlm->master_lock); @@ -1883,7 +1883,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, int extra_ref = 0; int nn = -1; int rr, err = 0; - + spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) extra_ref = 1; @@ -1891,7 +1891,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, /* MASTER mle: if any bits set in the response map * then the calling node needs to re-assert to clear * up nodes that this node contacted */ - while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, + while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, nn+1)) < 
O2NM_MAX_NODES) { if (nn != dlm->node_num && nn != assert->node_idx) master_request = 1; @@ -2002,7 +2002,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); - *ret_data = (void *)res; + *ret_data = (void *)res; dlm_put(dlm); return -EINVAL; } @@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, item->u.am.request_from = request_from; item->u.am.flags = flags; - if (ignore_higher) - mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, + if (ignore_higher) + mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, res->lockname.name); - + spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); @@ -2133,7 +2133,7 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) * think that $RECOVERY is currently mastered by a dead node. If so, * we wait a short time to allow that node to get notified by its own * heartbeat stack, then check again. All $RECOVERY lock resources - * mastered by dead nodes are purged when the heartbeat callback is + * mastered by dead nodes are purged when the heartbeat callback is * fired, so we can know for sure that it is safe to continue once * the node returns a live node or no node. */ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, @@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, ret = -EAGAIN; } spin_unlock(&dlm->spinlock); - mlog(0, "%s: reco lock master is %u\n", dlm->name, + mlog(0, "%s: reco lock master is %u\n", dlm->name, master); break; } @@ -2602,7 +2602,7 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: timed out during migration\n", dlm->name, res->lockname.len, res->lockname.name); - /* avoid hang during shutdown when migrating lockres + /* avoid hang during shutdown when migrating lockres * to a node which also goes down */ if (dlm_is_node_dead(dlm, target)) { mlog(0, "%s:%.*s: expected migration " @@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); spin_unlock(&res->spinlock); - /* target has died, so make the caller break out of the + /* target has died, so make the caller break out of the * wait_event, but caller must recheck the domain_map */ spin_lock(&dlm->spinlock); if (!test_bit(mig_target, dlm->domain_map)) diff --git a/trunk/fs/ocfs2/dlm/dlmrecovery.c b/trunk/fs/ocfs2/dlm/dlmrecovery.c index 344bcf90cbf4..2f9e4e19a4f2 100644 --- a/trunk/fs/ocfs2/dlm/dlmrecovery.c +++ b/trunk/fs/ocfs2/dlm/dlmrecovery.c @@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, if (lock->ml.node == dead_node) { mlog(0, "AHA! 
there was " "a $RECOVERY lock for dead " - "node %u (%s)!\n", + "node %u (%s)!\n", dead_node, dlm->name); list_del_init(&lock->list); dlm_lock_put(lock); @@ -1164,39 +1164,6 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, mres->master = master; } -static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, - struct dlm_migratable_lockres *mres, - int queue) -{ - if (!lock->lksb) - return; - - /* Ignore lvb in all locks in the blocked list */ - if (queue == DLM_BLOCKED_LIST) - return; - - /* Only consider lvbs in locks with granted EX or PR lock levels */ - if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) - return; - - if (dlm_lvb_is_empty(mres->lvb)) { - memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); - return; - } - - /* Ensure the lvb copied for migration matches in other valid locks */ - if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) - return; - - mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " - "node=%u\n", - dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), - dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), - lock->lockres->lockname.len, lock->lockres->lockname.name, - lock->ml.node); - dlm_print_one_lock_resource(lock->lockres); - BUG(); -} /* returns 1 if this lock fills the network structure, * 0 otherwise */ @@ -1214,7 +1181,20 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock, ml->list = queue; if (lock->lksb) { ml->flags = lock->lksb->flags; - dlm_prepare_lvb_for_migration(lock, mres, queue); + /* send our current lvb */ + if (ml->type == LKM_EXMODE || + ml->type == LKM_PRMODE) { + /* if it is already set, this had better be a PR + * and it has to match */ + if (!dlm_lvb_is_empty(mres->lvb) && + (ml->type == LKM_EXMODE || + memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { + mlog(ML_ERROR, "mismatched lvbs!\n"); + dlm_print_one_lock_resource(lock->lockres); + BUG(); + } + memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); + } } ml->node = lock->ml.node; mres->num_locks++; @@ -1750,7 +1730,6 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock *lock = NULL; u8 from = O2NM_MAX_NODES; unsigned int added = 0; - __be64 c; mlog(0, "running %d locks for this lockres\n", mres->num_locks); for (i=0; inum_locks; i++) { @@ -1798,48 +1777,19 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, /* lock is always created locally first, and * destroyed locally last. 
it must be on the list */ if (!lock) { - c = ml->cookie; - mlog(ML_ERROR, "Could not find local lock " - "with cookie %u:%llu, node %u, " - "list %u, flags 0x%x, type %d, " - "conv %d, highest blocked %d\n", - dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - ml->node, ml->list, ml->flags, ml->type, - ml->convert_type, ml->highest_blocked); - __dlm_print_one_lock_resource(res); - BUG(); - } - - if (lock->ml.node != ml->node) { - c = lock->ml.cookie; - mlog(ML_ERROR, "Mismatched node# in lock " - "cookie %u:%llu, name %.*s, node %u\n", + __be64 c = ml->cookie; + mlog(ML_ERROR, "could not find local lock " + "with cookie %u:%llu!\n", dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - res->lockname.len, res->lockname.name, - lock->ml.node); - c = ml->cookie; - mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " - "node %u, list %u, flags 0x%x, type %d, " - "conv %d, highest blocked %d\n", - dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - ml->node, ml->list, ml->flags, ml->type, - ml->convert_type, ml->highest_blocked); + dlm_get_lock_cookie_seq(be64_to_cpu(c))); __dlm_print_one_lock_resource(res); BUG(); } + BUG_ON(lock->ml.node != ml->node); if (tmpq != queue) { - c = ml->cookie; - mlog(0, "Lock cookie %u:%llu was on list %u " - "instead of list %u for %.*s\n", - dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c)), - j, ml->list, res->lockname.len, - res->lockname.name); - __dlm_print_one_lock_resource(res); + mlog(0, "lock was on %u instead of %u for %.*s\n", + j, ml->list, res->lockname.len, res->lockname.name); spin_unlock(&res->spinlock); continue; } @@ -1889,7 +1839,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, * the lvb. 
*/ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } else { - /* otherwise, the node is sending its + /* otherwise, the node is sending its * most recent valid lvb info */ BUG_ON(ml->type != LKM_EXMODE && ml->type != LKM_PRMODE); @@ -1936,7 +1886,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); list_for_each_entry(lock, queue, list) { if (lock->ml.cookie == ml->cookie) { - c = lock->ml.cookie; + __be64 c = lock->ml.cookie; mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " "exists on this lockres!\n", dlm->name, res->lockname.len, res->lockname.name, @@ -2164,7 +2114,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, assert_spin_locked(&res->spinlock); if (res->owner == dlm->node_num) - /* if this node owned the lockres, and if the dead node + /* if this node owned the lockres, and if the dead node * had an EX when he died, blank out the lvb */ search_node = dead_node; else { @@ -2202,7 +2152,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, /* this node is the lockres master: * 1) remove any stale locks for the dead node - * 2) if the dead node had an EX when he died, blank out the lvb + * 2) if the dead node had an EX when he died, blank out the lvb */ assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); @@ -2243,12 +2193,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: freed %u locks for dead node %u, " "dropping ref from lockres\n", dlm->name, res->lockname.len, res->lockname.name, freed, dead_node); - if(!test_bit(dead_node, res->refmap)) { - mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " - "but ref was not set\n", dlm->name, - res->lockname.len, res->lockname.name, freed, dead_node); - __dlm_print_one_lock_resource(res); - } + BUG_ON(!test_bit(dead_node, res->refmap)); dlm_lockres_clear_refmap_bit(dead_node, res); } else if (test_bit(dead_node, res->refmap)) { mlog(0, "%s:%.*s: dead node %u had a ref, but had " @@ -2315,7 +2260,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) } spin_unlock(&res->spinlock); continue; - } + } spin_lock(&res->spinlock); /* zero the lvb if necessary */ dlm_revalidate_lvb(dlm, res, dead_node); @@ -2466,7 +2411,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) * this function on each node racing to become the recovery * master will not stop attempting this until either: * a) this node gets the EX (and becomes the recovery master), - * or b) dlm->reco.new_master gets set to some nodenum + * or b) dlm->reco.new_master gets set to some nodenum * != O2NM_INVALID_NODE_NUM (another node will do the reco). * so each time a recovery master is needed, the entire cluster * will sync at this point. if the new master dies, that will @@ -2479,7 +2424,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); -again: +again: memset(&lksb, 0, sizeof(lksb)); ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, @@ -2492,8 +2437,8 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) if (ret == DLM_NORMAL) { mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", dlm->name, dlm->node_num); - - /* got the EX lock. check to see if another node + + /* got the EX lock. 
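The dlm_pick_recovery_master comment above describes an election: every node races dlmlock() for the exclusive $RECOVERY lock, the winner becomes recovery master, and the losers back off once dlm->reco.new_master is set. A compressed pthread sketch of the same race; new_master, INVALID_NODE, and the trylock standing in for the EX request are all illustrative, not the dlm API:

        #include <pthread.h>
        #include <stdio.h>

        #define INVALID_NODE (-1)

        static pthread_mutex_t reco_lock = PTHREAD_MUTEX_INITIALIZER;  /* the "EX lock" */
        static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t state_cv = PTHREAD_COND_INITIALIZER;
        static int new_master = INVALID_NODE;

        static void *pick_recovery_master(void *arg)
        {
                long me = (long)arg;

                if (pthread_mutex_trylock(&reco_lock) == 0) {
                        /* got the lock: re-check that nobody already won,
                         * mirroring the check the dlm does right after EX */
                        pthread_mutex_lock(&state_lock);
                        if (new_master == INVALID_NODE) {
                                new_master = (int)me;
                                printf("node %ld is recovery master\n", me);
                                pthread_cond_broadcast(&state_cv);
                        }
                        pthread_mutex_unlock(&state_lock);
                        /* ... recovery would run here ... */
                        pthread_mutex_unlock(&reco_lock);
                        return NULL;
                }

                /* lost the race: wait for the winner to announce itself */
                pthread_mutex_lock(&state_lock);
                while (new_master == INVALID_NODE)
                        pthread_cond_wait(&state_cv, &state_lock);
                pthread_mutex_unlock(&state_lock);
                return NULL;
        }

        int main(void)
        {
                pthread_t t[3];
                for (long i = 0; i < 3; i++)
                        pthread_create(&t[i], NULL, pick_recovery_master, (void *)i);
                for (int i = 0; i < 3; i++)
                        pthread_join(t[i], NULL);
                return 0;
        }

The effect matches the comment: whenever a master is needed, every node funnels through the same lock, so the cluster synchronizes at that point.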
check to see if another node * just became the reco master */ if (dlm_reco_master_ready(dlm)) { mlog(0, "%s: got reco EX lock, but %u will " @@ -2506,12 +2451,12 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) /* see if recovery was already finished elsewhere */ spin_lock(&dlm->spinlock); if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { - status = -EINVAL; + status = -EINVAL; mlog(0, "%s: got reco EX lock, but " "node got recovered already\n", dlm->name); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { mlog(ML_ERROR, "%s: new master is %u " - "but no dead node!\n", + "but no dead node!\n", dlm->name, dlm->reco.new_master); BUG(); } @@ -2523,7 +2468,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) * set the master and send the messages to begin recovery */ if (!status) { mlog(0, "%s: dead=%u, this=%u, sending " - "begin_reco now\n", dlm->name, + "begin_reco now\n", dlm->name, dlm->reco.dead_node, dlm->node_num); status = dlm_send_begin_reco_message(dlm, dlm->reco.dead_node); @@ -2556,7 +2501,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", dlm->name, dlm->node_num); /* another node is master. wait on - * reco.new_master != O2NM_INVALID_NODE_NUM + * reco.new_master != O2NM_INVALID_NODE_NUM * for at most one second */ wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_reco_master_ready(dlm), @@ -2644,13 +2589,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) "begin reco msg (%d)\n", dlm->name, nodenum, ret); ret = 0; } - - /* - * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, - * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. - * We are handling both for compatibility reasons. - */ - if (ret == -EAGAIN || ret == EAGAIN) { + if (ret == -EAGAIN) { mlog(0, "%s: trying to start recovery of node " "%u, but node %u is waiting for last recovery " "to complete, backoff for a bit\n", dlm->name, @@ -2660,7 +2599,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) } if (ret < 0) { struct dlm_lock_resource *res; - /* this is now a serious problem, possibly ENOMEM + /* this is now a serious problem, possibly ENOMEM * in the network stack. 
must retry */ mlog_errno(ret); mlog(ML_ERROR, "begin reco of dlm %s to node %u " @@ -2673,7 +2612,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) } else { mlog(ML_ERROR, "recovery lock not found\n"); } - /* sleep for a bit in hopes that we can avoid + /* sleep for a bit in hopes that we can avoid * another ENOMEM */ msleep(100); goto retry; @@ -2725,7 +2664,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " - "node %u changing it to %u\n", dlm->name, + "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); @@ -2791,8 +2730,8 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) if (ret < 0) { mlog_errno(ret); if (dlm_is_host_down(ret)) { - /* this has no effect on this recovery - * session, so set the status to zero to + /* this has no effect on this recovery + * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); @@ -2829,7 +2768,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); - + spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { diff --git a/trunk/fs/ocfs2/dlm/dlmunlock.c b/trunk/fs/ocfs2/dlm/dlmunlock.c index 49e29ecd0201..00f53b2aea76 100644 --- a/trunk/fs/ocfs2/dlm/dlmunlock.c +++ b/trunk/fs/ocfs2/dlm/dlmunlock.c @@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); - } else if (status == DLM_RECOVERING || - status == DLM_MIGRATING || + } else if (status == DLM_RECOVERING || + status == DLM_MIGRATING || status == DLM_FORWARD) { /* must clear the actions because this unlock * is about to be retried. cannot free or do @@ -661,14 +661,14 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, if (call_ast) { mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { - /* it is possible that there is one last bast + /* it is possible that there is one last bast * pending. make sure it is flushed, then * call the unlockast. * not an issue if this is a mastered remotely, * since this lock has been removed from the * lockres queues and cannot be found. */ dlm_kick_thread(dlm, NULL); - wait_event(dlm->ast_wq, + wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } (*unlockast)(data, status); diff --git a/trunk/fs/ocfs2/dlmglue.c b/trunk/fs/ocfs2/dlmglue.c index e044019cb3b1..c5e4a49e3a12 100644 --- a/trunk/fs/ocfs2/dlmglue.c +++ b/trunk/fs/ocfs2/dlmglue.c @@ -875,14 +875,6 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; - - /* - * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing - * the OCFS2_LOCK_BUSY flag to prevent the dc thread from - * downconverting the lock before the upconvert has fully completed. 
- */ - lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); - lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); mlog_exit_void(); @@ -915,6 +907,8 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, assert_spin_locked(&lockres->l_lock); + lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); + if (level > lockres->l_blocking) { /* only schedule a downconvert if we haven't already scheduled * one that goes low enough to satisfy the level we're @@ -927,9 +921,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, lockres->l_blocking = level; } - if (needs_downconvert) - lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); - mlog_exit(needs_downconvert); return needs_downconvert; } @@ -1142,7 +1133,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, mlog_entry_void(); spin_lock_irqsave(&lockres->l_lock, flags); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); - lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); if (convert) lockres->l_action = OCFS2_AST_INVALID; else @@ -1333,13 +1323,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, again: wait = 0; - spin_lock_irqsave(&lockres->l_lock, flags); - if (catch_signals && signal_pending(current)) { ret = -ERESTARTSYS; - goto unlock; + goto out; } + spin_lock_irqsave(&lockres->l_lock, flags); + mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, "Cluster lock called on freeing lockres %s! flags " "0x%lx\n", lockres->l_name, lockres->l_flags); @@ -1356,25 +1346,6 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, goto unlock; } - if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) { - /* - * We've upconverted. If the lock now has a level we can - * work with, we take it. If, however, the lock is not at the - * required level, we go thru the full cycle. One way this could - * happen is if a process requesting an upconvert to PR is - * closely followed by another requesting upconvert to an EX. - * If the process requesting EX lands here, we want it to - * continue attempting to upconvert and let the process - * requesting PR take the lock. - * If multiple processes request upconvert to PR, the first one - * here will take the lock. The others will have to go thru the - * OCFS2_LOCK_BLOCKED check to ensure that there is no pending - * downconvert request. - */ - if (level <= lockres->l_level) - goto update_holders; - } - if (lockres->l_flags & OCFS2_LOCK_BLOCKED && !ocfs2_may_continue_on_blocked_lock(lockres, level)) { /* is the lock is currently blocked on behalf of @@ -1445,14 +1416,11 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, goto again; } -update_holders: /* Ok, if we get here then we're good to go. */ ocfs2_inc_holders(lockres, level); ret = 0; unlock: - lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); - spin_unlock_irqrestore(&lockres->l_lock, flags); out: /* @@ -3187,7 +3155,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb, /* Mark the lockres as being dropped. It will no longer be * queued if blocking, but we still may have to wait on it * being dequeued from the downconvert thread before we can consider - * it safe to drop. + * it safe to drop. * * You can *not* attempt to call cluster_lock on this lockres anymore. 
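The flag deleted in these hunks encodes an ordering guarantee: OCFS2_LOCK_UPCONVERT_FINISHING is set before OCFS2_LOCK_BUSY is cleared, so the downconvert thread can never observe "not busy" without also seeing "upconvert finishing". A single-threaded userspace model of that ordering, with abbreviated flag names; this sketches the invariant, not the locking code itself:

#include <stdio.h>

#define LK_BUSY			0x1
#define LK_UPCONVERT_FINISHING	0x2

static unsigned long l_flags = LK_BUSY;

static void upconvert_ast(void)
{
	l_flags |= LK_UPCONVERT_FINISHING;	/* first: fence off the dc thread */
	l_flags &= ~LK_BUSY;			/* then: mark the op complete */
}

static int downconvert_may_run(void)
{
	/* mirrors the "requeue, don't downconvert yet" test */
	return !(l_flags & (LK_BUSY | LK_UPCONVERT_FINISHING));
}

int main(void)
{
	printf("during upconvert: %d\n", downconvert_may_run());	/* 0 */
	upconvert_ast();
	printf("just after ast:   %d\n", downconvert_may_run());	/* 0 */
	l_flags &= ~LK_UPCONVERT_FINISHING;	/* upconvert requestor done */
	printf("after requestor:  %d\n", downconvert_may_run());	/* 1 */
	return 0;
}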
*/ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) @@ -3384,7 +3352,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, unsigned long flags; int blocking; int new_level; - int level; int ret = 0; int set_lvb = 0; unsigned int gen; @@ -3393,17 +3360,9 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, spin_lock_irqsave(&lockres->l_lock, flags); -recheck: - /* - * Is it still blocking? If not, we have no more work to do. - */ - if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) { - BUG_ON(lockres->l_blocking != DLM_LOCK_NL); - spin_unlock_irqrestore(&lockres->l_lock, flags); - ret = 0; - goto leave; - } + BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); +recheck: if (lockres->l_flags & OCFS2_LOCK_BUSY) { /* XXX * This is a *big* race. The OCFS2_LOCK_PENDING flag @@ -3442,31 +3401,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, goto leave; } - /* - * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is - * set when the ast is received for an upconvert just before the - * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast - * on the heels of the ast, we want to delay the downconvert just - * enough to allow the up requestor to do its task. Because this - * lock is in the blocked queue, the lock will be downconverted - * as soon as the requestor is done with the lock. - */ - if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) - goto leave_requeue; - - /* - * How can we block and yet be at NL? We were trying to upconvert - * from NL and got canceled. The code comes back here, and now - * we notice and clear BLOCKING. - */ - if (lockres->l_level == DLM_LOCK_NL) { - BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders); - lockres->l_blocking = DLM_LOCK_NL; - lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); - spin_unlock_irqrestore(&lockres->l_lock, flags); - goto leave; - } - /* if we're blocking an exclusive and we have *any* holders, * then requeue. */ if ((lockres->l_blocking == DLM_LOCK_EX) @@ -3504,7 +3438,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, * may sleep, so we save off a copy of what we're blocking as * it may change while we're not holding the spin lock. */ blocking = lockres->l_blocking; - level = lockres->l_level; spin_unlock_irqrestore(&lockres->l_lock, flags); ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); @@ -3513,7 +3446,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, goto leave; spin_lock_irqsave(&lockres->l_lock, flags); - if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) { + if (blocking != lockres->l_blocking) { /* If this changed underneath us, then we can't drop * it just yet. 
*/ goto recheck; diff --git a/trunk/fs/ocfs2/export.c b/trunk/fs/ocfs2/export.c index 19ad145d2af3..15713cbb865c 100644 --- a/trunk/fs/ocfs2/export.c +++ b/trunk/fs/ocfs2/export.c @@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); } - + *max_len = len; bail: diff --git a/trunk/fs/ocfs2/extent_map.c b/trunk/fs/ocfs2/extent_map.c index 5328529e7fd2..d35a27f4523e 100644 --- a/trunk/fs/ocfs2/extent_map.c +++ b/trunk/fs/ocfs2/extent_map.c @@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, emi->ei_clusters += ins->ei_clusters; return 1; } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && - (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && + (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && ins->ei_flags == emi->ei_flags) { emi->ei_phys = ins->ei_phys; emi->ei_cpos = ins->ei_cpos; diff --git a/trunk/fs/ocfs2/file.c b/trunk/fs/ocfs2/file.c index 558ce0312421..06ccf6a86d35 100644 --- a/trunk/fs/ocfs2/file.c +++ b/trunk/fs/ocfs2/file.c @@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode, int ret; offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ - /* ugh. in prepare/commit_write, if from==to==start of block, we + /* ugh. in prepare/commit_write, if from==to==start of block, we ** skip the prepare. make sure we never send an offset for the start ** of a block */ @@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, struct inode *inode = dentry->d_inode; loff_t saved_pos, end; - /* + /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. */ @@ -2013,8 +2013,8 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); - if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || - ((file->f_flags & O_DIRECT) && has_refcount)) { + if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || + (file->f_flags & O_DIRECT && has_refcount)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) @@ -2033,7 +2033,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, pos + count - 1); } - /* + /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that * it can unlock our rw lock. (it's the clustered equivalent of @@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, goto bail; } - /* + /* * buffered reads protect themselves in ->readpage(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. */ @@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * We're fine letting folks race truncates and extending * writes with read across the cluster, just like they can * locally. Hence no rw_lock during read. - * + * * Take and drop the meta data lock to update inode fields * like i_size. This allows the checks down below - * generic_file_aio_read() a chance of actually working. + * generic_file_aio_read() a chance of actually working. 
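In the extent_map.c hunk above, the second merge condition ends up comparing a logical quantity (ins->ei_cpos + ins->ei_clusters) against a physical one (emi->ei_phys). A front-merge is only safe when the two records are contiguous in both the logical and the physical space. A simplified sketch of the intended test, with fields loosely modeled on ocfs2_extent_map_item:

#include <stdio.h>
#include <stdint.h>

struct extent {
	uint32_t cpos;		/* logical start, in clusters */
	uint32_t phys;		/* physical start, in clusters */
	uint32_t clusters;	/* length */
	unsigned int flags;
};

static int can_merge_front(const struct extent *emi, const struct extent *ins)
{
	return ins->phys + ins->clusters == emi->phys &&
	       ins->cpos + ins->clusters == emi->cpos &&   /* cpos vs cpos, not phys */
	       ins->flags == emi->flags;
}

int main(void)
{
	struct extent emi = { .cpos = 10, .phys = 110, .clusters = 4 };
	struct extent ins = { .cpos = 6,  .phys = 106, .clusters = 4 };

	printf("mergeable: %d\n", can_merge_front(&emi, &ins));	/* 1 */
	return 0;
}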
*/ ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (ret < 0) { @@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, bail: if (have_alloc_sem) up_read(&inode->i_alloc_sem); - if (rw_level != -1) + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); mlog_exit(ret); diff --git a/trunk/fs/ocfs2/inode.c b/trunk/fs/ocfs2/inode.c index 88459bdd1ff3..0297fb8982b8 100644 --- a/trunk/fs/ocfs2/inode.c +++ b/trunk/fs/ocfs2/inode.c @@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { status = ocfs2_try_open_lock(inode, 0); if (status) { - make_bad_inode(inode); + make_bad_inode(inode); return status; } } @@ -684,7 +684,7 @@ static int ocfs2_remove_inode(struct inode *inode, return status; } -/* +/* * Serialize with orphan dir recovery. If the process doing * recovery on this orphan dir does an iget() with the dir * i_mutex held, we'll deadlock here. Instead we detect this diff --git a/trunk/fs/ocfs2/ioctl.c b/trunk/fs/ocfs2/ioctl.c index 7d9d9c132cef..31fbb0619510 100644 --- a/trunk/fs/ocfs2/ioctl.c +++ b/trunk/fs/ocfs2/ioctl.c @@ -7,7 +7,6 @@ #include #include -#include #define MLOG_MASK_PREFIX ML_INODE #include @@ -182,10 +181,6 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - bool preserve; - struct reflink_arguments args; - struct inode *inode = file->f_path.dentry->d_inode; - switch (cmd) { case OCFS2_IOC32_GETFLAGS: cmd = OCFS2_IOC_GETFLAGS; @@ -200,15 +195,8 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: - break; case OCFS2_IOC_REFLINK: - if (copy_from_user(&args, (struct reflink_arguments *)arg, - sizeof(args))) - return -EFAULT; - preserve = (args.preserve != 0); - - return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), - compat_ptr(args.new_path), preserve); + break; default: return -ENOIOCTLCMD; } diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c index 9336c60e3a36..bf34c491ae96 100644 --- a/trunk/fs/ocfs2/journal.c +++ b/trunk/fs/ocfs2/journal.c @@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, status = -ENOENT; mlog_errno(status); return status; - } + } mutex_lock(&orphan_dir_inode->i_mutex); status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); diff --git a/trunk/fs/ocfs2/ocfs2.h b/trunk/fs/ocfs2/ocfs2.h index 740f448041e2..9362eea7424b 100644 --- a/trunk/fs/ocfs2/ocfs2.h +++ b/trunk/fs/ocfs2/ocfs2.h @@ -136,10 +136,6 @@ enum ocfs2_unlock_action { #define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a call to dlm_lock. Only exists with BUSY set. 
*/ -#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread - * from downconverting - * before the upconvert - * has completed */ struct ocfs2_lock_res_ops; diff --git a/trunk/fs/ocfs2/ocfs2_fs.h b/trunk/fs/ocfs2/ocfs2_fs.h index 7638a38c32bc..1a1a679e51b5 100644 --- a/trunk/fs/ocfs2/ocfs2_fs.h +++ b/trunk/fs/ocfs2/ocfs2_fs.h @@ -1417,16 +1417,9 @@ static inline int ocfs2_fast_symlink_chars(int blocksize) return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); } -static inline int ocfs2_max_inline_data_with_xattr(int blocksize, - struct ocfs2_dinode *di) +static inline int ocfs2_max_inline_data(int blocksize) { - if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL)) - return blocksize - - offsetof(struct ocfs2_dinode, id2.i_data.id_data) - - di->i_xattr_inline_size; - else - return blocksize - - offsetof(struct ocfs2_dinode, id2.i_data.id_data); + return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); } static inline int ocfs2_extent_recs_per_inode(int blocksize) diff --git a/trunk/fs/ocfs2/refcounttree.c b/trunk/fs/ocfs2/refcounttree.c index 8ae65c9c020c..74db2be75dd6 100644 --- a/trunk/fs/ocfs2/refcounttree.c +++ b/trunk/fs/ocfs2/refcounttree.c @@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; + map_end = (page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; @@ -2957,12 +2957,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, page = grab_cache_page(mapping, page_index); - /* - * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page - * can't be dirtied before we CoW it out. - */ - if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) - BUG_ON(PageDirty(page)); + /* This page can't be dirtied before we CoW it out. */ + BUG_ON(PageDirty(page)); if (!PageUptodate(page)) { ret = block_read_full_page(page, ocfs2_get_block); @@ -3174,7 +3170,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; + map_end = (page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; diff --git a/trunk/fs/ocfs2/stack_o2cb.c b/trunk/fs/ocfs2/stack_o2cb.c index 3038c92af493..e49c41050264 100644 --- a/trunk/fs/ocfs2/stack_o2cb.c +++ b/trunk/fs/ocfs2/stack_o2cb.c @@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; - struct dlm_protocol_version fs_version; + struct dlm_protocol_version dlm_version; BUG_ON(conn == NULL); BUG_ON(o2cb_stack.sp_proto == NULL); @@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. 
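The refcounttree.c hunks above drop a (loff_t) widening cast on page_index. On a 32-bit machine the page index is a 32-bit unsigned long, so shifting it left by the page shift wraps for byte offsets at or beyond 4 GiB; widening before the shift keeps the offset exact. A runnable demonstration with fixed-width stand-ins and an assumed 4 KiB page:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	uint32_t page_index = 0x100000;	/* page at byte offset 4 GiB */

	uint32_t bad  = (page_index + 1) << PAGE_SHIFT;		 /* wraps mod 2^32 */
	int64_t  good = ((int64_t)page_index + 1) << PAGE_SHIFT; /* widened first */

	printf("truncated: %" PRIu32 "\n", bad);	/* 4096 */
	printf("widened:   %" PRId64 "\n", good);	/* 4294971392 */
	return 0;
}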
*/ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); - fs_version.pv_major = conn->cc_version.pv_major; - fs_version.pv_minor = conn->cc_version.pv_minor; + dlm_version.pv_major = conn->cc_version.pv_major; + dlm_version.pv_minor = conn->cc_version.pv_minor; - dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); + dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } - conn->cc_version.pv_major = fs_version.pv_major; - conn->cc_version.pv_minor = fs_version.pv_minor; + conn->cc_version.pv_major = dlm_version.pv_major; + conn->cc_version.pv_minor = dlm_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); diff --git a/trunk/fs/ocfs2/super.c b/trunk/fs/ocfs2/super.c index 755cd49a5ef3..26069917a9f5 100644 --- a/trunk/fs/ocfs2/super.c +++ b/trunk/fs/ocfs2/super.c @@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) "file system, but write access is " "unavailable.\n"); else - mlog_errno(status); + mlog_errno(status); goto read_super_error; } diff --git a/trunk/fs/ocfs2/symlink.c b/trunk/fs/ocfs2/symlink.c index 32499d213fc4..49b133ccbf11 100644 --- a/trunk/fs/ocfs2/symlink.c +++ b/trunk/fs/ocfs2/symlink.c @@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, } memcpy(link, target, len); + nd_set_link(nd, link); bail: - nd_set_link(nd, status ? ERR_PTR(status) : link); brelse(bh); mlog_exit(status); - return NULL; + return status ? ERR_PTR(status) : link; } static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { - char *link = nd_get_link(nd); - if (!IS_ERR(link)) - kfree(link); + char *link = cookie; + + kfree(link); } const struct inode_operations ocfs2_symlink_inode_operations = { diff --git a/trunk/fs/ocfs2/uptodate.c b/trunk/fs/ocfs2/uptodate.c index a0a120e82b97..c61369342a27 100644 --- a/trunk/fs/ocfs2/uptodate.c +++ b/trunk/fs/ocfs2/uptodate.c @@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, } /* Warning: even if it returns true, this does *not* guarantee that - * the block is stored in our inode metadata cache. - * + * the block is stored in our inode metadata cache. + * * This can be called under lock_buffer() */ int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, diff --git a/trunk/include/linux/ata.h b/trunk/include/linux/ata.h index 20f31567ccee..38a6948ce0c2 100644 --- a/trunk/include/linux/ata.h +++ b/trunk/include/linux/ata.h @@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id) return id[ATA_ID_SECTOR_SIZE] & (1 << 13); } -static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) +static inline u8 ata_id_logical_per_physical_sectors(const u16 *id) { - return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); + return id[ATA_ID_SECTOR_SIZE] & 0xf; } static inline int ata_id_has_lba48(const u16 *id) diff --git a/trunk/include/linux/compiler.h b/trunk/include/linux/compiler.h index 188fcae10a99..5be3dab4a695 100644 --- a/trunk/include/linux/compiler.h +++ b/trunk/include/linux/compiler.h @@ -15,7 +15,6 @@ # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); #else @@ -33,7 +32,6 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __acquire(x) (void)0 # define __release(x) (void)0 # define __cond_lock(x,c) (c) -# define __percpu #endif #ifdef __KERNEL__ diff --git a/trunk/include/linux/ima.h b/trunk/include/linux/ima.h index 975837e7d6c0..99dc6d5cf7e5 100644 --- a/trunk/include/linux/ima.h +++ b/trunk/include/linux/ima.h @@ -17,7 +17,7 @@ struct linux_binprm; extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_inode_alloc(struct inode *inode); extern void ima_inode_free(struct inode *inode); -extern int ima_file_check(struct file *file, int mask); +extern int ima_path_check(struct path *path, int mask); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern void ima_counts_get(struct file *file); @@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode) return; } -static inline int ima_file_check(struct file *file, int mask) +static inline int ima_path_check(struct path *path, int mask) { return 0; } diff --git a/trunk/include/net/netns/conntrack.h b/trunk/include/net/netns/conntrack.h index 63d449807d9b..ba1ba0c5efd1 100644 --- a/trunk/include/net/netns/conntrack.h +++ b/trunk/include/net/netns/conntrack.h @@ -11,8 +11,6 @@ struct nf_conntrack_ecache; struct netns_ct { atomic_t count; unsigned int expect_count; - unsigned int htable_size; - struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct hlist_nulls_head unconfirmed; @@ -30,6 +28,5 @@ struct netns_ct { #endif int hash_vmalloc; int expect_vmalloc; - char *slabname; }; #endif diff --git a/trunk/include/net/netns/ipv4.h b/trunk/include/net/netns/ipv4.h index 9a4b8b714079..2eb3814d6258 100644 --- a/trunk/include/net/netns/ipv4.h +++ b/trunk/include/net/netns/ipv4.h @@ -40,7 +40,6 @@ struct netns_ipv4 { struct xt_table *iptable_security; struct xt_table *nat_table; struct hlist_head *nat_bysource; - unsigned int nat_htable_size; int nat_vmalloced; #endif diff --git a/trunk/init/main.c b/trunk/init/main.c index 4cb47a159f02..dac44a9356a5 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void) proc_caches_init(); buffer_init(); key_init(); - radix_tree_init(); security_init(); vfs_caches_init(totalram_pages); + radix_tree_init(); signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index e2ab064c6d41..7faaa32fbf4f 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -880,7 +880,6 @@ void getboottime(struct timespec *ts) set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } -EXPORT_SYMBOL_GPL(getboottime); /** * monotonic_to_bootbased - Convert the monotonic time to boot based. 
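The ata.h hunk above changes what ata_id_logical_per_physical_sectors() returns. In IDENTIFY DEVICE data, word 106 carries log2 of the logical-per-physical sector count in bits 3:0, so the count is 1 << field rather than the raw field, and once shifted the result can reach 2^15 and no longer fits in a u8. A small sketch; the sample word value is illustrative only:

#include <stdio.h>
#include <stdint.h>

static uint16_t logical_per_physical(uint16_t word106)
{
	return 1u << (word106 & 0xf);	/* bits 3:0 are a log2, not a count */
}

int main(void)
{
	uint16_t word106 = 0x6003;	/* e.g. a 4096B-physical/512e drive: log2 = 3 */

	printf("%u logical sectors per physical sector\n",
	       logical_per_physical(word106));	/* 8 */
	return 0;
}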
@@ -890,7 +889,6 @@ void monotonic_to_bootbased(struct timespec *ts) { *ts = timespec_add_safe(*ts, total_sleep_time); } -EXPORT_SYMBOL_GPL(monotonic_to_bootbased); unsigned long get_seconds(void) { diff --git a/trunk/mm/migrate.c b/trunk/mm/migrate.c index 9a0db5bbabe4..efddbf0926b2 100644 --- a/trunk/mm/migrate.c +++ b/trunk/mm/migrate.c @@ -912,9 +912,6 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, goto out_pm; err = -ENODEV; - if (node < 0 || node >= MAX_NUMNODES) - goto out_pm; - if (!node_state(node, N_HIGH_MEMORY)) goto out_pm; diff --git a/trunk/net/9p/client.c b/trunk/net/9p/client.c index 09d4f1e2e4a8..8af95b2dddd6 100644 --- a/trunk/net/9p/client.c +++ b/trunk/net/9p/client.c @@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); static int parse_opts(char *opts, struct p9_client *clnt) { - char *options, *tmp_options; + char *options; char *p; substring_t args[MAX_OPT_ARGS]; int option; @@ -81,13 +81,12 @@ static int parse_opts(char *opts, struct p9_client *clnt) if (!opts) return 0; - tmp_options = kstrdup(opts, GFP_KERNEL); - if (!tmp_options) { + options = kstrdup(opts, GFP_KERNEL); + if (!options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } - options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -109,13 +108,6 @@ static int parse_opts(char *opts, struct p9_client *clnt) break; case Opt_trans: clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); - if(clnt->trans_mod == NULL) { - P9_DPRINTK(P9_DEBUG_ERROR, - "Could not find request transport: %s\n", - (char *) &args[0]); - ret = -EINVAL; - goto free_and_return; - } break; case Opt_legacy: clnt->dotu = 0; @@ -125,8 +117,7 @@ static int parse_opts(char *opts, struct p9_client *clnt) } } -free_and_return: - kfree(tmp_options); + kfree(options); return ret; } @@ -676,12 +667,18 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) clnt->trans = NULL; spin_lock_init(&clnt->lock); INIT_LIST_HEAD(&clnt->fidlist); + clnt->fidpool = p9_idpool_create(); + if (IS_ERR(clnt->fidpool)) { + err = PTR_ERR(clnt->fidpool); + clnt->fidpool = NULL; + goto error; + } p9_tag_init(clnt); err = parse_opts(options, clnt); if (err < 0) - goto free_client; + goto error; if (!clnt->trans_mod) clnt->trans_mod = v9fs_get_default_trans(); @@ -690,14 +687,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) err = -EPROTONOSUPPORT; P9_DPRINTK(P9_DEBUG_ERROR, "No transport defined or default transport\n"); - goto free_client; - } - - clnt->fidpool = p9_idpool_create(); - if (IS_ERR(clnt->fidpool)) { - err = PTR_ERR(clnt->fidpool); - clnt->fidpool = NULL; - goto put_trans; + goto error; } P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", @@ -705,25 +695,19 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) err = clnt->trans_mod->create(clnt, dev_name, options); if (err) - goto destroy_fidpool; + goto error; if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; err = p9_client_version(clnt); if (err) - goto close_trans; + goto error; return clnt; -close_trans: - clnt->trans_mod->close(clnt); -destroy_fidpool: - p9_idpool_destroy(clnt->fidpool); -put_trans: - v9fs_put_trans(clnt->trans_mod); -free_client: - kfree(clnt); +error: + p9_client_destroy(clnt); return ERR_PTR(err); } EXPORT_SYMBOL(p9_client_create); @@ -1230,11 +1214,10 @@ static int p9_client_statsize(struct p9_wstat *wst, int 
optional) { int ret; - /* NOTE: size shouldn't include its own length */ /* size[2] type[2] dev[4] qid[13] */ /* mode[4] atime[4] mtime[4] length[8]*/ /* name[s] uid[s] gid[s] muid[s] */ - ret = 2+4+13+4+4+4+8+2+2+2+2; + ret = 2+2+4+13+4+4+4+8+2+2+2+2; if (wst->name) ret += strlen(wst->name); @@ -1275,7 +1258,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) wst->name, wst->uid, wst->gid, wst->muid, wst->extension, wst->n_uid, wst->n_gid, wst->n_muid); - req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst); + req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst); if (IS_ERR(req)) { err = PTR_ERR(req); goto error; diff --git a/trunk/net/9p/trans_fd.c b/trunk/net/9p/trans_fd.c index 31d0b05582a9..be1cb909d8c0 100644 --- a/trunk/net/9p/trans_fd.c +++ b/trunk/net/9p/trans_fd.c @@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) char *p; substring_t args[MAX_OPT_ARGS]; int option; - char *options, *tmp_options; + char *options; int ret; opts->port = P9_PORT; @@ -724,13 +724,12 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) if (!params) return 0; - tmp_options = kstrdup(params, GFP_KERNEL); - if (!tmp_options) { + options = kstrdup(params, GFP_KERNEL); + if (!options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } - options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -761,8 +760,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) continue; } } - - kfree(tmp_options); + kfree(options); return 0; } diff --git a/trunk/net/9p/trans_rdma.c b/trunk/net/9p/trans_rdma.c index 2c95a89c0f46..65cb29db03f8 100644 --- a/trunk/net/9p/trans_rdma.c +++ b/trunk/net/9p/trans_rdma.c @@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) char *p; substring_t args[MAX_OPT_ARGS]; int option; - char *options, *tmp_options; + char *options; int ret; opts->port = P9_PORT; @@ -177,13 +177,12 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) if (!params) return 0; - tmp_options = kstrdup(params, GFP_KERNEL); - if (!tmp_options) { + options = kstrdup(params, GFP_KERNEL); + if (!options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } - options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -217,7 +216,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) } /* RQ must be at least as large as the SQ */ opts->rq_depth = max(opts->rq_depth, opts->sq_depth); - kfree(tmp_options); + kfree(options); return 0; } diff --git a/trunk/net/9p/trans_virtio.c b/trunk/net/9p/trans_virtio.c index cb50f4ae5eef..ea1e3daabefe 100644 --- a/trunk/net/9p/trans_virtio.c +++ b/trunk/net/9p/trans_virtio.c @@ -102,8 +102,7 @@ static void p9_virtio_close(struct p9_client *client) struct virtio_chan *chan = client->trans; mutex_lock(&virtio_9p_lock); - if (chan) - chan->inuse = false; + chan->inuse = false; mutex_unlock(&virtio_9p_lock); } @@ -312,7 +311,6 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) } client->trans = (void *)chan; - client->status = Connected; chan->client = client; return 0; diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index b10e3cdb08f8..b7c4224f4e7d 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -377,9 +377,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 if (acl->state == 
BT_CONNECTED && (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { - acl->power_save = 1; - hci_conn_enter_active_mode(acl); - if (lmp_esco_capable(hdev)) hci_setup_sync(sco, acl->handle); else diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index 592da5c909c1..28517bad796c 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -1699,7 +1699,6 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu break; case 0x1c: /* SCO interval rejected */ - case 0x1a: /* Unsupported Remote Feature */ case 0x1f: /* Unspecified error */ if (conn->out && conn->attempt < 2) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | diff --git a/trunk/net/bluetooth/hidp/core.c b/trunk/net/bluetooth/hidp/core.c index fc6ec1e72652..6cf526d06e21 100644 --- a/trunk/net/bluetooth/hidp/core.c +++ b/trunk/net/bluetooth/hidp/core.c @@ -703,9 +703,29 @@ static void hidp_close(struct hid_device *hid) static int hidp_parse(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; + struct hidp_connadd_req *req = session->req; + unsigned char *buf; + int ret; + + buf = kmalloc(req->rd_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, req->rd_data, req->rd_size)) { + kfree(buf); + return -EFAULT; + } + + ret = hid_parse_report(session->hid, buf, req->rd_size); + + kfree(buf); + + if (ret) + return ret; + + session->req = NULL; - return hid_parse_report(session->hid, session->rd_data, - session->rd_size); + return 0; } static int hidp_start(struct hid_device *hid) @@ -750,24 +770,12 @@ static int hidp_setup_hid(struct hidp_session *session, bdaddr_t src, dst; int err; - session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); - if (!session->rd_data) - return -ENOMEM; - - if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { - err = -EFAULT; - goto fault; - } - session->rd_size = req->rd_size; - hid = hid_allocate_device(); - if (IS_ERR(hid)) { - err = PTR_ERR(hid); - goto fault; - } + if (IS_ERR(hid)) + return PTR_ERR(hid); session->hid = hid; - + session->req = req; hid->driver_data = session; baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); @@ -798,10 +806,6 @@ static int hidp_setup_hid(struct hidp_session *session, hid_destroy_device(hid); session->hid = NULL; -fault: - kfree(session->rd_data); - session->rd_data = NULL; - return err; } @@ -896,9 +900,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, session->hid = NULL; } - kfree(session->rd_data); - session->rd_data = NULL; - purge: skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); diff --git a/trunk/net/bluetooth/hidp/hidp.h b/trunk/net/bluetooth/hidp/hidp.h index a4e215d50c10..faf3d74c3586 100644 --- a/trunk/net/bluetooth/hidp/hidp.h +++ b/trunk/net/bluetooth/hidp/hidp.h @@ -154,9 +154,7 @@ struct hidp_session { struct sk_buff_head ctrl_transmit; struct sk_buff_head intr_transmit; - /* Report descriptor */ - __u8 *rd_data; - uint rd_size; + struct hidp_connadd_req *req; }; static inline void hidp_schedule(struct hidp_session *session) diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c index 89f4a59eb82b..fc5ee3296e22 100644 --- a/trunk/net/bluetooth/rfcomm/core.c +++ b/trunk/net/bluetooth/rfcomm/core.c @@ -252,6 +252,7 @@ static void rfcomm_session_timeout(unsigned long arg) BT_DBG("session %p state %ld", s, s->state); set_bit(RFCOMM_TIMED_OUT, &s->flags); + rfcomm_session_put(s); rfcomm_schedule(RFCOMM_SCHED_TIMEO); } 
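The hidp hunks above move the report-descriptor copy between two points: one side snapshots the descriptor into kernel memory at session setup, the other stashes the connadd request and defers the copy_from_user() to hidp_parse(). A userspace model of the snapshot pattern, with malloc/memcpy standing in for kzalloc/copy_from_user and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct session {
	unsigned char *rd_data;	/* privately owned copy of the descriptor */
	size_t rd_size;
};

static int session_setup(struct session *s, const unsigned char *caller_buf,
			 size_t len)
{
	s->rd_data = malloc(len);		/* kzalloc() in the kernel */
	if (!s->rd_data)
		return -1;
	memcpy(s->rd_data, caller_buf, len);	/* copy_from_user() there */
	s->rd_size = len;
	return 0;
}

static void session_parse(const struct session *s)
{
	/* later parsing never touches the caller's buffer again */
	printf("parsing %zu private bytes, first byte %#x\n",
	       s->rd_size, s->rd_data[0]);
}

int main(void)
{
	unsigned char desc[] = { 0x05, 0x01, 0x09, 0x06 };	/* HID usage stub */
	struct session s;

	if (session_setup(&s, desc, sizeof(desc)) == 0) {
		session_parse(&s);
		free(s.rd_data);
	}
	return 0;
}

Copying once at setup means the descriptor outlives the caller's buffer, so the parse step needs no access to user memory at all.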
@@ -1150,11 +1151,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) break; case BT_DISCONN: - /* When socket is closed and we are not RFCOMM - * initiator rfcomm_process_rx already calls - * rfcomm_session_put() */ - if (s->sock->sk->sk_state != BT_CLOSED) - rfcomm_session_put(s); + rfcomm_session_put(s); break; } } @@ -1923,7 +1920,6 @@ static inline void rfcomm_process_sessions(void) if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); - rfcomm_session_put(s); continue; } diff --git a/trunk/net/core/dst.c b/trunk/net/core/dst.c index cb1b3488b739..57bc4d5b8d08 100644 --- a/trunk/net/core/dst.c +++ b/trunk/net/core/dst.c @@ -17,7 +17,6 @@ #include #include #include -#include #include @@ -80,7 +79,6 @@ static void dst_gc_task(struct work_struct *work) while ((dst = next) != NULL) { next = dst->next; prefetch(&next->next); - cond_resched(); if (likely(atomic_read(&dst->__refcnt))) { last->next = dst; last = dst; diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index 2e692afdc55d..de0c2c726420 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -3524,7 +3524,6 @@ static int pktgen_thread_worker(void *arg) wait_event_interruptible_timeout(t->queue, t->control != 0, HZ/10); - try_to_freeze(); continue; } diff --git a/trunk/net/dccp/ccid.c b/trunk/net/dccp/ccid.c index ff16e9df1969..57dfb9c8c4f2 100644 --- a/trunk/net/dccp/ccid.c +++ b/trunk/net/dccp/ccid.c @@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f va_list args; va_start(args, fmt); - vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args); + vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); va_end(args); slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, diff --git a/trunk/net/dccp/ccid.h b/trunk/net/dccp/ccid.h index 6df6f8ac9636..269958bf7fe9 100644 --- a/trunk/net/dccp/ccid.h +++ b/trunk/net/dccp/ccid.h @@ -19,9 +19,7 @@ #include #include -/* maximum value for a CCID (RFC 4340, 19.5) */ -#define CCID_MAX 255 -#define CCID_SLAB_NAME_LENGTH 32 +#define CCID_MAX 255 struct tcp_info; @@ -51,8 +49,8 @@ struct ccid_operations { const char *ccid_name; struct kmem_cache *ccid_hc_rx_slab, *ccid_hc_tx_slab; - char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH]; - char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH]; + char ccid_hc_rx_slab_name[32]; + char ccid_hc_tx_slab_name[32]; __u32 ccid_hc_rx_obj_size, ccid_hc_tx_obj_size; /* Interface Routines */ diff --git a/trunk/net/dccp/probe.c b/trunk/net/dccp/probe.c index f5b3464f1242..bace1d8cbcfd 100644 --- a/trunk/net/dccp/probe.c +++ b/trunk/net/dccp/probe.c @@ -161,8 +161,8 @@ static __init int dccpprobe_init(void) if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; - try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0, - "dccp"); + ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), + "dccp"); if (ret) goto err1; diff --git a/trunk/net/ipv4/netfilter/arp_tables.c b/trunk/net/ipv4/netfilter/arp_tables.c index 90203e1b9187..06632762ba5f 100644 --- a/trunk/net/ipv4/netfilter/arp_tables.c +++ b/trunk/net/ipv4/netfilter/arp_tables.c @@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct arpt_getinfo info; const struct xt_table_info *private = t->private; -#ifdef CONFIG_COMPAT - struct xt_table_info tmp; +#ifdef CONFIG_COMPAT if (compat) { + struct xt_table_info tmp; ret = 
compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; diff --git a/trunk/net/ipv4/netfilter/ip_tables.c b/trunk/net/ipv4/netfilter/ip_tables.c index 3ce53cf13d5a..572330a552ef 100644 --- a/trunk/net/ipv4/netfilter/ip_tables.c +++ b/trunk/net/ipv4/netfilter/ip_tables.c @@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; -#ifdef CONFIG_COMPAT - struct xt_table_info tmp; +#ifdef CONFIG_COMPAT if (compat) { + struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; diff --git a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index d1ea38a7c490..d171b123a656 100644 --- a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = { }, { .procname = "ip_conntrack_buckets", - .data = &init_net.ct.htable_size, + .data = &nf_conntrack_htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, diff --git a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 2fb7b76da94f..8668a3defda6 100644 --- a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < net->ct.htable_size; + st->bucket < nf_conntrack_htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) @@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= net->ct.htable_size) + if (++st->bucket >= nf_conntrack_htable_size) return NULL; } head = rcu_dereference(net->ct.hash[st->bucket].first); diff --git a/trunk/net/ipv4/netfilter/nf_nat_core.c b/trunk/net/ipv4/netfilter/nf_nat_core.c index 26066a2327ad..fe1a64479dd0 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_core.c +++ b/trunk/net/ipv4/netfilter/nf_nat_core.c @@ -35,6 +35,9 @@ static DEFINE_SPINLOCK(nf_nat_lock); static struct nf_conntrack_l3proto *l3proto __read_mostly; +/* Calculated at init based on memory size */ +static unsigned int nf_nat_htable_size __read_mostly; + #define MAX_IP_NAT_PROTO 256 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] __read_mostly; @@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put); /* We keep an extra hash for each conntrack, for fast searching. */ static inline unsigned int -hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) +hash_by_src(const struct nf_conntrack_tuple *tuple) { unsigned int hash; @@ -77,7 +80,7 @@ hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) hash = jhash_3words((__force u32)tuple->src.u3.ip, (__force u32)tuple->src.u.all, tuple->dst.protonum, 0); - return ((u64)hash * net->ipv4.nat_htable_size) >> 32; + return ((u64)hash * nf_nat_htable_size) >> 32; } /* Is this tuple already taken? 
(not by us) */ @@ -144,7 +147,7 @@ find_appropriate_src(struct net *net, struct nf_conntrack_tuple *result, const struct nf_nat_range *range) { - unsigned int h = hash_by_src(net, tuple); + unsigned int h = hash_by_src(tuple); const struct nf_conn_nat *nat; const struct nf_conn *ct; const struct hlist_node *n; @@ -327,7 +330,7 @@ nf_nat_setup_info(struct nf_conn *ct, if (have_to_hash) { unsigned int srchash; - srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); spin_lock_bh(&nf_nat_lock); /* nf_conntrack_alter_reply might re-allocate extension area */ nat = nfct_nat(ct); @@ -676,10 +679,8 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, static int __net_init nf_nat_net_init(struct net *net) { - /* Leave them the same for the moment. */ - net->ipv4.nat_htable_size = net->ct.htable_size; - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, - &net->ipv4.nat_vmalloced, 0); + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, + &net->ipv4.nat_vmalloced, 0); if (!net->ipv4.nat_bysource) return -ENOMEM; return 0; @@ -702,7 +703,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) nf_ct_iterate_cleanup(net, &clean_nat, NULL); synchronize_rcu(); nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, - net->ipv4.nat_htable_size); + nf_nat_htable_size); } static struct pernet_operations nf_nat_net_ops = { @@ -723,6 +724,9 @@ static int __init nf_nat_init(void) return ret; } + /* Leave them the same for the moment. */ + nf_nat_htable_size = nf_conntrack_htable_size; + ret = register_pernet_subsys(&nf_nat_net_ops); if (ret < 0) goto cleanup_extend; diff --git a/trunk/net/ipv6/netfilter/ip6_tables.c b/trunk/net/ipv6/netfilter/ip6_tables.c index 8a7e0f52e177..480d7f8c9802 100644 --- a/trunk/net/ipv6/netfilter/ip6_tables.c +++ b/trunk/net/ipv6/netfilter/ip6_tables.c @@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; -#ifdef CONFIG_COMPAT - struct xt_table_info tmp; +#ifdef CONFIG_COMPAT if (compat) { + struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; diff --git a/trunk/net/irda/irnet/irnet_ppp.c b/trunk/net/irda/irnet/irnet_ppp.c index 6b3602de359a..156020d138b5 100644 --- a/trunk/net/irda/irnet/irnet_ppp.c +++ b/trunk/net/irda/irnet/irnet_ppp.c @@ -698,18 +698,15 @@ dev_irnet_ioctl( /* Query PPP channel and unit number */ case PPPIOCGCHAN: - lock_kernel(); if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) err = 0; - unlock_kernel(); break; case PPPIOCGUNIT: lock_kernel(); if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) - err = 0; - unlock_kernel(); + err = 0; break; /* All these ioctls can be passed both directly and from ppp_generic, diff --git a/trunk/net/key/af_key.c b/trunk/net/key/af_key.c index 539f43bc97db..76fa6fef6473 100644 --- a/trunk/net/key/af_key.c +++ b/trunk/net/key/af_key.c @@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = { static void __exit ipsec_pfkey_exit(void) { + unregister_pernet_subsys(&pfkey_net_ops); xfrm_unregister_km(&pfkeyv2_mgr); sock_unregister(PF_KEY); - unregister_pernet_subsys(&pfkey_net_ops); proto_unregister(&key_proto); } @@ -3807,22 +3807,21 @@ static int __init ipsec_pfkey_init(void) if (err != 0) goto out; - err =
register_pernet_subsys(&pfkey_net_ops); - if (err != 0) - goto out_unregister_key_proto; err = sock_register(&pfkey_family_ops); if (err != 0) - goto out_unregister_pernet; + goto out_unregister_key_proto; err = xfrm_register_km(&pfkeyv2_mgr); if (err != 0) goto out_sock_unregister; + err = register_pernet_subsys(&pfkey_net_ops); + if (err != 0) + goto out_xfrm_unregister_km; out: return err; - +out_xfrm_unregister_km: + xfrm_unregister_km(&pfkeyv2_mgr); out_sock_unregister: sock_unregister(PF_KEY); -out_unregister_pernet: - unregister_pernet_subsys(&pfkey_net_ops); out_unregister_key_proto: proto_unregister(&key_proto); goto out; diff --git a/trunk/net/netfilter/nf_conntrack_core.c b/trunk/net/netfilter/nf_conntrack_core.c index 4d79e3c1616c..0e98c3282d42 100644 --- a/trunk/net/netfilter/nf_conntrack_core.c +++ b/trunk/net/netfilter/nf_conntrack_core.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include @@ -64,6 +63,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); struct nf_conn nf_conntrack_untracked __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_untracked); +static struct kmem_cache *nf_conntrack_cachep __read_mostly; + static int nf_conntrack_hash_rnd_initted; static unsigned int nf_conntrack_hash_rnd; @@ -85,10 +86,9 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, return ((u64)h * size) >> 32; } -static inline u_int32_t hash_conntrack(const struct net *net, - const struct nf_conntrack_tuple *tuple) +static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) { - return __hash_conntrack(tuple, net->ct.htable_size, + return __hash_conntrack(tuple, nf_conntrack_htable_size, nf_conntrack_hash_rnd); } @@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(net, tuple); + unsigned int hash = hash_conntrack(tuple); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. @@ -366,11 +366,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, void nf_conntrack_hash_insert(struct nf_conn *ct) { - struct net *net = nf_ct_net(ct); unsigned int hash, repl_hash; - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); + hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); __nf_conntrack_hash_insert(ct, hash, repl_hash); } @@ -398,8 +397,8 @@ __nf_conntrack_confirm(struct sk_buff *skb) if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); + hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); /* We're not in hash table, and we refuse to set up related connections for unconfirmed conns. But packet copies and @@ -469,7 +468,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, struct net *net = nf_ct_net(ignored_conntrack); struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(net, tuple); + unsigned int hash = hash_conntrack(tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. 
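hash_by_src() and __hash_conntrack() in the hunks here both map a 32-bit jhash result onto [0, size) with ((u64)hash * size) >> 32: one widening multiply and a shift instead of a modulo, correct even when the table size is not a power of two. A runnable sketch with the jhash step stubbed out by fixed inputs:

#include <stdio.h>
#include <stdint.h>

/* Scale a uniform 32-bit hash into a bucket index in [0, size). */
static uint32_t bucket(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	uint32_t size = 16384;	/* htable_size; any value works, not just 2^n */
	uint32_t hashes[] = { 0x00000000u, 0x12345678u, 0xffffffffu };

	for (int i = 0; i < 3; i++)
		printf("hash %#010x -> bucket %u of %u\n",
		       hashes[i], bucket(hashes[i], size), size);
	return 0;
}

Because the product is taken in 64 bits, the result is always strictly less than size, so no bounds check is needed after the shift.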
@@ -504,7 +503,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) int dropped = 0; rcu_read_lock(); - for (i = 0; i < net->ct.htable_size; i++) { + for (i = 0; i < nf_conntrack_htable_size; i++) { hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); @@ -524,7 +523,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) if (cnt >= NF_CT_EVICTION_RANGE) break; - hash = (hash + 1) % net->ct.htable_size; + hash = (hash + 1) % nf_conntrack_htable_size; } rcu_read_unlock(); @@ -558,7 +557,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { - unsigned int hash = hash_conntrack(net, orig); + unsigned int hash = hash_conntrack(orig); if (!early_drop(net, hash)) { atomic_dec(&net->ct.count); if (net_ratelimit()) @@ -573,7 +572,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_DESTROY_BY_RCU. */ - ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); + ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); if (ct == NULL) { pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); atomic_dec(&net->ct.count); @@ -612,7 +611,7 @@ void nf_conntrack_free(struct nf_conn *ct) nf_ct_ext_destroy(ct); atomic_dec(&net->ct.count); nf_ct_ext_free(ct); - kmem_cache_free(net->ct.nf_conntrack_cachep, ct); + kmem_cache_free(nf_conntrack_cachep, ct); } EXPORT_SYMBOL_GPL(nf_conntrack_free); @@ -1015,7 +1014,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), struct hlist_nulls_node *n; spin_lock_bh(&nf_conntrack_lock); - for (; *bucket < net->ct.htable_size; (*bucket)++) { + for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) @@ -1114,12 +1113,9 @@ static void nf_ct_release_dying_list(struct net *net) static void nf_conntrack_cleanup_init_net(void) { - /* wait until all references to nf_conntrack_untracked are dropped */ - while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) - schedule(); - nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); + kmem_cache_destroy(nf_conntrack_cachep); } static void nf_conntrack_cleanup_net(struct net *net) @@ -1131,14 +1127,15 @@ static void nf_conntrack_cleanup_net(struct net *net) schedule(); goto i_see_dead_people; } + /* wait until all references to nf_conntrack_untracked are dropped */ + while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) + schedule(); nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, - net->ct.htable_size); + nf_conntrack_htable_size); nf_conntrack_ecache_fini(net); nf_conntrack_acct_fini(net); nf_conntrack_expect_fini(net); - kmem_cache_destroy(net->ct.nf_conntrack_cachep); - kfree(net->ct.slabname); free_percpu(net->ct.stat); } @@ -1193,12 +1190,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) { int i, bucket, vmalloced, old_vmalloced; unsigned int hashsize, old_size; + int rnd; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; - if (current->nsproxy->net_ns != &init_net) - return -EOPNOTSUPP; - /* On boot, we can set this without any fancy locking. 
*/ if (!nf_conntrack_htable_size) return param_set_uint(val, kp); @@ -1211,29 +1206,33 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) if (!hash) return -ENOMEM; + /* We have to rehash for the new table anyway, so we can also + * use a new random seed */ + get_random_bytes(&rnd, sizeof(rnd)); + /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections * created because of a false negative won't make it into the hash * though since that required taking the lock. */ spin_lock_bh(&nf_conntrack_lock); - for (i = 0; i < init_net.ct.htable_size; i++) { + for (i = 0; i < nf_conntrack_htable_size; i++) { while (!hlist_nulls_empty(&init_net.ct.hash[i])) { h = hlist_nulls_entry(init_net.ct.hash[i].first, struct nf_conntrack_tuple_hash, hnnode); hlist_nulls_del_rcu(&h->hnnode); - bucket = __hash_conntrack(&h->tuple, hashsize, - nf_conntrack_hash_rnd); + bucket = __hash_conntrack(&h->tuple, hashsize, rnd); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } - old_size = init_net.ct.htable_size; + old_size = nf_conntrack_htable_size; old_vmalloced = init_net.ct.hash_vmalloc; old_hash = init_net.ct.hash; - init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; + nf_conntrack_htable_size = hashsize; init_net.ct.hash_vmalloc = vmalloced; init_net.ct.hash = hash; + nf_conntrack_hash_rnd = rnd; spin_unlock_bh(&nf_conntrack_lock); nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); @@ -1272,6 +1271,15 @@ static int nf_conntrack_init_init_net(void) NF_CONNTRACK_VERSION, nf_conntrack_htable_size, nf_conntrack_max); + nf_conntrack_cachep = kmem_cache_create("nf_conntrack", + sizeof(struct nf_conn), + 0, SLAB_DESTROY_BY_RCU, NULL); + if (!nf_conntrack_cachep) { + printk(KERN_ERR "Unable to create nf_conn slab cache\n"); + ret = -ENOMEM; + goto err_cache; + } + ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; @@ -1280,19 +1288,13 @@ if (ret < 0) goto err_helper; - /* Set up fake conntrack: to never be deleted, not in any hashes */ -#ifdef CONFIG_NET_NS - nf_conntrack_untracked.ct_net = &init_net; -#endif - atomic_set(&nf_conntrack_untracked.ct_general.use, 1); - /* - and make it look like a confirmed connection */ - set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); - return 0; err_helper: nf_conntrack_proto_fini(); err_proto: + kmem_cache_destroy(nf_conntrack_cachep); +err_cache: return ret; } @@ -1314,24 +1316,7 @@ static int nf_conntrack_init_net(struct net *net) ret = -ENOMEM; goto err_stat; } - - net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); - if (!net->ct.slabname) { - ret = -ENOMEM; - goto err_slabname; - } - - net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, - sizeof(struct nf_conn), 0, - SLAB_DESTROY_BY_RCU, NULL); - if (!net->ct.nf_conntrack_cachep) { - printk(KERN_ERR "Unable to create nf_conn slab cache\n"); - ret = -ENOMEM; - goto err_cache; - } - - net->ct.htable_size = nf_conntrack_htable_size; - net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, + net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, &net->ct.hash_vmalloc, 1); if (!net->ct.hash) { ret = -ENOMEM; @@ -1348,6 +1333,15 @@ static int nf_conntrack_init_net(struct net *net) if (ret < 0) goto err_ecache; + /* Set up fake conntrack: + - to never be deleted, not in any hashes */ +#ifdef CONFIG_NET_NS + nf_conntrack_untracked.ct_net = &init_net; +#endif + 
atomic_set(&nf_conntrack_untracked.ct_general.use, 1); + /* - and make it look like a confirmed connection */ + set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); + return 0; err_ecache: @@ -1356,12 +1350,8 @@ nf_conntrack_expect_fini(net); err_expect: nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, - net->ct.htable_size); + nf_conntrack_htable_size); err_hash: - kmem_cache_destroy(net->ct.nf_conntrack_cachep); -err_cache: - kfree(net->ct.slabname); -err_slabname: free_percpu(net->ct.stat); err_stat: return ret; diff --git a/trunk/net/netfilter/nf_conntrack_expect.c b/trunk/net/netfilter/nf_conntrack_expect.c index 2f25ff610982..fdf5d2a1d9b4 100644 --- a/trunk/net/netfilter/nf_conntrack_expect.c +++ b/trunk/net/netfilter/nf_conntrack_expect.c @@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net) #endif /* CONFIG_PROC_FS */ } -module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); +module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); int nf_conntrack_expect_init(struct net *net) { @@ -577,7 +577,7 @@ if (net_eq(net, &init_net)) { if (!nf_ct_expect_hsize) { - nf_ct_expect_hsize = net->ct.htable_size / 256; + nf_ct_expect_hsize = nf_conntrack_htable_size / 256; if (!nf_ct_expect_hsize) nf_ct_expect_hsize = 1; } } diff --git a/trunk/net/netfilter/nf_conntrack_helper.c b/trunk/net/netfilter/nf_conntrack_helper.c index 4b1a56bd074c..65c2a7bc3afc 100644 --- a/trunk/net/netfilter/nf_conntrack_helper.c +++ b/trunk/net/netfilter/nf_conntrack_helper.c @@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, /* Get rid of expecteds, set helpers to NULL. */ hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) unhelp(h, me); - for (i = 0; i < net->ct.htable_size; i++) { + for (i = 0; i < nf_conntrack_htable_size; i++) { hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) unhelp(h, me); } } diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index 0ffe689dfe97..42f21c01a93e 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); last = (struct nf_conn *)cb->args[1]; - for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { + for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], hnnode) { diff --git a/trunk/net/netfilter/nf_conntrack_standalone.c b/trunk/net/netfilter/nf_conntrack_standalone.c index e310f1561bb2..028aba667ef7 100644 --- a/trunk/net/netfilter/nf_conntrack_standalone.c +++ b/trunk/net/netfilter/nf_conntrack_standalone.c @@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < net->ct.htable_size; + st->bucket < nf_conntrack_htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) @@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= net->ct.htable_size) + if (++st->bucket >= nf_conntrack_htable_size) return NULL; } head = rcu_dereference(net->ct.hash[st->bucket].first); @@ -355,7 +355,7 @@
static ctl_table nf_ct_sysctl_table[] = { }, { .procname = "nf_conntrack_buckets", - .data = &init_net.ct.htable_size, + .data = &nf_conntrack_htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, @@ -421,7 +421,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) goto out_kmemdup; table[1].data = &net->ct.count; - table[2].data = &net->ct.htable_size; table[3].data = &net->ct.sysctl_checksum; table[4].data = &net->ct.sysctl_log_invalid; diff --git a/trunk/net/netlink/af_netlink.c b/trunk/net/netlink/af_netlink.c index 4c5972ba8c78..a4957bf2ca60 100644 --- a/trunk/net/netlink/af_netlink.c +++ b/trunk/net/netlink/af_netlink.c @@ -455,14 +455,9 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; - else - err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; netlink_unlock_table(); - if (err < 0) - goto out; - err = __netlink_create(net, sock, cb_mutex, protocol); if (err < 0) goto out_module; diff --git a/trunk/net/sched/Kconfig b/trunk/net/sched/Kconfig index 21f9c7678aa3..929218a47620 100644 --- a/trunk/net/sched/Kconfig +++ b/trunk/net/sched/Kconfig @@ -433,7 +433,7 @@ config NET_ACT_POLICE module. To compile this code as a module, choose M here: the - module will be called act_police. + module will be called police. config NET_ACT_GACT tristate "Generic actions" @@ -443,7 +443,7 @@ config NET_ACT_GACT accepting packets. To compile this code as a module, choose M here: the - module will be called act_gact. + module will be called gact. config GACT_PROB bool "Probability support" @@ -459,7 +459,7 @@ config NET_ACT_MIRRED other devices. To compile this code as a module, choose M here: the - module will be called act_mirred. + module will be called mirred. config NET_ACT_IPT tristate "IPtables targets" @@ -469,7 +469,7 @@ config NET_ACT_IPT classification. To compile this code as a module, choose M here: the - module will be called act_ipt. + module will be called ipt. config NET_ACT_NAT tristate "Stateless NAT" @@ -479,7 +479,7 @@ config NET_ACT_NAT netfilter for NAT unless you know what you are doing. To compile this code as a module, choose M here: the - module will be called act_nat. + module will be called nat. config NET_ACT_PEDIT tristate "Packet Editing" @@ -488,7 +488,7 @@ config NET_ACT_PEDIT Say Y here if you want to mangle the content of packets. To compile this code as a module, choose M here: the - module will be called act_pedit. + module will be called pedit. config NET_ACT_SIMP tristate "Simple Example (Debug)" @@ -502,7 +502,7 @@ config NET_ACT_SIMP If unsure, say N. To compile this code as a module, choose M here: the - module will be called act_simple. + module will be called simple. config NET_ACT_SKBEDIT tristate "SKB Editing" @@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT If unsure, say N. To compile this code as a module, choose M here: the - module will be called act_skbedit. + module will be called skbedit. 
diff --git a/trunk/security/integrity/ima/ima.h b/trunk/security/integrity/ima/ima.h
index 47fb65d1fcbd..c41afe6639a0 100644
--- a/trunk/security/integrity/ima/ima.h
+++ b/trunk/security/integrity/ima/ima.h
@@ -65,6 +65,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
 			 const char *cause, int result, int info);
 
 /* Internal IMA function definitions */
+void ima_iintcache_init(void);
 int ima_init(void);
 void ima_cleanup(void);
 int ima_fs_init(void);
@@ -130,7 +131,7 @@ void iint_free(struct kref *kref);
 void iint_rcu_free(struct rcu_head *rcu);
 
 /* IMA policy related functions */
-enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
 
 int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
 void ima_init_policy(void);
diff --git a/trunk/security/integrity/ima/ima_api.c b/trunk/security/integrity/ima/ima_api.c
index 2a5e0bcf3887..3cd58b60afd2 100644
--- a/trunk/security/integrity/ima/ima_api.c
+++ b/trunk/security/integrity/ima/ima_api.c
@@ -95,12 +95,12 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
  * ima_must_measure - measure decision based on policy.
  * @inode: pointer to inode to measure
  * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
 *
 * The policy is defined in terms of keypairs:
 *	subj=, obj=, type=, func=, mask=, fsmagic=
 *	subj, obj, and type: are LSM specific.
- *	func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
+ *	func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
 *	mask: contains the permission mask
 *	fsmagic: hex value
 *
diff --git a/trunk/security/integrity/ima/ima_iint.c b/trunk/security/integrity/ima/ima_iint.c
index 0d83edcfc402..fa592ff1ac1c 100644
--- a/trunk/security/integrity/ima/ima_iint.c
+++ b/trunk/security/integrity/ima/ima_iint.c
@@ -52,6 +52,9 @@ int ima_inode_alloc(struct inode *inode)
 	struct ima_iint_cache *iint = NULL;
 	int rc = 0;
 
+	if (!ima_initialized)
+		return 0;
+
 	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
 	if (!iint)
 		return -ENOMEM;
@@ -115,6 +118,8 @@ void ima_inode_free(struct inode *inode)
 {
 	struct ima_iint_cache *iint;
 
+	if (!ima_initialized)
+		return;
 	spin_lock(&ima_iint_lock);
 	iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
 	spin_unlock(&ima_iint_lock);
@@ -136,11 +141,9 @@ static void init_once(void *foo)
 	kref_set(&iint->refcount, 1);
 }
 
-static int __init ima_iintcache_init(void)
+void __init ima_iintcache_init(void)
 {
 	iint_cache =
 	    kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
 			      SLAB_PANIC, init_once);
-	return 0;
 }
-security_initcall(ima_iintcache_init);
diff --git a/trunk/security/integrity/ima/ima_main.c b/trunk/security/integrity/ima/ima_main.c
index 294b005d6520..a89f44d5e030 100644
--- a/trunk/security/integrity/ima/ima_main.c
+++ b/trunk/security/integrity/ima/ima_main.c
@@ -14,7 +14,7 @@
 *
 * File: ima_main.c
 *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- *	and ima_file_check.
+ *	and ima_path_check.
 */
 #include
 #include
@@ -84,36 +84,6 @@ static bool ima_limit_imbalance(struct file *file)
 	return found;
 }
 
-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
-				 struct ima_iint_cache *iint,
-				 struct inode *inode,
-				 const unsigned char *filename)
-{
-	switch (error) {
-	case TOMTOU:
-		if (iint->readcount > 0)
-			ima_add_violation(inode, filename, "invalid_pcr",
-					  "ToMToU");
-		break;
-	case OPEN_WRITERS:
-		if (iint->writecount > 0)
-			ima_add_violation(inode, filename, "invalid_pcr",
-					  "open_writers");
-		break;
-	}
-}
-
 /*
 * Update the counts given an fmode_t
 */
@@ -128,47 +98,6 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
 		iint->writecount++;
 }
 
-/*
- * ima_counts_get - increment file counts
- *
- * Maintain read/write counters for all files, but only
- * invalidate the PCR for measured files:
- *	- Opening a file for write when already open for read,
- *	  results in a time of measure, time of use (ToMToU) error.
- *	- Opening a file for read when already open for write,
- *	  could result in a file measurement error.
- *
- */
-void ima_counts_get(struct file *file)
-{
-	struct dentry *dentry = file->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
-	fmode_t mode = file->f_mode;
-	struct ima_iint_cache *iint;
-	int rc;
-
-	if (!ima_initialized || !S_ISREG(inode->i_mode))
-		return;
-	iint = ima_iint_find_get(inode);
-	if (!iint)
-		return;
-	mutex_lock(&iint->mutex);
-	rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
-	if (rc < 0)
-		goto out;
-
-	if (mode & FMODE_WRITE) {
-		ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
-		goto out;
-	}
-	ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
-out:
-	ima_inc_counts(iint, file->f_mode);
-	mutex_unlock(&iint->mutex);
-
-	kref_put(&iint->refcount, iint_free);
-}
-
 /*
 * Decrement ima counts
 */
@@ -224,6 +153,123 @@ void ima_file_free(struct file *file)
 	kref_put(&iint->refcount, iint_free);
 }
 
+/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
+ *
+ * When opening a file for read, if the file is already open for write,
+ * the file could change, resulting in a file measurement error.
+ *
+ * Opening a file for write, if the file is already open for read, results
+ * in a time of measure, time of use (ToMToU) error.
+ *
+ * In either case invalidate the PCR.
+ */
+enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
+static void ima_read_write_check(enum iint_pcr_error error,
+				 struct ima_iint_cache *iint,
+				 struct inode *inode,
+				 const unsigned char *filename)
+{
+	switch (error) {
+	case TOMTOU:
+		if (iint->readcount > 0)
+			ima_add_violation(inode, filename, "invalid_pcr",
+					  "ToMToU");
+		break;
+	case OPEN_WRITERS:
+		if (iint->writecount > 0)
+			ima_add_violation(inode, filename, "invalid_pcr",
+					  "open_writers");
+		break;
+	}
+}
+
+static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
+				const unsigned char *filename)
+{
+	int rc = 0;
+
+	ima_inc_counts(iint, file->f_mode);
+
+	rc = ima_collect_measurement(iint, file);
+	if (!rc)
+		ima_store_measurement(iint, file, filename);
+	return rc;
+}
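ima_read_write_check(), restored above, encodes a simple rule: a write open while readers exist raises a ToMToU violation, and a read open while writers exist raises an open_writers violation. A compilable sketch with simplified stand-ins for the iint cache and the audit call (struct iint and the printf calls below are illustrative, not the kernel's types):

    #include <stdio.h>

    /* Just enough state to show when each violation fires. */
    struct iint { int readcount; int writecount; };

    enum iint_pcr_error { TOMTOU, OPEN_WRITERS };

    static void read_write_check(enum iint_pcr_error error,
                                 const struct iint *iint)
    {
        switch (error) {
        case TOMTOU:
            /* opening for write while readers exist */
            if (iint->readcount > 0)
                printf("violation: ToMToU\n");
            break;
        case OPEN_WRITERS:
            /* opening for read while writers exist */
            if (iint->writecount > 0)
                printf("violation: open_writers\n");
            break;
        }
    }

    int main(void)
    {
        struct iint i = { .readcount = 1, .writecount = 0 };
        read_write_check(TOMTOU, &i);       /* fires: one reader open */
        read_write_check(OPEN_WRITERS, &i); /* silent: no writers */
        return 0;
    }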
+
+/**
+ * ima_path_check - based on policy, collect/store measurement.
+ * @path: contains a pointer to the path to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
+ *
+ * Measure the file being open for readonly, based on the
+ * ima_must_measure() policy decision.
+ *
+ * Keep read/write counters for all files, but only
+ * invalidate the PCR for measured files:
+ *	- Opening a file for write when already open for read,
+ *	  results in a time of measure, time of use (ToMToU) error.
+ *	- Opening a file for read when already open for write,
+ *	  could result in a file measurement error.
+ *
+ * Always return 0 and audit dentry_open failures.
+ * (Return code will be based upon measurement appraisal.)
+ */
+int ima_path_check(struct path *path, int mask)
+{
+	struct inode *inode = path->dentry->d_inode;
+	struct ima_iint_cache *iint;
+	struct file *file = NULL;
+	int rc;
+
+	if (!ima_initialized || !S_ISREG(inode->i_mode))
+		return 0;
+	iint = ima_iint_find_get(inode);
+	if (!iint)
+		return 0;
+
+	mutex_lock(&iint->mutex);
+
+	rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
+	if (rc < 0)
+		goto out;
+
+	if ((mask & MAY_WRITE) || (mask == 0))
+		ima_read_write_check(TOMTOU, iint, inode,
+				     path->dentry->d_name.name);
+
+	if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
+		goto out;
+
+	ima_read_write_check(OPEN_WRITERS, iint, inode,
+			     path->dentry->d_name.name);
+	if (!(iint->flags & IMA_MEASURED)) {
+		struct dentry *dentry = dget(path->dentry);
+		struct vfsmount *mnt = mntget(path->mnt);
+
+		file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
+				   current_cred());
+		if (IS_ERR(file)) {
+			int audit_info = 0;
+
+			integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+					    dentry->d_name.name,
+					    "add_measurement",
+					    "dentry_open failed",
+					    1, audit_info);
+			file = NULL;
+			goto out;
+		}
+		rc = get_path_measurement(iint, file, dentry->d_name.name);
+	}
+out:
+	mutex_unlock(&iint->mutex);
+	if (file)
+		fput(file);
+	kref_put(&iint->refcount, iint_free);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ima_path_check);
+
 static int process_measurement(struct file *file, const unsigned char *filename,
 			       int mask, int function)
 {
@@ -251,6 +297,33 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 	return rc;
 }
 
+/*
+ * ima_counts_get - increment file counts
+ *
+ * - for IPC shm and shmat file.
+ * - for nfsd exported files.
+ *
+ * Increment the counts for these files to prevent unnecessary
+ * imbalance messages.
+ */
+void ima_counts_get(struct file *file)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct ima_iint_cache *iint;
+
+	if (!ima_initialized || !S_ISREG(inode->i_mode))
+		return;
+	iint = ima_iint_find_get(inode);
+	if (!iint)
+		return;
+	mutex_lock(&iint->mutex);
+	ima_inc_counts(iint, file->f_mode);
+	mutex_unlock(&iint->mutex);
+
+	kref_put(&iint->refcount, iint_free);
+}
+EXPORT_SYMBOL_GPL(ima_counts_get);
+
 /**
 * ima_file_mmap - based on policy, collect/store measurement.
 * @file: pointer to the file to be measured (May be NULL)
@@ -296,31 +369,11 @@ int ima_bprm_check(struct linux_binprm *bprm)
 	return 0;
 }
 
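The two mask tests in ima_path_check() above decide, first, whether the open looks like a write (which triggers the ToMToU check) and, second, whether it is a pure read (the only case that proceeds to measurement). A small table of outcomes, using the MAY_* values from include/linux/fs.h:

    #include <stdio.h>

    /* Same values as include/linux/fs.h. */
    #define MAY_EXEC  0x01
    #define MAY_WRITE 0x02
    #define MAY_READ  0x04

    int main(void)
    {
        int masks[] = { MAY_READ, MAY_WRITE, MAY_READ | MAY_WRITE,
                        MAY_EXEC, 0 };
        int k;

        for (k = 0; k < 5; k++) {
            int mask = masks[k];
            /* First test: a write, or an unspecified mask, triggers
             * the ToMToU check. */
            int tomtou = (mask & MAY_WRITE) || (mask == 0);
            /* Second test: only a pure read proceeds to measurement. */
            int measured =
                (mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) == MAY_READ;
            printf("mask=%#04x tomtou_check=%d measured=%d\n",
                   mask, tomtou, measured);
        }
        return 0;
    }

So a combined read-write open is checked for a ToMToU violation but never measured; only MAY_READ alone reaches get_path_measurement().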
-/**
- * ima_path_check - based on policy, collect/store measurement.
- * @file: pointer to the file to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
- *
- * Measure files based on the ima_must_measure() policy decision.
- *
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
- */
-int ima_file_check(struct file *file, int mask)
-{
-	int rc;
-
-	rc = process_measurement(file, file->f_dentry->d_name.name,
-				 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
-				 FILE_CHECK);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ima_file_check);
-
 static int __init init_ima(void)
 {
 	int error;
 
+	ima_iintcache_init();
 	error = ima_init();
 	ima_initialized = 1;
 
 	return error;
diff --git a/trunk/security/integrity/ima/ima_policy.c b/trunk/security/integrity/ima/ima_policy.c
index 4759d0f99335..e1278399b345 100644
--- a/trunk/security/integrity/ima/ima_policy.c
+++ b/trunk/security/integrity/ima/ima_policy.c
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
 	 .flags = IMA_FUNC | IMA_MASK},
 	{.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
 	 .flags = IMA_FUNC | IMA_MASK},
-	{.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
+	{.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
 	 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
 };
 
@@ -282,11 +282,8 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
 			break;
 		case Opt_func:
 			audit_log_format(ab, "func=%s ", args[0].from);
-			if (strcmp(args[0].from, "FILE_CHECK") == 0)
-				entry->func = FILE_CHECK;
-			/* PATH_CHECK is for backwards compat */
-			else if (strcmp(args[0].from, "PATH_CHECK") == 0)
-				entry->func = FILE_CHECK;
+			if (strcmp(args[0].from, "PATH_CHECK") == 0)
+				entry->func = PATH_CHECK;
 			else if (strcmp(args[0].from, "FILE_MMAP") == 0)
 				entry->func = FILE_MMAP;
 			else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/trunk/security/security.c b/trunk/security/security.c
index 122b748d0f4c..24e060be9fa5 100644
--- a/trunk/security/security.c
+++ b/trunk/security/security.c
@@ -666,6 +666,8 @@ int security_file_alloc(struct file *file)
 void security_file_free(struct file *file)
 {
 	security_ops->file_free_security(file);
+	if (file->f_dentry)
+		ima_file_free(file);
 }
 
 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/trunk/sound/pci/ctxfi/ctatc.c b/trunk/sound/pci/ctxfi/ctatc.c
index 459c1f62783b..cb65bd0dd35b 100644
--- a/trunk/sound/pci/ctxfi/ctatc.c
+++ b/trunk/sound/pci/ctxfi/ctatc.c
@@ -166,7 +166,18 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 
 static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
 {
-	return atc->vm->get_ptp_phys(atc->vm, index);
+	struct ct_vm *vm;
+	void *kvirt_addr;
+	unsigned long phys_addr;
+
+	vm = atc->vm;
+	kvirt_addr = vm->get_ptp_virt(vm, index);
+	if (kvirt_addr == NULL)
+		phys_addr = (~0UL);
+	else
+		phys_addr = virt_to_phys(kvirt_addr);
+
+	return phys_addr;
 }
 
 static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1658,7 +1669,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
 	}
 
 	/* Set up device virtual memory management object */
-	err = ct_vm_create(&atc->vm, pci);
+	err = ct_vm_create(&atc->vm);
 	if (err < 0)
 		goto error1;
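After this revert, ima_parse_rule() above accepts only the PATH_CHECK spelling for the inode hook; the FILE_CHECK name and its backwards-compatibility branch are gone. A sketch of the restored token matching (parse_func() is a hypothetical wrapper, not a kernel function):

    #include <stdio.h>
    #include <string.h>

    enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };

    /* Mirrors the strcmp() chain in the ima_parse_rule() hunk above. */
    static int parse_func(const char *tok, enum ima_hooks *func)
    {
        if (strcmp(tok, "PATH_CHECK") == 0)
            *func = PATH_CHECK;
        else if (strcmp(tok, "FILE_MMAP") == 0)
            *func = FILE_MMAP;
        else if (strcmp(tok, "BPRM_CHECK") == 0)
            *func = BPRM_CHECK;
        else
            return -1;
        return 0;
    }

    int main(void)
    {
        enum ima_hooks f;

        if (parse_func("PATH_CHECK", &f) == 0)
            printf("func=%d\n", f);          /* prints func=1 */
        if (parse_func("FILE_CHECK", &f) != 0)
            printf("FILE_CHECK: unknown after revert\n");
        return 0;
    }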
diff --git a/trunk/sound/pci/ctxfi/ctvmem.c b/trunk/sound/pci/ctxfi/ctvmem.c
index 65da6e466f80..6b78752e9503 100644
--- a/trunk/sound/pci/ctxfi/ctvmem.c
+++ b/trunk/sound/pci/ctxfi/ctvmem.c
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
 		return NULL;
 	}
 
-	ptp = (unsigned long *)vm->ptp[0].area;
+	ptp = vm->ptp[0];
 	pte_start = (block->addr >> CT_PAGE_SHIFT);
 	pages = block->size >> CT_PAGE_SHIFT;
 	for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
 }
 
 /* *
- * return the host physical addr of the @index-th device
- * page table page on success, or ~0UL on failure.
- * The first returned ~0UL indicates the termination.
+ * return the host (kmalloced) addr of the @index-th device
+ * page table page on success, or NULL on failure.
+ * The first returned NULL indicates the termination.
 * */
-static dma_addr_t
-ct_get_ptp_phys(struct ct_vm *vm, int index)
+static void *
+ct_get_ptp_virt(struct ct_vm *vm, int index)
 {
-	dma_addr_t addr;
+	void *addr;
 
-	addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
+	addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];
 
 	return addr;
 }
 
-int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
+int ct_vm_create(struct ct_vm **rvm)
 {
 	struct ct_vm *vm;
 	struct ct_vm_block *block;
-	int i, err = 0;
+	int i;
 
 	*rvm = NULL;
@@ -188,21 +188,23 @@ int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
 
 	/* Allocate page table pages */
 	for (i = 0; i < CT_PTP_NUM; i++) {
-		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
-					  snd_dma_pci_data(pci),
-					  PAGE_SIZE, &vm->ptp[i]);
-		if (err < 0)
+		vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		if (!vm->ptp[i])
 			break;
 	}
-	if (err < 0) {
+	if (!i) {
 		/* no page table pages are allocated */
-		ct_vm_destroy(vm);
+		kfree(vm);
 		return -ENOMEM;
 	}
 	vm->size = CT_ADDRS_PER_PAGE * i;
+	/* Initialise remaining ptps */
+	for (; i < CT_PTP_NUM; i++)
+		vm->ptp[i] = NULL;
+
 	vm->map = ct_vm_map;
 	vm->unmap = ct_vm_unmap;
-	vm->get_ptp_phys = ct_get_ptp_phys;
+	vm->get_ptp_virt = ct_get_ptp_virt;
 	INIT_LIST_HEAD(&vm->unused);
 	INIT_LIST_HEAD(&vm->used);
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -240,7 +242,7 @@ void ct_vm_destroy(struct ct_vm *vm)
 
 	/* free allocated page table pages */
 	for (i = 0; i < CT_PTP_NUM; i++)
-		snd_dma_free_pages(&vm->ptp[i]);
+		kfree(vm->ptp[i]);
 
 	vm->size = 0;
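ct_get_ptp_virt() above returns NULL both for an out-of-range index and for a never-allocated slot, which is why a caller such as atc_get_ptp_phys() can treat the first NULL (or, after conversion, ~0UL) as a terminator. A standalone sketch of that lookup contract (CT_PTP_NUM and the 4096-byte page size match the hunks above; the allocation pattern is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define CT_PTP_NUM 4

    static void *ptp[CT_PTP_NUM];   /* statics start out NULL */

    /* Out-of-range indices and unallocated slots both yield NULL. */
    static void *get_ptp_virt(int index)
    {
        return (index >= CT_PTP_NUM) ? NULL : ptp[index];
    }

    int main(void)
    {
        int i;

        ptp[0] = malloc(4096);  /* pretend only the first PTP exists */
        for (i = 0; get_ptp_virt(i) != NULL; i++)
            printf("ptp[%d] at %p\n", i, get_ptp_virt(i));
        printf("stopped at index %d\n", i);
        free(ptp[0]);
        return 0;
    }

This is also why ct_vm_create() above NULLs the remaining slots after a partial allocation: the walk must stop at the first hole.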
diff --git a/trunk/sound/pci/ctxfi/ctvmem.h b/trunk/sound/pci/ctxfi/ctvmem.h
index b23adfca4de6..01e4fd0386a3 100644
--- a/trunk/sound/pci/ctxfi/ctvmem.h
+++ b/trunk/sound/pci/ctxfi/ctvmem.h
@@ -22,8 +22,6 @@
 #include
 #include
-#include
-#include
 
 /* The chip can handle the page table of 4k pages
 * (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -43,7 +41,7 @@ struct snd_pcm_substream;
 
 /* Virtual memory management object for card device */
 struct ct_vm {
-	struct snd_dma_buffer ptp[CT_PTP_NUM];	/* Device page table pages */
+	void *ptp[CT_PTP_NUM];		/* Device page table pages */
 	unsigned int size;		/* Available addr space in bytes */
 	struct list_head unused;	/* List of unused blocks */
 	struct list_head used;		/* List of used blocks */
@@ -54,10 +52,10 @@ struct ct_vm {
 		   int size);
 	/* Unmap device logical addr area. */
 	void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
-	dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
+	void *(*get_ptp_virt)(struct ct_vm *vm, int index);
 };
 
-int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
+int ct_vm_create(struct ct_vm **rvm);
 void ct_vm_destroy(struct ct_vm *vm);
 
 #endif /* CTVMEM_H */
diff --git a/trunk/sound/pci/hda/hda_intel.c b/trunk/sound/pci/hda/hda_intel.c
index b8faa6dc5abe..565de38a3fc7 100644
--- a/trunk/sound/pci/hda/hda_intel.c
+++ b/trunk/sound/pci/hda/hda_intel.c
@@ -426,7 +426,6 @@ struct azx {
 
 	/* flags */
 	int position_fix;
-	int poll_count;
 	unsigned int running :1;
 	unsigned int initialized :1;
 	unsigned int single_cmd :1;
@@ -507,7 +506,7 @@ static char *driver_short_names[] __devinitdata = {
 #define get_azx_dev(substream) (substream->runtime->private_data)
 
 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
-static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
+
 /*
 * Interface for HD codec
 */
@@ -665,12 +664,11 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 {
 	struct azx *chip = bus->private_data;
 	unsigned long timeout;
-	int do_poll = 0;
 
  again:
 	timeout = jiffies + msecs_to_jiffies(1000);
 	for (;;) {
-		if (chip->polling_mode || do_poll) {
+		if (chip->polling_mode) {
 			spin_lock_irq(&chip->reg_lock);
 			azx_update_rirb(chip);
 			spin_unlock_irq(&chip->reg_lock);
@@ -678,9 +676,6 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		if (!chip->rirb.cmds[addr]) {
 			smp_rmb();
 			bus->rirb_error = 0;
-
-			if (!do_poll)
-				chip->poll_count = 0;
 			return chip->rirb.res[addr]; /* the last value */
 		}
 		if (time_after(jiffies, timeout))
@@ -693,16 +688,6 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		}
 	}
 
-	if (!chip->polling_mode && chip->poll_count < 2) {
-		snd_printdd(SFX "azx_get_response timeout, "
-			    "polling the codec once: last cmd=0x%08x\n",
-			    chip->last_cmd[addr]);
-		do_poll = 1;
-		chip->poll_count++;
-		goto again;
-	}
-
 	if (!chip->polling_mode) {
 		snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
 			   "switching to polling mode: last cmd=0x%08x\n",
@@ -2058,7 +2043,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
 {
 	if (request_irq(chip->pci->irq, azx_interrupt,
 			chip->msi ? 0 : IRQF_SHARED,
-			"hda_intel", chip)) {
+			"HDA Intel", chip)) {
 		printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
 		       "disabling device\n", chip->pci->irq);
 		if (do_disconnect)
diff --git a/trunk/sound/pci/ice1712/aureon.c b/trunk/sound/pci/ice1712/aureon.c
index 9e66f6d306f8..765d7bd4c3d4 100644
--- a/trunk/sound/pci/ice1712/aureon.c
+++ b/trunk/sound/pci/ice1712/aureon.c
@@ -703,13 +703,11 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
 {
 	unsigned char nvol;
 
-	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
+	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
 		nvol = 0;
-	} else {
+	else
 		nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / WM_VOL_MAX;
-		nvol += 0x1b;
-	}
 
 	wm_put(ice, index, nvol);
 	wm_put_nocache(ice, index, 0x180 | nvol);
@@ -780,7 +778,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 	for (ch = 0; ch < 2; ch++) {
 		unsigned int vol = ucontrol->value.integer.value[ch];
 		if (vol > WM_VOL_MAX)
-			vol = WM_VOL_MAX;
+			continue;
 		vol |= spec->master[ch] & WM_VOL_MUTE;
 		if (vol != spec->master[ch]) {
 			int dac;
@@ -836,8 +834,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
 	for (i = 0; i < voices; i++) {
 		unsigned int vol = ucontrol->value.integer.value[i];
 		if (vol > WM_VOL_MAX)
-			vol = WM_VOL_MAX;
-		vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
+			continue;
+		vol |= spec->vol[ofs+i];
 		if (vol != spec->vol[ofs+i]) {
 			spec->vol[ofs+i] = vol;
 			idx = WM_DAC_ATTEN + ofs + i;
diff --git a/trunk/sound/soc/omap/omap3pandora.c b/trunk/sound/soc/omap/omap3pandora.c
index 68980c19a3bc..71b2c161158d 100644
--- a/trunk/sound/soc/omap/omap3pandora.c
+++ b/trunk/sound/soc/omap/omap3pandora.c
@@ -145,7 +145,6 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
-	{"PCM DAC", NULL, "APLL Enable"},
 	{"Headphone Amplifier", NULL, "PCM DAC"},
 	{"Line Out", NULL, "PCM DAC"},
 	{"Headphone Jack", NULL, "Headphone Amplifier"},
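The wm_set_vol() hunk above restores the earlier attenuation blend: per-channel volume is scaled by the master volume, a mute bit on either side forces silence, and the +0x1b register offset added in -rc7 is dropped. A sketch of the arithmetic (the WM_VOL_CNT/WM_VOL_MAX values here are illustrative, not the driver's):

    #include <stdio.h>

    #define WM_VOL_MUTE 0x8000
    /* Illustrative values; the real constants live in aureon.c. */
    #define WM_VOL_CNT  128
    #define WM_VOL_MAX  127

    /* Channel volume scaled by master volume; mute wins outright. */
    static unsigned char wm_nvol(unsigned short master, unsigned short vol)
    {
        if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
            return 0;
        return ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / WM_VOL_MAX;
    }

    int main(void)
    {
        printf("master=0x60 vol=0x40 -> nvol=0x%02x\n", wm_nvol(0x60, 0x40));
        printf("muted          -> nvol=0x%02x\n", wm_nvol(WM_VOL_MUTE, 0x40));
        return 0;
    }

Note also the change of policy in wm_master_vol_put()/wm_vol_put() above: an out-of-range request is now skipped (continue) rather than clamped to WM_VOL_MAX.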