diff --git a/[refs] b/[refs] index fdd788bde32f..e7057ae698d0 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: f6510ec5a96d07897a109ad8919c6af5e9b3f4bc +refs/heads/master: 85f8d3e5faea8bd36c3e5196f8334f7db45e19b2 diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index b34390347c16..b8bea100a160 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -2868,21 +2868,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) }, .driver_data = "F.23", /* cutoff BIOS version */ }, - /* - * Acer eMachines G725 has the same problem. BIOS - * V1.03 is known to be broken. V3.04 is known to - * work. Inbetween, there are V1.06, V2.06 and V3.03 - * that we don't have much idea about. For now, - * blacklist anything older than V3.04. - */ - { - .ident = "G725", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), - DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), - }, - .driver_data = "V3.04", /* cutoff BIOS version */ - }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index d096fbcbc771..f4ea5a8c325b 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * write indication (used for PIO/DMA setup), result TF is * copied back and we don't whine too much about its failure. */ - tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; if (scmd->sc_data_direction == DMA_TO_DEVICE) tf->flags |= ATA_TFLAG_WRITE; diff --git a/trunk/drivers/ata/libata-sff.c b/trunk/drivers/ata/libata-sff.c index 730ef3c384ca..741065c9da67 100644 --- a/trunk/drivers/ata/libata-sff.c +++ b/trunk/drivers/ata/libata-sff.c @@ -893,9 +893,6 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } - if (!do_write) - flush_dcache_page(page); - qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; diff --git a/trunk/drivers/gpu/drm/ati_pcigart.c b/trunk/drivers/gpu/drm/ati_pcigart.c index 17be051b7aa3..a1fce68e3bbe 100644 --- a/trunk/drivers/gpu/drm/ati_pcigart.c +++ b/trunk/drivers/gpu/drm/ati_pcigart.c @@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { DRM_ERROR("fail to set dma mask to 0x%Lx\n", - (unsigned long long)gart_info->table_mask); + gart_info->table_mask); ret = 1; goto done; } diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index ecac882e1d54..46d88965852a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = { const static struct intel_device_info intel_pineview_info = { .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .need_gfx_hws = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index b4c8c0230689..dda787aafcc6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -3564,9 +3564,6 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t reloc_count = 0, i; int ret = 0; - if (relocs == NULL) - return 0; - for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; int unwritten; @@ -3656,7 +3653,7 
@@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs = NULL; + struct drm_i915_gem_relocation_entry *relocs; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains, reloc_index; @@ -3725,8 +3722,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3735,8 +3730,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (obj_priv->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", object_list[i]); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3933,7 +3926,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); -pre_mutex_err: /* Copy the updated relocations out regardless of current error * state. Failure to update the relocs would mean that the next * time userland calls execbuf, it would do so with presumed offset @@ -3948,6 +3940,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = ret2; } +pre_mutex_err: drm_free_large(object_list); kfree(cliprects); diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 50ddf4a95c5e..89a071a3e6fb 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -309,22 +309,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_GSE) ironlake_opregion_gse_intr(dev); - if (de_iir & DE_PLANEA_FLIP_DONE) - intel_prepare_page_flip(dev, 0); - - if (de_iir & DE_PLANEB_FLIP_DONE) - intel_prepare_page_flip(dev, 1); - - if (de_iir & DE_PIPEA_VBLANK) { - drm_handle_vblank(dev, 0); - intel_finish_page_flip(dev, 0); - } - - if (de_iir & DE_PIPEB_VBLANK) { - drm_handle_vblank(dev, 1); - intel_finish_page_flip(dev, 1); - } - /* check event from PCH */ if ((de_iir & DE_PCH_EVENT) && (pch_iir & SDE_HOTPLUG_MASK)) { @@ -860,11 +844,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_IRONLAKE(dev)) - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else if (IS_I965G(dev)) + return 0; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -882,14 +866,13 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_IRONLAKE(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else - i915_disable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE | - PIPE_START_VBLANK_INTERRUPT_ENABLE); + return; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -1032,14 +1015,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; u32 render_mask = GT_USER_INTERRUPT; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; + dev_priv->de_irq_enable_reg = display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 79dd4026586f..ddefc871edfe 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -157,9 +157,6 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 12775df1bbfd..45da78ef4a92 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -1638,7 +1638,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); - drm_vblank_off(dev, pipe); /* Disable display plane */ temp = I915_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { @@ -2520,10 +2519,6 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, sr_entries = roundup(sr_entries / cacheline_size, 1); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -2567,10 +2562,6 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm = 1; srwm &= 0x3f; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -2639,10 +2630,6 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, if (srwm < 0) srwm = 1; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -3997,12 +3984,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev->event_lock, flags); work = 
intel_crtc->unpin_work; if (work == NULL || !work->pending) { - if (work && !work->pending) { - obj_priv = work->obj->driver_private; - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", - obj_priv, - atomic_read(&obj_priv->pending_flip)); - } spin_unlock_irqrestore(&dev->event_lock, flags); return; } @@ -4024,10 +4005,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev->event_lock, flags); obj_priv = work->obj->driver_private; - - /* Initial scanout buffer will have a 0 pending flip count */ - if ((atomic_read(&obj_priv->pending_flip) == 0) || - atomic_dec_and_test(&obj_priv->pending_flip)) + if (atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); } @@ -4040,11 +4018,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) { + if (intel_crtc->unpin_work) intel_crtc->unpin_work->pending = 1; - } else { - DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); - } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -4078,7 +4053,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); mutex_unlock(&dev->struct_mutex); @@ -4092,10 +4066,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ret = intel_pin_and_fence_fb_obj(dev, obj); if (ret != 0) { - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", - obj->driver_private); kfree(work); - intel_crtc->unpin_work = NULL; mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index b1d0acbae4e4..aa74e59bec61 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { { .ident = "Samsung SX20S", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), + DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BOARD_NAME, "SX20S"), }, }, @@ -622,13 +622,6 @@ static const struct dmi_system_id bad_lid_status[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), }, }, - { - .ident = "Aspire 1810T", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), - }, - }, { .ident = "PC-81005", .matches = { @@ -650,7 +643,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect { enum drm_connector_status status = connector_status_connected; - if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) + if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) status = connector_status_disconnected; return status; diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 82678d30ab06..eaacfd0920df 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -2345,14 +2345,6 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) connector->connector_type = DRM_MODE_CONNECTOR_VGA; intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); - } else if (flags & SDVO_OUTPUT_CVBS0) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; - 
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index c0d4650cdb79..11c9a3fe6810 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -354,17 +354,11 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } -/* Who ever call radeon_fence_emit should call ring_lock and ask - * for enough space (today caller are ib schedule and buffer move) */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* We have to make sure that caches are flushed before - * CPU might read something from VRAM. */ - radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); - radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); + /* Who ever call radeon_fence_emit should call ring_lock and ask + * for enough space (today caller are ib schedule and buffer move) */ /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 16) | (1 << 17)); @@ -3375,6 +3369,7 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { + r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -3486,12 +3481,13 @@ int r100_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index 43b55a030b4d..0051d11b907c 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -506,14 +506,11 @@ void r300_vram_info(struct radeon_device *rdev) /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; - tmp = RREG32(RADEON_MEM_CNTL); - tmp &= R300_MEM_NUM_CHANNELS_MASK; - switch (tmp) { - case 0: rdev->mc.vram_width = 64; break; - case 1: rdev->mc.vram_width = 128; break; - case 2: rdev->mc.vram_width = 256; break; - default: rdev->mc.vram_width = 128; break; + if (tmp & R300_MEM_NUM_CHANNELS_MASK) { + rdev->mc.vram_width = 128; + } else { + rdev->mc.vram_width = 64; } r100_vram_init_sizes(rdev); @@ -1330,6 +1327,7 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { + r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -1420,15 +1418,15 @@ int r300_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } 
return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r420.c b/trunk/drivers/gpu/drm/radeon/r420.c index d9373246c97f..4526faaacca8 100644 --- a/trunk/drivers/gpu/drm/radeon/r420.c +++ b/trunk/drivers/gpu/drm/radeon/r420.c @@ -389,15 +389,16 @@ int r420_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r420_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r520.c b/trunk/drivers/gpu/drm/radeon/r520.c index ddf5731eba0d..9a189072f2b9 100644 --- a/trunk/drivers/gpu/drm/radeon/r520.c +++ b/trunk/drivers/gpu/drm/radeon/r520.c @@ -294,12 +294,13 @@ int r520_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index a1198d99cdf9..1b6d0001b20e 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -1654,12 +1654,6 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.align_mask = 16 - 1; } -void r600_cp_fini(struct radeon_device *rdev) -{ - r600_cp_stop(rdev); - radeon_ring_fini(rdev); -} - /* * GPU scratch registers helpers function. 
@@ -1867,12 +1861,6 @@ int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -2057,15 +2045,19 @@ int r600_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } rdev->accel_working = true; r = r600_startup(rdev); if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + r600_suspend(rdev); r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -2091,17 +2083,20 @@ int r600_init(struct radeon_device *rdev) void r600_fini(struct radeon_device *rdev) { + /* Suspend operations */ + r600_suspend(rdev); + r600_audio_fini(rdev); r600_blit_fini(rdev); - r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); - radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); + radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); @@ -2905,18 +2900,3 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } - -/** - * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl - * rdev: radeon device structure - * bo: buffer object struct which userspace is waiting for idle - * - * Some R6XX/R7XX doesn't seems to take into account HDP flush performed - * through ring buffer, this leads to corruption in rendering, see - * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we - * directly perform HDP flush by writing register through MMIO. - */ -void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) -{ - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); -} diff --git a/trunk/drivers/gpu/drm/radeon/r600_audio.c b/trunk/drivers/gpu/drm/radeon/r600_audio.c index b1c1d3433454..99e2c3891a7d 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_audio.c +++ b/trunk/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) + return rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index f57480ba1355..2d5f2bfa7201 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -661,13 +661,6 @@ struct radeon_asic { void (*hpd_fini)(struct radeon_device *rdev); bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); - /* ioctl hw specific callback. Some hw might want to perform special - * operation on specific ioctl. For instance on wait idle some hw - * might want to perform and HDP flush through MMIO as it seems that - * some R6XX/R7XX hw doesn't take HDP flush into account if programmed - * through ring. 
- */ - void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); }; /* @@ -1150,7 +1143,6 @@ extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); -extern void r600_cp_fini(struct radeon_device *rdev); extern int r600_count_pipe_bits(uint32_t val); extern int r600_gart_clear_page(struct radeon_device *rdev, int i); extern int r600_mc_wait_for_idle(struct radeon_device *rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index 05ee1aeac3fd..f2fbd2e4e9df 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -117,7 +117,6 @@ static struct radeon_asic r100_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -177,7 +176,6 @@ static struct radeon_asic r300_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; /* @@ -221,7 +219,6 @@ static struct radeon_asic r420_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -270,7 +267,6 @@ static struct radeon_asic rs400_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -327,7 +323,6 @@ static struct radeon_asic rs600_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -375,7 +370,6 @@ static struct radeon_asic rs690_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -427,7 +421,6 @@ static struct radeon_asic rv515_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -470,7 +463,6 @@ static struct radeon_asic r520_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; /* @@ -512,7 +504,6 @@ void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); -extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); static struct radeon_asic r600_asic = { .init = &r600_init, @@ -547,7 +538,6 @@ static struct radeon_asic r600_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, }; /* @@ -592,7 +582,6 @@ static struct radeon_asic rv770_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, }; #endif diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index e7b19440102e..579c8920e081 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -971,7 +971,8 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder lvds->native_mode.vdisplay); 
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); - lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); + if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) + lvds->panel_vcc_delay = 2000; lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index 2d8e5a70f284..55266416fa47 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->dac_load_detect = false; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, - radeon_connector->dac_load_detect); + 1); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c index db8e9a355a01..0e1325e18534 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c @@ -308,9 +308,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; r = radeon_bo_wait(robj, NULL, false); - /* callback hw specific functions if any */ - if (robj->rdev->asic->ioctl_wait_idle) - robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c index 287fcebfb4e6..9f5418983e2a 100644 --- a/trunk/drivers/gpu/drm/radeon/rs400.c +++ b/trunk/drivers/gpu/drm/radeon/rs400.c @@ -223,31 +223,15 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } -int rs400_mc_wait_for_idle(struct radeon_device *rdev) -{ - unsigned i; - uint32_t tmp; - - for (i = 0; i < rdev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(0x0150); - if (tmp & (1 << 2)) { - return 0; - } - DRM_UDELAY(1); - } - return -1; -} - void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ r100_hdp_reset(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); - if (rs400_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "rs400: Failed to wait MC idle while " - "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); + if (r300_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. 
Bad things might happen.\n"); } } @@ -386,8 +370,8 @@ void rs400_mc_program(struct radeon_device *rdev) r100_mc_stop(rdev, &save); /* Wait for mc idle */ - if (rs400_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); + if (r300_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); WREG32(R_000148_MC_FB_LOCATION, S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); @@ -464,6 +448,7 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { + rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -542,6 +527,7 @@ int rs400_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index c3818562a13e..d5255751e7b3 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -610,6 +610,7 @@ int rs600_suspend(struct radeon_device *rdev) void rs600_fini(struct radeon_device *rdev) { + rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -688,6 +689,7 @@ int rs600_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c index 06e2771aee5a..cd31da913771 100644 --- a/trunk/drivers/gpu/drm/radeon/rs690.c +++ b/trunk/drivers/gpu/drm/radeon/rs690.c @@ -676,6 +676,7 @@ int rs690_suspend(struct radeon_device *rdev) void rs690_fini(struct radeon_device *rdev) { + rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -755,6 +756,7 @@ int rs690_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index 0e1e6b8632b8..62756717b044 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -537,6 +537,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -614,12 +615,13 @@ int rv515_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 5943d561fd1e..afd9e8213c29 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -887,12 +887,6 @@ static int rv770_startup(struct radeon_device *rdev) return r; } rv770_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - 
rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1061,15 +1055,19 @@ int rv770_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } rdev->accel_working = true; r = rv770_startup(rdev); if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + rv770_suspend(rdev); r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -1091,11 +1089,13 @@ int rv770_init(struct radeon_device *rdev) void rv770_fini(struct radeon_device *rdev) { + rv770_suspend(rdev); + r600_blit_fini(rdev); - r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/trunk/drivers/hwmon/adt7462.c b/trunk/drivers/hwmon/adt7462.c index a31e77c776ae..b8156b4893bb 100644 --- a/trunk/drivers/hwmon/adt7462.c +++ b/trunk/drivers/hwmon/adt7462.c @@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; * * Some, but not all, of these voltages have low/high limits. */ -#define ADT7462_VOLT_COUNT 12 +#define ADT7462_VOLT_COUNT 13 #define ADT7462_VENDOR 0x41 #define ADT7462_DEVICE 0x62 diff --git a/trunk/drivers/pci/quirks.c b/trunk/drivers/pci/quirks.c index d58b94030ef3..c74694345b6e 100644 --- a/trunk/drivers/pci/quirks.c +++ b/trunk/drivers/pci/quirks.c @@ -338,23 +338,6 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); -/* - * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS - * ver. 1.33 20070103) don't set the correct ISA PCI region header info. - * BAR0 should be 8 bytes; instead, it may be set to something like 8k - * (which conflicts w/ BAR1's memory range). 
- */ -static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) -{ - if (pci_resource_len(dev, 0) != 8) { - struct resource *res = &dev->resource[0]; - res->end = res->start + 8 - 1; - dev_info(&dev->dev, "CS5536 ISA bridge bug detected " - "(incorrect header); workaround applied.\n"); - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); - static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr, const char *name) { diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 2b59201b955c..87b25543d7d1 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -1982,12 +1982,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_recover_relocation(tree_root); - if (ret < 0) { - printk(KERN_WARNING - "btrfs: failed to recover relocation\n"); - err = -EINVAL; - goto fail_trans_kthread; - } + BUG_ON(ret); } location.objectid = BTRFS_FS_TREE_OBJECTID; diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 559f72489b3b..432a2da4641e 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -5402,6 +5402,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, int ret; while (level >= 0) { + if (path->slots[level] >= + btrfs_header_nritems(path->nodes[level])) + break; + ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret > 0) break; @@ -5409,10 +5413,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, if (level == 0) break; - if (path->slots[level] >= - btrfs_header_nritems(path->nodes[level])) - break; - ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index b177ed319612..96577e8bf9fd 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -3165,9 +3165,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, spin_unlock(&tree->buffer_lock); goto free_eb; } + spin_unlock(&tree->buffer_lock); + /* add one reference for the tree */ atomic_inc(&eb->refs); - spin_unlock(&tree->buffer_lock); return eb; free_eb: diff --git a/trunk/fs/btrfs/file.c b/trunk/fs/btrfs/file.c index 9d0809629967..c02033596f02 100644 --- a/trunk/fs/btrfs/file.c +++ b/trunk/fs/btrfs/file.c @@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) } mutex_lock(&dentry->d_inode->i_mutex); out: - return ret > 0 ? -EIO : ret; + return ret > 0 ? EIO : ret; } static const struct vm_operations_struct btrfs_file_vm_ops = { diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index 4deb280f8969..8cd109972fa6 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -1681,6 +1681,24 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, * before we start the transaction. It limits the amount of btree * reads required while inside the transaction. 
*/ +static noinline void reada_csum(struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_ordered_extent *ordered_extent) +{ + struct btrfs_ordered_sum *sum; + u64 bytenr; + + sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, + list); + bytenr = sum->sums[0].bytenr; + + /* + * we don't care about the results, the point of this search is + * just to get the btree leaves into ram + */ + btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); +} + /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1691,6 +1709,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_path *path; int compressed = 0; int ret; @@ -1698,9 +1717,32 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (!ret) return 0; - ordered_extent = btrfs_lookup_ordered_extent(inode, start); - BUG_ON(!ordered_extent); + /* + * before we join the transaction, try to do some of our IO. + * This will limit the amount of IO that we have to do with + * the transaction running. We're unlikely to need to do any + * IO if the file extents are new, the disk_i_size checks + * covers the most common case. + */ + if (start < BTRFS_I(inode)->disk_i_size) { + path = btrfs_alloc_path(); + if (path) { + ret = btrfs_lookup_file_extent(NULL, root, path, + inode->i_ino, + start, 0); + ordered_extent = btrfs_lookup_ordered_extent(inode, + start); + if (!list_empty(&ordered_extent->list)) { + btrfs_release_path(root, path); + reada_csum(root, path, ordered_extent); + } + btrfs_free_path(path); + } + } + if (!ordered_extent) + ordered_extent = btrfs_lookup_ordered_extent(inode, start); + BUG_ON(!ordered_extent); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); @@ -5799,9 +5841,7 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, inode->i_ctime = CURRENT_TIME; BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; if (!(mode & FALLOC_FL_KEEP_SIZE) && - (actual_len > inode->i_size) && - (cur_offset > inode->i_size)) { - + cur_offset > inode->i_size) { if (cur_offset > actual_len) i_size = actual_len; else diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c index ab7ab5318745..ed3e4a2ec2c8 100644 --- a/trunk/fs/btrfs/relocation.c +++ b/trunk/fs/btrfs/relocation.c @@ -3764,8 +3764,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) BTRFS_DATA_RELOC_TREE_OBJECTID); if (IS_ERR(fs_root)) err = PTR_ERR(fs_root); - else - btrfs_orphan_cleanup(fs_root); + btrfs_orphan_cleanup(fs_root); } return err; } diff --git a/trunk/include/linux/ata.h b/trunk/include/linux/ata.h index 20f31567ccee..38a6948ce0c2 100644 --- a/trunk/include/linux/ata.h +++ b/trunk/include/linux/ata.h @@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id) return id[ATA_ID_SECTOR_SIZE] & (1 << 13); } -static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) +static inline u8 ata_id_logical_per_physical_sectors(const u16 *id) { - return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); + return id[ATA_ID_SECTOR_SIZE] & 0xf; } static inline int ata_id_has_lba48(const u16 *id) diff --git a/trunk/include/linux/compiler.h b/trunk/include/linux/compiler.h 
index 188fcae10a99..5be3dab4a695 100644 --- a/trunk/include/linux/compiler.h +++ b/trunk/include/linux/compiler.h @@ -15,7 +15,6 @@ # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); #else @@ -33,7 +32,6 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __acquire(x) (void)0 # define __release(x) (void)0 # define __cond_lock(x,c) (c) -# define __percpu #endif #ifdef __KERNEL__ diff --git a/trunk/mm/migrate.c b/trunk/mm/migrate.c index 9a0db5bbabe4..efddbf0926b2 100644 --- a/trunk/mm/migrate.c +++ b/trunk/mm/migrate.c @@ -912,9 +912,6 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, goto out_pm; err = -ENODEV; - if (node < 0 || node >= MAX_NUMNODES) - goto out_pm; - if (!node_state(node, N_HIGH_MEMORY)) goto out_pm; diff --git a/trunk/sound/pci/ctxfi/ctatc.c b/trunk/sound/pci/ctxfi/ctatc.c index 459c1f62783b..cb65bd0dd35b 100644 --- a/trunk/sound/pci/ctxfi/ctatc.c +++ b/trunk/sound/pci/ctxfi/ctatc.c @@ -166,7 +166,18 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm) static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index) { - return atc->vm->get_ptp_phys(atc->vm, index); + struct ct_vm *vm; + void *kvirt_addr; + unsigned long phys_addr; + + vm = atc->vm; + kvirt_addr = vm->get_ptp_virt(vm, index); + if (kvirt_addr == NULL) + phys_addr = (~0UL); + else + phys_addr = virt_to_phys(kvirt_addr); + + return phys_addr; } static unsigned int convert_format(snd_pcm_format_t snd_format) @@ -1658,7 +1669,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci, } /* Set up device virtual memory management object */ - err = ct_vm_create(&atc->vm, pci); + err = ct_vm_create(&atc->vm); if (err < 0) goto error1; diff --git a/trunk/sound/pci/ctxfi/ctvmem.c b/trunk/sound/pci/ctxfi/ctvmem.c index 65da6e466f80..6b78752e9503 100644 --- a/trunk/sound/pci/ctxfi/ctvmem.c +++ b/trunk/sound/pci/ctxfi/ctvmem.c @@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size) return NULL; } - ptp = (unsigned long *)vm->ptp[0].area; + ptp = vm->ptp[0]; pte_start = (block->addr >> CT_PAGE_SHIFT); pages = block->size >> CT_PAGE_SHIFT; for (i = 0; i < pages; i++) { @@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block) } /* * - * return the host physical addr of the @index-th device - * page table page on success, or ~0UL on failure. - * The first returned ~0UL indicates the termination. + * return the host (kmalloced) addr of the @index-th device + * page talbe page on success, or NULL on failure. + * The first returned NULL indicates the termination. * */ -static dma_addr_t -ct_get_ptp_phys(struct ct_vm *vm, int index) +static void * +ct_get_ptp_virt(struct ct_vm *vm, int index) { - dma_addr_t addr; + void *addr; - addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr; + addr = (index >= CT_PTP_NUM) ? 
NULL : vm->ptp[index]; return addr; } -int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci) +int ct_vm_create(struct ct_vm **rvm) { struct ct_vm *vm; struct ct_vm_block *block; - int i, err = 0; + int i; *rvm = NULL; @@ -188,21 +188,23 @@ int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci) /* Allocate page table pages */ for (i = 0; i < CT_PTP_NUM; i++) { - err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, - snd_dma_pci_data(pci), - PAGE_SIZE, &vm->ptp[i]); - if (err < 0) + vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!vm->ptp[i]) break; } - if (err < 0) { + if (!i) { /* no page table pages are allocated */ - ct_vm_destroy(vm); + kfree(vm); return -ENOMEM; } vm->size = CT_ADDRS_PER_PAGE * i; + /* Initialise remaining ptps */ + for (; i < CT_PTP_NUM; i++) + vm->ptp[i] = NULL; + vm->map = ct_vm_map; vm->unmap = ct_vm_unmap; - vm->get_ptp_phys = ct_get_ptp_phys; + vm->get_ptp_virt = ct_get_ptp_virt; INIT_LIST_HEAD(&vm->unused); INIT_LIST_HEAD(&vm->used); block = kzalloc(sizeof(*block), GFP_KERNEL); @@ -240,7 +242,7 @@ void ct_vm_destroy(struct ct_vm *vm) /* free allocated page table pages */ for (i = 0; i < CT_PTP_NUM; i++) - snd_dma_free_pages(&vm->ptp[i]); + kfree(vm->ptp[i]); vm->size = 0; diff --git a/trunk/sound/pci/ctxfi/ctvmem.h b/trunk/sound/pci/ctxfi/ctvmem.h index b23adfca4de6..01e4fd0386a3 100644 --- a/trunk/sound/pci/ctxfi/ctvmem.h +++ b/trunk/sound/pci/ctxfi/ctvmem.h @@ -22,8 +22,6 @@ #include #include -#include -#include /* The chip can handle the page table of 4k pages * (emu20k1 can handle even 8k pages, but we don't use it right now) @@ -43,7 +41,7 @@ struct snd_pcm_substream; /* Virtual memory management object for card device */ struct ct_vm { - struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */ + void *ptp[CT_PTP_NUM]; /* Device page table pages */ unsigned int size; /* Available addr space in bytes */ struct list_head unused; /* List of unused blocks */ struct list_head used; /* List of used blocks */ @@ -54,10 +52,10 @@ struct ct_vm { int size); /* Unmap device logical addr area. 
*/ void (*unmap)(struct ct_vm *, struct ct_vm_block *block); - dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index); + void *(*get_ptp_virt)(struct ct_vm *vm, int index); }; -int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci); +int ct_vm_create(struct ct_vm **rvm); void ct_vm_destroy(struct ct_vm *vm); #endif /* CTVMEM_H */ diff --git a/trunk/sound/pci/hda/hda_intel.c b/trunk/sound/pci/hda/hda_intel.c index b8faa6dc5abe..565de38a3fc7 100644 --- a/trunk/sound/pci/hda/hda_intel.c +++ b/trunk/sound/pci/hda/hda_intel.c @@ -426,7 +426,6 @@ struct azx { /* flags */ int position_fix; - int poll_count; unsigned int running :1; unsigned int initialized :1; unsigned int single_cmd :1; @@ -507,7 +506,7 @@ static char *driver_short_names[] __devinitdata = { #define get_azx_dev(substream) (substream->runtime->private_data) static int azx_acquire_irq(struct azx *chip, int do_disconnect); -static int azx_send_cmd(struct hda_bus *bus, unsigned int val); + /* * Interface for HD codec */ @@ -665,12 +664,11 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, { struct azx *chip = bus->private_data; unsigned long timeout; - int do_poll = 0; again: timeout = jiffies + msecs_to_jiffies(1000); for (;;) { - if (chip->polling_mode || do_poll) { + if (chip->polling_mode) { spin_lock_irq(&chip->reg_lock); azx_update_rirb(chip); spin_unlock_irq(&chip->reg_lock); @@ -678,9 +676,6 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, if (!chip->rirb.cmds[addr]) { smp_rmb(); bus->rirb_error = 0; - - if (!do_poll) - chip->poll_count = 0; return chip->rirb.res[addr]; /* the last value */ } if (time_after(jiffies, timeout)) @@ -693,16 +688,6 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, } } - if (!chip->polling_mode && chip->poll_count < 2) { - snd_printdd(SFX "azx_get_response timeout, " - "polling the codec once: last cmd=0x%08x\n", - chip->last_cmd[addr]); - do_poll = 1; - chip->poll_count++; - goto again; - } - - if (!chip->polling_mode) { snd_printk(KERN_WARNING SFX "azx_get_response timeout, " "switching to polling mode: last cmd=0x%08x\n", @@ -2058,7 +2043,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect) { if (request_irq(chip->pci->irq, azx_interrupt, chip->msi ? 
0 : IRQF_SHARED, - "hda_intel", chip)) { + "HDA Intel", chip)) { printk(KERN_ERR "hda-intel: unable to grab IRQ %d, " "disabling device\n", chip->pci->irq); if (do_disconnect) diff --git a/trunk/sound/pci/ice1712/aureon.c b/trunk/sound/pci/ice1712/aureon.c index 9e66f6d306f8..765d7bd4c3d4 100644 --- a/trunk/sound/pci/ice1712/aureon.c +++ b/trunk/sound/pci/ice1712/aureon.c @@ -703,13 +703,11 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho { unsigned char nvol; - if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) { + if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) nvol = 0; - } else { + else nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / WM_VOL_MAX; - nvol += 0x1b; - } wm_put(ice, index, nvol); wm_put_nocache(ice, index, 0x180 | nvol); @@ -780,7 +778,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_ for (ch = 0; ch < 2; ch++) { unsigned int vol = ucontrol->value.integer.value[ch]; if (vol > WM_VOL_MAX) - vol = WM_VOL_MAX; + continue; vol |= spec->master[ch] & WM_VOL_MUTE; if (vol != spec->master[ch]) { int dac; @@ -836,8 +834,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value * for (i = 0; i < voices; i++) { unsigned int vol = ucontrol->value.integer.value[i]; if (vol > WM_VOL_MAX) - vol = WM_VOL_MAX; - vol |= spec->vol[ofs+i] & WM_VOL_MUTE; + continue; + vol |= spec->vol[ofs+i]; if (vol != spec->vol[ofs+i]) { spec->vol[ofs+i] = vol; idx = WM_DAC_ATTEN + ofs + i; diff --git a/trunk/sound/soc/omap/omap3pandora.c b/trunk/sound/soc/omap/omap3pandora.c index 68980c19a3bc..71b2c161158d 100644 --- a/trunk/sound/soc/omap/omap3pandora.c +++ b/trunk/sound/soc/omap/omap3pandora.c @@ -145,7 +145,6 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = { }; static const struct snd_soc_dapm_route omap3pandora_out_map[] = { - {"PCM DAC", NULL, "APLL Enable"}, {"Headphone Amplifier", NULL, "PCM DAC"}, {"Line Out", NULL, "PCM DAC"}, {"Headphone Jack", NULL, "Headphone Amplifier"},
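
The ahci and intel_lvds hunks above add and remove entries in DMI quirk tables. For readers unfamiliar with that pattern, below is a minimal, self-contained sketch of how such a table is typically declared and consulted. It is illustrative only: the identifiers my_sysids and my_broken_suspend, and the vendor/model strings, are invented for this sketch and do not appear anywhere in the patch.

/*
 * Illustrative sketch (not part of the patch above): a driver lists the
 * boards it cares about, lets dmi_first_match() pick the entry matching
 * the running machine, and compares the reported BIOS version against a
 * cutoff string stored in .driver_data.
 */
#include <linux/dmi.h>
#include <linux/string.h>

static const struct dmi_system_id my_sysids[] = {
	{
		.ident = "Example laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
		.driver_data = "V3.04",	/* cutoff BIOS version */
	},
	{ }	/* terminate list */
};

static bool my_broken_suspend(void)
{
	const struct dmi_system_id *dmi = dmi_first_match(my_sysids);
	const char *ver;

	if (!dmi)
		return false;	/* this machine is not in the table */

	ver = dmi_get_system_info(DMI_BIOS_VERSION);
	/* treat anything older than the cutoff version as affected */
	return !ver || strcmp(ver, dmi->driver_data) < 0;
}

Keeping the cutoff BIOS version in .driver_data places the policy ("anything older than X is blacklisted") next to the board match itself, which is why the hunks above can add or drop a machine by touching a single table entry.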