From 23012d99cac986a1239977abdd42fa079cfcdf19 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 4 Feb 2010 06:57:58 +0000 Subject: [PATCH] --- yaml --- r: 180334 b: refs/heads/master c: 2717568e7c44fe7dc3f4f52ea823811cfeede2b5 h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/Documentation/ABI/testing/ima_policy | 12 +- trunk/Makefile | 2 +- trunk/drivers/ata/ahci.c | 15 -- trunk/drivers/ata/libata-scsi.c | 2 +- trunk/drivers/ata/libata-sff.c | 3 - trunk/drivers/char/tty_io.c | 4 +- trunk/drivers/gpu/drm/ati_pcigart.c | 2 +- trunk/drivers/gpu/drm/i915/i915_drv.c | 2 +- trunk/drivers/gpu/drm/i915/i915_gem.c | 11 +- trunk/drivers/gpu/drm/i915/i915_irq.c | 42 +-- trunk/drivers/gpu/drm/i915/intel_crt.c | 3 - trunk/drivers/gpu/drm/i915/intel_display.c | 33 +-- trunk/drivers/gpu/drm/i915/intel_lvds.c | 11 +- trunk/drivers/gpu/drm/i915/intel_sdvo.c | 8 - trunk/drivers/gpu/drm/radeon/r100.c | 14 +- trunk/drivers/gpu/drm/radeon/r300.c | 16 +- trunk/drivers/gpu/drm/radeon/r420.c | 3 +- trunk/drivers/gpu/drm/radeon/r520.c | 3 +- trunk/drivers/gpu/drm/radeon/r600.c | 48 +--- trunk/drivers/gpu/drm/radeon/r600_audio.c | 2 +- trunk/drivers/gpu/drm/radeon/radeon.h | 8 - trunk/drivers/gpu/drm/radeon/radeon_asic.h | 11 - trunk/drivers/gpu/drm/radeon/radeon_combios.c | 3 +- .../gpu/drm/radeon/radeon_connectors.c | 2 +- trunk/drivers/gpu/drm/radeon/radeon_gem.c | 3 - trunk/drivers/gpu/drm/radeon/rs400.c | 28 +- trunk/drivers/gpu/drm/radeon/rs600.c | 2 + trunk/drivers/gpu/drm/radeon/rs690.c | 2 + trunk/drivers/gpu/drm/radeon/rv515.c | 4 +- trunk/drivers/gpu/drm/radeon/rv770.c | 24 +- trunk/drivers/hwmon/adt7462.c | 2 +- trunk/drivers/hwmon/lm78.c | 25 +- trunk/drivers/hwmon/w83781d.c | 26 +- trunk/drivers/pci/quirks.c | 17 -- trunk/drivers/usb/host/r8a66597-hcd.c | 37 ++- trunk/fs/befs/linuxvfs.c | 1 - trunk/fs/block_dev.c | 7 +- trunk/fs/btrfs/disk-io.c | 7 +- trunk/fs/btrfs/extent-tree.c | 8 +- trunk/fs/btrfs/extent_io.c | 3 +- trunk/fs/btrfs/file.c | 2 +- trunk/fs/btrfs/inode.c | 50 +++- trunk/fs/btrfs/relocation.c | 3 +- trunk/fs/fcntl.c | 6 +- trunk/fs/file_table.c | 1 - trunk/fs/namei.c | 6 +- trunk/fs/nfsd/vfs.c | 2 +- trunk/include/linux/ata.h | 4 +- trunk/include/linux/compiler.h | 2 - trunk/include/linux/ima.h | 4 +- trunk/init/main.c | 2 +- trunk/mm/migrate.c | 3 - trunk/security/integrity/ima/ima.h | 3 +- trunk/security/integrity/ima/ima_api.c | 4 +- trunk/security/integrity/ima/ima_iint.c | 9 +- trunk/security/integrity/ima/ima_main.c | 239 +++++++++++------- trunk/security/integrity/ima/ima_policy.c | 9 +- trunk/security/security.c | 2 + trunk/sound/pci/ctxfi/ctatc.c | 15 +- trunk/sound/pci/ctxfi/ctvmem.c | 38 +-- trunk/sound/pci/ctxfi/ctvmem.h | 8 +- trunk/sound/pci/hda/hda_intel.c | 21 +- trunk/sound/pci/ice1712/aureon.c | 12 +- trunk/sound/soc/omap/omap3pandora.c | 1 - 65 files changed, 418 insertions(+), 486 deletions(-) diff --git a/[refs] b/[refs] index f9d92aee9ca4..825d493e52e3 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 6339204ecc2aa2067a99595522de0403f0854bb8 +refs/heads/master: 2717568e7c44fe7dc3f4f52ea823811cfeede2b5 diff --git a/trunk/Documentation/ABI/testing/ima_policy b/trunk/Documentation/ABI/testing/ima_policy index 6cd6daefaaed..6434f0df012e 100644 --- a/trunk/Documentation/ABI/testing/ima_policy +++ b/trunk/Documentation/ABI/testing/ima_policy @@ -20,7 +20,7 @@ Description: lsm: [[subj_user=] [subj_role=] [subj_type=] [obj_user=] [obj_role=] [obj_type=]] - base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK] + base: func:= 
[BPRM_CHECK][FILE_MMAP][INODE_PERMISSION] mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] fsmagic:= hex value uid:= decimal value @@ -40,11 +40,11 @@ Description: measure func=BPRM_CHECK measure func=FILE_MMAP mask=MAY_EXEC - measure func=FILE_CHECK mask=MAY_READ uid=0 + measure func=INODE_PERM mask=MAY_READ uid=0 The default policy measures all executables in bprm_check, all files mmapped executable in file_mmap, and all files - open for read by root in do_filp_open. + open for read by root in inode_permission. Examples of LSM specific definitions: @@ -54,8 +54,8 @@ Description: dont_measure obj_type=var_log_t dont_measure obj_type=auditd_log_t - measure subj_user=system_u func=FILE_CHECK mask=MAY_READ - measure subj_role=system_r func=FILE_CHECK mask=MAY_READ + measure subj_user=system_u func=INODE_PERM mask=MAY_READ + measure subj_role=system_r func=INODE_PERM mask=MAY_READ Smack: - measure subj_user=_ func=FILE_CHECK mask=MAY_READ + measure subj_user=_ func=INODE_PERM mask=MAY_READ diff --git a/trunk/Makefile b/trunk/Makefile index f8e02e9491d0..394aec712c7d 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 33 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc6 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index b34390347c16..b8bea100a160 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -2868,21 +2868,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) }, .driver_data = "F.23", /* cutoff BIOS version */ }, - /* - * Acer eMachines G725 has the same problem. BIOS - * V1.03 is known to be broken. V3.04 is known to - * work. Inbetween, there are V1.06, V2.06 and V3.03 - * that we don't have much idea about. For now, - * blacklist anything older than V3.04. - */ - { - .ident = "G725", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), - DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), - }, - .driver_data = "V3.04", /* cutoff BIOS version */ - }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index d096fbcbc771..f4ea5a8c325b 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * write indication (used for PIO/DMA setup), result TF is * copied back and we don't whine too much about its failure. 
*/ - tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; if (scmd->sc_data_direction == DMA_TO_DEVICE) tf->flags |= ATA_TFLAG_WRITE; diff --git a/trunk/drivers/ata/libata-sff.c b/trunk/drivers/ata/libata-sff.c index 730ef3c384ca..741065c9da67 100644 --- a/trunk/drivers/ata/libata-sff.c +++ b/trunk/drivers/ata/libata-sff.c @@ -893,9 +893,6 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } - if (!do_write) - flush_dcache_page(page); - qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c index dcb9083ecde0..c6f3b48be9dd 100644 --- a/trunk/drivers/char/tty_io.c +++ b/trunk/drivers/char/tty_io.c @@ -1951,10 +1951,8 @@ static int tty_fasync(int fd, struct file *filp, int on) pid = task_pid(current); type = PIDTYPE_PID; } - get_pid(pid); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); retval = __f_setown(filp, pid, type, 0); - put_pid(pid); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (retval) goto out; } else { diff --git a/trunk/drivers/gpu/drm/ati_pcigart.c b/trunk/drivers/gpu/drm/ati_pcigart.c index 17be051b7aa3..a1fce68e3bbe 100644 --- a/trunk/drivers/gpu/drm/ati_pcigart.c +++ b/trunk/drivers/gpu/drm/ati_pcigart.c @@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { DRM_ERROR("fail to set dma mask to 0x%Lx\n", - (unsigned long long)gart_info->table_mask); + gart_info->table_mask); ret = 1; goto done; } diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index ecac882e1d54..46d88965852a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = { const static struct intel_device_info intel_pineview_info = { .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .need_gfx_hws = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index b4c8c0230689..dda787aafcc6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -3564,9 +3564,6 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t reloc_count = 0, i; int ret = 0; - if (relocs == NULL) - return 0; - for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; int unwritten; @@ -3656,7 +3653,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs = NULL; + struct drm_i915_gem_relocation_entry *relocs; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains, reloc_index; @@ -3725,8 +3722,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3735,8 +3730,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (obj_priv->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", object_list[i]); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; 
ret = -EBADF; goto err; } @@ -3933,7 +3926,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); -pre_mutex_err: /* Copy the updated relocations out regardless of current error * state. Failure to update the relocs would mean that the next * time userland calls execbuf, it would do so with presumed offset @@ -3948,6 +3940,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = ret2; } +pre_mutex_err: drm_free_large(object_list); kfree(cliprects); diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 50ddf4a95c5e..89a071a3e6fb 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -309,22 +309,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_GSE) ironlake_opregion_gse_intr(dev); - if (de_iir & DE_PLANEA_FLIP_DONE) - intel_prepare_page_flip(dev, 0); - - if (de_iir & DE_PLANEB_FLIP_DONE) - intel_prepare_page_flip(dev, 1); - - if (de_iir & DE_PIPEA_VBLANK) { - drm_handle_vblank(dev, 0); - intel_finish_page_flip(dev, 0); - } - - if (de_iir & DE_PIPEB_VBLANK) { - drm_handle_vblank(dev, 1); - intel_finish_page_flip(dev, 1); - } - /* check event from PCH */ if ((de_iir & DE_PCH_EVENT) && (pch_iir & SDE_HOTPLUG_MASK)) { @@ -860,11 +844,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_IRONLAKE(dev)) - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else if (IS_I965G(dev)) + return 0; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -882,14 +866,13 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_IRONLAKE(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else - i915_disable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE | - PIPE_START_VBLANK_INTERRUPT_ENABLE); + return; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -1032,14 +1015,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; u32 render_mask = GT_USER_INTERRUPT; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; + dev_priv->de_irq_enable_reg = display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 79dd4026586f..ddefc871edfe 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -157,9 +157,6 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 12775df1bbfd..45da78ef4a92 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -1638,7 +1638,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); - drm_vblank_off(dev, pipe); /* Disable display plane */ temp = I915_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { @@ -2520,10 +2519,6 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, sr_entries = roundup(sr_entries / cacheline_size, 1); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -2567,10 +2562,6 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm = 1; srwm &= 0x3f; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -2639,10 +2630,6 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, if (srwm < 0) srwm = 1; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -3997,12 +3984,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev->event_lock, flags); work = 
intel_crtc->unpin_work; if (work == NULL || !work->pending) { - if (work && !work->pending) { - obj_priv = work->obj->driver_private; - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", - obj_priv, - atomic_read(&obj_priv->pending_flip)); - } spin_unlock_irqrestore(&dev->event_lock, flags); return; } @@ -4024,10 +4005,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev->event_lock, flags); obj_priv = work->obj->driver_private; - - /* Initial scanout buffer will have a 0 pending flip count */ - if ((atomic_read(&obj_priv->pending_flip) == 0) || - atomic_dec_and_test(&obj_priv->pending_flip)) + if (atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); } @@ -4040,11 +4018,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) { + if (intel_crtc->unpin_work) intel_crtc->unpin_work->pending = 1; - } else { - DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); - } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -4078,7 +4053,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); mutex_unlock(&dev->struct_mutex); @@ -4092,10 +4066,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ret = intel_pin_and_fence_fb_obj(dev, obj); if (ret != 0) { - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", - obj->driver_private); kfree(work); - intel_crtc->unpin_work = NULL; mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index b1d0acbae4e4..aa74e59bec61 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { { .ident = "Samsung SX20S", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), + DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BOARD_NAME, "SX20S"), }, }, @@ -622,13 +622,6 @@ static const struct dmi_system_id bad_lid_status[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), }, }, - { - .ident = "Aspire 1810T", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), - }, - }, { .ident = "PC-81005", .matches = { @@ -650,7 +643,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect { enum drm_connector_status status = connector_status_connected; - if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) + if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) status = connector_status_disconnected; return status; diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 82678d30ab06..eaacfd0920df 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -2345,14 +2345,6 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) connector->connector_type = DRM_MODE_CONNECTOR_VGA; intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); - } else if (flags & SDVO_OUTPUT_CVBS0) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; - 
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index c0d4650cdb79..11c9a3fe6810 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -354,17 +354,11 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } -/* Who ever call radeon_fence_emit should call ring_lock and ask - * for enough space (today caller are ib schedule and buffer move) */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* We have to make sure that caches are flushed before - * CPU might read something from VRAM. */ - radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); - radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); + /* Who ever call radeon_fence_emit should call ring_lock and ask + * for enough space (today caller are ib schedule and buffer move) */ /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 16) | (1 << 17)); @@ -3375,6 +3369,7 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { + r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -3486,12 +3481,13 @@ int r100_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index 43b55a030b4d..0051d11b907c 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -506,14 +506,11 @@ void r300_vram_info(struct radeon_device *rdev) /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; - tmp = RREG32(RADEON_MEM_CNTL); - tmp &= R300_MEM_NUM_CHANNELS_MASK; - switch (tmp) { - case 0: rdev->mc.vram_width = 64; break; - case 1: rdev->mc.vram_width = 128; break; - case 2: rdev->mc.vram_width = 256; break; - default: rdev->mc.vram_width = 128; break; + if (tmp & R300_MEM_NUM_CHANNELS_MASK) { + rdev->mc.vram_width = 128; + } else { + rdev->mc.vram_width = 64; } r100_vram_init_sizes(rdev); @@ -1330,6 +1327,7 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { + r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -1420,15 +1418,15 @@ int r300_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } 
return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r420.c b/trunk/drivers/gpu/drm/radeon/r420.c index d9373246c97f..4526faaacca8 100644 --- a/trunk/drivers/gpu/drm/radeon/r420.c +++ b/trunk/drivers/gpu/drm/radeon/r420.c @@ -389,15 +389,16 @@ int r420_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r420_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r520.c b/trunk/drivers/gpu/drm/radeon/r520.c index ddf5731eba0d..9a189072f2b9 100644 --- a/trunk/drivers/gpu/drm/radeon/r520.c +++ b/trunk/drivers/gpu/drm/radeon/r520.c @@ -294,12 +294,13 @@ int r520_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index a1198d99cdf9..1b6d0001b20e 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -1654,12 +1654,6 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.align_mask = 16 - 1; } -void r600_cp_fini(struct radeon_device *rdev) -{ - r600_cp_stop(rdev); - radeon_ring_fini(rdev); -} - /* * GPU scratch registers helpers function. 
@@ -1867,12 +1861,6 @@ int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -2057,15 +2045,19 @@ int r600_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } rdev->accel_working = true; r = r600_startup(rdev); if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + r600_suspend(rdev); r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -2091,17 +2083,20 @@ int r600_init(struct radeon_device *rdev) void r600_fini(struct radeon_device *rdev) { + /* Suspend operations */ + r600_suspend(rdev); + r600_audio_fini(rdev); r600_blit_fini(rdev); - r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); - radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); + radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); @@ -2905,18 +2900,3 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } - -/** - * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl - * rdev: radeon device structure - * bo: buffer object struct which userspace is waiting for idle - * - * Some R6XX/R7XX doesn't seems to take into account HDP flush performed - * through ring buffer, this leads to corruption in rendering, see - * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we - * directly perform HDP flush by writing register through MMIO. - */ -void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) -{ - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); -} diff --git a/trunk/drivers/gpu/drm/radeon/r600_audio.c b/trunk/drivers/gpu/drm/radeon/r600_audio.c index b1c1d3433454..99e2c3891a7d 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_audio.c +++ b/trunk/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) + return rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index f57480ba1355..2d5f2bfa7201 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -661,13 +661,6 @@ struct radeon_asic { void (*hpd_fini)(struct radeon_device *rdev); bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); - /* ioctl hw specific callback. Some hw might want to perform special - * operation on specific ioctl. For instance on wait idle some hw - * might want to perform and HDP flush through MMIO as it seems that - * some R6XX/R7XX hw doesn't take HDP flush into account if programmed - * through ring. 
- */ - void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); }; /* @@ -1150,7 +1143,6 @@ extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); -extern void r600_cp_fini(struct radeon_device *rdev); extern int r600_count_pipe_bits(uint32_t val); extern int r600_gart_clear_page(struct radeon_device *rdev, int i); extern int r600_mc_wait_for_idle(struct radeon_device *rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index 05ee1aeac3fd..f2fbd2e4e9df 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -117,7 +117,6 @@ static struct radeon_asic r100_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -177,7 +176,6 @@ static struct radeon_asic r300_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; /* @@ -221,7 +219,6 @@ static struct radeon_asic r420_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -270,7 +267,6 @@ static struct radeon_asic rs400_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -327,7 +323,6 @@ static struct radeon_asic rs600_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -375,7 +370,6 @@ static struct radeon_asic rs690_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -427,7 +421,6 @@ static struct radeon_asic rv515_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; @@ -470,7 +463,6 @@ static struct radeon_asic r520_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, }; /* @@ -512,7 +504,6 @@ void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); -extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); static struct radeon_asic r600_asic = { .init = &r600_init, @@ -547,7 +538,6 @@ static struct radeon_asic r600_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, }; /* @@ -592,7 +582,6 @@ static struct radeon_asic rv770_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, }; #endif diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index e7b19440102e..579c8920e081 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -971,7 +971,8 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder lvds->native_mode.vdisplay); 
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); - lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); + if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) + lvds->panel_vcc_delay = 2000; lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index 2d8e5a70f284..55266416fa47 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->dac_load_detect = false; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, - radeon_connector->dac_load_detect); + 1); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c index db8e9a355a01..0e1325e18534 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c @@ -308,9 +308,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; r = radeon_bo_wait(robj, NULL, false); - /* callback hw specific functions if any */ - if (robj->rdev->asic->ioctl_wait_idle) - robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c index 287fcebfb4e6..9f5418983e2a 100644 --- a/trunk/drivers/gpu/drm/radeon/rs400.c +++ b/trunk/drivers/gpu/drm/radeon/rs400.c @@ -223,31 +223,15 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } -int rs400_mc_wait_for_idle(struct radeon_device *rdev) -{ - unsigned i; - uint32_t tmp; - - for (i = 0; i < rdev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(0x0150); - if (tmp & (1 << 2)) { - return 0; - } - DRM_UDELAY(1); - } - return -1; -} - void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ r100_hdp_reset(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); - if (rs400_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "rs400: Failed to wait MC idle while " - "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); + if (r300_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. 
Bad things might happen.\n"); } } @@ -386,8 +370,8 @@ void rs400_mc_program(struct radeon_device *rdev) r100_mc_stop(rdev, &save); /* Wait for mc idle */ - if (rs400_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); + if (r300_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); WREG32(R_000148_MC_FB_LOCATION, S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); @@ -464,6 +448,7 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { + rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -542,6 +527,7 @@ int rs400_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index c3818562a13e..d5255751e7b3 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -610,6 +610,7 @@ int rs600_suspend(struct radeon_device *rdev) void rs600_fini(struct radeon_device *rdev) { + rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -688,6 +689,7 @@ int rs600_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c index 06e2771aee5a..cd31da913771 100644 --- a/trunk/drivers/gpu/drm/radeon/rs690.c +++ b/trunk/drivers/gpu/drm/radeon/rs690.c @@ -676,6 +676,7 @@ int rs690_suspend(struct radeon_device *rdev) void rs690_fini(struct radeon_device *rdev) { + rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -755,6 +756,7 @@ int rs690_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index 0e1e6b8632b8..62756717b044 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -537,6 +537,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -614,12 +615,13 @@ int rv515_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); - radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 5943d561fd1e..afd9e8213c29 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -887,12 +887,6 @@ static int rv770_startup(struct radeon_device *rdev) return r; } rv770_gpu_init(rdev); - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - 
rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1061,15 +1055,19 @@ int rv770_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } rdev->accel_working = true; r = rv770_startup(rdev); if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + rv770_suspend(rdev); r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -1091,11 +1089,13 @@ int rv770_init(struct radeon_device *rdev) void rv770_fini(struct radeon_device *rdev) { + rv770_suspend(rdev); + r600_blit_fini(rdev); - r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/trunk/drivers/hwmon/adt7462.c b/trunk/drivers/hwmon/adt7462.c index b8156b4893bb..a31e77c776ae 100644 --- a/trunk/drivers/hwmon/adt7462.c +++ b/trunk/drivers/hwmon/adt7462.c @@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; * * Some, but not all, of these voltages have low/high limits. */ -#define ADT7462_VOLT_COUNT 13 +#define ADT7462_VOLT_COUNT 12 #define ADT7462_VENDOR 0x41 #define ADT7462_DEVICE 0x62 diff --git a/trunk/drivers/hwmon/lm78.c b/trunk/drivers/hwmon/lm78.c index 72ff2c4e757d..cadcbd90ff3b 100644 --- a/trunk/drivers/hwmon/lm78.c +++ b/trunk/drivers/hwmon/lm78.c @@ -851,16 +851,17 @@ static struct lm78_data *lm78_update_device(struct device *dev) static int __init lm78_isa_found(unsigned short address) { int val, save, found = 0; - int port; - - /* Some boards declare base+0 to base+7 as a PNP device, some base+4 - * to base+7 and some base+5 to base+6. So we better request each port - * individually for the probing phase. */ - for (port = address; port < address + LM78_EXTENT; port++) { - if (!request_region(port, 1, "lm78")) { - pr_debug("lm78: Failed to request port 0x%x\n", port); - goto release; - } + + /* We have to request the region in two parts because some + boards declare base+4 to base+7 as a PNP device */ + if (!request_region(address, 4, "lm78")) { + pr_debug("lm78: Failed to request low part of region\n"); + return 0; + } + if (!request_region(address + 4, 4, "lm78")) { + pr_debug("lm78: Failed to request high part of region\n"); + release_region(address, 4); + return 0; } #define REALLY_SLOW_IO @@ -924,8 +925,8 @@ static int __init lm78_isa_found(unsigned short address) val & 0x80 ? "LM79" : "LM78", (int)address); release: - for (port--; port >= address; port--) - release_region(port, 1); + release_region(address + 4, 4); + release_region(address, 4); return found; } diff --git a/trunk/drivers/hwmon/w83781d.c b/trunk/drivers/hwmon/w83781d.c index 32d4adee73db..05f9225b6f94 100644 --- a/trunk/drivers/hwmon/w83781d.c +++ b/trunk/drivers/hwmon/w83781d.c @@ -1793,17 +1793,17 @@ static int __init w83781d_isa_found(unsigned short address) { int val, save, found = 0; - int port; - - /* Some boards declare base+0 to base+7 as a PNP device, some base+4 - * to base+7 and some base+5 to base+6. 
So we better request each port - * individually for the probing phase. */ - for (port = address; port < address + W83781D_EXTENT; port++) { - if (!request_region(port, 1, "w83781d")) { - pr_debug("w83781d: Failed to request port 0x%x\n", - port); - goto release; - } + + /* We have to request the region in two parts because some + boards declare base+4 to base+7 as a PNP device */ + if (!request_region(address, 4, "w83781d")) { + pr_debug("w83781d: Failed to request low part of region\n"); + return 0; + } + if (!request_region(address + 4, 4, "w83781d")) { + pr_debug("w83781d: Failed to request high part of region\n"); + release_region(address, 4); + return 0; } #define REALLY_SLOW_IO @@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) val == 0x30 ? "W83782D" : "W83781D", (int)address); release: - for (port--; port >= address; port--) - release_region(port, 1); + release_region(address + 4, 4); + release_region(address, 4); return found; } diff --git a/trunk/drivers/pci/quirks.c b/trunk/drivers/pci/quirks.c index d58b94030ef3..c74694345b6e 100644 --- a/trunk/drivers/pci/quirks.c +++ b/trunk/drivers/pci/quirks.c @@ -338,23 +338,6 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); -/* - * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS - * ver. 1.33 20070103) don't set the correct ISA PCI region header info. - * BAR0 should be 8 bytes; instead, it may be set to something like 8k - * (which conflicts w/ BAR1's memory range). - */ -static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) -{ - if (pci_resource_len(dev, 0) != 8) { - struct resource *res = &dev->resource[0]; - res->end = res->start + 8 - 1; - dev_info(&dev->dev, "CS5536 ISA bridge bug detected " - "(incorrect header); workaround applied.\n"); - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); - static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr, const char *name) { diff --git a/trunk/drivers/usb/host/r8a66597-hcd.c b/trunk/drivers/usb/host/r8a66597-hcd.c index 0ceec123ddfd..50a3e2d6a6cc 100644 --- a/trunk/drivers/usb/host/r8a66597-hcd.c +++ b/trunk/drivers/usb/host/r8a66597-hcd.c @@ -35,7 +35,9 @@ #include #include #include +#include #include +#include #include "../core/hcd.h" #include "r8a66597.h" @@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); } +static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, + int status) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) +{ + if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { + void *ptr; + + for (ptr = urb->transfer_buffer; + ptr < urb->transfer_buffer + urb->transfer_buffer_length; + ptr += PAGE_SIZE) + flush_dcache_page(virt_to_page(ptr)); + } + + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); + spin_lock(&r8a66597->lock); +} + /* this function must be called with interrupt disabled */ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) { @@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) list_del(&td->queue); kfree(td); - if (urb) { - 
usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), - urb); + if (urb) + r8a66597_urb_done(r8a66597, urb, -ENODEV); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, - -ENODEV); - spin_lock(&r8a66597->lock); - } break; } } @@ -1283,10 +1299,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) if (usb_pipeisoc(urb->pipe)) urb->start_frame = r8a66597_get_frame(hcd); - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(hcd, urb, status); - spin_lock(&r8a66597->lock); + r8a66597_urb_done(r8a66597, urb, status); } if (restart) { diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c index 34ddda888e63..33baf27fac78 100644 --- a/trunk/fs/befs/linuxvfs.c +++ b/trunk/fs/befs/linuxvfs.c @@ -873,7 +873,6 @@ befs_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); unacquire_priv_sbp: - kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index d11d0289f3d2..73d6a735b8f3 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -246,8 +246,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) if (!sb) goto out; if (sb->s_flags & MS_RDONLY) { - sb->s_frozen = SB_FREEZE_TRANS; - up_write(&sb->s_umount); + deactivate_locked_super(sb); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } @@ -308,7 +307,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) BUG_ON(sb->s_bdev != bdev); down_write(&sb->s_umount); if (sb->s_flags & MS_RDONLY) - goto out_unfrozen; + goto out_deactivate; if (sb->s_op->unfreeze_fs) { error = sb->s_op->unfreeze_fs(sb); @@ -322,11 +321,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) } } -out_unfrozen: sb->s_frozen = SB_UNFROZEN; smp_wmb(); wake_up(&sb->s_wait_unfrozen); +out_deactivate: if (sb) deactivate_locked_super(sb); out_unlock: diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 2b59201b955c..87b25543d7d1 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -1982,12 +1982,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_recover_relocation(tree_root); - if (ret < 0) { - printk(KERN_WARNING - "btrfs: failed to recover relocation\n"); - err = -EINVAL; - goto fail_trans_kthread; - } + BUG_ON(ret); } location.objectid = BTRFS_FS_TREE_OBJECTID; diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 559f72489b3b..432a2da4641e 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -5402,6 +5402,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, int ret; while (level >= 0) { + if (path->slots[level] >= + btrfs_header_nritems(path->nodes[level])) + break; + ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret > 0) break; @@ -5409,10 +5413,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, if (level == 0) break; - if (path->slots[level] >= - btrfs_header_nritems(path->nodes[level])) - break; - ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index b177ed319612..96577e8bf9fd 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -3165,9 +3165,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, spin_unlock(&tree->buffer_lock); goto free_eb; } + 
spin_unlock(&tree->buffer_lock); + /* add one reference for the tree */ atomic_inc(&eb->refs); - spin_unlock(&tree->buffer_lock); return eb; free_eb: diff --git a/trunk/fs/btrfs/file.c b/trunk/fs/btrfs/file.c index 9d0809629967..c02033596f02 100644 --- a/trunk/fs/btrfs/file.c +++ b/trunk/fs/btrfs/file.c @@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) } mutex_lock(&dentry->d_inode->i_mutex); out: - return ret > 0 ? -EIO : ret; + return ret > 0 ? EIO : ret; } static const struct vm_operations_struct btrfs_file_vm_ops = { diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index 4deb280f8969..8cd109972fa6 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -1681,6 +1681,24 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, * before we start the transaction. It limits the amount of btree * reads required while inside the transaction. */ +static noinline void reada_csum(struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_ordered_extent *ordered_extent) +{ + struct btrfs_ordered_sum *sum; + u64 bytenr; + + sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, + list); + bytenr = sum->sums[0].bytenr; + + /* + * we don't care about the results, the point of this search is + * just to get the btree leaves into ram + */ + btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); +} + /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1691,6 +1709,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_path *path; int compressed = 0; int ret; @@ -1698,9 +1717,32 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (!ret) return 0; - ordered_extent = btrfs_lookup_ordered_extent(inode, start); - BUG_ON(!ordered_extent); + /* + * before we join the transaction, try to do some of our IO. + * This will limit the amount of IO that we have to do with + * the transaction running. We're unlikely to need to do any + * IO if the file extents are new, the disk_i_size checks + * covers the most common case. 
+ */
+ if (start < BTRFS_I(inode)->disk_i_size) {
+ path = btrfs_alloc_path();
+ if (path) {
+ ret = btrfs_lookup_file_extent(NULL, root, path,
+ inode->i_ino,
+ start, 0);
+ ordered_extent = btrfs_lookup_ordered_extent(inode,
+ start);
+ if (!list_empty(&ordered_extent->list)) {
+ btrfs_release_path(root, path);
+ reada_csum(root, path, ordered_extent);
+ }
+ btrfs_free_path(path);
+ }
+ }
+ if (!ordered_extent)
+ ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+ BUG_ON(!ordered_extent);
 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
 BUG_ON(!list_empty(&ordered_extent->list));
 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5799,9 +5841,7 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
 inode->i_ctime = CURRENT_TIME;
 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (actual_len > inode->i_size) &&
- (cur_offset > inode->i_size)) {
-
+ cur_offset > inode->i_size) {
 if (cur_offset > actual_len)
 i_size = actual_len;
 else
diff --git a/trunk/fs/btrfs/relocation.c b/trunk/fs/btrfs/relocation.c
index ab7ab5318745..ed3e4a2ec2c8 100644
--- a/trunk/fs/btrfs/relocation.c
+++ b/trunk/fs/btrfs/relocation.c
@@ -3764,8 +3764,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 BTRFS_DATA_RELOC_TREE_OBJECTID);
 if (IS_ERR(fs_root))
 err = PTR_ERR(fs_root);
- else
- btrfs_orphan_cleanup(fs_root);
+ btrfs_orphan_cleanup(fs_root);
 }
 return err;
 }
diff --git a/trunk/fs/fcntl.c b/trunk/fs/fcntl.c
index 97e01dc0d95f..5ef953e6f908 100644
--- a/trunk/fs/fcntl.c
+++ b/trunk/fs/fcntl.c
@@ -199,7 +199,9 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
 int force)
 {
- write_lock_irq(&filp->f_owner.lock);
+ unsigned long flags;
+
+ write_lock_irqsave(&filp->f_owner.lock, flags);
 if (force || !filp->f_owner.pid) {
 put_pid(filp->f_owner.pid);
 filp->f_owner.pid = get_pid(pid);
@@ -211,7 +213,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
 filp->f_owner.euid = cred->euid;
 }
 }
- write_unlock_irq(&filp->f_owner.lock);
+ write_unlock_irqrestore(&filp->f_owner.lock, flags);
 }
 int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
diff --git a/trunk/fs/file_table.c b/trunk/fs/file_table.c
index b98404b54383..69652c5bd5f0 100644
--- a/trunk/fs/file_table.c
+++ b/trunk/fs/file_table.c
@@ -253,7 +253,6 @@ void __fput(struct file *file)
 if (file->f_op && file->f_op->release)
 file->f_op->release(inode, file);
 security_file_free(file);
- ima_file_free(file);
 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
 cdev_put(inode->i_cdev);
 fops_put(file->f_op);
diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c
index d62fdc875f22..94a5e60779f9 100644
--- a/trunk/fs/namei.c
+++ b/trunk/fs/namei.c
@@ -1736,7 +1736,8 @@ struct file *do_filp_open(int dfd, const char *pathname,
 if (nd.root.mnt)
 path_put(&nd.root);
 if (!IS_ERR(filp)) {
- error = ima_file_check(filp, acc_mode);
+ error = ima_path_check(&filp->f_path, filp->f_mode &
+ (MAY_READ | MAY_WRITE | MAY_EXEC));
 if (error) {
 fput(filp);
 filp = ERR_PTR(error);
@@ -1796,7 +1797,8 @@ struct file *do_filp_open(int dfd, const char *pathname,
 }
 filp = nameidata_to_filp(&nd);
 if (!IS_ERR(filp)) {
- error = ima_file_check(filp, acc_mode);
+ error = ima_path_check(&filp->f_path, filp->f_mode &
+ (MAY_READ | MAY_WRITE | MAY_EXEC));
 if (error) {
 fput(filp);
 filp = ERR_PTR(error);
diff --git a/trunk/fs/nfsd/vfs.c b/trunk/fs/nfsd/vfs.c
index 97d79eff6b7f..c194793b642b 100644
--- a/trunk/fs/nfsd/vfs.c
+++ b/trunk/fs/nfsd/vfs.c
@@ -752,7 +752,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 flags, current_cred());
 if (IS_ERR(*filp))
 host_err = PTR_ERR(*filp);
- host_err = ima_file_check(*filp, access);
 out_nfserr:
 err = nfserrno(host_err);
 out:
@@ -2128,6 +2127,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
 */
 path.mnt = exp->ex_path.mnt;
 path.dentry = dentry;
+ err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
 nfsd_out:
 return err? nfserrno(err) : 0;
 }
diff --git a/trunk/include/linux/ata.h b/trunk/include/linux/ata.h
index 20f31567ccee..38a6948ce0c2 100644
--- a/trunk/include/linux/ata.h
+++ b/trunk/include/linux/ata.h
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
 return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
 }
-static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
+static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
 {
- return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
+ return id[ATA_ID_SECTOR_SIZE] & 0xf;
 }
 static inline int ata_id_has_lba48(const u16 *id)
diff --git a/trunk/include/linux/compiler.h b/trunk/include/linux/compiler.h
index 188fcae10a99..5be3dab4a695 100644
--- a/trunk/include/linux/compiler.h
+++ b/trunk/include/linux/compiler.h
@@ -15,7 +15,6 @@
 # define __acquire(x) __context__(x,1)
 # define __release(x) __context__(x,-1)
 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
@@ -33,7 +32,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __acquire(x) (void)0
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
-# define __percpu
 #endif
 #ifdef __KERNEL__
diff --git a/trunk/include/linux/ima.h b/trunk/include/linux/ima.h
index 975837e7d6c0..99dc6d5cf7e5 100644
--- a/trunk/include/linux/ima.h
+++ b/trunk/include/linux/ima.h
@@ -17,7 +17,7 @@ struct linux_binprm;
 extern int ima_bprm_check(struct linux_binprm *bprm);
 extern int ima_inode_alloc(struct inode *inode);
 extern void ima_inode_free(struct inode *inode);
-extern int ima_file_check(struct file *file, int mask);
+extern int ima_path_check(struct path *path, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
 return;
 }
-static inline int ima_file_check(struct file *file, int mask)
+static inline int ima_path_check(struct path *path, int mask)
 {
 return 0;
 }
diff --git a/trunk/init/main.c b/trunk/init/main.c
index 4cb47a159f02..dac44a9356a5 100644
--- a/trunk/init/main.c
+++ b/trunk/init/main.c
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
 proc_caches_init();
 buffer_init();
 key_init();
- radix_tree_init();
 security_init();
 vfs_caches_init(totalram_pages);
+ radix_tree_init();
 signals_init();
 /* rootfs populating might need page-writeback */
 page_writeback_init();
diff --git a/trunk/mm/migrate.c b/trunk/mm/migrate.c
index 9a0db5bbabe4..efddbf0926b2 100644
--- a/trunk/mm/migrate.c
+++ b/trunk/mm/migrate.c
@@ -912,9 +912,6 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
 goto out_pm;
 err = -ENODEV;
- if (node < 0 || node >= MAX_NUMNODES)
- goto out_pm;
-
 if (!node_state(node, N_HIGH_MEMORY))
 goto out_pm;
diff --git a/trunk/security/integrity/ima/ima.h b/trunk/security/integrity/ima/ima.h
index 47fb65d1fcbd..c41afe6639a0 100644
--- a/trunk/security/integrity/ima/ima.h
+++ b/trunk/security/integrity/ima/ima.h
@@ -65,6 +65,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
 const char *cause, int result, int info);
 /* Internal IMA function definitions */
+void ima_iintcache_init(void);
 int ima_init(void);
 void ima_cleanup(void);
 int ima_fs_init(void);
@@ -130,7 +131,7 @@ void iint_free(struct kref *kref);
 void iint_rcu_free(struct rcu_head *rcu);
 /* IMA policy related functions */
-enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
 int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
 void ima_init_policy(void);
diff --git a/trunk/security/integrity/ima/ima_api.c b/trunk/security/integrity/ima/ima_api.c
index 2a5e0bcf3887..3cd58b60afd2 100644
--- a/trunk/security/integrity/ima/ima_api.c
+++ b/trunk/security/integrity/ima/ima_api.c
@@ -95,12 +95,12 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
 * ima_must_measure - measure decision based on policy.
 * @inode: pointer to inode to measure
 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
 *
 * The policy is defined in terms of keypairs:
 * subj=, obj=, type=, func=, mask=, fsmagic=
 * subj,obj, and type: are LSM specific.
- * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
+ * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
 * mask: contains the permission mask
 * fsmagic: hex value
 *
diff --git a/trunk/security/integrity/ima/ima_iint.c b/trunk/security/integrity/ima/ima_iint.c
index 0d83edcfc402..fa592ff1ac1c 100644
--- a/trunk/security/integrity/ima/ima_iint.c
+++ b/trunk/security/integrity/ima/ima_iint.c
@@ -52,6 +52,9 @@ int ima_inode_alloc(struct inode *inode)
 struct ima_iint_cache *iint = NULL;
 int rc = 0;
+ if (!ima_initialized)
+ return 0;
+
 iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
 if (!iint)
 return -ENOMEM;
@@ -115,6 +118,8 @@ void ima_inode_free(struct inode *inode)
 {
 struct ima_iint_cache *iint;
+ if (!ima_initialized)
+ return;
 spin_lock(&ima_iint_lock);
 iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
 spin_unlock(&ima_iint_lock);
@@ -136,11 +141,9 @@ static void init_once(void *foo)
 kref_set(&iint->refcount, 1);
 }
-static int __init ima_iintcache_init(void)
+void __init ima_iintcache_init(void)
 {
 iint_cache = kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, SLAB_PANIC, init_once);
- return 0;
 }
-security_initcall(ima_iintcache_init);
diff --git a/trunk/security/integrity/ima/ima_main.c b/trunk/security/integrity/ima/ima_main.c
index 294b005d6520..a89f44d5e030 100644
--- a/trunk/security/integrity/ima/ima_main.c
+++ b/trunk/security/integrity/ima/ima_main.c
@@ -14,7 +14,7 @@
 *
 * File: ima_main.c
 * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- * and ima_file_check.
+ * and ima_path_check.
 */
 #include
 #include
@@ -84,36 +84,6 @@ static bool ima_limit_imbalance(struct file *file)
 return found;
 }
-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
- struct ima_iint_cache *iint,
- struct inode *inode,
- const unsigned char *filename)
-{
- switch (error) {
- case TOMTOU:
- if (iint->readcount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "ToMToU");
- break;
- case OPEN_WRITERS:
- if (iint->writecount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "open_writers");
- break;
- }
-}
-
 /*
 * Update the counts given an fmode_t
 */
@@ -128,47 +98,6 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
 iint->writecount++;
 }
-/*
- * ima_counts_get - increment file counts
- *
- * Maintain read/write counters for all files, but only
- * invalidate the PCR for measured files:
- * - Opening a file for write when already open for read,
- * results in a time of measure, time of use (ToMToU) error.
- * - Opening a file for read when already open for write,
- * could result in a file measurement error.
- *
- */
-void ima_counts_get(struct file *file)
-{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
- fmode_t mode = file->f_mode;
- struct ima_iint_cache *iint;
- int rc;
-
- if (!ima_initialized || !S_ISREG(inode->i_mode))
- return;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return;
- mutex_lock(&iint->mutex);
- rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
- if (rc < 0)
- goto out;
-
- if (mode & FMODE_WRITE) {
- ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
- goto out;
- }
- ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
-out:
- ima_inc_counts(iint, file->f_mode);
- mutex_unlock(&iint->mutex);
-
- kref_put(&iint->refcount, iint_free);
-}
-
 /*
 * Decrement ima counts
 */
@@ -224,6 +153,123 @@ void ima_file_free(struct file *file)
 kref_put(&iint->refcount, iint_free);
 }
+/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
+ *
+ * When opening a file for read, if the file is already open for write,
+ * the file could change, resulting in a file measurement error.
+ *
+ * Opening a file for write, if the file is already open for read, results
+ * in a time of measure, time of use (ToMToU) error.
+ *
+ * In either case invalidate the PCR.
+ */
+enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
+static void ima_read_write_check(enum iint_pcr_error error,
+ struct ima_iint_cache *iint,
+ struct inode *inode,
+ const unsigned char *filename)
+{
+ switch (error) {
+ case TOMTOU:
+ if (iint->readcount > 0)
+ ima_add_violation(inode, filename, "invalid_pcr",
+ "ToMToU");
+ break;
+ case OPEN_WRITERS:
+ if (iint->writecount > 0)
+ ima_add_violation(inode, filename, "invalid_pcr",
+ "open_writers");
+ break;
+ }
+}
+
+static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
+ const unsigned char *filename)
+{
+ int rc = 0;
+
+ ima_inc_counts(iint, file->f_mode);
+
+ rc = ima_collect_measurement(iint, file);
+ if (!rc)
+ ima_store_measurement(iint, file, filename);
+ return rc;
+}
+
+/**
+ * ima_path_check - based on policy, collect/store measurement.
+ * @path: contains a pointer to the path to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
+ *
+ * Measure the file being open for readonly, based on the
+ * ima_must_measure() policy decision.
+ *
+ * Keep read/write counters for all files, but only
+ * invalidate the PCR for measured files:
+ * - Opening a file for write when already open for read,
+ * results in a time of measure, time of use (ToMToU) error.
+ * - Opening a file for read when already open for write,
+ * could result in a file measurement error.
+ *
+ * Always return 0 and audit dentry_open failures.
+ * (Return code will be based upon measurement appraisal.)
+ */
+int ima_path_check(struct path *path, int mask)
+{
+ struct inode *inode = path->dentry->d_inode;
+ struct ima_iint_cache *iint;
+ struct file *file = NULL;
+ int rc;
+
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
+ return 0;
+ iint = ima_iint_find_get(inode);
+ if (!iint)
+ return 0;
+
+ mutex_lock(&iint->mutex);
+
+ rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
+ if (rc < 0)
+ goto out;
+
+ if ((mask & MAY_WRITE) || (mask == 0))
+ ima_read_write_check(TOMTOU, iint, inode,
+ path->dentry->d_name.name);
+
+ if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
+ goto out;
+
+ ima_read_write_check(OPEN_WRITERS, iint, inode,
+ path->dentry->d_name.name);
+ if (!(iint->flags & IMA_MEASURED)) {
+ struct dentry *dentry = dget(path->dentry);
+ struct vfsmount *mnt = mntget(path->mnt);
+
+ file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
+ current_cred());
+ if (IS_ERR(file)) {
+ int audit_info = 0;
+
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+ dentry->d_name.name,
+ "add_measurement",
+ "dentry_open failed",
+ 1, audit_info);
+ file = NULL;
+ goto out;
+ }
+ rc = get_path_measurement(iint, file, dentry->d_name.name);
+ }
+out:
+ mutex_unlock(&iint->mutex);
+ if (file)
+ fput(file);
+ kref_put(&iint->refcount, iint_free);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ima_path_check);
+
 static int process_measurement(struct file *file, const unsigned char *filename,
 int mask, int function)
 {
@@ -251,6 +297,33 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 return rc;
 }
+/*
+ * ima_counts_get - increment file counts
+ *
+ * - for IPC shm and shmat file.
+ * - for nfsd exported files.
+ *
+ * Increment the counts for these files to prevent unnecessary
+ * imbalance messages.
+ */
+void ima_counts_get(struct file *file)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ima_iint_cache *iint;
+
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
+ return;
+ iint = ima_iint_find_get(inode);
+ if (!iint)
+ return;
+ mutex_lock(&iint->mutex);
+ ima_inc_counts(iint, file->f_mode);
+ mutex_unlock(&iint->mutex);
+
+ kref_put(&iint->refcount, iint_free);
+}
+EXPORT_SYMBOL_GPL(ima_counts_get);
+
 /**
 * ima_file_mmap - based on policy, collect/store measurement.
 * @file: pointer to the file to be measured (May be NULL)
@@ -296,31 +369,11 @@ int ima_bprm_check(struct linux_binprm *bprm)
 return 0;
 }
-/**
- * ima_path_check - based on policy, collect/store measurement.
- * @file: pointer to the file to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
- *
- * Measure files based on the ima_must_measure() policy decision.
- *
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
- */
-int ima_file_check(struct file *file, int mask)
-{
- int rc;
-
- rc = process_measurement(file, file->f_dentry->d_name.name,
- mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
- FILE_CHECK);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ima_file_check);
-
 static int __init init_ima(void)
 {
 int error;
+ ima_iintcache_init();
 error = ima_init();
 ima_initialized = 1;
 return error;
diff --git a/trunk/security/integrity/ima/ima_policy.c b/trunk/security/integrity/ima/ima_policy.c
index 4759d0f99335..e1278399b345 100644
--- a/trunk/security/integrity/ima/ima_policy.c
+++ b/trunk/security/integrity/ima/ima_policy.c
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
 .flags = IMA_FUNC | IMA_MASK},
 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
 .flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
+ {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
 };
@@ -282,11 +282,8 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
 break;
 case Opt_func:
 audit_log_format(ab, "func=%s ", args[0].from);
- if (strcmp(args[0].from, "FILE_CHECK") == 0)
- entry->func = FILE_CHECK;
- /* PATH_CHECK is for backwards compat */
- else if (strcmp(args[0].from, "PATH_CHECK") == 0)
- entry->func = FILE_CHECK;
+ if (strcmp(args[0].from, "PATH_CHECK") == 0)
+ entry->func = PATH_CHECK;
 else if (strcmp(args[0].from, "FILE_MMAP") == 0)
 entry->func = FILE_MMAP;
 else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/trunk/security/security.c b/trunk/security/security.c
index 122b748d0f4c..24e060be9fa5 100644
--- a/trunk/security/security.c
+++ b/trunk/security/security.c
@@ -666,6 +666,8 @@ int security_file_alloc(struct file *file)
 void security_file_free(struct file *file)
 {
 security_ops->file_free_security(file);
+ if (file->f_dentry)
+ ima_file_free(file);
 }
 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/trunk/sound/pci/ctxfi/ctatc.c b/trunk/sound/pci/ctxfi/ctatc.c
index 459c1f62783b..cb65bd0dd35b 100644
--- a/trunk/sound/pci/ctxfi/ctatc.c
+++ b/trunk/sound/pci/ctxfi/ctatc.c
@@ -166,7 +166,18 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
 {
- return atc->vm->get_ptp_phys(atc->vm, index);
+ struct ct_vm *vm;
+ void *kvirt_addr;
+ unsigned long phys_addr;
+
+ vm = atc->vm;
+ kvirt_addr = vm->get_ptp_virt(vm, index);
+ if (kvirt_addr == NULL)
+ phys_addr = (~0UL);
+ else
+ phys_addr = virt_to_phys(kvirt_addr);
+
+ return phys_addr;
 }
 static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1658,7 +1669,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
 }
 /* Set up device virtual memory management object */
- err = ct_vm_create(&atc->vm, pci);
+ err = ct_vm_create(&atc->vm);
 if (err < 0)
 goto error1;
diff --git a/trunk/sound/pci/ctxfi/ctvmem.c b/trunk/sound/pci/ctxfi/ctvmem.c
index 65da6e466f80..6b78752e9503 100644
--- a/trunk/sound/pci/ctxfi/ctvmem.c
+++ b/trunk/sound/pci/ctxfi/ctvmem.c
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
 return NULL;
 }
- ptp = (unsigned long *)vm->ptp[0].area;
+ ptp = vm->ptp[0];
 pte_start = (block->addr >> CT_PAGE_SHIFT);
 pages = block->size >> CT_PAGE_SHIFT;
 for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
 }
 /* *
- * return the host physical addr of the @index-th device
- page table page on success, or ~0UL on failure.
- The first returned ~0UL indicates the termination.
+ * return the host (kmalloced) addr of the @index-th device
+ page talbe page on success, or NULL on failure.
+ The first returned NULL indicates the termination.
+ */
-static dma_addr_t
-ct_get_ptp_phys(struct ct_vm *vm, int index)
+static void *
+ct_get_ptp_virt(struct ct_vm *vm, int index)
 {
- dma_addr_t addr;
+ void *addr;
- addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
+ addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];
 return addr;
 }
-int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
+int ct_vm_create(struct ct_vm **rvm)
 {
 struct ct_vm *vm;
 struct ct_vm_block *block;
- int i, err = 0;
+ int i;
 *rvm = NULL;
@@ -188,21 +188,23 @@ int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
 /* Allocate page table pages */
 for (i = 0; i < CT_PTP_NUM; i++) {
- err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci),
- PAGE_SIZE, &vm->ptp[i]);
- if (err < 0)
+ vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vm->ptp[i])
 break;
 }
- if (err < 0) {
+ if (!i) {
 /* no page table pages are allocated */
- ct_vm_destroy(vm);
+ kfree(vm);
 return -ENOMEM;
 }
 vm->size = CT_ADDRS_PER_PAGE * i;
+ /* Initialise remaining ptps */
+ for (; i < CT_PTP_NUM; i++)
+ vm->ptp[i] = NULL;
+
 vm->map = ct_vm_map;
 vm->unmap = ct_vm_unmap;
- vm->get_ptp_phys = ct_get_ptp_phys;
+ vm->get_ptp_virt = ct_get_ptp_virt;
 INIT_LIST_HEAD(&vm->unused);
 INIT_LIST_HEAD(&vm->used);
 block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -240,7 +242,7 @@ void ct_vm_destroy(struct ct_vm *vm)
 /* free allocated page table pages */
 for (i = 0; i < CT_PTP_NUM; i++)
- snd_dma_free_pages(&vm->ptp[i]);
+ kfree(vm->ptp[i]);
 vm->size = 0;
diff --git a/trunk/sound/pci/ctxfi/ctvmem.h b/trunk/sound/pci/ctxfi/ctvmem.h
index b23adfca4de6..01e4fd0386a3 100644
--- a/trunk/sound/pci/ctxfi/ctvmem.h
+++ b/trunk/sound/pci/ctxfi/ctvmem.h
@@ -22,8 +22,6 @@
 #include
 #include
-#include
-#include
 /* The chip can handle the page table of 4k pages
 * (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -43,7 +41,7 @@ struct snd_pcm_substream;
 /* Virtual memory management object for card device */
 struct ct_vm {
- struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */
+ void *ptp[CT_PTP_NUM]; /* Device page table pages */
 unsigned int size; /* Available addr space in bytes */
 struct list_head unused; /* List of unused blocks */
 struct list_head used; /* List of used blocks */
@@ -54,10 +52,10 @@ struct ct_vm {
 int size);
 /* Unmap device logical addr area. */
 void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
- dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
+ void *(*get_ptp_virt)(struct ct_vm *vm, int index);
 };
-int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
+int ct_vm_create(struct ct_vm **rvm);
 void ct_vm_destroy(struct ct_vm *vm);
 #endif /* CTVMEM_H */
diff --git a/trunk/sound/pci/hda/hda_intel.c b/trunk/sound/pci/hda/hda_intel.c
index b8faa6dc5abe..565de38a3fc7 100644
--- a/trunk/sound/pci/hda/hda_intel.c
+++ b/trunk/sound/pci/hda/hda_intel.c
@@ -426,7 +426,6 @@ struct azx {
 /* flags */
 int position_fix;
- int poll_count;
 unsigned int running :1;
 unsigned int initialized :1;
 unsigned int single_cmd :1;
@@ -507,7 +506,7 @@ static char *driver_short_names[] __devinitdata = {
 #define get_azx_dev(substream) (substream->runtime->private_data)
 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
-static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
+
 /*
 * Interface for HD codec
 */
@@ -665,12 +664,11 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 {
 struct azx *chip = bus->private_data;
 unsigned long timeout;
- int do_poll = 0;
 again:
 timeout = jiffies + msecs_to_jiffies(1000);
 for (;;) {
- if (chip->polling_mode || do_poll) {
+ if (chip->polling_mode) {
 spin_lock_irq(&chip->reg_lock);
 azx_update_rirb(chip);
 spin_unlock_irq(&chip->reg_lock);
@@ -678,9 +676,6 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 if (!chip->rirb.cmds[addr]) {
 smp_rmb();
 bus->rirb_error = 0;
-
- if (!do_poll)
- chip->poll_count = 0;
 return chip->rirb.res[addr]; /* the last value */
 }
 if (time_after(jiffies, timeout))
@@ -693,16 +688,6 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 }
 }
- if (!chip->polling_mode && chip->poll_count < 2) {
- snd_printdd(SFX "azx_get_response timeout, "
- "polling the codec once: last cmd=0x%08x\n",
- chip->last_cmd[addr]);
- do_poll = 1;
- chip->poll_count++;
- goto again;
- }
-
- if (!chip->polling_mode) {
 snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
 "switching to polling mode: last cmd=0x%08x\n",
@@ -2058,7 +2043,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
 {
 if (request_irq(chip->pci->irq, azx_interrupt, chip->msi ? 0 : IRQF_SHARED,
- "hda_intel", chip)) {
+ "HDA Intel", chip)) {
 printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
 "disabling device\n", chip->pci->irq);
 if (do_disconnect)
diff --git a/trunk/sound/pci/ice1712/aureon.c b/trunk/sound/pci/ice1712/aureon.c
index 9e66f6d306f8..765d7bd4c3d4 100644
--- a/trunk/sound/pci/ice1712/aureon.c
+++ b/trunk/sound/pci/ice1712/aureon.c
@@ -703,13 +703,11 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
 {
 unsigned char nvol;
- if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
+ if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
 nvol = 0;
- } else {
+ else
 nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / WM_VOL_MAX;
- nvol += 0x1b;
- }
 wm_put(ice, index, nvol);
 wm_put_nocache(ice, index, 0x180 | nvol);
@@ -780,7 +778,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 for (ch = 0; ch < 2; ch++) {
 unsigned int vol = ucontrol->value.integer.value[ch];
 if (vol > WM_VOL_MAX)
- vol = WM_VOL_MAX;
+ continue;
 vol |= spec->master[ch] & WM_VOL_MUTE;
 if (vol != spec->master[ch]) {
 int dac;
@@ -836,8 +834,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
 for (i = 0; i < voices; i++) {
 unsigned int vol = ucontrol->value.integer.value[i];
 if (vol > WM_VOL_MAX)
- vol = WM_VOL_MAX;
- vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
+ continue;
+ vol |= spec->vol[ofs+i];
 if (vol != spec->vol[ofs+i]) {
 spec->vol[ofs+i] = vol;
 idx = WM_DAC_ATTEN + ofs + i;
diff --git a/trunk/sound/soc/omap/omap3pandora.c b/trunk/sound/soc/omap/omap3pandora.c
index 68980c19a3bc..71b2c161158d 100644
--- a/trunk/sound/soc/omap/omap3pandora.c
+++ b/trunk/sound/soc/omap/omap3pandora.c
@@ -145,7 +145,6 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
 };
 static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
- {"PCM DAC", NULL, "APLL Enable"},
 {"Headphone Amplifier", NULL, "PCM DAC"},
 {"Line Out", NULL, "PCM DAC"},
 {"Headphone Jack", NULL, "Headphone Amplifier"},