diff --git a/[refs] b/[refs]
index bd677334e54b..f1d143c624b9 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 490213556ad5bc1b46857bce1bc2c6db41d3e63d
+refs/heads/master: 7c04d1d97a8d918b7ae2ef478229862b71a65f06
diff --git a/trunk/arch/sparc/kernel/chmc.c b/trunk/arch/sparc/kernel/chmc.c
index e1a9598e2a4d..3b9f4d6e14a9 100644
--- a/trunk/arch/sparc/kernel/chmc.c
+++ b/trunk/arch/sparc/kernel/chmc.c
@@ -306,7 +306,6 @@ static int jbusmc_print_dimm(int syndrome_code,
 		buf[1] = '?';
 		buf[2] = '?';
 		buf[3] = '\0';
-		return 0;
 	}
 	p = dp->controller;
 	prop = &p->layout;
diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c
index 65b635ce28c8..a2834276cb38 100644
--- a/trunk/drivers/gpu/drm/i915/intel_display.c
+++ b/trunk/drivers/gpu/drm/i915/intel_display.c
@@ -217,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 	return false;
 }
 
-#define INTELPllInvalid(s)   do { DRM_DEBUG(s); return false; } while (0)
+#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c
index e2466425d9ca..01e3cffd03b8 100644
--- a/trunk/drivers/md/raid1.c
+++ b/trunk/drivers/md/raid1.c
@@ -1237,9 +1237,8 @@ static void end_sync_write(struct bio *bio, int error)
 	update_head_pos(mirror, r1_bio);
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		sector_t s = r1_bio->sectors;
+		md_done_sync(mddev, r1_bio->sectors, uptodate);
 		put_buf(r1_bio);
-		md_done_sync(mddev, s, uptodate);
 	}
 }
 
diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c
index 7301631abe04..6736d6dff981 100644
--- a/trunk/drivers/md/raid10.c
+++ b/trunk/drivers/md/raid10.c
@@ -1236,7 +1236,6 @@ static void end_sync_read(struct bio *bio, int error)
 	/* for reconstruct, we always reschedule after a read.
 	 * for resync, only after all reads
 	 */
-	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
 	    atomic_dec_and_test(&r10_bio->remaining)) {
 		/* we have read all the blocks,
@@ -1244,6 +1243,7 @@
 		 */
 		reschedule_retry(r10_bio);
 	}
+	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 }
 
 static void end_sync_write(struct bio *bio, int error)
@@ -1264,13 +1264,11 @@ static void end_sync_write(struct bio *bio, int error)
 
 	update_head_pos(i, r10_bio);
 
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 	while (atomic_dec_and_test(&r10_bio->remaining)) {
 		if (r10_bio->master_bio == NULL) {
 			/* the primary of several recovery bios */
-			sector_t s = r10_bio->sectors;
+			md_done_sync(mddev, r10_bio->sectors, 1);
 			put_buf(r10_bio);
-			md_done_sync(mddev, s, 1);
 			break;
 		} else {
 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1278,6 +1276,7 @@ static void end_sync_write(struct bio *bio, int error)
 			r10_bio = r10_bio2;
 		}
 	}
+	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 }
 
 /*
@@ -1750,6 +1749,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	if (!go_faster && conf->nr_waiting)
 		msleep_interruptible(1000);
 
+	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
 	/* Again, very different code for resync and recovery.
 	 * Both must result in an r10bio with a list of bios that
 	 * have bi_end_io, bi_sector, bi_bdev set,
@@ -1885,8 +1886,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		/* resync. Schedule a read for every block at this virt offset */
 		int count = 0;
 
-		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
-
 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
 				       &sync_blocks, mddev->degraded) &&
 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
@@ -2011,13 +2010,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	/* There is nowhere to write, so all non-sync
 	 * drives must be failed, so try the next chunk...
 	 */
-	if (sector_nr + max_sync < max_sector)
-		max_sector = sector_nr + max_sync;
-
-	sectors_skipped += (max_sector - sector_nr);
+	{
+	sector_t sec = max_sector - sector_nr;
+	sectors_skipped += sec;
 	chunks_skipped ++;
 	sector_nr = max_sector;
 	goto skipped;
+	}
 }
 
 static int run(mddev_t *mddev)
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c
index ab13ff22a8c0..b0ee86c62685 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -148,7 +148,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	pci_unmap_single(dev,
 		pci_unmap_addr(&txq->cmd[index]->meta, mapping),
 		pci_unmap_len(&txq->cmd[index]->meta, len),
-		PCI_DMA_BIDIRECTIONAL);
+		PCI_DMA_TODEVICE);
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++) {
@@ -964,7 +964,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	 * within command buffer array. */
 	txcmd_phys = pci_map_single(priv->pci_dev,
 			out_cmd, sizeof(struct iwl_cmd),
-			PCI_DMA_BIDIRECTIONAL);
+			PCI_DMA_TODEVICE);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
 	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
@@ -1115,7 +1115,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
 
 	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-				   len, PCI_DMA_BIDIRECTIONAL);
+				   len, PCI_DMA_TODEVICE);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
 	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
@@ -1212,7 +1212,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 	pci_unmap_single(priv->pci_dev,
 		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
 		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
-		PCI_DMA_BIDIRECTIONAL);
+		PCI_DMA_TODEVICE);
 
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
diff --git a/trunk/drivers/pci/dmar.c b/trunk/drivers/pci/dmar.c
index 26c536b51c5a..f5a662a50acb 100644
--- a/trunk/drivers/pci/dmar.c
+++ b/trunk/drivers/pci/dmar.c
@@ -330,14 +330,6 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
-		/* Avoid looping forever on bad ACPI tables */
-		if (entry_header->length == 0) {
-			printk(KERN_WARNING PREFIX
-				"Invalid 0-length structure\n");
-			ret = -EINVAL;
-			break;
-		}
-
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -499,7 +491,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw = 0;
+	int agaw;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -515,7 +507,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
-#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -523,7 +514,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
-#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -581,49 +571,19 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
-static int qi_check_fault(struct intel_iommu *iommu, int index)
-{
-	u32 fault;
-	int head;
-	struct q_inval *qi = iommu->qi;
-	int wait_index = (index + 1) % QI_LENGTH;
-
-	fault = readl(iommu->reg + DMAR_FSTS_REG);
-
-	/*
-	 * If IQE happens, the head points to the descriptor associated
-	 * with the error. No new descriptors are fetched until the IQE
-	 * is cleared.
-	 */
-	if (fault & DMA_FSTS_IQE) {
-		head = readl(iommu->reg + DMAR_IQH_REG);
-		if ((head >> 4) == index) {
-			memcpy(&qi->desc[index], &qi->desc[wait_index],
-					sizeof(struct qi_desc));
-			__iommu_flush_cache(iommu, &qi->desc[index],
-					sizeof(struct qi_desc));
-			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
-	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return 0;
+		return;
 
 	hw = qi->desc;
 
@@ -641,8 +601,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
-			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -653,11 +612,13 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
+	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -667,21 +628,15 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
-		rc = qi_check_fault(iommu, index);
-		if (rc)
-			goto out;
-
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-out:
-	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
-
-	return rc;
 }
 
 /*
@@ -694,13 +649,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
-	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
+
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -714,7 +669,10 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -744,7 +702,10 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
 }
 
 /*
diff --git a/trunk/drivers/pci/intr_remapping.c b/trunk/drivers/pci/intr_remapping.c
index 45effc5726c0..f78371b22529 100644
--- a/trunk/drivers/pci/intr_remapping.c
+++ b/trunk/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	return index;
 }
 
-static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
@@ -215,7 +215,7 @@
 		   | QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,7 +283,6 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
-	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -304,15 +303,14 @@ int modify_irte(int irq, struct irte *irte_modified)
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock(&irq_2_ir_lock);
+	qi_flush_iec(iommu, index, 0);
 
-	return rc;
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
 }
 
 int flush_irte(int irq)
 {
-	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -328,10 +326,10 @@ int flush_irte(int irq)
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return rc;
+	return 0;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -357,7 +355,6 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
-	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -378,7 +375,7 @@ int free_irte(int irq)
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -388,7 +385,7 @@ int free_irte(int irq)
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return rc;
+	return 0;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
diff --git a/trunk/include/linux/intel-iommu.h b/trunk/include/linux/intel-iommu.h
index d2e3cbfba14f..c4f6c101dbcd 100644
--- a/trunk/include/linux/intel-iommu.h
+++ b/trunk/include/linux/intel-iommu.h
@@ -194,7 +194,6 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 /* FSTS_REG */
 #define DMA_FSTS_PPF ((u32)2)
 #define DMA_FSTS_PFO ((u32)1)
-#define DMA_FSTS_IQE (1 << 4)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
@@ -329,7 +328,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type,
 			  int non_present_entry_flush);
 
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);