Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
```yaml
---
r: 131756
b: refs/heads/master
c: 7c04d1d
h: refs/heads/master
v: v3
```
  • Loading branch information
Jesse Barnes authored and Dave Airlie committed Feb 25, 2009
1 parent c840e7c commit 80fb930
Show file tree
Hide file tree
Showing 9 changed files with 43 additions and 89 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 490213556ad5bc1b46857bce1bc2c6db41d3e63d
refs/heads/master: 7c04d1d97a8d918b7ae2ef478229862b71a65f06
1 change: 0 additions & 1 deletion trunk/arch/sparc/kernel/chmc.c
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,6 @@ static int jbusmc_print_dimm(int syndrome_code,
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
p = dp->controller;
prop = &p->layout;
Expand Down
2 changes: 1 addition & 1 deletion trunk/drivers/gpu/drm/i915/intel_display.c
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
return false;
}

#define INTELPllInvalid(s) do { DRM_DEBUG(s); return false; } while (0)
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
Expand Down
3 changes: 1 addition & 2 deletions trunk/drivers/md/raid1.c
Original file line number Diff line number Diff line change
Expand Up @@ -1237,9 +1237,8 @@ static void end_sync_write(struct bio *bio, int error)
update_head_pos(mirror, r1_bio);

if (atomic_dec_and_test(&r1_bio->remaining)) {
sector_t s = r1_bio->sectors;
md_done_sync(mddev, r1_bio->sectors, uptodate);
put_buf(r1_bio);
md_done_sync(mddev, s, uptodate);
}
}

Expand Down
19 changes: 9 additions & 10 deletions trunk/drivers/md/raid10.c
Original file line number Diff line number Diff line change
Expand Up @@ -1236,14 +1236,14 @@ static void end_sync_read(struct bio *bio, int error)
/* for reconstruct, we always reschedule after a read.
* for resync, only after all reads
*/
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
atomic_dec_and_test(&r10_bio->remaining)) {
/* we have read all the blocks,
* do the comparison in process context in raid10d
*/
reschedule_retry(r10_bio);
}
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
}

static void end_sync_write(struct bio *bio, int error)
Expand All @@ -1264,20 +1264,19 @@ static void end_sync_write(struct bio *bio, int error)

update_head_pos(i, r10_bio);

rdev_dec_pending(conf->mirrors[d].rdev, mddev);
while (atomic_dec_and_test(&r10_bio->remaining)) {
if (r10_bio->master_bio == NULL) {
/* the primary of several recovery bios */
sector_t s = r10_bio->sectors;
md_done_sync(mddev, r10_bio->sectors, 1);
put_buf(r10_bio);
md_done_sync(mddev, s, 1);
break;
} else {
r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
put_buf(r10_bio);
r10_bio = r10_bio2;
}
}
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
}

/*
Expand Down Expand Up @@ -1750,6 +1749,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);

bitmap_cond_end_sync(mddev->bitmap, sector_nr);

/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_bdev set,
Expand Down Expand Up @@ -1885,8 +1886,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
/* resync. Schedule a read for every block at this virt offset */
int count = 0;

bitmap_cond_end_sync(mddev->bitmap, sector_nr);

if (!bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
Expand Down Expand Up @@ -2011,13 +2010,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
/* There is nowhere to write, so all non-sync
* drives must be failed, so try the next chunk...
*/
if (sector_nr + max_sync < max_sector)
max_sector = sector_nr + max_sync;

sectors_skipped += (max_sector - sector_nr);
{
sector_t sec = max_sector - sector_nr;
sectors_skipped += sec;
chunks_skipped ++;
sector_nr = max_sector;
goto skipped;
}
}

static int run(mddev_t *mddev)
Expand Down
8 changes: 4 additions & 4 deletions trunk/drivers/net/wireless/iwlwifi/iwl-tx.c
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
pci_unmap_single(dev,
pci_unmap_addr(&txq->cmd[index]->meta, mapping),
pci_unmap_len(&txq->cmd[index]->meta, len),
PCI_DMA_BIDIRECTIONAL);
PCI_DMA_TODEVICE);

/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++) {
Expand Down Expand Up @@ -964,7 +964,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
out_cmd, sizeof(struct iwl_cmd),
PCI_DMA_BIDIRECTIONAL);
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
/* Add buffer containing Tx command and MAC(!) header to TFD's
Expand Down Expand Up @@ -1115,7 +1115,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

phys_addr = pci_map_single(priv->pci_dev, out_cmd,
len, PCI_DMA_BIDIRECTIONAL);
len, PCI_DMA_TODEVICE);
pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
pci_unmap_len_set(&out_cmd->meta, len, len);
phys_addr += offsetof(struct iwl_cmd, hdr);
Expand Down Expand Up @@ -1212,7 +1212,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
pci_unmap_single(priv->pci_dev,
pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
PCI_DMA_BIDIRECTIONAL);
PCI_DMA_TODEVICE);

for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
Expand Down
73 changes: 17 additions & 56 deletions trunk/drivers/pci/dmar.c
Original file line number Diff line number Diff line change
Expand Up @@ -330,14 +330,6 @@ parse_dmar_table(void)
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
printk(KERN_WARNING PREFIX
"Invalid 0-length structure\n");
ret = -EINVAL;
break;
}

dmar_table_print_dmar_entry(entry_header);

switch (entry_header->type) {
Expand Down Expand Up @@ -499,7 +491,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
int map_size;
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
int agaw;

iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
Expand All @@ -515,15 +507,13 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
"Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
#endif
iommu->agaw = agaw;

/* the registers might be more than one page */
Expand Down Expand Up @@ -581,49 +571,19 @@ static inline void reclaim_free_desc(struct q_inval *qi)
}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
int head;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;

fault = readl(iommu->reg + DMAR_FSTS_REG);

/*
* If IQE happens, the head points to the descriptor associated
* with the error. No new descriptors are fetched until the IQE
* is cleared.
*/
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
if ((head >> 4) == index) {
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
__iommu_flush_cache(iommu, &qi->desc[index],
sizeof(struct qi_desc));
writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
return -EINVAL;
}
}

return 0;
}

/*
* Submit the queued invalidation descriptor to the remapping
* hardware unit and wait for its completion.
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
int rc = 0;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
unsigned long flags;

if (!qi)
return 0;
return;

hw = qi->desc;

Expand All @@ -641,8 +601,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)

hw[index] = *desc;

wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

hw[wait_index] = wait_desc;
Expand All @@ -653,11 +612,13 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
qi->free_head = (qi->free_head + 2) % QI_LENGTH;
qi->free_cnt -= 2;

spin_lock(&iommu->register_lock);
/*
* update the HW tail register indicating the presence of
* new descriptors.
*/
writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
spin_unlock(&iommu->register_lock);

while (qi->desc_status[wait_index] != QI_DONE) {
/*
Expand All @@ -667,21 +628,15 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
* a deadlock where the interrupt context can wait indefinitely
* for free slots in the queue.
*/
rc = qi_check_fault(iommu, index);
if (rc)
goto out;

spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
}
out:
qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

qi->desc_status[index] = QI_DONE;

reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);

return rc;
}

/*
Expand All @@ -694,13 +649,13 @@ void qi_global_iec(struct intel_iommu *iommu)
desc.low = QI_IEC_TYPE;
desc.high = 0;

/* should never fail */
qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
u64 type, int non_present_entry_flush)
{

struct qi_desc desc;

if (non_present_entry_flush) {
Expand All @@ -714,7 +669,10 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;

return qi_submit_sync(&desc, iommu);
qi_submit_sync(&desc, iommu);

return 0;

}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
Expand Down Expand Up @@ -744,7 +702,10 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);

return qi_submit_sync(&desc, iommu);
qi_submit_sync(&desc, iommu);

return 0;

}

/*
Expand Down
Loading

0 comments on commit 80fb930

Please sign in to comment.