Commit 2c83ae5

---
r: 131765
b: refs/heads/master
c: c15d8a6
h: refs/heads/master
i:
  131763: f14fee3
v: v3
Linus Torvalds committed Feb 25, 2009
1 parent 48a246d commit 2c83ae5
Showing 8 changed files with 88 additions and 42 deletions.
[refs]: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e08fb4f6d1dc95eff5b3fc1d0412bcb5afcae7f2
+refs/heads/master: c15d8a6499d04e5d2cac07f8120f207bb275f60f
trunk/arch/sparc/kernel/chmc.c: 1 addition & 0 deletions
@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
                 buf[1] = '?';
                 buf[2] = '?';
                 buf[3] = '\0';
+                return 0;
         }
         p = dp->controller;
         prop = &p->layout;
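The one-line fix above matters because the error branch fills buf with a "???" placeholder and then, without the new return, falls through into code that dereferences dp, which is NULL in exactly that case. A minimal user-space sketch of the pattern follows; the struct and function names are illustrative, not the kernel's.

#include <stdio.h>
#include <string.h>

struct dimm_info { const char *label; };

/* Sketch of the chmc.c fix: on the "no DIMM info" path, emit the
 * placeholder and return early instead of falling through to a
 * NULL dereference. */
static int print_dimm(const struct dimm_info *dp, char *buf, size_t len)
{
    if (dp == NULL) {
        strncpy(buf, "???", len);        /* placeholder, as in the diff */
        buf[len - 1] = '\0';
        return 0;                        /* the early return the fix adds */
    }
    snprintf(buf, len, "%s", dp->label); /* safe: dp is non-NULL here */
    return 0;
}

int main(void)
{
    char buf[16];
    print_dimm(NULL, buf, sizeof(buf));
    printf("%s\n", buf);                 /* prints "???" instead of crashing */
    return 0;
}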
trunk/drivers/md/raid1.c: 2 additions & 1 deletion
@@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
         update_head_pos(mirror, r1_bio);
 
         if (atomic_dec_and_test(&r1_bio->remaining)) {
-                md_done_sync(mddev, r1_bio->sectors, uptodate);
+                sector_t s = r1_bio->sectors;
                 put_buf(r1_bio);
+                md_done_sync(mddev, s, uptodate);
         }
 }
 
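The reordering above is a lifetime fix, not a cleanup: md_done_sync() can wake a thread that stops the resync and tears down its buffers, so the r1_bio has to be released with put_buf() first, and the sector count it carried copied to a local before that. A hedged user-space sketch of the ordering, with free() standing in for put_buf() and a print standing in for md_done_sync():

#include <stdio.h>
#include <stdlib.h>

struct sync_bio { long sectors; };

/* Stand-in for md_done_sync(): in md it may unblock a thread that
 * frees the whole resync buffer pool. */
static void signal_done(long sectors)
{
    printf("completed %ld sectors\n", sectors);
}

/* The order the raid1.c hunk establishes: save what you need,
 * release the buffer, then signal. Nothing touches b afterwards. */
static void finish_sync(struct sync_bio *b)
{
    long s = b->sectors;   /* sector_t s = r1_bio->sectors; */
    free(b);               /* put_buf(r1_bio); */
    signal_done(s);        /* md_done_sync(mddev, s, uptodate); */
}

int main(void)
{
    struct sync_bio *b = malloc(sizeof(*b));
    if (!b)
        return 1;
    b->sectors = 128;
    finish_sync(b);
    return 0;
}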
trunk/drivers/md/raid10.c: 10 additions & 9 deletions
@@ -1236,14 +1236,14 @@ static void end_sync_read(struct bio *bio, int error)
         /* for reconstruct, we always reschedule after a read.
          * for resync, only after all reads
          */
+        rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
         if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
             atomic_dec_and_test(&r10_bio->remaining)) {
                 /* we have read all the blocks,
                  * do the comparison in process context in raid10d
                  */
                 reschedule_retry(r10_bio);
         }
-        rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 }
 
 static void end_sync_write(struct bio *bio, int error)
@@ -1264,19 +1264,20 @@ static void end_sync_write(struct bio *bio, int error)
 
         update_head_pos(i, r10_bio);
 
+        rdev_dec_pending(conf->mirrors[d].rdev, mddev);
         while (atomic_dec_and_test(&r10_bio->remaining)) {
                 if (r10_bio->master_bio == NULL) {
                         /* the primary of several recovery bios */
-                        md_done_sync(mddev, r10_bio->sectors, 1);
+                        sector_t s = r10_bio->sectors;
                         put_buf(r10_bio);
+                        md_done_sync(mddev, s, 1);
                         break;
                 } else {
                         r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
                         put_buf(r10_bio);
                         r10_bio = r10_bio2;
                 }
         }
-        rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 }
 
 /*
@@ -1749,8 +1750,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
         if (!go_faster && conf->nr_waiting)
                 msleep_interruptible(1000);
 
-        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
-
         /* Again, very different code for resync and recovery.
          * Both must result in an r10bio with a list of bios that
          * have bi_end_io, bi_sector, bi_bdev set,
@@ -1886,6 +1885,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
                 /* resync. Schedule a read for every block at this virt offset */
                 int count = 0;
 
+                bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
                 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
                                        &sync_blocks, mddev->degraded) &&
                     !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
@@ -2010,13 +2011,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
         /* There is nowhere to write, so all non-sync
          * drives must be failed, so try the next chunk...
          */
-        {
-        sector_t sec = max_sector - sector_nr;
-        sectors_skipped += sec;
+        if (sector_nr + max_sync < max_sector)
+                max_sector = sector_nr + max_sync;
+
+        sectors_skipped += (max_sector - sector_nr);
         chunks_skipped ++;
         sector_nr = max_sector;
         goto skipped;
-        }
 }
 
 static int run(mddev_t *mddev)
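The raid10.c hunks combine three fixes: the same release-before-signal ordering as raid1.c (with rdev_dec_pending() also moved ahead of the point where the r10_bio may be requeued or freed), bitmap_cond_end_sync() restricted to the resync path, and, in the final hunk, recovery no longer skipping straight to max_sector when there is nowhere to write: each skip is now capped at max_sync sectors. A small sketch of that capping arithmetic, with illustrative plain-C types:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Sketch of the last hunk: cap every skip at max_sync sectors so
 * the position advances one bounded chunk at a time instead of
 * jumping to the end of the device. */
static sector_t clamp_skip(sector_t sector_nr, sector_t max_sector,
                           sector_t max_sync, sector_t *sectors_skipped)
{
    if (sector_nr + max_sync < max_sector)
        max_sector = sector_nr + max_sync;  /* at most one chunk */
    *sectors_skipped += (max_sector - sector_nr);
    return max_sector;                      /* becomes the new sector_nr */
}

int main(void)
{
    sector_t skipped = 0, pos = 0;
    while (pos < 10000)
        pos = clamp_skip(pos, 10000, 1024, &skipped);
    printf("skipped %llu sectors in bounded steps\n", skipped);
    return 0;
}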
trunk/drivers/net/wireless/iwlwifi/iwl-tx.c: 4 additions & 4 deletions
@@ -148,7 +148,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
         pci_unmap_single(dev,
                         pci_unmap_addr(&txq->cmd[index]->meta, mapping),
                         pci_unmap_len(&txq->cmd[index]->meta, len),
-                        PCI_DMA_TODEVICE);
+                        PCI_DMA_BIDIRECTIONAL);
 
         /* Unmap chunks, if any. */
         for (i = 1; i < num_tbs; i++) {
@@ -964,7 +964,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
          * within command buffer array. */
         txcmd_phys = pci_map_single(priv->pci_dev,
                                     out_cmd, sizeof(struct iwl_cmd),
-                                    PCI_DMA_TODEVICE);
+                                    PCI_DMA_BIDIRECTIONAL);
         pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
         pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
         /* Add buffer containing Tx command and MAC(!) header to TFD's
@@ -1115,7 +1115,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                         IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
 
         phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-                                   len, PCI_DMA_TODEVICE);
+                                   len, PCI_DMA_BIDIRECTIONAL);
         pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
         pci_unmap_len_set(&out_cmd->meta, len, len);
         phys_addr += offsetof(struct iwl_cmd, hdr);
@@ -1212,7 +1212,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
         pci_unmap_single(priv->pci_dev,
                 pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
                 pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
-                PCI_DMA_TODEVICE);
+                PCI_DMA_BIDIRECTIONAL);
 
         for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
              q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
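All four hunks make one change: the DMA mapping direction for the shared Tx command buffers. PCI_DMA_TODEVICE promises that the device only reads the buffer while it is mapped; if the buffer is also written during that window (which these hunks suggest is the case for the command area, though the diff itself does not say why), the mapping must be PCI_DMA_BIDIRECTIONAL, and the map and unmap calls must agree on the direction. A toy decision-rule illustration; the enum merely mirrors the old PCI DMA names and is not the kernel API:

#include <stdio.h>

enum dma_dir { DIR_TODEVICE, DIR_FROMDEVICE, DIR_BIDIRECTIONAL };

/* Pick the weakest direction that is still correct for a buffer
 * while it stays mapped. */
static enum dma_dir pick_direction(int device_reads, int device_writes)
{
    if (device_reads && device_writes)
        return DIR_BIDIRECTIONAL;
    return device_writes ? DIR_FROMDEVICE : DIR_TODEVICE;
}

int main(void)
{
    /* Tx command buffer: fetched by the device and also written
     * while mapped, hence bidirectional. */
    printf("%d\n", pick_direction(1, 1)); /* prints DIR_BIDIRECTIONAL (2) */
    return 0;
}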
trunk/drivers/pci/dmar.c: 56 additions & 17 deletions
@@ -330,6 +330,14 @@ parse_dmar_table(void)
         entry_header = (struct acpi_dmar_header *)(dmar + 1);
         while (((unsigned long)entry_header) <
                         (((unsigned long)dmar) + dmar_tbl->length)) {
+                /* Avoid looping forever on bad ACPI tables */
+                if (entry_header->length == 0) {
+                        printk(KERN_WARNING PREFIX
+                                "Invalid 0-length structure\n");
+                        ret = -EINVAL;
+                        break;
+                }
+
                 dmar_table_print_dmar_entry(entry_header);
 
                 switch (entry_header->type) {
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         int map_size;
         u32 ver;
         static int iommu_allocated = 0;
-        int agaw;
+        int agaw = 0;
 
         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
         if (!iommu)
@@ -507,13 +515,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
         agaw = iommu_calculate_agaw(iommu);
         if (agaw < 0) {
                 printk(KERN_ERR
                         "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                         iommu->seq_id);
                 goto error;
         }
+#endif
         iommu->agaw = agaw;
 
         /* the registers might be more than one page */
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
         }
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+        u32 fault;
+        int head;
+        struct q_inval *qi = iommu->qi;
+        int wait_index = (index + 1) % QI_LENGTH;
+
+        fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+        /*
+         * If IQE happens, the head points to the descriptor associated
+         * with the error. No new descriptors are fetched until the IQE
+         * is cleared.
+         */
+        if (fault & DMA_FSTS_IQE) {
+                head = readl(iommu->reg + DMAR_IQH_REG);
+                if ((head >> 4) == index) {
+                        memcpy(&qi->desc[index], &qi->desc[wait_index],
+                                        sizeof(struct qi_desc));
+                        __iommu_flush_cache(iommu, &qi->desc[index],
+                                        sizeof(struct qi_desc));
+                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+                        return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+        int rc = 0;
         struct q_inval *qi = iommu->qi;
         struct qi_desc *hw, wait_desc;
         int wait_index, index;
         unsigned long flags;
 
         if (!qi)
-                return;
+                return 0;
 
         hw = qi->desc;
 
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
         hw[index] = *desc;
 
-        wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
         hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
         qi->free_cnt -= 2;
 
-        spin_lock(&iommu->register_lock);
         /*
          * update the HW tail register indicating the presence of
          * new descriptors.
          */
         writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-        spin_unlock(&iommu->register_lock);
 
         while (qi->desc_status[wait_index] != QI_DONE) {
                 /*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
                  * a deadlock where the interrupt context can wait indefinitely
                  * for free slots in the queue.
                  */
+                rc = qi_check_fault(iommu, index);
+                if (rc)
+                        goto out;
+
                 spin_unlock(&qi->q_lock);
                 cpu_relax();
                 spin_lock(&qi->q_lock);
         }
-
-        qi->desc_status[index] = QI_DONE;
+out:
+        qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
         reclaim_free_desc(qi);
         spin_unlock_irqrestore(&qi->q_lock, flags);
+
+        return rc;
 }
 
 /*
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
         desc.low = QI_IEC_TYPE;
         desc.high = 0;
 
+        /* should never fail */
         qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type, int non_present_entry_flush)
 {
-
         struct qi_desc desc;
 
         if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                 | QI_CC_GRAN(type) | QI_CC_TYPE;
         desc.high = 0;
 
-        qi_submit_sync(&desc, iommu);
-
-        return 0;
-
+        return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                 | QI_IOTLB_AM(size_order);
 
-        qi_submit_sync(&desc, iommu);
-
-        return 0;
-
+        return qi_submit_sync(&desc, iommu);
 }
 
 /*
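The heart of this diff is qi_check_fault(): if the invalidation queue raises an Invalid Queue Error (IQE), the hardware head stalls on the offending descriptor and fetches nothing further. The fix overwrites that descriptor with the adjacent, known-good wait descriptor, clears the fault bit, and returns -EINVAL, which qi_submit_sync() and its callers (qi_flush_context(), qi_flush_iotlb()) now propagate instead of returning void. A hedged user-space model of the recovery step, with the register reads simplified into plain parameters:

#include <stdio.h>
#include <string.h>

#define QI_LENGTH 256
#define MY_EINVAL 22

struct qi_desc { unsigned long low, high; };

/* Model of qi_check_fault(): when an IQE is pending and the stalled
 * head is the descriptor we just submitted, replace it with the
 * valid wait descriptor that follows it so the queue can drain,
 * and report the failure to the caller. */
static int check_fault(struct qi_desc *ring, int head, int index,
                       int iqe_pending)
{
    int wait_index = (index + 1) % QI_LENGTH;

    if (iqe_pending && head == index) {
        memcpy(&ring[index], &ring[wait_index], sizeof(ring[0]));
        /* the real code also flushes the cache line and writes
         * DMA_FSTS_IQE back to the fault status register here */
        return -MY_EINVAL;
    }
    return 0;
}

int main(void)
{
    struct qi_desc ring[QI_LENGTH] = { { 0, 0 } };
    ring[1].low = 0x5;  /* pretend index 1 holds the wait descriptor */
    printf("%d\n", check_fault(ring, 0, 0, 1)); /* -22: caller sees it */
    return 0;
}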
trunk/drivers/pci/intr_remapping.c: 12 additions & 9 deletions
@@ -207,15 +207,15 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
         return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
         struct qi_desc desc;
 
         desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                    | QI_IEC_SELECTIVE;
         desc.high = 0;
 
-        qi_submit_sync(&desc, iommu);
+        return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+        int rc;
         int index;
         struct irte *irte;
         struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
         set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
         __iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-        qi_flush_iec(iommu, index, 0);
-
+        rc = qi_flush_iec(iommu, index, 0);
         spin_unlock(&irq_2_ir_lock);
-        return 0;
+
+        return rc;
 }
 
 int flush_irte(int irq)
 {
+        int rc;
         int index;
         struct intel_iommu *iommu;
         struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)
 
         index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         spin_unlock(&irq_2_ir_lock);
 
-        return 0;
+        return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+        int rc = 0;
         int index, i;
         struct irte *irte;
         struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
         if (!irq_iommu->sub_handle) {
                 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                         set_64bit((unsigned long *)irte, 0);
-                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+                rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         }
 
         irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)
 
         spin_unlock(&irq_2_ir_lock);
 
-        return 0;
+        return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
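Every hunk here is the same mechanical change: qi_flush_iec() can now fail, so each caller captures its result while still holding irq_2_ir_lock and returns that instead of a hard-coded 0 after unlocking. A minimal sketch of the pattern, with a pthread mutex standing in for the kernel spinlock and a stub for the flush:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for qi_flush_iec(); may now return a negative errno. */
static int flush_hw(void)
{
    return 0;
}

/* Capture the result under the lock, unlock, then propagate it,
 * the shape modify_irte()/flush_irte()/free_irte() now take. */
static int flush_entry(void)
{
    int rc;

    pthread_mutex_lock(&lock);
    rc = flush_hw();
    pthread_mutex_unlock(&lock);

    return rc;
}

int main(void)
{
    printf("flush_entry() = %d\n", flush_entry());
    return 0;
}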
(diff for the eighth changed file not shown; it did not load in this capture)