
Commit f694271
---
r: 153852
b: refs/heads/master
c: 1f0ef2a
h: refs/heads/master
v: v3
David Woodhouse authored and committed on May 10, 2009
1 parent 596c965 commit f694271
Showing 4 changed files with 38 additions and 65 deletions.
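In short, this commit narrows the Intel IOMMU IOTLB flush callback: the hook no longer returns a status and no longer takes a non_present_entry_flush argument, so the caching-mode decision moves out to the call sites. A restatement of the prototype change from the intel-iommu.h hunk below (the comments are mine, not the kernel's):

/* Before: the int return told the caller it still had to flush the
 * write buffer itself, and non_present_entry_flush flagged the
 * non-present -> present mapping case.
 */
int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                   unsigned int size_order, u64 type,
                   int non_present_entry_flush);

/* After: the flush always happens; callers that map previously
 * non-present entries check cap_caching_mode(iommu->cap) themselves.
 */
void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type);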
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 4c25a2c1b90bf785fc2e2f0f0c74a80b3e070d39
+refs/heads/master: 1f0ef2aa18802a8ce7eb5a5164aaaf4d59073801
14 changes: 3 additions & 11 deletions trunk/drivers/pci/dmar.c
@@ -735,22 +735,14 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
qi_submit_sync(&desc, iommu);
}

-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-                   unsigned int size_order, u64 type,
-                   int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                    unsigned int size_order, u64 type)
{
u8 dw = 0, dr = 0;

struct qi_desc desc;
int ih = 0;

-        if (non_present_entry_flush) {
-                if (!cap_caching_mode(iommu->cap))
-                        return 1;
-                else
-                        did = 0;
-        }
-
if (cap_write_drain(iommu->cap))
dw = 1;

@@ -762,7 +754,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);

-        return qi_submit_sync(&desc, iommu);
+        qi_submit_sync(&desc, iommu);
}

/*
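With the early return gone, qi_flush_iotlb() now unconditionally builds its invalidation descriptor and hands it to qi_submit_sync(), which waits for the hardware to complete it. A minimal usage sketch of the new contract (the domain id value is illustrative, not taken from the diff):

/* Sketch only: a domain-selective IOTLB flush under the new void
 * contract. There is no return value any more telling the caller to
 * fall back to a write-buffer flush.
 */
u16 did = 1;    /* illustrative domain id */

qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);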
78 changes: 30 additions & 48 deletions trunk/drivers/pci/intel-iommu.c
@@ -891,27 +891,13 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
}

/* return value determine if we need a write buffer flush */
-static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
-        u64 addr, unsigned int size_order, u64 type,
-        int non_present_entry_flush)
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+        u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;

-        /*
-         * In the non-present entry flush case, if hardware doesn't cache
-         * non-present entry we do nothing and if hardware cache non-present
-         * entry, we flush entries of domain 0 (the domain id is used to cache
-         * any non-present entries)
-         */
-        if (non_present_entry_flush) {
-                if (!cap_caching_mode(iommu->cap))
-                        return 1;
-                else
-                        did = 0;
-        }
-
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */
@@ -959,12 +945,10 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
-        /* flush iotlb entry will implicitly flush write buffer */
-        return 0;
}

-static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-        u64 addr, unsigned int pages, int non_present_entry_flush)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+        u64 addr, unsigned int pages)
{
unsigned int mask;

@@ -974,8 +958,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
/* Fallback to domain selective flush if no PSI support */
if (!cap_pgsel_inv(iommu->cap))
return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                        DMA_TLB_DSI_FLUSH,
-                                        non_present_entry_flush);
+                                        DMA_TLB_DSI_FLUSH);

/*
* PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -985,11 +968,10 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
/* Fallback to domain selective flush if size is too big */
if (mask > cap_max_amask_val(iommu->cap))
return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                        DMA_TLB_DSI_FLUSH, non_present_entry_flush);
+                        DMA_TLB_DSI_FLUSH);

return iommu->flush.flush_iotlb(iommu, did, addr, mask,
-                                DMA_TLB_PSI_FLUSH,
-                                non_present_entry_flush);
+                                DMA_TLB_PSI_FLUSH);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
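A note on the fallback logic above: page-selective invalidation (PSI) covers a naturally aligned block of 2^mask pages, and the actual mask computation sits in the collapsed context of this hunk. A hypothetical helper sketching the idea (psi_mask_for is my name, not the kernel's):

/* Hypothetical helper mirroring the collapsed mask computation: PSI
 * invalidates an aligned block of 2^mask pages, so a request for
 * `pages` pages rounds up to the next power of two, e.g. pages = 5
 * gives mask = 3 (8 pages). When mask > cap_max_amask_val(iommu->cap),
 * iommu_flush_iotlb_psi() falls back to a domain-selective flush.
 */
static unsigned int psi_mask_for(unsigned int pages)
{
        unsigned int mask = 0;

        while ((1u << mask) < pages)
                mask++;
        return mask;
}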
@@ -1423,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
-        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
@@ -1558,8 +1540,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL);
-        iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                 DMA_TLB_GLOBAL_FLUSH, 0);
+        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -2096,8 +2077,7 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);

iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-                                         0);
+                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);

ret = iommu_enable_translation(iommu);
@@ -2244,10 +2224,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
if (ret)
goto error;

-        /* it's a non-present to present mapping */
-        ret = iommu_flush_iotlb_psi(iommu, domain->id,
-                        start_paddr, size >> VTD_PAGE_SHIFT, 1);
-        if (ret)
+        /* it's a non-present to present mapping. Only flush if caching mode */
+        if (cap_caching_mode(iommu->cap))
+                iommu_flush_iotlb_psi(iommu, 0, start_paddr,
+                                      size >> VTD_PAGE_SHIFT);
+        else
iommu_flush_write_buffer(iommu);

return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2283,7 +2264,7 @@ static void flush_unmaps(void)

if (deferred_flush[i].next) {
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                         DMA_TLB_GLOBAL_FLUSH, 0);
+                                         DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]);
@@ -2362,9 +2343,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) {
-                if (iommu_flush_iotlb_psi(iommu,
-                        domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
-                        iommu_flush_write_buffer(iommu);
+                iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+                                      size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2455,9 +2435,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);

-        if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
-                        size >> VTD_PAGE_SHIFT, 0))
-                iommu_flush_write_buffer(iommu);
+        iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+                              size >> VTD_PAGE_SHIFT);

/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2549,10 +2528,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
offset += size;
}

-        /* it's a non-present to present mapping */
-        if (iommu_flush_iotlb_psi(iommu, domain->id,
-                        start_addr, offset >> VTD_PAGE_SHIFT, 1))
+        /* it's a non-present to present mapping. Only flush if caching mode */
+        if (cap_caching_mode(iommu->cap))
+                iommu_flush_iotlb_psi(iommu, 0, start_addr,
+                                      offset >> VTD_PAGE_SHIFT);
+        else
iommu_flush_write_buffer(iommu);

return nelems;
}

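Both mapping paths, __intel_map_single() above and intel_map_sg() here, now share the same rule in place of the old return-value handshake. A condensed restatement (npages is a stand-in for the byte count shifted by VTD_PAGE_SHIFT):

/* Condensed restatement, not new code: caching-mode hardware may cache
 * non-present entries and files them under domain id 0 (see the comment
 * deleted from __iommu_flush_iotlb() above), so a non-present -> present
 * mapping must flush with did == 0. Other hardware only needs its
 * posted writes drained.
 */
if (cap_caching_mode(iommu->cap))
        iommu_flush_iotlb_psi(iommu, 0, start_addr, npages);
else
        iommu_flush_write_buffer(iommu);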
@@ -2711,9 +2693,9 @@ static int init_iommu_hw(void)
iommu_set_root_entry(iommu);

iommu->flush.flush_context(iommu, 0, 0, 0,
-                                DMA_CCMD_GLOBAL_INVL);
+                                           DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                 DMA_TLB_GLOBAL_FLUSH, 0);
+                                           DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
}
@@ -2728,9 +2710,9 @@ static void iommu_flush_all(void)

for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
-                                DMA_CCMD_GLOBAL_INVL);
+                                           DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                 DMA_TLB_GLOBAL_FLUSH, 0);
+                                           DMA_TLB_GLOBAL_FLUSH);
}
}

9 changes: 4 additions & 5 deletions trunk/include/linux/intel-iommu.h
@@ -283,8 +283,8 @@ struct ir_table {
struct iommu_flush {
void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
-        int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
-                unsigned int size_order, u64 type, int non_present_entry_flush);
+        void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+                unsigned int size_order, u64 type);
};

enum {
@@ -341,9 +341,8 @@ extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
-extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-                          unsigned int size_order, u64 type,
-                          int non_present_entry_flush);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                           unsigned int size_order, u64 type);

extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

