Commit 78248b4
FUJITA Tomonori authored and Linus Torvalds committed Mar 5, 2008
1 parent 1f9fdab commit 78248b4
Showing 2 changed files with 35 additions and 7 deletions.
[refs] (1 addition, 1 deletion)

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 23d7e0390ab57cf15a5cfe8d6806192f0997e5a8
+refs/heads/master: cf5401454863df8e6dc3ebe8faad09141cbec187
trunk/arch/alpha/kernel/pci_iommu.c (34 additions, 6 deletions)

@@ -126,13 +126,34 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
 	return iommu_arena_new_node(0, hose, base, window_size, align);
 }
 
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+				   unsigned long shift,
+				   unsigned long boundary_size)
+{
+	shift = (shift + index) & (boundary_size - 1);
+	return shift + nr > boundary_size;
+}
+
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+		       long n, long mask)
 {
 	unsigned long *ptes;
 	long i, p, nent;
+	int pass = 0;
+	unsigned long base;
+	unsigned long boundary_size;
+
+	BUG_ON(arena->dma_base & ~PAGE_MASK);
+	base = arena->dma_base >> PAGE_SHIFT;
+	if (dev)
+		boundary_size = ALIGN(dma_get_max_seg_size(dev) + 1, PAGE_SIZE)
+			>> PAGE_SHIFT;
+	else
+		boundary_size = ALIGN(1UL << 32, PAGE_SIZE) >> PAGE_SHIFT;
+
+	BUG_ON(!is_power_of_2(boundary_size));
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
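
Aside on the hunk above: is_span_boundary() reports whether nr pages placed at arena slot index would cross a boundary_size-aligned DMA boundary, where shift is the arena's base address in pages and boundary_size is a power of two. The arithmetic can be checked in isolation; the sketch below is standalone userspace C, and the 8 KB page size and default 4 GB boundary (so 1 << 19 arena entries) are illustrative assumptions, not values taken from the patch.

/* Standalone sketch of the boundary check, not kernel code.
 * Assumes 8 KB pages and the 4 GB fallback boundary, i.e.
 * boundary_size = (1UL << 32) >> 13 = 1 << 19 arena entries. */
#include <stdio.h>

static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	unsigned long boundary_size = 1UL << 19;
	unsigned long base = 0;	/* arena->dma_base >> PAGE_SHIFT */

	/* Four pages ending exactly on the boundary: fits, prints 0. */
	printf("%d\n", is_span_boundary((1U << 19) - 4, 4, base, boundary_size));
	/* Four pages straddling the boundary: rejected, prints 1. */
	printf("%d\n", is_span_boundary((1U << 19) - 2, 4, base, boundary_size));
	return 0;
}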
@@ -142,6 +163,11 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 
 again:
 	while (i < n && p+i < nent) {
+		if (!i && is_span_boundary(p, n, base, boundary_size)) {
+			p = ALIGN(p + 1, mask + 1);
+			goto again;
+		}
+
 		if (ptes[p+i])
 			p = ALIGN(p + i + 1, mask + 1), i = 0;
 		else
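
The new check fires only when a fresh run is being started (i == 0, hence the !i). A candidate that would straddle a boundary is abandoned and the scan restarts at the next mask-aligned slot, the same recovery the loop already uses for an occupied pte. ALIGN(p + 1, mask + 1) is the kernel's round-up-to-multiple macro; a tiny standalone sketch with illustrative values (mask = 7, so 8-entry alignment):

#include <stdio.h>

/* Same effect as the kernel's ALIGN() for power-of-two alignments. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	long mask = 7;	/* runs must start on an 8-slot boundary */
	long p = 13;	/* candidate slot that was just rejected */

	/* Resume the search at the next aligned slot: prints 16. */
	printf("%ld\n", ALIGN(p + 1, mask + 1));
	return 0;
}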
@@ -170,7 +196,8 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+		  unsigned int align)
 {
 	unsigned long flags;
 	unsigned long *ptes;
@@ -181,7 +208,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
 	/* Search for N empty ptes */
 	ptes = arena->ptes;
 	mask = max(align, arena->align_entry) - 1;
-	p = iommu_arena_find_pages(arena, n, mask);
+	p = iommu_arena_find_pages(dev, arena, n, mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
@@ -231,6 +258,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
+	struct device *dev = pdev ? &pdev->dev : NULL;
 
 	paddr = __pa(cpu_addr);
 
@@ -278,7 +306,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	/* Force allocation to 64KB boundary for ISA bridges. */
 	if (pdev && pdev == isa_bridge)
 		align = 8;
-	dma_ofs = iommu_arena_alloc(arena, npages, align);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
@@ -565,7 +593,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 	paddr &= ~PAGE_MASK;
 	npages = calc_npages(paddr + size);
-	dma_ofs = iommu_arena_alloc(arena, npages, 0);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
 	if (dma_ofs < 0) {
 		/* If we attempted a direct map above but failed, die. */
 		if (leader->dma_address == 0)
@@ -832,7 +860,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
 	/* Search for N empty ptes. */
 	ptes = arena->ptes;
-	p = iommu_arena_find_pages(arena, pg_count, align_mask);
+	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
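
Callers with no struct device at hand, such as iommu_reserve() above, pass NULL and get the 4 GB fallback boundary. For real devices the limit comes from the device's DMA parameters via dma_get_max_seg_size(). As a hedged sketch of the driver side (example_probe() is hypothetical; it assumes the dma_set_max_seg_size() helper from the same iommu sg-merging series and that pdev->dev.dma_parms has been set up):

/* Hypothetical driver fragment, not part of this commit. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	/*
	 * Advertise a 64 KB segment limit (0xffff + 1 under the patch's
	 * max_seg_size + 1 arithmetic). iommu_arena_find_pages() will
	 * then refuse any pte run crossing a 64 KB-aligned boundary.
	 */
	if (dma_set_max_seg_size(&pdev->dev, 0xffff))
		return -EIO;
	return 0;
}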
