iommu sg: x86: convert gart IOMMU to use the IOMMU helper
This patch converts the GART IOMMU to use the IOMMU helper functions. The
IOMMU no longer allocates a memory area that spans a low-level driver's
(LLD's) segment boundary.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
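The boundary rule that iommu_area_alloc() enforces is simple to state: a
candidate run of IOMMU pages is rejected if it would cross a segment
boundary. Below is a minimal illustrative sketch of that test, not the
helper's actual source; the standalone function name spans_boundary() is
made up for illustration. All quantities are in units of pages, matching
base_index and boundary_size as computed in alloc_iommu() in the diff
below, and boundary_size is assumed to be a power of two.

/*
 * Illustrative sketch only (hypothetical helper, not kernel source):
 * would a candidate range of IOMMU pages cross a segment boundary?
 * All arguments are in pages; boundary_size is a power of two.
 */
static int spans_boundary(unsigned long base_index, unsigned long offset,
			  unsigned long size, unsigned long boundary_size)
{
	/* position of the range's first page within its boundary chunk */
	unsigned long pos = (base_index + offset) & (boundary_size - 1);

	/* the range crosses a boundary if it overruns its chunk */
	return pos + size > boundary_size;
}

An allocator that skips every offset for which this test is true can only
hand out ranges that sit inside a single boundary chunk, which is exactly
the guarantee the commit message describes.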
FUJITA Tomonori authored and Linus Torvalds committed Feb 5, 2008
1 parent 1b39b07 commit fde9a10
Showing 2 changed files with 26 additions and 17 deletions.
arch/x86/Kconfig (1 addition, 1 deletion)
@@ -466,7 +466,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  If unsure, say Y.
 
 config IOMMU_HELPER
-	def_bool CALGARY_IOMMU
+	def_bool (CALGARY_IOMMU || GART_IOMMU)
 
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
arch/x86/kernel/pci-gart_64.c (25 additions, 16 deletions)
@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -82,17 +83,24 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size)
 {
 	unsigned long offset, flags;
+	unsigned long boundary_size;
+	unsigned long base_index;
+
+	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+			   PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-					iommu_pages, size);
+	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+				  size, base_index, boundary_size, 0);
 	if (offset == -1) {
 		need_flush = 1;
-		offset = find_next_zero_string(iommu_gart_bitmap, 0,
-						iommu_pages, size);
+		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+					  size, base_index, boundary_size, 0);
 	}
 	if (offset != -1) {
 		set_bit_string(iommu_gart_bitmap, offset, size);
Expand All @@ -114,7 +122,7 @@ static void free_iommu(unsigned long offset, int size)
unsigned long flags;

spin_lock_irqsave(&iommu_bitmap_lock, flags);
__clear_bit_string(iommu_gart_bitmap, offset, size);
iommu_area_free(iommu_gart_bitmap, offset, size);
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

@@ -235,7 +243,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 			       size_t size, int dir)
 {
 	unsigned long npages = to_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages);
 	int i;
 
 	if (iommu_page == -1) {
@@ -355,10 +363,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
-			  struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+			  int nelems, struct scatterlist *sout,
+			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -394,16 +403,16 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 }
 
 static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-	     unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+	     struct scatterlist *sout, unsigned long pages, int need)
 {
 	if (!need) {
 		BUG_ON(nelems != 1);
 		sout->dma_address = start->dma_address;
 		sout->dma_length = start->length;
 		return 0;
 	}
-	return __dma_map_cont(start, nelems, sout, pages);
+	return __dma_map_cont(dev, start, nelems, sout, pages);
 }
 
 /*
@@ -449,8 +458,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		if (!iommu_merge || !nextneed || !need || s->offset ||
 		    (s->length + seg_size > max_seg_size) ||
 		    (ps->offset + ps->length) % PAGE_SIZE) {
-			if (dma_map_cont(start_sg, i - start, sgmap,
-					 pages, need) < 0)
+			if (dma_map_cont(dev, start_sg, i - start,
+					 sgmap, pages, need) < 0)
 				goto error;
 			out++;
 			seg_size = 0;
@@ -466,7 +475,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		pages += to_pages(s->offset, s->length);
 		ps = s;
 	}
-	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
+	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
 		goto error;
 	out++;
 	flush_gart();
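For completeness: the boundary that alloc_iommu() reads via
dma_get_seg_boundary() is advertised by the low-level driver through the
device's DMA parameters. Below is a hedged driver-side sketch, assuming
the dma_set_seg_boundary() accessor introduced in the same patch series;
the probe helper name is made up for illustration, and dev->dma_parms
must already point at allocated storage or the call fails.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical probe fragment: declare that no DMA segment handed to
 * this device may cross a 64 KB boundary (mask = boundary size - 1).
 */
static int example_set_boundary(struct device *dev)
{
	return dma_set_seg_boundary(dev, 0xffff);
}

With no mask set, dma_get_seg_boundary() falls back to 0xffffffff, i.e. a
4 GB boundary, so boundary_size in alloc_iommu() defaults to 4 GB worth
of pages.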
