
Commit 1e3844f

yaml
---
r: 112998
b: refs/heads/master
c: 7a715f4
h: refs/heads/master
v: v3
David S. Miller committed Aug 29, 2008
1 parent f49115f commit 1e3844f
Showing 17 changed files with 168 additions and 157 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 5778002874de0fb7e3d8c4a0a4afb6b1a6297069
refs/heads/master: 7a715f46012f3552294154978aed59cba9804928
20 changes: 8 additions & 12 deletions trunk/arch/sparc/include/asm/sbus_32.h
@@ -109,8 +109,8 @@ extern void sbus_set_sbus64(struct sbus_dev *, int);
extern void sbus_fill_device_irq(struct sbus_dev *);

/* These yield IOMMU mappings in consistent mode. */
extern void *sbus_alloc_consistent(struct sbus_dev *, long, u32 *dma_addrp);
extern void sbus_free_consistent(struct sbus_dev *, long, void *, u32);
extern void *sbus_alloc_consistent(struct device *, long, u32 *dma_addrp);
extern void sbus_free_consistent(struct device *, long, void *, u32);
void prom_adjust_ranges(struct linux_prom_ranges *, int,
struct linux_prom_ranges *, int);

@@ -120,18 +120,14 @@ void prom_adjust_ranges(struct linux_prom_ranges *, int,
#define SBUS_DMA_NONE DMA_NONE

/* All the rest use streaming mode mappings. */
extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
extern dma_addr_t sbus_map_single(struct device *, void *, size_t, int);
extern void sbus_unmap_single(struct device *, dma_addr_t, size_t, int);
extern int sbus_map_sg(struct device *, struct scatterlist *, int, int);
extern void sbus_unmap_sg(struct device *, struct scatterlist *, int, int);

/* Finally, allow explicit synchronization of streamable mappings. */
extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
extern void sbus_dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, int);
extern void sbus_dma_sync_single_for_device(struct device *, dma_addr_t, size_t, int);

/* Eric Brower (ebrower@usa.net)
* Translate SBus interrupt levels to ino values--
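Note on the conversion above: sparc32 callers now pass the generic struct device embedded in the sbus_dev (its ofdev.dev member) rather than the sbus_dev itself, exactly as the fore200e hunks later in this diff do. A minimal illustrative sketch, not taken from this commit; the function name and the pass-through direction argument are hypothetical:

/* Illustrative only: derive the generic device from an sbus_dev and
 * map a buffer with the new device-based prototype.  Unmapping uses
 * the same dev pointer via sbus_unmap_single().
 */
static dma_addr_t example_map_buffer(struct sbus_dev *sdev, void *buf,
                                     size_t len, int direction)
{
        struct device *dev = &sdev->ofdev.dev; /* generic device handle */

        return sbus_map_single(dev, buf, len, direction);
}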
48 changes: 15 additions & 33 deletions trunk/arch/sparc/include/asm/sbus_64.h
@@ -100,17 +100,16 @@ extern struct sbus_bus *sbus_root;
extern void sbus_set_sbus64(struct sbus_dev *, int);
extern void sbus_fill_device_irq(struct sbus_dev *);

static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size,
static inline void *sbus_alloc_consistent(struct device *dev , size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent(&sdev->ofdev.dev, size,
dma_handle, GFP_ATOMIC);
return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
}

static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
static inline void sbus_free_consistent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
return dma_free_coherent(dev, size, vaddr, dma_handle);
}

#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
@@ -119,68 +118,51 @@ static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
#define SBUS_DMA_NONE DMA_NONE

/* All the rest use streaming mode mappings. */
static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
static inline dma_addr_t sbus_map_single(struct device *dev, void *ptr,
size_t size, int direction)
{
return dma_map_single(&sdev->ofdev.dev, ptr, size,
return dma_map_single(dev, ptr, size,
(enum dma_data_direction) direction);
}

static inline void sbus_unmap_single(struct sbus_dev *sdev,
static inline void sbus_unmap_single(struct device *dev,
dma_addr_t dma_addr, size_t size,
int direction)
{
dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
dma_unmap_single(dev, dma_addr, size,
(enum dma_data_direction) direction);
}

static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
static inline int sbus_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int direction)
{
return dma_map_sg(&sdev->ofdev.dev, sg, nents,
return dma_map_sg(dev, sg, nents,
(enum dma_data_direction) direction);
}

static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
static inline void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, int direction)
{
dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
dma_unmap_sg(dev, sg, nents,
(enum dma_data_direction) direction);
}

/* Finally, allow explicit synchronization of streamable mappings. */
static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
static inline void sbus_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
dma_sync_single_for_cpu(dev, dma_handle, size,
(enum dma_data_direction) direction);
}
#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu

static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
static inline void sbus_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
{
/* No flushing needed to sync cpu writes to the device. */
}

static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
struct scatterlist *sg,
int nents, int direction)
{
dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
(enum dma_data_direction) direction);
}
#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu

static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
struct scatterlist *sg,
int nents, int direction)
{
/* No flushing needed to sync cpu writes to the device. */
}

extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
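The sparc64 wrappers above now forward the caller's struct device straight to the generic dma_* API, and the scatter-gather sync helpers are dropped from this header. A minimal illustrative sketch of a streaming-DMA cycle using what remains; the function name is hypothetical and error handling is omitted:

/* Illustrative only: map, sync for the CPU after the device has
 * written, then unmap -- each wrapper simply forwards dev to the
 * corresponding dma_* call shown above.
 */
static void example_stream_cycle(struct device *dev, void *buf,
                                 size_t len, int direction)
{
        dma_addr_t ba;

        ba = sbus_map_single(dev, buf, len, direction);
        /* ... device performs DMA to or from ba here ... */
        sbus_dma_sync_single_for_cpu(dev, ba, len, direction);
        sbus_unmap_single(dev, ba, len, direction);
}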
43 changes: 19 additions & 24 deletions trunk/arch/sparc/kernel/ioport.c
@@ -300,11 +300,10 @@ void __init sbus_fill_device_irq(struct sbus_dev *sdev)
* Allocate a chunk of memory suitable for DMA.
* Typically devices use them for control blocks.
* CPU may access them without any explicit flushing.
*
* XXX Some clever people know that sdev is not used and supply NULL. Watch.
*/
void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
struct of_device *op = to_of_device(dev);
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long va;
struct resource *res;
@@ -341,10 +340,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
goto err_noiommu;

/* Set the resource name, if known. */
if (sdev) {
res->name = sdev->prom_name;
}
res->name = op->node->name;

return (void *)(unsigned long)res->start;

@@ -358,7 +354,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
return NULL;
}

void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
struct resource *res;
struct page *pgv;
@@ -396,8 +392,10 @@ void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
* CPU view of this memory may be inconsistent with
* a device view and explicit flushing is necessary.
*/
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
struct sbus_dev *sdev = to_sbus_device(dev);

/* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
@@ -409,13 +407,16 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int dire
return mmu_get_scsi_one(va, len, sdev->bus);
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
struct sbus_dev *sdev = to_sbus_device(dev);
mmu_release_scsi_one(ba, n, sdev->bus);
}

int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
struct sbus_dev *sdev = to_sbus_device(dev);

mmu_get_scsi_sgl(sg, n, sdev->bus);

/*
@@ -425,16 +426,19 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direct
return n;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
struct sbus_dev *sdev = to_sbus_device(dev);

mmu_release_scsi_sgl(sg, n, sdev->bus);
}

/*
*/
void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
#if 0
struct sbus_dev *sdev = to_sbus_device(dev);
unsigned long va;
struct resource *res;

@@ -452,9 +456,10 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t s
#endif
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
#if 0
struct sbus_dev *sdev = to_sbus_device(dev);
unsigned long va;
struct resource *res;

@@ -472,16 +477,6 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_
#endif
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
}

/* Support code for sbus_init(). */
/*
* XXX This functions appears to be a distorted version of
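In the ioport.c hunks above, the functions now receive only a generic struct device and recover the bus-specific wrappers with to_of_device() and to_sbus_device(). Such helpers are conventionally container_of()-style casts from the embedded device back to its enclosing structure; the sketch below shows the pattern with a hypothetical macro name rather than the tree's actual definition:

/* Illustration of the container_of pattern behind helpers like
 * to_of_device(); the macro name here is hypothetical.
 */
#define example_to_of_device(d) container_of(d, struct of_device, dev)

static const char *example_node_name(struct device *dev)
{
        struct of_device *op = example_to_of_device(dev);

        return op->node->name; /* same field sbus_alloc_consistent() now uses */
}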
2 changes: 0 additions & 2 deletions trunk/arch/sparc/kernel/sparc_ksyms.c
@@ -163,8 +163,6 @@ EXPORT_SYMBOL(sbus_map_sg);
EXPORT_SYMBOL(sbus_unmap_sg);
EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
EXPORT_SYMBOL(sbus_iounmap);
EXPORT_SYMBOL(sbus_ioremap);
#endif
2 changes: 0 additions & 2 deletions trunk/arch/sparc64/kernel/sparc64_ksyms.c
@@ -170,8 +170,6 @@ EXPORT_SYMBOL(sbus_map_sg);
EXPORT_SYMBOL(sbus_unmap_sg);
EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
#endif
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
34 changes: 24 additions & 10 deletions trunk/drivers/atm/fore200e.c
@@ -678,7 +678,9 @@ fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
static u32
fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
struct sbus_dev *sdev = fore200e->bus_dev;
struct device *dev = &sdev->ofdev.dev;
u32 dma_addr = sbus_map_single(dev, virt_addr, size, direction);

DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
virt_addr, size, direction, dma_addr);
@@ -690,27 +692,36 @@ fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int d
static void
fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
struct sbus_dev *sdev = fore200e->bus_dev;
struct device *dev = &sdev->ofdev.dev;

DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
dma_addr, size, direction);

sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
sbus_unmap_single(dev, dma_addr, size, direction);
}


static void
fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
struct sbus_dev *sdev = fore200e->bus_dev;
struct device *dev = &sdev->ofdev.dev;

DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
sbus_dma_sync_single_for_cpu(dev, dma_addr, size, direction);
}

static void
fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
struct sbus_dev *sdev = fore200e->bus_dev;
struct device *dev = &sdev->ofdev.dev;

DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
sbus_dma_sync_single_for_device(dev, dma_addr, size, direction);
}


@@ -721,11 +732,13 @@ static int
fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
int size, int nbr, int alignment)
{
struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
struct device *dev = &sdev->ofdev.dev;

chunk->alloc_size = chunk->align_size = size * nbr;

/* returned chunks are page-aligned */
chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
chunk->alloc_size,
chunk->alloc_addr = sbus_alloc_consistent(dev, chunk->alloc_size,
&chunk->dma_addr);

if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
@@ -742,10 +755,11 @@ fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
static void
fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
chunk->alloc_size,
chunk->alloc_addr,
chunk->dma_addr);
struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
struct device *dev = &sdev->ofdev.dev;

sbus_free_consistent(dev, chunk->alloc_size,
chunk->alloc_addr, chunk->dma_addr);
}


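The fore200e chunk helpers above pair sbus_alloc_consistent() with sbus_free_consistent() through the same derived device pointer. A condensed illustrative restatement; the function names are not from the driver and the chunk field names simply follow the diff:

/* Illustrative pairing of the consistent-memory helpers with the
 * device-based API; minimal error handling.
 */
static int example_chunk_alloc(struct device *dev, struct chunk *chunk,
                               int size, int nbr)
{
        chunk->alloc_size = chunk->align_size = size * nbr;
        chunk->alloc_addr = sbus_alloc_consistent(dev, chunk->alloc_size,
                                                  &chunk->dma_addr);
        if (chunk->alloc_addr == NULL || chunk->dma_addr == 0)
                return -ENOMEM;
        return 0;
}

static void example_chunk_free(struct device *dev, struct chunk *chunk)
{
        sbus_free_consistent(dev, chunk->alloc_size,
                             chunk->alloc_addr, chunk->dma_addr);
}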
(remaining changed files not shown)
