[POWERPC] Refactor 64 bits DMA operations
This patch completely refactors DMA operations for 64-bit powerpc; the
32-bit code is untouched for now.

We use the new dev_archdata structure to add the DMA operations pointer
and associated data to struct device. While at it, we also add the OF node
pointer and NUMA node. In the future, we might want to look into merging
that with pci_dn as well.
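For context, the per-device data this adds looks roughly like the sketch below.
The actual definition lives in the new include/asm-powerpc/device.h, which is
among the 26 changed files but not in the hunks quoted here, so treat the exact
layout as an approximation; the field names match the archdata uses visible in
the diff (of_node, dma_ops, dma_data, numa_node).

/* Arch-specific extensions to struct device (sketch, see note above) */
struct dev_archdata {
        /* Optional pointer to an OF device node */
        struct device_node      *of_node;

        /* DMA operations and bus-specific data for this device */
        struct dma_mapping_ops  *dma_ops;
        void                    *dma_data;

        /* NUMA node if applicable */
        int                     numa_node;
};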

The old vio, pci-iommu and pci-direct DMA ops are gone. They are replaced
by a set of generic iommu and direct DMA ops (not PCI-specific) that can be
used by any bus type. The top-level implementation is now inline.
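Concretely, "inline" here means the dma_map_single()/dma_map_sg()/... entry
points stop being out-of-line functions in dma_64.c and instead dispatch
through dev->archdata.dma_ops from inline wrappers in the dma-mapping header.
A rough sketch of that dispatch (the include/asm-powerpc/dma-mapping.h hunk is
not quoted in this excerpt, and the NULL-device handling shown is an
assumption):

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
        /* Ops are installed per device by the bus code via archdata */
        if (unlikely(dev == NULL))
                return NULL;
        return dev->archdata.dma_ops;
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                                        size_t size,
                                        enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        return dma_ops->map_single(dev, cpu_addr, size, direction);
}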

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Benjamin Herrenschmidt authored and Paul Mackerras committed Dec 4, 2006
1 parent 7c71987 commit 12d04ee
Showing 26 changed files with 451 additions and 611 deletions.
3 changes: 1 addition & 2 deletions arch/powerpc/kernel/Makefile
@@ -62,8 +62,7 @@ obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
module-$(CONFIG_PPC64) += module_64.o
obj-$(CONFIG_MODULES) += $(module-y)

pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
pci_direct_iommu.o iomap.o
pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o iomap.o
pci32-$(CONFIG_PPC32) := pci_32.o
obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y)
kexec-$(CONFIG_PPC64) := machine_kexec_64.o
240 changes: 137 additions & 103 deletions arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,185 @@
/*
* Copyright (C) 2004 IBM Corporation
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
* Implements the generic device dma API for ppc64. Handles
* the pci and vio busses
* Provide default implementations of the DMA mapping callbacks for
* directly mapped busses and busses using the iommu infrastructure
*/

#include <linux/device.h>
#include <linux/dma-mapping.h>
/* Include the busses we support */
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/ibmebus.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

static struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
return &pci_dma_ops;
#endif
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return &vio_dma_ops;
#endif
#ifdef CONFIG_IBMEBUS
if (dev->bus == &ibmebus_bus_type)
return &ibmebus_dma_ops;
#endif
return NULL;
}
/*
* Generic iommu implementation
*/

int dma_supported(struct device *dev, u64 mask)
static inline unsigned long device_to_mask(struct device *dev)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
/* Assume devices without mask can take 32 bit addresses */
return 0xfffffffful;
}

BUG_ON(!dma_ops);

return dma_ops->dma_supported(dev, mask);
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
device_to_mask(dev), flag,
dev->archdata.numa_node);
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return -EIO;
#endif /* CONFIG_IBMVIO */
#ifdef CONFIG_IBMEBUS
if (dev->bus == &ibmebus_bus_type)
return -EIO;
#endif
BUG();
return 0;
iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dma_set_mask);

void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address of the buffer
* passed here is the kernel (virtual) address of the buffer. The buffer
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
size_t size,
enum dma_data_direction direction)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);

return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
return iommu_map_single(dev->archdata.dma_data, vaddr, size,
device_to_mask(dev), direction);
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)

static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
}

BUG_ON(!dma_ops);

dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
device_to_mask(dev), direction);
}
EXPORT_SYMBOL(dma_free_coherent);

dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);

return dma_ops->map_single(dev, cpu_addr, size, direction);
iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);

dma_ops->unmap_single(dev, dma_addr, size, direction);
struct iommu_table *tbl = dev->archdata.dma_data;

if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO
"Warning: IOMMU offset too big for device mask\n");
if (tbl)
printk(KERN_INFO
"mask: 0x%08lx, table offset: 0x%08lx\n",
mask, tbl->it_offset);
else
printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
mask);
return 0;
} else
return 1;
}
EXPORT_SYMBOL(dma_unmap_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
struct dma_mapping_ops dma_iommu_ops = {
.alloc_coherent = dma_iommu_alloc_coherent,
.free_coherent = dma_iommu_free_coherent,
.map_single = dma_iommu_map_single,
.unmap_single = dma_iommu_unmap_single,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);

BUG_ON(!dma_ops);
/*
* Generic direct DMA implementation
*/

return dma_ops->map_single(dev, page_address(page) + offset, size,
direction);
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;

/* TODO: Maybe use the numa node here too ? */
ret = (void *)__get_free_pages(flag, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_abs(ret);
}
return ret;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
free_pages((unsigned long)vaddr, get_order(size));
}

BUG_ON(!dma_ops);
static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction direction)
{
return virt_to_abs(ptr);
}

dma_ops->unmap_single(dev, dma_address, size, direction);
static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_unmap_page);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
int i;

BUG_ON(!dma_ops);
for (i = 0; i < nents; i++, sg++) {
sg->dma_address = page_to_phys(sg->page) + sg->offset;
sg->dma_length = sg->length;
}

return dma_ops->map_sg(dev, sg, nents, direction);
return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);
}

dma_ops->unmap_sg(dev, sg, nhwentries, direction);
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
/* Could be improved to check for memory though it better be
* done via some global so platforms can set the limit in case
* they have limited DMA windows
*/
return mask >= DMA_32BIT_MASK;
}
EXPORT_SYMBOL(dma_unmap_sg);

struct dma_mapping_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_single = dma_direct_map_single,
.unmap_single = dma_direct_unmap_single,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);
6 changes: 5 additions & 1 deletion arch/powerpc/kernel/ibmebus.c
@@ -112,7 +112,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
return 1;
}

struct dma_mapping_ops ibmebus_dma_ops = {
static struct dma_mapping_ops ibmebus_dma_ops = {
.alloc_coherent = ibmebus_alloc_coherent,
.free_coherent = ibmebus_free_coherent,
.map_single = ibmebus_map_single,
@@ -176,6 +176,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_common(
dev->ofdev.dev.bus = &ibmebus_bus_type;
dev->ofdev.dev.release = ibmebus_dev_release;

dev->ofdev.dev.archdata.of_node = dev->ofdev.node;
dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops;
dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node);

/* An ibmebusdev is based on a of_device. We have to change the
* bus type to use our own DMA mapping operations.
*/
6 changes: 3 additions & 3 deletions arch/powerpc/kernel/iommu.c
@@ -258,9 +258,9 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction)
int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, unsigned long mask,
enum dma_data_direction direction)
{
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
9 changes: 8 additions & 1 deletion arch/powerpc/kernel/of_platform.c
@@ -22,7 +22,7 @@
#include <asm/dcr.h>
#include <asm/of_device.h>
#include <asm/of_platform.h>

#include <asm/topology.h>

/*
* The list of OF IDs below is used for matching bus types in the
@@ -221,6 +221,13 @@ struct of_device* of_platform_device_create(struct device_node *np,
dev->dev.parent = parent;
dev->dev.bus = &of_platform_bus_type;
dev->dev.release = of_release_dev;
dev->dev.archdata.of_node = np;
dev->dev.archdata.numa_node = of_node_to_nid(np);

/* We do not fill the DMA ops for platform devices by default.
* This is currently the responsibility of the platform code
* to do such, possibly using a device notifier
*/

if (bus_id)
strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
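The of_platform hunk above deliberately leaves archdata.dma_ops unset and
defers that to platform code, "possibly using a device notifier". A minimal,
hypothetical sketch of what such a hook could look like, assuming the platform
wants the generic direct ops for its of_platform devices (the notifier and
function names here are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/notifier.h>

static int example_of_dma_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        /* Install the generic direct DMA ops as each device is added */
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dev->archdata.dma_ops = &dma_direct_ops;
        return NOTIFY_DONE;
}

static struct notifier_block example_of_dma_nb = {
        .notifier_call = example_of_dma_notify,
};

/* Platform setup code would then register it against the bus:
 *      bus_register_notifier(&of_platform_bus_type, &example_of_dma_nb);
 */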