Commit

---
r: 271559
b: refs/heads/master
c: 5a6a5b1
h: refs/heads/master
i:
  271557: d09d5b3
  271555: 60d66c4
  271551: fa68e7a
v: v3
---

Ohad Ben-Cohen authored and Joerg Roedel committed Aug 26, 2011
1 parent ea3414b commit 83243b1
Showing 3 changed files with 1 addition and 210 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 5da14a471455bd725534d18604b4d89ffbe158df
refs/heads/master: 5a6a5b1bcca3247e9161ccada488965c94012c48
8 changes: 0 additions & 8 deletions trunk/arch/arm/plat-omap/include/plat/iovmm.h
@@ -81,14 +81,6 @@ extern u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj,
u32 da, size_t bytes, u32 flags);
extern void iommu_vfree(struct iommu_domain *domain, struct iommu *obj,
const u32 da);
extern u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
u32 pa, size_t bytes, u32 flags);
extern void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj,
u32 da);
extern u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj,
u32 da, size_t bytes, u32 flags);
extern void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da);

extern void *da_to_va(struct iommu *obj, u32 da);

#endif /* __IOMMU_MMAP_H */
201 changes: 0 additions & 201 deletions trunk/drivers/iommu/omap-iovmm.c
@@ -25,40 +25,6 @@

#include <plat/iopgtable.h>

/*
* A device driver needs to create address mappings between:
*
* - iommu/device address
* - physical address
* - mpu virtual address
*
* There are 4 possible patterns for them:
*
* |iova/ mapping iommu_ page
* | da pa va (d)-(p)-(v) function type
* ---------------------------------------------------------------------------
* 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s
* 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s
* 3 | c d c 1 - n - 1 _vmap() / _vunmap() s
* 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n*
*
*
* 'iova': device iommu virtual address
* 'da': alias of 'iova'
* 'pa': physical address
* 'va': mpu virtual address
*
* 'c': contiguous memory area
* 'd': discontiguous memory area
* 'a': anonymous memory allocation
* '()': optional feature
*
* 'n': a normal page (4KB) size is used.
* 's': multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
*
* '*': not yet, but feasible.
*/

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
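
As a reading aid for the pattern table above, the four rows pair up with the map/unmap calls below. This is a hedged sketch: domain, obj, sgt and the address values are illustrative, error handling is omitted, and the iommu_vmap()/iommu_vunmap() signatures are assumed from their definitions elsewhere in this file.

/* Hedged sketch: one call pair per row of the pattern table above. */
static void pattern_examples(struct iommu_domain *domain, struct iommu *obj,
                             struct sg_table *sgt, u32 pa)
{
        u32 da = 0x90000000;            /* illustrative device address */
        const size_t bytes = SZ_64K;    /* illustrative size */

        /* 1: caller-supplied contiguous pa, e.g. MMIO (1-1-1, superpages) */
        da = iommu_kmap(domain, obj, da, pa, bytes, 0);
        iommu_kunmap(domain, obj, da);

        /* 2: anonymous contiguous RAM from kmalloc() (1-1-1, superpages) */
        da = iommu_kmalloc(domain, obj, da, bytes, 0);
        iommu_kfree(domain, obj, da);

        /* 3: caller-supplied discontiguous sg list (1-n-1, superpages) */
        da = iommu_vmap(domain, obj, da, sgt, 0);       /* signature assumed */
        sgt = iommu_vunmap(domain, obj, da);            /* signature assumed */

        /* 4: anonymous discontiguous RAM from vmalloc() (1-n-1, 4KB pages) */
        da = iommu_vmalloc(domain, obj, da, bytes, 0);
        iommu_vfree(domain, obj, da);
}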
@@ -419,40 +385,6 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
size_t len)
{
unsigned int i;
struct scatterlist *sg;

for_each_sg(sgt->sgl, sg, sgt->nents, i) {
unsigned bytes;

bytes = max_alignment(da | pa);
bytes = min_t(unsigned, bytes, iopgsz_max(len));

BUG_ON(!iopgsz_ok(bytes));

sg_set_buf(sg, phys_to_virt(pa), bytes);
/*
* 'pa' is continuous (linear).
*/
pa += bytes;
da += bytes;
len -= bytes;
}
BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
/*
* Actually this is not necessary at all, just exists for
* consistency of the code readability
*/
BUG_ON(!sgt);
}
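
The loop in sgtable_fill_kmalloc() above splits a physically contiguous region into the largest OMAP superpages that fit: each step takes the biggest supported page size to which both 'da' and 'pa' are aligned (max_alignment(da | pa)), clamps it to the largest page size not exceeding the remaining length (iopgsz_max(len)), and advances all three cursors. Below is a standalone userspace model of that arithmetic, with the two helpers re-derived from how they are used here (an assumption about their exact behavior, not the kernel implementation).

#include <stdio.h>

/* OMAP IOMMU page sizes, largest first (assumed set: 16MB/1MB/64KB/4KB) */
static const unsigned pgsz[] = { 16u << 20, 1u << 20, 64u << 10, 4u << 10 };

/* largest supported page size 'x' is aligned to (models max_alignment()) */
static unsigned model_max_alignment(unsigned x)
{
        for (int i = 0; i < 4; i++)
                if (!(x & (pgsz[i] - 1)))
                        return pgsz[i];
        return 0;       /* unaligned input; the kernel BUG_ONs instead */
}

/* largest supported page size not exceeding 'len' (models iopgsz_max()) */
static unsigned model_iopgsz_max(unsigned len)
{
        for (int i = 0; i < 4; i++)
                if (len >= pgsz[i])
                        return pgsz[i];
        return 0;
}

int main(void)
{
        /* illustrative 16MB-aligned addresses, 17MB + 64KB to map */
        unsigned da = 0x90000000, pa = 0x81000000, len = 0x1110000;

        while (len) {
                unsigned bytes = model_max_alignment(da | pa);

                if (bytes > model_iopgsz_max(len))
                        bytes = model_iopgsz_max(len);
                printf("chunk: da=0x%08x pa=0x%08x size=0x%07x\n", da, pa, bytes);
                da += bytes;
                pa += bytes;
                len -= bytes;
        }
        /* prints three chunks: one 16MB, one 1MB, one 64KB */
        return 0;
}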

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
const struct sg_table *sgt, u32 flags)
@@ -764,139 +696,6 @@ void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
u32 da, u32 pa, void *va, size_t bytes, u32 flags)
{
struct sg_table *sgt;

sgt = sgtable_alloc(bytes, flags, da, pa);
if (IS_ERR(sgt))
return PTR_ERR(sgt);

sgtable_fill_kmalloc(sgt, pa, da, bytes);

da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
if (IS_ERR_VALUE(da)) {
sgtable_drain_kmalloc(sgt);
sgtable_free(sgt);
}

return da;
}

/**
* iommu_kmap - (d)-(p)-(v) address mapper
* @obj: objective iommu
* @da: contiguous iommu virtual memory
* @pa: contiguous physical memory
* @flags: iovma and page property
*
* Creates a 1-1-1 mapping and returns @da again, which can be
* adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
size_t bytes, u32 flags)
{
void *va;

if (!obj || !obj->dev || !bytes)
return -EINVAL;

bytes = PAGE_ALIGN(bytes);

va = ioremap(pa, bytes);
if (!va)
return -ENOMEM;

flags |= IOVMF_LINEAR;
flags |= IOVMF_MMIO;

da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
if (IS_ERR_VALUE(da))
iounmap(va);

return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
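
For context, a hedged sketch of how a caller might have used this pair, e.g. to expose a physically contiguous region to a device behind the IOMMU; the addresses and the SZ_1M size are illustrative, not taken from any real driver.

/* Hypothetical caller: map 1MB of contiguous physical space at a
 * fixed device address, let the device use it, then tear it down. */
static int example_kmap(struct iommu_domain *domain, struct iommu *obj)
{
        const u32 da = 0x80000000;      /* device (iommu) virtual address */
        const u32 pa = 0x5c000000;      /* physical address, illustrative */
        u32 mapped;

        mapped = iommu_kmap(domain, obj, da, pa, SZ_1M, IOVMF_DA_FIXED);
        if (IS_ERR_VALUE(mapped))
                return (int)mapped;     /* negative errno packed in a u32 */

        /* ... the device may now access [da, da + SZ_1M) ... */

        iommu_kunmap(domain, obj, mapped);
        return 0;
}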

/**
* iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
* @obj: objective iommu
* @da: iommu device virtual address
*
* Frees the iommu virtually contiguous memory area starting at
* @da, which was passed to and was returned by 'iommu_kmap()'.
*/
void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
struct sg_table *sgt;
typedef void (*func_t)(const void *);
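/* the cast only adapts iounmap()'s prototype to the generic release
 * callback type handed to unmap_vm_area(); kfree() in iommu_kfree()
 * below already matches it */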

sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
IOVMF_LINEAR | IOVMF_MMIO);
if (!sgt)
dev_dbg(obj->dev, "%s: No sgt\n", __func__);
sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
* iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
* @obj: objective iommu
* @da: contiguous iommu virtual memory
* @bytes: bytes for allocation
* @flags: iovma and page property
*
* Allocates @bytes linearly, creates a 1-1-1 mapping and returns
* @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
size_t bytes, u32 flags)
{
void *va;
u32 pa;

if (!obj || !obj->dev || !bytes)
return -EINVAL;

bytes = PAGE_ALIGN(bytes);

va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
if (!va)
return -ENOMEM;
pa = virt_to_phys(va);

flags |= IOVMF_LINEAR;
flags |= IOVMF_ALLOC;

da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
if (IS_ERR_VALUE(da))
kfree(va);

return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
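
A hedged usage sketch for the allocator pair: kmalloc() returns physically contiguous memory, which is what lets iommu_kmalloc() build a 1-1-1 superpage mapping. The size and the device-address hint below are illustrative.

/* Hypothetical caller: hand a device 64KB of physically contiguous,
 * DMA-able RAM mapped through its IOMMU. */
static int example_kmalloc(struct iommu_domain *domain, struct iommu *obj)
{
        u32 da;

        /* without IOVMF_DA_FIXED the returned da may differ from the hint */
        da = iommu_kmalloc(domain, obj, 0x90000000, SZ_64K, 0);
        if (IS_ERR_VALUE(da))
                return (int)da;

        /* ... program the device with 'da' ... */

        iommu_kfree(domain, obj, da);
        return 0;
}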

/**
* iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
* @obj: objective iommu
* @da: iommu device virtual address
*
* Frees the iommu virtually contiguous memory area starting at
* @da, which was passed to and was returned by 'iommu_kmalloc()'.
*/
void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
struct sg_table *sgt;

sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
if (!sgt)
dev_dbg(obj->dev, "%s: No sgt\n", __func__);
sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);


static int __init iovmm_init(void)
{
const unsigned long flags = SLAB_HWCACHE_ALIGN;
