Commit e7ab968

---
r: 304754
b: refs/heads/master
c: 8b6b569
h: refs/heads/master
v: v3
---
Rob Clark authored and Greg Kroah-Hartman committed May 17, 2012
1 parent 14fa264 commit e7ab968
Showing 5 changed files with 152 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 39eb7e9791866973dbb7a3a6d2061d70356c7d90
+refs/heads/master: 8b6b569eac2e74d0b2a1822790f725a9a6abc2be
5 changes: 5 additions & 0 deletions trunk/drivers/staging/omapdrm/omap_drv.h
@@ -138,13 +138,18 @@ int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+		struct vm_area_struct *vma);
 int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
 		void (*fxn)(void *arg), void *arg);
 int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+		enum dma_data_direction dir);
 int omap_gem_get_paddr(struct drm_gem_object *obj,
 		dma_addr_t *paddr, bool remap);
 int omap_gem_put_paddr(struct drm_gem_object *obj);
5 changes: 4 additions & 1 deletion trunk/drivers/staging/omapdrm/omap_fb.c
@@ -197,8 +197,11 @@ int omap_framebuffer_replace(struct drm_framebuffer *a,
 			pa->paddr = 0;
 		}
 
-		if (pb && !ret)
+		if (pb && !ret) {
 			ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+			if (!ret)
+				omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
+		}
 	}
 
 	if (ret) {
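
A note on ordering in the hunk above: omap_gem_get_paddr() must run first because it attaches the backing pages that omap_gem_dma_sync() walks, and DMA_TO_DEVICE then flushes any CPU-dirtied cachelines out to memory before the non-coherent DSS scans the buffer out. A minimal sketch of that pin-then-sync pattern; the wrapper function is hypothetical, only the two omap_gem_* calls are from this commit:

/* hypothetical caller sketching the pin-then-sync ordering */
static int example_pin_for_scanout(struct drm_gem_object *bo, dma_addr_t *paddr)
{
	int ret;

	ret = omap_gem_get_paddr(bo, paddr, true);	/* pin; attaches pages */
	if (ret)
		return ret;

	/* pages are attached now, so the sync can walk them */
	omap_gem_dma_sync(bo, DMA_TO_DEVICE);
	return 0;
}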
104 changes: 94 additions & 10 deletions trunk/drivers/staging/omapdrm/omap_gem.c
@@ -207,13 +207,27 @@ static inline bool is_shmem(struct drm_gem_object *obj)
 	return obj->filp != NULL;
 }
 
+/**
+ * shmem buffers that are mapped cached can simulate coherency via using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	return is_shmem(obj) &&
+		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}
+
 static DEFINE_SPINLOCK(sync_lock);
 
 /** ensure backing pages are allocated */
 static int omap_gem_attach_pages(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	struct page **pages;
+	int i, npages = obj->size >> PAGE_SHIFT;
+	dma_addr_t *addrs;
 
 	WARN_ON(omap_obj->pages);
 
@@ -231,16 +245,18 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	 * DSS, GPU, etc. are not cache coherent:
 	 */
 	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
-		int i, npages = obj->size >> PAGE_SHIFT;
-		dma_addr_t *addrs = kmalloc(npages * sizeof(addrs), GFP_KERNEL);
+		addrs = kmalloc(npages * sizeof(addrs), GFP_KERNEL);
 		for (i = 0; i < npages; i++) {
-			addrs[i] = dma_map_page(obj->dev->dev, pages[i],
+			addrs[i] = dma_map_page(dev->dev, pages[i],
 					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 		}
-		omap_obj->addrs = addrs;
+	} else {
+		addrs = kzalloc(npages * sizeof(addrs), GFP_KERNEL);
 	}
 
+	omap_obj->addrs = addrs;
 	omap_obj->pages = pages;
 
 	return 0;
 }

@@ -258,10 +274,11 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
 			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
 					PAGE_SIZE, DMA_BIDIRECTIONAL);
 		}
-		kfree(omap_obj->addrs);
-		omap_obj->addrs = NULL;
 	}
 
+	kfree(omap_obj->addrs);
+	omap_obj->addrs = NULL;
+
 	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
 	omap_obj->pages = NULL;
 }
@@ -336,6 +353,7 @@ static int fault_1d(struct drm_gem_object *obj,
 			vma->vm_start) >> PAGE_SHIFT;
 
 	if (omap_obj->pages) {
+		omap_gem_cpu_sync(obj, pgoff);
 		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 	} else {
 		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
@@ -510,7 +528,6 @@ int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 /** We override mainly to fix up some of the vm mapping flags.. */
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	struct omap_gem_object *omap_obj;
 	int ret;
 
 	ret = drm_gem_mmap(filp, vma);
@@ -519,8 +536,13 @@ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return ret;
 	}
 
-	/* after drm_gem_mmap(), it is safe to access the obj */
-	omap_obj = to_omap_bo(vma->vm_private_data);
+	return omap_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+		struct vm_area_struct *vma)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_flags |= VM_MIXEDMAP;
@@ -530,12 +552,31 @@ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 	} else {
+		/*
+		 * We do have some private objects, at least for scanout buffers
+		 * on hardware without DMM/TILER. But these are allocated write-
+		 * combine
+		 */
+		if (WARN_ON(!obj->filp))
+			return -EINVAL;
+
+		/*
+		 * Shunt off cached objs to shmem file so they have their own
+		 * address_space (so unmap_mapping_range does what we want,
+		 * in particular in the case of mmap'd dmabufs)
+		 */
+		fput(vma->vm_file);
+		get_file(obj->filp);
+		vma->vm_pgoff = 0;
+		vma->vm_file = obj->filp;
+
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	}
 
-	return ret;
+	return 0;
 }
 
+
 /**
  * omap_gem_dumb_create - create a dumb buffer
  * @drm_file: our client file
@@ -645,6 +686,48 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
 	return ret;
 }
 
+/* Sync the buffer for CPU access.. note pages should already be
+ * attached, ie. omap_gem_get_pages()
+ */
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
+		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+		omap_obj->addrs[pgoff] = 0;
+	}
+}
+
+/* sync the buffer for DMA access */
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+		enum dma_data_direction dir)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	if (is_cached_coherent(obj)) {
+		int i, npages = obj->size >> PAGE_SHIFT;
+		struct page **pages = omap_obj->pages;
+		bool dirty = false;
+
+		for (i = 0; i < npages; i++) {
+			if (!omap_obj->addrs[i]) {
+				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+						PAGE_SIZE, DMA_BIDIRECTIONAL);
+				dirty = true;
+			}
+		}
+
+		if (dirty) {
+			unmap_mapping_range(obj->filp->f_mapping, 0,
+					omap_gem_mmap_size(obj), 1);
+		}
+	}
+}
+
 /* Get physical address for DMA.. if 'remap' is true, and the buffer is not
  * already contiguous, remap it to pin in physically contiguous memory.. (ie.
  * map in TILER)
@@ -709,6 +792,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
 		*paddr = omap_obj->paddr;
 	} else {
 		ret = -EINVAL;
+		goto fail;
 	}
 
 fail:
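
How the cached-buffer coherency scheme in omap_gem.c fits together: omap_obj->addrs[i] doubles as a per-page ownership flag. Nonzero means page i is DMA-mapped (the device owns it); zero means the CPU owns it and its cachelines may be dirty. omap_gem_cpu_sync() unmaps a single page before CPU access (called from the 1d fault handler and the dmabuf kmap paths), while omap_gem_dma_sync() re-maps every CPU-owned page and then calls unmap_mapping_range() so the next userspace touch refaults and flips ownership back. Below is a standalone model of that state machine, a sketch with no kernel dependencies; all names in it are illustrative, not from the driver:

#include <stdbool.h>
#include <stddef.h>

enum page_owner { OWNER_CPU, OWNER_DEVICE };

struct coherency_model {
	enum page_owner *owner;	/* stands in for omap_obj->addrs[] */
	size_t npages;
};

/* models omap_gem_cpu_sync(): the driver's dma_unmap_page() hands one
 * page back to the CPU right before a fault or kmap touches it */
static void model_cpu_sync(struct coherency_model *m, size_t pgoff)
{
	m->owner[pgoff] = OWNER_CPU;
}

/* models omap_gem_dma_sync(): the driver's dma_map_page() flushes each
 * CPU-owned page; a true return models the unmap_mapping_range() call
 * that forces userspace to refault (and hence cpu_sync) on next access */
static bool model_dma_sync(struct coherency_model *m)
{
	bool dirty = false;
	size_t i;

	for (i = 0; i < m->npages; i++) {
		if (m->owner[i] == OWNER_CPU) {
			m->owner[i] = OWNER_DEVICE;
			dirty = true;
		}
	}
	return dirty;
}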
48 changes: 48 additions & 0 deletions trunk/drivers/staging/omapdrm/omap_gem_dmabuf.c
@@ -50,6 +50,9 @@ static struct sg_table *omap_gem_map_dma_buf(
 	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
 	sg_dma_address(sg->sgl) = paddr;
 
+	/* this should be after _get_paddr() to ensure we have pages attached */
+	omap_gem_dma_sync(obj, dir);
+
 out:
 	if (ret)
 		return ERR_PTR(ret);
@@ -104,6 +107,7 @@ static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
 	struct drm_gem_object *obj = buffer->priv;
 	struct page **pages;
 	omap_gem_get_pages(obj, &pages, false);
+	omap_gem_cpu_sync(obj, page_num);
 	return kmap_atomic(pages[page_num]);
 }
 
@@ -119,6 +123,7 @@ static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
 	struct drm_gem_object *obj = buffer->priv;
 	struct page **pages;
 	omap_gem_get_pages(obj, &pages, false);
+	omap_gem_cpu_sync(obj, page_num);
 	return kmap(pages[page_num]);
 }
 
@@ -131,6 +136,48 @@ static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
 	kunmap(pages[page_num]);
 }
 
+/*
+ * TODO maybe we can split up drm_gem_mmap to avoid duplicating
+ * some here.. or at least have a drm_dmabuf_mmap helper.
+ */
+static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
+		struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	int ret = 0;
+
+	if (WARN_ON(!obj->filp))
+		return -EINVAL;
+
+	/* Check for valid size. */
+	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (!obj->dev->driver->gem_vm_ops) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	vma->vm_ops->open(vma);
+
+out_unlock:
+
+	return omap_gem_mmap_obj(obj, vma);
+}
+
 struct dma_buf_ops omap_dmabuf_ops = {
 	.map_dma_buf = omap_gem_map_dma_buf,
 	.unmap_dma_buf = omap_gem_unmap_dma_buf,
@@ -141,6 +188,7 @@ struct dma_buf_ops omap_dmabuf_ops = {
 	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
 	.kmap = omap_gem_dmabuf_kmap,
 	.kunmap = omap_gem_dmabuf_kunmap,
+	.mmap = omap_gem_dmabuf_mmap,
 };
 
 struct dma_buf * omap_gem_prime_export(struct drm_device *dev,
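
Seen from userspace, the new .mmap op means a PRIME-exported buffer fd can be mapped directly, and for cached buffers the fault-based tracking above keeps the mapping coherent across DMA. A hedged sketch (assumptions: fd came from DRM_IOCTL_PRIME_HANDLE_TO_FD, size matches the buffer's omap_gem_mmap_size(), and the helper name is made up):

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

static void *map_exported_buffer(int fd, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)
		return NULL;

	/* plain stores are fine: the driver's fault handler notes which
	 * pages the CPU dirties, and the next DMA submission flushes
	 * them via omap_gem_dma_sync() */
	memset(ptr, 0, size);
	return ptr;
}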
