drm/gem: add shmem get/put page helpers
Basically just extracting some code duplicated in gma500, omapdrm, udl,
and the upcoming msm driver.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Rob Clark authored and Dave Airlie committed Aug 19, 2013
1 parent 367bbd4 commit bcc5c9d
Showing 2 changed files with 107 additions and 0 deletions.
103 changes: 103 additions & 0 deletions drivers/gpu/drm/drm_gem.c
@@ -358,6 +358,109 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
* @obj: obj in question
* @gfpmask: gfp mask of requested pages
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
struct inode *inode;
struct address_space *mapping;
struct page *p, **pages;
int i, npages;

/* This is the shared memory object that backs the GEM resource */
inode = file_inode(obj->filp);
mapping = inode->i_mapping;

/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
* driver author is doing something really wrong:
*/
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

npages = obj->size >> PAGE_SHIFT;

pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);

gfpmask |= mapping_gfp_mask(mapping);

for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;

/* There is a hypothetical issue w/ drivers that require
* buffer memory in the low 4GB.. if the pages are un-
* pinned, and swapped out, they can end up swapped back
* in above 4GB. If pages are already in memory, then
* shmem_read_mapping_page_gfp will ignore the gfpmask,
* even if the already in-memory page disobeys the mask.
*
* It is only a theoretical issue today, because none of
* the devices with this limitation can be populated with
* enough memory to trigger the issue. But this BUG_ON()
* is here as a reminder in case the problem with
* shmem_read_mapping_page_gfp() isn't solved by the time
* it does become a real issue.
*
* See this thread: http://lkml.org/lkml/2011/7/11/238
*/
BUG_ON((gfpmask & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}

return pages;

fail:
while (i--)
page_cache_release(pages[i]);

drm_free_large(pages);
return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
* drm_gem_put_pages - helper to free backing pages for a GEM object
* @obj: obj in question
* @pages: pages to free
* @dirty: if true, pages will be marked as dirty
* @accessed: if true, the pages will be marked as accessed
*/
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed)
{
int i, npages;

/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
* driver author is doing something really wrong:
*/
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

npages = obj->size >> PAGE_SHIFT;

for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty(pages[i]);

if (accessed)
mark_page_accessed(pages[i]);

/* Undo the reference we took when populating the table */
page_cache_release(pages[i]);
}

drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
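As context for how drivers are expected to consume these helpers, here is a minimal sketch of a driver-side pin/unpin path. The foo_gem_object wrapper, its pages field, and the GFP_KERNEL choice are illustrative assumptions, not part of this patch:

#include <drm/drmP.h>

struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* array returned by drm_gem_get_pages() */
};

static int foo_gem_pin_pages(struct foo_gem_object *foo_obj)
{
	struct page **pages;

	/* Populate the shmem backing store; a device limited to the
	 * low 4GB would pass GFP_KERNEL | __GFP_DMA32 here instead. */
	pages = drm_gem_get_pages(&foo_obj->base, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	foo_obj->pages = pages;
	return 0;
}

static void foo_gem_unpin_pages(struct foo_gem_object *foo_obj)
{
	/* dirty=true so modified data is written back before the pages
	 * are reclaimed; accessed=false leaves LRU aging to the VM. */
	drm_gem_put_pages(&foo_obj->base, foo_obj->pages, true, false);
	foo_obj->pages = NULL;
}

A driver that knows the pages were never written could pass dirty=false on release to avoid needless writeback.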
4 changes: 4 additions & 0 deletions include/drm/drmP.h
@@ -1616,6 +1616,10 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);

struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
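The BUG_ON() in drm_gem_get_pages() concerns devices that need backing pages below 4GB. Under the assumption that such a driver would rather constrain the shmem mapping itself than rely only on its per-call mask, one possible sketch is shown below; foo_gem_limit_to_dma32 is hypothetical, and this covers initial population, not the swap-in case the comment warns about:

#include <linux/pagemap.h>
#include <drm/drmP.h>

/* Hypothetical helper: narrow the shmem mapping's GFP mask at object
 * creation time so that mapping_gfp_mask(), which drm_gem_get_pages()
 * ORs into the caller's mask, already carries __GFP_DMA32. */
static void foo_gem_limit_to_dma32(struct drm_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->filp)->i_mapping;

	mapping_set_gfp_mask(mapping, GFP_KERNEL | __GFP_DMA32);
}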
