Commit 6c5a89f

---
r: 329468
b: refs/heads/master
c: 9da3da6
h: refs/heads/master
v: v3
---

Chris Wilson authored and Daniel Vetter committed Sep 20, 2012
1 parent 9c4552b commit 6c5a89f
Showing 13 changed files with 240 additions and 221 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f60d7f0c1d55a935475ab394955cafddefaa6533
+refs/heads/master: 9da3da660d8c19a54f6e93361d147509be3fff84
51 changes: 22 additions & 29 deletions trunk/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@ static struct _intel_private {
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
 {
-	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (*sg_list)
-		return 0; /* already mapped (for e.g. resume */
-
 	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
 		goto err;
 
-	*sg_list = sg = st.sgl;
-
-	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
-			     num_entries, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!*num_sg))
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
 
 err:
-	sg_free_table(&st);
+	sg_free_table(st);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(intel_gtt_map_memory);
 
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 
 	sg_free_table(&st);
 }
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -879,8 +871,7 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
 {
@@ -892,21 +883,22 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 
 	/* sg may merge pages, but we have to separate
 	 * per-page addr for GTT */
-	for_each_sg(sg_list, sg, sg_len, i) {
+	for_each_sg(st->sgl, sg, st->nents, i) {
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			intel_private.driver->write_entry(addr,
-							  j, flags);
+			intel_private.driver->write_entry(addr, j, flags);
 			j++;
 		}
 	}
 	readl(intel_private.gtt+j-1);
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+				   unsigned int num_entries,
+				   struct page **pages,
+				   unsigned int flags)
 {
 	int i, j;
 
@@ -917,7 +909,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
 	}
 	readl(intel_private.gtt+j-1);
 }
-EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
@@ -953,13 +944,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 		global_cache_flush();
 
 	if (intel_private.base.needs_dmar) {
-		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
-					   &mem->sg_list, &mem->num_sg);
+		struct sg_table st;
+
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
 		if (ret != 0)
 			return ret;
 
-		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
-					    pg_start, type);
+		intel_gtt_insert_sg_entries(&st, pg_start, type);
+		mem->sg_list = st.sgl;
+		mem->num_sg = st.nents;
 	} else
 		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
 				       type);
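The change above inverts intel_gtt_map_memory()'s contract: the caller now supplies the sg_table, and the function fills it and DMA-maps it, instead of handing back a raw scatterlist pointer plus entry count. A minimal sketch of that flow using only the generic scatterlist and PCI DMA APIs; map_pages_to_sg and the pdev/pages parameters are illustrative stand-ins, not names from the patch:

#include <linux/pci.h>
#include <linux/scatterlist.h>

static int map_pages_to_sg(struct pci_dev *pdev, struct page **pages,
			   unsigned int num_entries, struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	/* one table entry per page; the entry count lands in st->nents */
	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		return -ENOMEM;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* pci_map_sg() returns the number of mapped entries; 0 means failure */
	if (!pci_map_sg(pdev, st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL)) {
		sg_free_table(st);
		return -ENOMEM;
	}
	return 0;
}

Because intel_fake_agp_insert_entries() keeps the sg_table on its stack, the patch copies st.sgl and st.nents back into the agp_memory afterwards so the mapping can still be torn down later.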
25 changes: 25 additions & 0 deletions trunk/drivers/gpu/drm/drm_cache.c
@@ -100,6 +100,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 }
 EXPORT_SYMBOL(drm_clflush_pages);
 
+void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct scatterlist *sg;
+		int i;
+
+		mb();
+		for_each_sg(st->sgl, sg, st->nents, i)
+			drm_clflush_page(sg_page(sg));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
 void
 drm_clflush_virt_range(char *addr, unsigned long length)
 {
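drm_clflush_sg() follows the shape of the existing drm_clflush_pages(): when the CPU has CLFLUSH, flush each backing page between two memory barriers, otherwise fall back to a wbinvd IPI on every CPU. A userspace model of that fence/flush/fence pattern, assuming x86 with SSE2 intrinsics; clflush_range and the hard-coded 64-byte line size are illustrative assumptions:

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static void clflush_range(const void *addr, size_t len)
{
	const size_t line = 64;	/* common x86 cache-line size */
	const char *p = (const char *)((uintptr_t)addr & ~(uintptr_t)(line - 1));
	const char *end = (const char *)addr + len;

	_mm_mfence();		/* order earlier stores before flushing */
	for (; p < end; p += line)
		_mm_clflush(p);
	_mm_mfence();		/* ensure the flushes complete before returning */
}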
18 changes: 11 additions & 7 deletions trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -1006,16 +1006,11 @@ struct drm_i915_gem_object {
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
+	unsigned int has_dma_mapping:1;
 
-	struct page **pages;
+	struct sg_table *pages;
 	int pages_pin_count;
 
-	/**
-	 * DMAR support
-	 */
-	struct scatterlist *sg_list;
-	int num_sg;
-
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
 	void *dma_buf_vmapping;
@@ -1342,6 +1337,15 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct scatterlist *sg = obj->pages->sgl;
+	while (n >= SG_MAX_SINGLE_ALLOC) {
+		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+		n -= SG_MAX_SINGLE_ALLOC - 1;
+	}
+	return sg_page(sg+n);
+}
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages == NULL);
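The new i915_gem_object_get_page() helper resolves a page index through a chained scatterlist: tables built by sg_alloc_table() come in chunks of SG_MAX_SINGLE_ALLOC entries (PAGE_SIZE / sizeof(struct scatterlist)), and in every chunk that has a successor the final slot is a chain pointer rather than a payload, so each hop skips SG_MAX_SINGLE_ALLOC - 1 usable entries. A self-contained model of that chunk arithmetic, written defensively so a lookup can also land in the last chunk; struct entry, CHUNK, and lookup() are illustrative stand-ins:

#include <stddef.h>

#define CHUNK 128	/* stands in for SG_MAX_SINGLE_ALLOC */

struct entry {
	void *payload;		/* sg_page() analogue */
	struct entry *chain;	/* non-NULL only in slot CHUNK - 1 */
};

static void *lookup(struct entry *e, size_t n)
{
	/* hop whole chunks until n indexes into the current one */
	while (n >= CHUNK - 1 && e[CHUNK - 1].chain) {
		e = e[CHUNK - 1].chain;
		n -= CHUNK - 1;	/* the chain slot carries no payload */
	}
	return e[n].payload;
}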
79 changes: 55 additions & 24 deletions trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -411,6 +411,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
+	struct scatterlist *sg;
+	int i;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -439,9 +441,15 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	offset = args->offset;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -452,7 +460,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = obj->pages[offset >> PAGE_SHIFT];
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -731,6 +739,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
+	int i;
+	struct scatterlist *sg;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -765,10 +775,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	offset = args->offset;
 	obj->dirty = 1;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 		int partial_cacheline_write;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -787,7 +803,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			((shmem_page_offset | page_length)
 			 & (boot_cpu_data.x86_clflush_size - 1));
 
-		page = obj->pages[offset >> PAGE_SHIFT];
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
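Both shmem paths above now walk the scatterlist with for_each_sg() instead of indexing a page array: entries before the first page of interest are skipped, and the loop stops once the request is satisfied. Since the allocation path populates one page per entry, entry index and page index coincide. A userspace model of that loop shape; copy_from_pages, struct fake_sg, and the fixed 4096-byte page are illustrative:

#include <stddef.h>
#include <string.h>

#define PAGE_SZ 4096

struct fake_sg { char page[PAGE_SZ]; };	/* one page per list entry */

static size_t copy_from_pages(const struct fake_sg *sg, size_t nents,
			      size_t offset, char *dst, size_t remain)
{
	size_t copied = 0;
	size_t i;

	for (i = 0; i < nents; i++) {
		if (i < offset / PAGE_SZ)
			continue;	/* skip entries before the start */
		if (remain == 0)
			break;		/* request satisfied */

		size_t page_off = (offset + copied) % PAGE_SZ;
		size_t len = PAGE_SZ - page_off;
		if (len > remain)
			len = remain;

		memcpy(dst + copied, sg[i].page + page_off, len);
		copied += len;
		remain -= len;
	}
	return copied;
}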
@@ -1633,6 +1649,7 @@ static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
+	struct scatterlist *sg;
 	int ret, i;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -1653,19 +1670,21 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for (i = 0; i < page_count; i++) {
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+
 		if (obj->dirty)
-			set_page_dirty(obj->pages[i]);
+			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj->pages[i]);
+			mark_page_accessed(page);
 
-		page_cache_release(obj->pages[i]);
+		page_cache_release(page);
 	}
 	obj->dirty = 0;
 
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
 }
 
 static int
@@ -1682,6 +1701,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 		return -EBUSY;
 
 	ops->put_pages(obj);
+	obj->pages = NULL;
 
 	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
@@ -1739,6 +1759,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int page_count, i;
 	struct address_space *mapping;
+	struct sg_table *st;
+	struct scatterlist *sg;
 	struct page *page;
 	gfp_t gfp;
 
@@ -1749,20 +1771,27 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	/* Get the list of pages out of our struct file.  They'll be pinned
-	 * at this point until we release them.
-	 */
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
 	page_count = obj->base.size / PAGE_SIZE;
-	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-	if (obj->pages == NULL)
+	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+		sg_free_table(st);
+		kfree(st);
 		return -ENOMEM;
+	}
 
-	/* Fail silently without starting the shrinker */
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 *
+	 * Fail silently without starting the shrinker
+	 */
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
-	for (i = 0; i < page_count; i++) {
+	for_each_sg(st->sgl, sg, page_count, i) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
 			i915_gem_purge(dev_priv, page_count);
@@ -1785,20 +1814,20 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
 
-		obj->pages[i] = page;
+		sg_set_page(sg, page, PAGE_SIZE, 0);
 	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
+	obj->pages = st;
 	return 0;
 
 err_pages:
-	while (i--)
-		page_cache_release(obj->pages[i]);
-
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
+	for_each_sg(st->sgl, sg, i, page_count)
+		page_cache_release(sg_page(sg));
+	sg_free_table(st);
+	kfree(st);
 	return PTR_ERR(page);
 }

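i915_gem_object_get_pages_gtt() above keeps a strict acquire/unwind discipline: allocate the table, populate one entry per shmem page, and on failure release exactly the i pages already acquired before freeing the table itself. The same pattern stripped to plain C; acquire_all and its malloc stand-ins are illustrative:

#include <stdlib.h>

struct table {
	void **slots;
	size_t nents;
};

static struct table *acquire_all(size_t n)
{
	struct table *t = malloc(sizeof(*t));
	size_t i;

	if (!t)
		return NULL;

	t->slots = calloc(n, sizeof(*t->slots));
	if (!t->slots) {
		free(t);	/* mirrors kfree(st) when sg_alloc_table() fails */
		return NULL;
	}

	for (i = 0; i < n; i++) {
		t->slots[i] = malloc(4096);	/* shmem_read_mapping_page_gfp() stand-in */
		if (!t->slots[i])
			goto err;
	}
	t->nents = n;
	return t;

err:
	while (i--)	/* release only the i entries already filled */
		free(t->slots[i]);
	free(t->slots);
	free(t);
	return NULL;
}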
@@ -2981,7 +3010,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 
 	trace_i915_gem_object_clflush(obj);
 
-	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
+	drm_clflush_sg(obj->pages);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3731,6 +3760,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
 
+	BUG_ON(obj->pages);
+
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 