Skip to content

Commit

Permalink
drm/i915: Refactor object page API
Browse files Browse the repository at this point in the history
The plan is to make obtaining the backing storage for the object avoid
struct_mutex (i.e. use its own locking). The first step is to update the
API so that normal users only call pin/unpin whilst working on the
backing storage.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-12-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Oct 28, 2016
1 parent d2a84a7 commit a4f5ea6
Show file tree
Hide file tree
Showing 17 changed files with 233 additions and 214 deletions.
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_cmd_parser.c
Original file line number Diff line number Diff line change
Expand Up @@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}

if (ret == 0 && needs_clflush_after)
drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);

return ret;
Expand Down
17 changes: 9 additions & 8 deletions drivers/gpu/drm/i915/i915_debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
Expand Down Expand Up @@ -158,8 +158,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
i915_gem_active_get_seqno(&obj->last_write,
&obj->base.dev->struct_mutex),
i915_cache_level_str(dev_priv, obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
Expand Down Expand Up @@ -403,12 +403,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size += obj->base.size;
++count;

if (obj->madv == I915_MADV_DONTNEED) {
if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}

if (obj->mapping) {
if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
Expand All @@ -425,12 +425,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count;
}

if (obj->madv == I915_MADV_DONTNEED) {
if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}

if (obj->mapping) {
if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
Expand Down Expand Up @@ -2028,7 +2028,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma));

if (i915_gem_object_get_pages(vma->obj)) {
if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
Expand All @@ -2047,6 +2047,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state);
}

i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n');
}

Expand Down
94 changes: 64 additions & 30 deletions drivers/gpu/drm/i915/i915_drv.h
Original file line number Diff line number Diff line change
Expand Up @@ -2252,17 +2252,6 @@ struct drm_i915_gem_object {
*/
#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)

/**
* This is set if the object has been written to since last bound
* to the GTT
*/
unsigned int dirty:1;

/**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;

/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
Expand All @@ -2284,16 +2273,31 @@ struct drm_i915_gem_object {
unsigned int bind_count;
unsigned int pin_display;

struct sg_table *pages;
int pages_pin_count;
struct i915_gem_object_page_iter {
struct scatterlist *sg_pos;
unsigned int sg_idx; /* in pages, but 32bit eek! */
struct {
unsigned int pages_pin_count;

struct sg_table *pages;
void *mapping;

struct radix_tree_root radix;
struct mutex lock; /* protects this cache */
} get_page;
void *mapping;
struct i915_gem_object_page_iter {
struct scatterlist *sg_pos;
unsigned int sg_idx; /* in pages, but 32bit eek! */

struct radix_tree_root radix;
struct mutex lock; /* protects this cache */
} get_page;

/**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;

/**
* This is set if the object has been written to since the
* pages were last acquired.
*/
bool dirty:1;
} mm;

/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
Expand Down Expand Up @@ -3182,14 +3186,11 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __sg_page_count(struct scatterlist *sg)
/* Number of PAGE_SIZE pages covered by a single scatterlist entry. */
static inline int __sg_page_count(const struct scatterlist *sg)
{
	const unsigned int nbytes = sg->length;

	return nbytes >> PAGE_SHIFT;
}
Expand All @@ -3210,19 +3211,52 @@ dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n);

static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

/*
 * Take a pin on the object's backing pages, populating them on the
 * first pin.  Returns 0 when the pages are already populated, otherwise
 * the result of acquiring the backing storage.  Caller must hold
 * struct_mutex.
 */
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Only the transition from 0 pins needs to acquire the pages. */
	if (obj->mm.pages_pin_count++ == 0)
		return __i915_gem_object_get_pages(obj);

	return 0;
}

/* Take an extra pin on backing pages that are already populated. */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	/*
	 * NOTE(review): the next two lines reference the pre-refactor
	 * obj->pages / obj->pages_pin_count fields while the lines below
	 * use the new obj->mm.* fields; this span looks like interleaved
	 * removed/added hunks from a stripped diff — confirm against the
	 * applied tree before relying on it.
	 */
	GEM_BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(!obj->mm.pages);

	obj->mm.pages_pin_count++;
}

/* Report whether any pin is currently held on the object's backing pages. */
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return obj->mm.pages_pin_count != 0;
}

/*
 * Drop one pin on the object's backing pages without releasing them.
 * Caller must hold struct_mutex and the pages must currently be pinned.
 */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(!obj->mm.pages);

	obj->mm.pages_pin_count--;
	/* The pin count may never drop below the number of bindings. */
	GEM_BUG_ON(obj->mm.pages_pin_count < obj->bind_count);
}

/* Release one pin on the object's backing pages. */
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	/*
	 * NOTE(review): the next three lines use the pre-refactor
	 * obj->pages_pin_count field and duplicate checks performed by
	 * __i915_gem_object_unpin_pages(); this looks like interleaved
	 * removed/added hunks from a stripped diff — confirm against the
	 * applied tree before relying on it.
	 */
	GEM_BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
	GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);

enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
Expand Down
Loading

0 comments on commit a4f5ea6

Please sign in to comment.