Commit

---
yaml
---
r: 218103
b: refs/heads/master
c: f787a5f
h: refs/heads/master
i:
  218101: 6ff62fa
  218099: f67f2a0
  218095: d560910
v: v3
Chris Wilson committed Sep 24, 2010
1 parent 0943613 commit 6f06603
Showing 9 changed files with 124 additions and 93 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 27d64339a8d8465484286a2da93f5f6c36be5c3d
+refs/heads/master: f787a5f59e1b0e320a6b0a37e9a2e306551d1e40
6 changes: 3 additions & 3 deletions trunk/drivers/gpu/drm/i915/i915_debugfs.c
@@ -261,7 +261,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -321,7 +321,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -932,7 +932,7 @@ i915_wedged_write(struct file *filp,
 
 	atomic_set(&dev_priv->mm.wedged, val);
 	if (val) {
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		wake_up_all(&dev_priv->irq_queue);
 		queue_work(dev_priv->wq, &dev_priv->error_work);
 	}
 
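Note (not part of the commit): the callers above now go straight through the ring's get_seqno hook instead of the i915_get_gem_seqno() wrapper that this commit removes from i915_gem.c. A minimal standalone C sketch of that per-ring vtable dispatch, using stand-in types and names:

/* Standalone sketch only -- stand-in types, not the kernel's intel_ring_buffer. */
#include <stdint.h>
#include <stdio.h>

struct fake_ring {
	uint32_t hw_seqno;                          /* pretend hardware status page value */
	uint32_t (*get_seqno)(struct fake_ring *);  /* per-ring hook, cf. ring->get_seqno */
};

static uint32_t fake_get_seqno(struct fake_ring *ring)
{
	return ring->hw_seqno;
}

int main(void)
{
	struct fake_ring render = { .hw_seqno = 42, .get_seqno = fake_get_seqno };

	/* Callers dispatch through the ring itself rather than a global wrapper. */
	printf("Current sequence: %u\n", render.get_seqno(&render));
	return 0;
}
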
22 changes: 11 additions & 11 deletions trunk/drivers/gpu/drm/i915/i915_dma.c
@@ -2162,20 +2162,19 @@ int i915_driver_unload(struct drm_device *dev)
 	return 0;
 }
 
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv;
+	struct drm_i915_file_private *file_priv;
 
 	DRM_DEBUG_DRIVER("\n");
-	i915_file_priv = (struct drm_i915_file_private *)
-	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
-	if (!i915_file_priv)
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
 		return -ENOMEM;
 
-	file_priv->driver_priv = i915_file_priv;
+	file->driver_priv = file_priv;
 
-	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	mutex_init(&file_priv->mutex);
 
 	return 0;
 }
@@ -2218,11 +2217,12 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	kfree(i915_file_priv);
+	mutex_destroy(&file_priv->mutex);
+	kfree(file_priv);
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
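Note (not part of the commit): the per-client lock added here follows the usual pairing of init in open and destroy in close. A userspace analogue, with a pthread mutex standing in for the kernel struct mutex and hypothetical names:

/* Standalone sketch, not kernel code: all names here are hypothetical. */
#include <pthread.h>
#include <stdlib.h>

struct client_private {                 /* stands in for drm_i915_file_private */
	pthread_mutex_t lock;           /* stands in for the new 'mutex' member */
};

struct client_private *client_open(void)
{
	struct client_private *priv = malloc(sizeof(*priv));

	if (!priv)
		return NULL;
	pthread_mutex_init(&priv->lock, NULL);  /* cf. mutex_init() in i915_driver_open() */
	return priv;
}

void client_close(struct client_private *priv)
{
	pthread_mutex_destroy(&priv->lock);     /* cf. mutex_destroy() in i915_driver_postclose() */
	free(priv);
}
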
15 changes: 12 additions & 3 deletions trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -844,11 +844,13 @@ struct drm_i915_gem_request {
 	/** global list entry for this request */
 	struct list_head list;
 
+	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 };
 
 struct drm_i915_file_private {
+	struct mutex mutex;
 	struct {
 		struct list_head request_list;
 	} mm;
@@ -1005,9 +1007,16 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
 
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
 				  bool interruptible);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
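Note (not part of the commit): the signed-difference test in the new inline i915_seqno_passed() is what keeps request ordering correct across 32-bit seqno wrap-around. A standalone demonstration (the cast relies on two's-complement conversion, as kernel code does):

/* Standalone sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* A plain '>=' comparison breaks across the 0xffffffff -> 0 wrap;
	 * the signed difference does not. */
	printf("%d\n", seqno_passed(5, 3));            /* 1: 5 is later than 3 */
	printf("%d\n", seqno_passed(3, 5));            /* 0 */
	printf("%d\n", seqno_passed(2, 0xfffffffeu));  /* 1: 2 was issued after the wrap */
	printf("%d\n", seqno_passed(0xfffffffeu, 2));  /* 0 */
	return 0;
}
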
120 changes: 72 additions & 48 deletions trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1592,37 +1592,38 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 
 uint32_t
 i915_add_request(struct drm_device *dev,
-		 struct drm_file *file_priv,
+		 struct drm_file *file,
 		 struct drm_i915_gem_request *request,
 		 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_file_private *i915_file_priv = NULL;
+	struct drm_i915_file_private *file_priv = NULL;
 	uint32_t seqno;
 	int was_empty;
 
-	if (file_priv != NULL)
-		i915_file_priv = file_priv->driver_priv;
+	if (file != NULL)
+		file_priv = file->driver_priv;
 
 	if (request == NULL) {
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
 		if (request == NULL)
 			return 0;
 	}
 
-	seqno = ring->add_request(dev, ring, file_priv, 0);
+	seqno = ring->add_request(dev, ring, 0);
 
 	request->seqno = seqno;
 	request->ring = ring;
 	request->emitted_jiffies = jiffies;
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (i915_file_priv) {
+	if (file_priv) {
+		mutex_lock(&file_priv->mutex);
+		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
-			      &i915_file_priv->mm.request_list);
-	} else {
-		INIT_LIST_HEAD(&request->client_list);
+			      &file_priv->mm.request_list);
+		mutex_unlock(&file_priv->mutex);
 	}
 
 	if (!dev_priv->mm.suspended) {
@@ -1654,20 +1655,14 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 			    I915_GEM_DOMAIN_COMMAND, flush_domains);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-	return (int32_t)(seq1 - seq2) >= 0;
-}
-
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-		   struct intel_ring_buffer *ring)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-	return ring->get_gem_seqno(dev, ring);
+	if (request->file_priv) {
+		mutex_lock(&request->file_priv->mutex);
+		list_del(&request->client_list);
+		mutex_unlock(&request->file_priv->mutex);
+	}
 }
 
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
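Note (not part of the commit): i915_add_request() links a request onto the client's list and the helper above unlinks it, both under the client's own mutex rather than the global struct_mutex. A userspace analogue of that add/remove pairing, with hypothetical names, a pthread mutex in place of the kernel mutex, and an open-coded circular list in place of the list_head helpers:

/* Standalone sketch, not kernel code. */
#include <pthread.h>
#include <stddef.h>

struct list_node { struct list_node *prev, *next; };

struct client {                         /* stands in for drm_i915_file_private */
	pthread_mutex_t lock;
	struct list_node request_list;  /* sentinel node */
};

struct request {                        /* stands in for drm_i915_gem_request */
	struct client *client;          /* stands in for request->file_priv */
	struct list_node client_link;   /* stands in for request->client_list */
};

void client_init(struct client *c)
{
	pthread_mutex_init(&c->lock, NULL);
	c->request_list.prev = c->request_list.next = &c->request_list;
}

/* cf. the list_add_tail() done under file_priv->mutex in i915_add_request() */
void client_add_request(struct client *c, struct request *rq)
{
	pthread_mutex_lock(&c->lock);
	rq->client = c;
	rq->client_link.prev = c->request_list.prev;
	rq->client_link.next = &c->request_list;
	c->request_list.prev->next = &rq->client_link;
	c->request_list.prev = &rq->client_link;
	pthread_mutex_unlock(&c->lock);
}

/* cf. i915_gem_request_remove_from_client() */
void client_remove_request(struct request *rq)
{
	if (!rq->client)                /* request was never tied to a client */
		return;
	pthread_mutex_lock(&rq->client->lock);
	rq->client_link.prev->next = rq->client_link.next;
	rq->client_link.next->prev = rq->client_link.prev;
	pthread_mutex_unlock(&rq->client->lock);
	rq->client = NULL;
}
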
@@ -1681,7 +1676,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 				       list);
 
 		list_del(&request->list);
-		list_del(&request->client_list);
+		i915_gem_request_remove_from_client(request);
 		kfree(request);
 	}
 
@@ -1746,7 +1741,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	    list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev, ring);
+	seqno = ring->get_seqno(dev, ring);
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1760,7 +1755,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 		trace_i915_gem_request_retire(dev, request->seqno);
 
 		list_del(&request->list);
-		list_del(&request->client_list);
+		i915_gem_request_remove_from_client(request);
 		kfree(request);
 	}
 
@@ -1862,7 +1857,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EIO;
 
-	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1881,12 +1876,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 		else
 			wait_event(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 
 		ring->user_irq_put(dev, ring);
@@ -1899,7 +1894,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
+			  __func__, ret, seqno, ring->get_seqno(dev, ring),
 			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring. While we have the work queue
@@ -3384,28 +3379,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  * relatively low latency when blocking on a particular request to finish.
  */
 static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-	int ret = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+	struct drm_i915_gem_request *request;
+	struct intel_ring_buffer *ring = NULL;
+	u32 seqno = 0;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&i915_file_priv->mm.request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&i915_file_priv->mm.request_list,
-					   struct drm_i915_gem_request,
-					   client_list);
-
+	mutex_lock(&file_priv->mutex);
+	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ret = i915_wait_request(dev, request->seqno, request->ring);
-		if (ret != 0)
-			break;
+		ring = request->ring;
+		seqno = request->seqno;
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&file_priv->mutex);
+
+	if (seqno == 0)
+		return 0;
+
+	ret = 0;
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+		/* And wait for the seqno passing without holding any locks and
+		 * causing extra latency for others. This is safe as the irq
+		 * generation is designed to be run atomically and so is
+		 * lockless.
+		 */
+		ring->user_irq_get(dev, ring);
+		ret = wait_event_interruptible(ring->irq_queue,
+					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+					       || atomic_read(&dev_priv->mm.wedged));
+		ring->user_irq_put(dev, ring);
+
+		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+			ret = -EIO;
+	}
+
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
 	return ret;
 }
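Note (not part of the commit): the reworked throttle snapshots the newest old-enough request's ring and seqno under the per-client mutex, drops all locks, and only then sleeps on the ring's wait queue. A userspace analogue of that snapshot-then-wait shape, with a pthread condition variable in place of ring->irq_queue and hypothetical names throughout:

/* Standalone sketch, not kernel code. */
#include <pthread.h>
#include <stdint.h>
#include <stdbool.h>

struct fake_ring {
	pthread_mutex_t irq_lock;
	pthread_cond_t irq_queue;       /* stands in for ring->irq_queue */
	uint32_t hw_seqno;              /* stands in for ring->get_seqno() */
};

void ring_init(struct fake_ring *ring)
{
	pthread_mutex_init(&ring->irq_lock, NULL);
	pthread_cond_init(&ring->irq_queue, NULL);
	ring->hw_seqno = 0;
}

static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;   /* same wrap-safe test as i915_seqno_passed() */
}

/* "Interrupt" side: called whenever the hardware counter advances. */
void ring_advance(struct fake_ring *ring, uint32_t seqno)
{
	pthread_mutex_lock(&ring->irq_lock);
	ring->hw_seqno = seqno;
	pthread_cond_broadcast(&ring->irq_queue);
	pthread_mutex_unlock(&ring->irq_lock);
}

/* Throttling side: called only after the client has dropped its own lock,
 * mirroring the lockless wait_event_interruptible() above. */
void ring_wait_seqno(struct fake_ring *ring, uint32_t seqno)
{
	pthread_mutex_lock(&ring->irq_lock);
	while (!seqno_passed(ring->hw_seqno, seqno))
		pthread_cond_wait(&ring->irq_queue, &ring->irq_lock);
	pthread_mutex_unlock(&ring->irq_lock);
}
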
@@ -4857,17 +4872,26 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
 	 */
 	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&i915_file_priv->mm.request_list))
-		list_del_init(i915_file_priv->mm.request_list.next);
+	mutex_lock(&file_priv->mutex);
+	while (!list_empty(&file_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&file_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   client_list);
+		list_del(&request->client_list);
+		request->file_priv = NULL;
+	}
+	mutex_unlock(&file_priv->mutex);
 	mutex_unlock(&dev->struct_mutex);
 }
 
