Commit

---
yaml
---
r: 269719
b: refs/heads/master
c: e2fa3a7
h: refs/heads/master
i:
  269717: fcd9183
  269715: e3e941d
  269711: f861bef
v: v3
Thomas Hellstrom authored and Dave Airlie committed Oct 5, 2011
1 parent 92954cd commit 5e4cb83
Showing 7 changed files with 573 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: e93daed8e2fd5ce3dc98efe9938426127a534ccc
refs/heads/master: e2fa3a76839ada0d788549607263a036aa654243
44 changes: 44 additions & 0 deletions trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -42,6 +42,7 @@
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
@@ -59,6 +60,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;

vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
@@ -78,6 +81,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
@@ -100,6 +104,9 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;

if (pin)
vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
@@ -177,6 +184,7 @@ int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
@@ -205,6 +213,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;

if (pin)
vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
@@ -276,3 +287,36 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
ptr->offset = 0;
}
}


/**
* vmw_bo_pin - Pin or unpin a buffer object without moving it.
*
* @bo: The buffer object. Must be reserved, and present either in VRAM
* or GMR memory.
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
uint32_t pl_flags;
struct ttm_placement placement;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;

BUG_ON(!atomic_read(&bo->reserved));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
       old_mem_type != VMW_PL_GMR);

pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
if (pin)
pl_flags |= TTM_PL_FLAG_NO_EVICT;

memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
placement.placement = &pl_flags;

ret = ttm_bo_validate(bo, &placement, false, true, true);

BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
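
For illustration only (not part of this commit), here is a minimal sketch of how a caller might use the new vmw_bo_pin() helper. The wrapper name example_pin_query_bo() is hypothetical; it assumes the dummy query bo is already reserved-capable and resident in VRAM or GMR, since vmw_bo_pin() only toggles TTM_PL_FLAG_NO_EVICT and never moves the buffer.

/*
 * example_pin_query_bo - Hypothetical helper, not part of this commit.
 *
 * Reserves the dummy query bo and pins it in place with vmw_bo_pin().
 */
static int example_pin_query_bo(struct vmw_private *dev_priv)
{
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_pin(bo, true);
	dev_priv->dummy_query_bo_pinned = true;
	ttm_bo_unreserve(bo);

	return 0;
}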
86 changes: 86 additions & 0 deletions trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -213,6 +213,72 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Screen Object 2.\n");
}


/**
* vmw_dummy_query_bo_prepare - Initialize a query result structure at
* the start of a buffer object.
*
* @dev_priv: The device private structure.
*
* This function will idle the buffer using an uninterruptible wait, then
* map the first page and initialize a pending occlusion query result structure.
* Finally, it will unmap the buffer.
*
* TODO: Since we're only mapping a single page, we should optimize the map
* to use kmap_atomic / iomap_atomic.
*/
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
int ret;
struct ttm_bo_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

ttm_bo_reserve(bo, false, false, false, 0);
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0))
(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
10*HZ);

ret = ttm_bo_kmap(bo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
} else
DRM_ERROR("Dummy query buffer map failed.\n");
ttm_bo_unreserve(bo);
}


/**
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result
*
* @dev_priv: A device private structure.
*
* This function creates a small buffer object that holds the query
* result for dummy queries emitted as query barriers.
* No interruptible waits are done within this function.
*
* Returns an error if bo creation fails.
*/
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
return ttm_bo_create(&dev_priv->bdev,
PAGE_SIZE,
ttm_bo_type_device,
&vmw_vram_sys_placement,
0, 0, false, NULL,
&dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -223,12 +289,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
vmw_dummy_query_bo_prepare(dev_priv);

return 0;

out_no_query_bo:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
* the pinned bo.
*/

BUG_ON(dev_priv->pinned_bo != NULL);

ttm_bo_unref(&dev_priv->dummy_query_bo);
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
@@ -794,6 +877,8 @@ static void vmw_master_drop(struct drm_device *dev,

vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
drm_master_put(&vmw_fp->locked_master);
@@ -844,6 +929,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents are moved to swappable memory.
*/
vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
ttm_bo_swapout_all(&dev_priv->bdev);

break;
24 changes: 24 additions & 0 deletions trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -82,6 +82,7 @@ struct vmw_resource {
void (*hw_destroy) (struct vmw_resource *res);
void (*res_free) (struct vmw_resource *res);
bool on_validate_list;
struct list_head query_head; /* Protected by the cmdbuf mutex */
/* TODO is a generic snooper needed? */
#if 0
void (*snoop)(struct vmw_resource *res,
@@ -142,6 +143,7 @@ struct vmw_sw_context{
uint32_t last_cid;
bool cid_valid;
bool kernel; /**< is the call made from the kernel */
struct vmw_resource *cur_ctx;
uint32_t last_sid;
uint32_t sid_translation;
bool sid_valid;
@@ -155,6 +157,11 @@
uint32_t cmd_bounce_size;
struct vmw_resource *resources[VMWGFX_MAX_VALIDATIONS];
uint32_t num_ref_resources;
uint32_t fence_flags;
struct list_head query_list;
struct ttm_buffer_object *cur_query_bo;
uint32_t cur_query_cid;
bool query_cid_valid;
};

struct vmw_legacy_display;
@@ -294,6 +301,16 @@ struct vmw_private {

struct mutex release_mutex;
uint32_t num_3d_resources;

/*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/

struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
uint32_t query_cid;
bool dummy_query_bo_pinned;
};

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -418,6 +435,7 @@ extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -447,6 +465,8 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid);

/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -485,6 +505,10 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep);

extern void
vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
bool only_on_cid_match, uint32_t cid);

/**
* IRQs and waiting - vmwgfx_irq.c
*/
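
Purely illustrative and not part of this commit, the sketch below combines the two new declarations, vmw_fifo_emit_dummy_query() and vmw_execbuf_release_pinned_bo(). The helper name example_query_barrier() and the cid-matching call order are assumptions, not the driver's actual call sequence.

/*
 * example_query_barrier - Hypothetical helper, not part of this commit.
 *
 * Emits a dummy occlusion query for context @cid, then asks the execbuf
 * code to flush and unpin any bo still pinned for query results that
 * matches that context.
 */
static int example_query_barrier(struct vmw_private *dev_priv, uint32_t cid)
{
	int ret;

	ret = vmw_fifo_emit_dummy_query(dev_priv, cid);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv, true, cid);

	return 0;
}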