Skip to content

Commit

Permalink
drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING
Browse files Browse the repository at this point in the history
If the command buffer pool is out of space, the code waits until space is
available. However, since the condition code tries to allocate a range-manager
node while !TASK_RUNNING, we get a kernel warning.

Avoid this by pre-allocating the mm node. This will also probably be more
efficient.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
  • Loading branch information
Thomas Hellstrom committed Aug 5, 2015
1 parent 6bf6bf0 commit 9b59078
Showing 1 changed file with 34 additions and 49 deletions.
83 changes: 34 additions & 49 deletions drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,8 @@
* multiple of the DMA pool allocation size.
*/
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE (1024 - VMW_CMDBUF_INLINE_ALIGN)
#define VMW_CMDBUF_INLINE_SIZE \
(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

/**
* struct vmw_cmdbuf_context - Command buffer context queues
Expand Down Expand Up @@ -145,7 +146,7 @@ struct vmw_cmdbuf_header {
SVGACBHeader *cb_header;
SVGACBContext cb_context;
struct list_head list;
struct drm_mm_node *node;
struct drm_mm_node node;
dma_addr_t handle;
u8 *cmd;
size_t size;
Expand All @@ -169,13 +170,13 @@ struct vmw_cmdbuf_dheader {
* struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
*
* @page_size: Size of requested command buffer space in pages.
* @node: The range manager node if allocation succeeded.
* @ret: Error code if failure. Otherwise 0.
* @node: Pointer to the range manager node.
* @done: True if this allocation has succeeded.
*/
struct vmw_cmdbuf_alloc_info {
size_t page_size;
struct drm_mm_node *node;
int ret;
bool done;
};

/* Loop over each context in the command buffer manager. */
Expand Down Expand Up @@ -253,9 +254,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
return;
}

drm_mm_remove_node(header->node);
kfree(header->node);
header->node = NULL;
drm_mm_remove_node(&header->node);
wake_up_all(&man->alloc_queue);
if (header->cb_header)
dma_pool_free(man->headers, header->cb_header,
Expand Down Expand Up @@ -669,71 +668,58 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
{
int ret;

if (info->node)
if (info->done)
return true;

info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
if (!info->node) {
info->ret = -ENOMEM;
return true;
}


memset(info->node, 0, sizeof(*info->node));
spin_lock_bh(&man->lock);
ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, 0, 0,
ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
spin_unlock_bh(&man->lock);
if (ret) {
kfree(info->node);
info->node = NULL;
}
info->done = !ret;

return !!info->node;
return info->done;
}

/**
* vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
*
* @man: The command buffer manager.
* @node: Pointer to pre-allocated range-manager node.
* @size: The size of the allocation.
* @interruptible: Whether to sleep interruptible while waiting for space.
*
* This function allocates buffer space from the main pool, and if there is
* no space available ATM, it turns on IRQ handling and sleeps waiting for it to
* become available.
*/
static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
size_t size,
bool interruptible)
int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
struct drm_mm_node *node,
size_t size,
bool interruptible)
{
struct vmw_cmdbuf_alloc_info info;

info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
info.node = NULL;
info.ret = 0;
info.node = node;
info.done = false;

/*
* To prevent starvation of large requests, only one allocating call
* at a time waiting for space.
*/
if (interruptible) {
if (mutex_lock_interruptible(&man->space_mutex))
return ERR_PTR(-ERESTARTSYS);
return -ERESTARTSYS;
} else {
mutex_lock(&man->space_mutex);
}

/* Try to allocate space without waiting. */
(void) vmw_cmdbuf_try_alloc(man, &info);
if (info.ret && !info.node) {
mutex_unlock(&man->space_mutex);
return ERR_PTR(info.ret);
}

if (info.node) {
mutex_unlock(&man->space_mutex);
return info.node;
}
if (vmw_cmdbuf_try_alloc(man, &info))
goto out_unlock;

vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
Expand All @@ -749,19 +735,19 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
mutex_unlock(&man->space_mutex);
return ERR_PTR(ret);
return ret;
}
} else {
wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
}
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);

out_unlock:
mutex_unlock(&man->space_mutex);
if (info.ret && !info.node)
return ERR_PTR(info.ret);

return info.node;
return 0;
}

/**
Expand All @@ -785,10 +771,10 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
if (!man->has_pool)
return -ENOMEM;

header->node = vmw_cmdbuf_alloc_space(man, size, interruptible);
ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

if (IS_ERR(header->node))
return PTR_ERR(header->node);
if (ret)
return ret;

header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
&header->handle);
Expand All @@ -797,9 +783,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
goto out_no_cb_header;
}

header->size = header->node->size << PAGE_SHIFT;
header->size = header->node.size << PAGE_SHIFT;
cb_hdr = header->cb_header;
offset = header->node->start << PAGE_SHIFT;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
memset(cb_hdr, 0, sizeof(*cb_hdr));
if (man->using_mob) {
Expand All @@ -814,9 +800,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,

out_no_cb_header:
spin_lock_bh(&man->lock);
drm_mm_remove_node(header->node);
drm_mm_remove_node(&header->node);
spin_unlock_bh(&man->lock);
kfree(header->node);

return ret;
}
Expand Down

0 comments on commit 9b59078

Please sign in to comment.