drm/msm: Separate locking of buffer resources from struct_mutex
Buffer object specific resources like pages, domains, and the sg list
need not be protected with struct_mutex; they can be protected with a
buffer object level lock. This simplifies locking and makes it easier
to avoid potential recursive locking scenarios for SVM involving
mmap_sem and struct_mutex. It also removes unnecessary serialization
when creating buffer objects, and between buffer object creation and
GPU command submission.

Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark <robdclark@gmail.com>
Sushmita Susheelendra authored and Rob Clark committed Jun 17, 2017
1 parent 816fa34 commit 0e08270
Showing 17 changed files with 238 additions and 144 deletions.
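
Before the per-file hunks, it helps to see the shape of the change: pages, the vmap address, and per-address-space state move from under the device-wide struct_mutex to a mutex embedded in each buffer object. A minimal sketch of that pattern, with field and helper names assumed for illustration (the msm_gem.c hunk that actually implements it is not shown in this excerpt):

	/* Sketch only: illustrative names, not the verbatim msm_gem.c change */
	struct msm_gem_object {
		struct drm_gem_object base;
		struct mutex lock;	/* protects pages, vaddr, domains, sgt */
		struct sg_table *sgt;
		struct page **pages;
		void *vaddr;
	};

	void *msm_gem_get_vaddr(struct drm_gem_object *obj)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		void *ret;

		/* Per-object lock instead of dev->struct_mutex, so two
		 * threads touching different buffer objects never
		 * serialize against each other here.
		 */
		mutex_lock(&msm_obj->lock);
		ret = get_vaddr(obj);	/* hypothetical helper: pin pages, vmap once */
		mutex_unlock(&msm_obj->lock);

		return ret;
	}

With the lock embedded in each object, the former _locked variants of get_iova/get_vaddr/put_vaddr become the only versions, which is why the call sites below simply drop the suffix.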
8 changes: 4 additions & 4 deletions drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -297,18 +297,18 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
struct drm_gem_object *bo;
void *ptr;

- bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
+ bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
if (IS_ERR(bo))
return bo;

- ptr = msm_gem_get_vaddr_locked(bo);
+ ptr = msm_gem_get_vaddr(bo);
if (!ptr) {
drm_gem_object_unreference(bo);
return ERR_PTR(-ENOMEM);
}

if (iova) {
- int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
+ int ret = msm_gem_get_iova(bo, gpu->aspace, iova);

if (ret) {
drm_gem_object_unreference(bo);
@@ -318,7 +318,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,

memcpy(ptr, &fw->data[4], fw->size - 4);

- msm_gem_put_vaddr_locked(bo);
+ msm_gem_put_vaddr(bo);
return bo;
}

8 changes: 4 additions & 4 deletions drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,15 +294,15 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

- a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+ a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
if (IS_ERR(a5xx_gpu->gpmu_bo))
goto err;

- if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
&a5xx_gpu->gpmu_iova))
goto err;

- ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
+ ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
if (!ptr)
goto err;

@@ -321,7 +321,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
cmds_size -= _size;
}

- msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
+ msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;

goto out;
4 changes: 1 addition & 3 deletions drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)

DBG("%s", gpu->name);

- ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -397,10 +397,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}

- mutex_lock(&drm->struct_mutex);
adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
MSM_BO_UNCACHED);
- mutex_unlock(&drm->struct_mutex);
if (IS_ERR(adreno_gpu->memptrs_bo)) {
ret = PTR_ERR(adreno_gpu->memptrs_bo);
adreno_gpu->memptrs_bo = NULL;
4 changes: 1 addition & 3 deletions drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -982,18 +982,16 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
uint64_t iova;

if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
if (IS_ERR(msm_host->tx_gem_obj)) {
ret = PTR_ERR(msm_host->tx_gem_obj);
pr_err("%s: failed to allocate gem, %d\n",
__func__, ret);
msm_host->tx_gem_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
return ret;
}

- ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj,
priv->kms->aspace, &iova);
- mutex_unlock(&dev->struct_mutex);
if (ret) {
2 changes: 1 addition & 1 deletion drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
+ msm_gem_get_iova(next_bo, kms->aspace, &iova);

/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
2 changes: 0 additions & 2 deletions drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -528,9 +528,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}

- mutex_lock(&dev->struct_mutex);
mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
1 change: 1 addition & 0 deletions drivers/gpu/drm/msm/msm_drv.c
@@ -336,6 +336,7 @@ static int msm_init_vram(struct drm_device *dev)
priv->vram.size = size;

drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+ spin_lock_init(&priv->vram.lock);

attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
attrs |= DMA_ATTR_WRITE_COMBINE;
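
Creating buffer objects without struct_mutex means the VRAM carveout allocator needs its own protection, which is what the spin_lock_init() added above (and the vram.lock field added to msm_drv.h below) provides. Roughly, allocation would look like this sketch; the helper name is assumed, and the corresponding msm_gem.c hunk is not shown in this excerpt:

	static int vram_alloc(struct msm_drm_private *priv,
			struct drm_mm_node *node, unsigned npages)
	{
		int ret;

		/* A spinlock is enough here: drm_mm insert/remove does
		 * not sleep, and the critical section is short.
		 */
		spin_lock(&priv->vram.lock);
		ret = drm_mm_insert_node(&priv->vram.mm, node, npages);
		spin_unlock(&priv->vram.lock);

		return ret;
	}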
9 changes: 3 additions & 6 deletions drivers/gpu/drm/msm/msm_drv.h
@@ -149,6 +149,7 @@ struct msm_drm_private {
* and position mm_node->start is in # of pages:
*/
struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
} vram;

struct notifier_block vmap_notifier;
@@ -198,8 +199,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
- int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
@@ -221,13 +220,9 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
- void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
- void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -240,6 +235,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
+ struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);

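
The new msm_gem_new_locked() entry point exists for the few callers that still run with struct_mutex held (the a5xx ucode and GPMU load paths above), while everyone else (adreno init, dsi, mdp4, fbdev) can now call msm_gem_new() with no locking around allocation at all. A plausible sketch of the split, with the shared internal helper name assumed:

	/* Caller already holds dev->struct_mutex (GPU init paths): */
	struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
			uint32_t size, uint32_t flags)
	{
		return _msm_gem_new(dev, size, flags, true);
	}

	/* No struct_mutex required around allocation anymore: */
	struct drm_gem_object *msm_gem_new(struct drm_device *dev,
			uint32_t size, uint32_t flags)
	{
		return _msm_gem_new(dev, size, flags, false);
	}

In this sketch the boolean only tells the common helper whether it may take struct_mutex itself for the little device-global state that still needs it.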
6 changes: 2 additions & 4 deletions drivers/gpu/drm/msm/msm_fbdev.c
@@ -97,10 +97,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
/* allocate backing bo */
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
- mutex_lock(&dev->struct_mutex);
fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
MSM_BO_WC | MSM_BO_STOLEN);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
fbdev->bo = NULL;
@@ -126,7 +124,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
@@ -155,7 +153,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,

dev->mode_config.fb_base = paddr;

- fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
