Merge tag 'msm-fixes-2019_08_01' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

- Fix the dma_sync calls applied last week (Rob)
- Fix mdp5 dsi command mode (Brian)
- Squash fall through warnings (Jordan)
- Don't add disabled gpu nodes to the of device list (Jeffrey)

Cc: Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
Cc: Jordan Crouse <jcrouse@codeaurora.org>
Cc: Brian Masney <masneyb@onstation.org>
Cc: Rob Clark <robdclark@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Fri 02 Aug 2019 05:54:27 AM AEST
# gpg:                using RSA key 96F70DFDA84A070A
# gpg: Can't check signature: public key not found
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20190801200439.GV104440@art_vandelay
Dave Airlie committed Aug 2, 2019
2 parents 412e85b + 9ca7ad6 commit f8981e0
Showing 7 changed files with 64 additions and 8 deletions.
2 changes: 2 additions & 0 deletions drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ 	/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ 	/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
1 change: 1 addition & 0 deletions drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ 	/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
1 change: 1 addition & 0 deletions drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
+ 	/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
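Note on the three adreno hunks above: the added comments silence GCC's -Wimplicit-fallthrough warning for the intentional fall from MSM_SUBMIT_CMD_CTX_RESTORE_BUF into MSM_SUBMIT_CMD_BUF. A minimal, self-contained sketch of the warning and the annotation follows; the enum and handler are illustrative, not taken from the driver, "fall-thru" is one of the comment forms GCC's default heuristic accepts, and later kernels use the fallthrough pseudo-keyword instead of a comment.

/* Build with: gcc -Wimplicit-fallthrough -c fallthru_demo.c */
#include <stdio.h>

enum demo_cmd { DEMO_CMD_RESTORE, DEMO_CMD_BUF, DEMO_CMD_NOP };

static void demo_handle(enum demo_cmd cmd, int ctx_unchanged)
{
	switch (cmd) {
	case DEMO_CMD_RESTORE:
		if (ctx_unchanged)
			break;	/* context already current, emit nothing */
		/* fall-thru */
	case DEMO_CMD_BUF:
		printf("emit command buffer\n");
		break;
	case DEMO_CMD_NOP:
		break;
	}
}

int main(void)
{
	demo_handle(DEMO_CMD_RESTORE, 0);
	return 0;
}

Removing the /* fall-thru */ line makes GCC report "this statement may fall through" for the DEMO_CMD_RESTORE case, which is the class of warning these hunks squash.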
16 changes: 15 additions & 1 deletion drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp5_crtc->enabled = false;
}

+ static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+ {
+ 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ 	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ 	u32 count;
+
+ 	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ 	drm_crtc_set_max_vblank_count(crtc, count);
+
+ 	drm_crtc_vblank_on(crtc);
+ }
+
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
}

/* Restore vblank irq handling after power is enabled */
- 	drm_crtc_vblank_on(crtc);
+ 	mdp5_crtc_vblank_on(crtc);

mdp5_crtc_mode_set_nofb(crtc);

@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
mdp5_crtc_destroy_state(crtc, crtc->state);

__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+ 	drm_crtc_vblank_reset(crtc);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
2 changes: 1 addition & 1 deletion drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- 	dev->max_vblank_count = 0xffffffff;
+ 	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;

return kms;
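The two mdp5 hunks above work as a pair: mdp5_kms_init() now leaves dev->max_vblank_count at 0, which is what lets each CRTC pick its own counter depth via drm_crtc_set_max_vblank_count(), and mdp5_crtc_vblank_on() picks 0 for DSI command-mode interfaces (no free-running hardware frame counter, so the DRM core falls back to timestamp-based accounting) versus the full 32-bit counter otherwise. A reduced sketch of that enable-time pattern, with a hypothetical has_hw_frame_counter() standing in for the MDP5_INTF_DSI_MODE_COMMAND check:

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

/* Hypothetical predicate: true when the scanout interface exposes a
 * usable hardware frame counter (i.e. not a command-mode DSI panel). */
static bool has_hw_frame_counter(struct drm_crtc *crtc);

/* Per-CRTC vblank setup at enable time.  Only valid when the driver
 * leaves dev->max_vblank_count at 0, as the mdp5_kms.c hunk ensures. */
static void example_crtc_vblank_on(struct drm_crtc *crtc)
{
	/* 0 tells the DRM core there is no hardware counter to read. */
	u32 max = has_hw_frame_counter(crtc) ? 0xffffffff : 0;

	drm_crtc_set_max_vblank_count(crtc, max);
	drm_crtc_vblank_on(crtc);
}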
3 changes: 2 additions & 1 deletion drivers/gpu/drm/msm/msm_drv.c
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;

- 	drm_of_component_match_add(dev, matchptr, compare_of, np);
+ 	if (of_device_is_available(np))
+ 		drm_of_component_match_add(dev, matchptr, compare_of, np);

of_node_put(np);

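The msm_drv.c hunk keeps devicetree GPU nodes whose status is "disabled" out of the component match list, so the DRM master device is not left waiting on a component that will never probe. of_device_is_available() returns true when the node exists and its status property is absent, "okay", or "ok". A small sketch of the same guard wrapped in a helper; add_optional_component() is a hypothetical name, and compare_of() is the usual of_node comparison callback such drivers use:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/of.h>
#include <drm/drm_of.h>

/* Usual component-framework compare callback: match on the of_node. */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* Hypothetical helper: only add a match entry for nodes that are
 * actually enabled (status = "okay" or no status property at all). */
static void add_optional_component(struct device *master,
				   struct component_match **matchptr,
				   struct device_node *np)
{
	if (!of_device_is_available(np))
		return;

	drm_of_component_match_add(master, matchptr, compare_of, np);
}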
47 changes: 42 additions & 5 deletions drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}

+ /*
+  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+  * API. Really GPU cache is out of scope here (handled on cmdstream)
+  * and all we need to do is invalidate newly allocated pages before
+  * mapping to CPU as uncached/writecombine.
+  *
+  * On top of this, we have the added headache, that depending on
+  * display generation, the display's iommu may be wired up to either
+  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+  * that here we either have dma-direct or iommu ops.
+  *
+  * Let this be a cautionary tale of abstraction gone wrong.
+  */
+
+ static void sync_for_device(struct msm_gem_object *msm_obj)
+ {
+ 	struct device *dev = msm_obj->base.dev->dev;
+
+ 	if (get_dma_ops(dev)) {
+ 		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ 			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ 	} else {
+ 		dma_map_sg(dev, msm_obj->sgt->sgl,
+ 			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ 	}
+ }
+
+ static void sync_for_cpu(struct msm_gem_object *msm_obj)
+ {
+ 	struct device *dev = msm_obj->base.dev->dev;
+
+ 	if (get_dma_ops(dev)) {
+ 		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ 			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ 	} else {
+ 		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ 			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ 	}
+ }
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- 		dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
- 				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ 		sync_for_device(msm_obj);
}

return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- 		dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
- 				msm_obj->sgt->nents,
- 				DMA_BIDIRECTIONAL);
+ 		sync_for_cpu(msm_obj);

sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
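As the new comment block in msm_gem.c explains, cache maintenance for uncached/writecombine buffers has to go through whichever dma-mapping backend the device ended up with: behind an IOMMU, get_dma_ops() returns a non-NULL ops table and the plain dma_sync_sg_* calls are enough, while with dma-direct the patch falls back to dma_map_sg()/dma_unmap_sg() for their cache-maintenance side effect. The same dispatch, reduced to a standalone pair of helpers on a bare struct device and sg_table (the example_* names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch of the dispatch used by sync_for_device()/sync_for_cpu() above:
 * choose the sync or map/unmap path depending on whether the device has
 * IOMMU dma ops or uses dma-direct. */
static void example_sync_for_device(struct device *dev, struct sg_table *sgt)
{
	if (get_dma_ops(dev))
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents,
				       DMA_BIDIRECTIONAL);
	else
		dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void example_sync_for_cpu(struct device *dev, struct sg_table *sgt)
{
	if (get_dma_ops(dev))
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents,
				    DMA_BIDIRECTIONAL);
	else
		dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

In the driver these helpers run from get_pages() and put_pages() only for MSM_BO_WC/MSM_BO_UNCACHED objects, as the last two hunks show.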
