Skip to content

Commit

Permalink
Merge tag 'drm-msm-next-2024-03-07' of https://gitlab.freedesktop.org/drm/msm into drm-next

Browse files Browse the repository at this point in the history

Late updates for v6.9, the main part is CDM (YUV over DP) which was
waiting for drm-misc-next-2024-02-29.

DPU:
- Add support for YUV420 over DP
- Patchset to ease debugging of vblank timeouts
- Small cleanup

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvedk6OCOZ-NNtGf_pNiGuK9uvWj1MCDZLX9Jo2nHS=Zg@mail.gmail.com
  • Loading branch information
Dave Airlie committed Mar 8, 2024
2 parents b0b6739 + 4be445f commit b9511c6
Show file tree
Hide file tree
Showing 23 changed files with 736 additions and 291 deletions.
3 changes: 2 additions & 1 deletion drivers/gpu/drm/msm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,8 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_drm.o \
dp/dp_link.o \
dp/dp_panel.o \
dp/dp_audio.o
dp/dp_audio.o \
dp/dp_utils.o

msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o

Expand Down
244 changes: 204 additions & 40 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,8 @@ enum dpu_enc_rc_states {
* @base: drm_encoder base class for registration with DRM
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enabled: True if the encoder is active, protected by enc_lock
* @commit_done_timedout: True if there has been a timeout on commit after
* enabling the encoder.
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
Expand Down Expand Up @@ -172,6 +174,7 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;

bool enabled;
bool commit_done_timedout;

unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
Expand Down Expand Up @@ -218,12 +221,59 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

/**
 * dpu_encoder_get_drm_fmt - pick the DRM output format for this encoder
 * @phys_enc: physical encoder to query
 *
 * Return: DRM_FORMAT_YUV420 when the connector's display info and the
 * cached mode only allow YUV 4:2:0 output, DRM_FORMAT_RGB888 otherwise.
 */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	struct drm_display_info *info = &dpu_enc->connector->display_info;
	struct drm_display_mode *mode = &phys_enc->cached_mode;

	/* 420-only sinks/modes must be driven through the YUV420 path */
	return drm_mode_is_420_only(info, mode) ? DRM_FORMAT_YUV420 :
						  DRM_FORMAT_RGB888;
}

bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *drm_enc;
struct dpu_encoder_virt *dpu_enc;
struct msm_display_info *disp_info;
struct msm_drm_private *priv;
struct drm_display_mode *mode;

drm_enc = phys_enc->parent;
dpu_enc = to_dpu_encoder_virt(drm_enc);
disp_info = &dpu_enc->disp_info;
priv = drm_enc->dev->dev_private;
mode = &phys_enc->cached_mode;

return phys_enc->hw_intf->cap->type == INTF_DP &&
msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
}

/**
 * dpu_encoder_is_widebus_enabled - query wide-bus availability for an encoder
 * @drm_enc: encoder to query
 *
 * Asks the interface driver (DP or DSI) owning the first hw tile whether
 * wide-bus mode is available/enabled; other interface types never use it.
 *
 * Return: true if the underlying DP/DSI controller has wide bus enabled,
 * false for all other interface types.
 *
 * Fix: the block as captured interleaved stale pre-patch diff lines — a
 * duplicate declaration of @dpu_enc and an unconditional early
 * "return dpu_enc->wide_bus_en;" that left the per-interface dispatch
 * unreachable. Those leftovers are removed here, keeping only the
 * post-patch implementation.
 */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	const struct msm_display_info *disp_info;
	int index;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	index = disp_info->h_tile_instance[0];

	if (disp_info->intf_type == INTF_DP)
		return msm_dp_wide_bus_available(priv->dp[index]);
	else if (disp_info->intf_type == INTF_DSI)
		return msm_dsi_wide_bus_enabled(priv->dsi[index]);

	return false;
}

bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
Expand Down Expand Up @@ -588,6 +638,7 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_kms *dpu_kms;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct msm_display_info *disp_info;
struct dpu_global_state *global_state;
struct drm_framebuffer *fb;
struct drm_dsc_config *dsc;
Expand All @@ -603,6 +654,7 @@ static int dpu_encoder_virt_atomic_check(
DPU_DEBUG_ENC(dpu_enc, "\n");

priv = drm_enc->dev->dev_private;
disp_info = &dpu_enc->disp_info;
dpu_kms = to_dpu_kms(priv->kms);
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_global_state(crtc_state->state);
Expand All @@ -616,21 +668,24 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

/*
* Use CDM only for writeback at the moment as other interfaces cannot handle it.
* if writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
* Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
* If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
* earlier.
*/
if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
fb = conn_state->writeback_job->fb;

if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
topology.needs_cdm = true;
if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
crtc_state->mode_changed = true;
else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
crtc_state->mode_changed = true;
} else if (disp_info->intf_type == INTF_DP) {
if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
topology.needs_cdm = true;
}

if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
crtc_state->mode_changed = true;
else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
crtc_state->mode_changed = true;
/*
* Release and Allocate resources on every modeset
* Dont allocate when active is false.
Expand Down Expand Up @@ -1102,7 +1157,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,

dpu_enc->dsc_mask = dsc_mask;

if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) ||
dpu_enc->disp_info.intf_type == INTF_DP) {
struct dpu_hw_blk *hw_cdm = NULL;

dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
Expand Down Expand Up @@ -1209,26 +1265,20 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct drm_display_mode *cur_mode = NULL;
struct msm_drm_private *priv = drm_enc->dev->dev_private;
struct msm_display_info *disp_info;
int index;

dpu_enc = to_dpu_encoder_virt(drm_enc);
disp_info = &dpu_enc->disp_info;
index = disp_info->h_tile_instance[0];

dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);

if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);

mutex_lock(&dpu_enc->enc_lock);

dpu_enc->commit_done_timedout = false;

cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);

trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);

Expand Down Expand Up @@ -1282,7 +1332,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
trace_dpu_enc_disable(DRMID(drm_enc));

/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
dpu_encoder_wait_for_tx_complete(drm_enc);

dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

Expand Down Expand Up @@ -2133,6 +2183,84 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}

/**
 * dpu_encoder_helper_phys_setup_cdm - configure the CDM block for an encoder
 * @phys_enc: physical encoder owning the CDM (may be NULL; then a no-op)
 * @dpu_fmt: output format the CDM should produce
 * @output_type: CDM output target (e.g. HDMI/WB/DP), stored in the cfg
 *
 * For YUV output formats, programs the Chroma Down Module with the mode
 * dimensions, bit depth, RGB-to-YUV CSC and per-chroma-sampling downscale
 * types, then enables it bound to this encoder's pingpong. For RGB formats
 * the CDM is not needed, so it is unbound from the pingpong instead.
 */
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct dpu_format *dpu_fmt,
u32 output_type)
{
struct dpu_hw_cdm *hw_cdm;
struct dpu_hw_cdm_cfg *cdm_cfg;
struct dpu_hw_pingpong *hw_pp;
int ret;

if (!phys_enc)
return;

cdm_cfg = &phys_enc->cdm_cfg;
hw_pp = phys_enc->hw_pp;
hw_cdm = phys_enc->hw_cdm;

/* nothing to program if this encoder was not assigned a CDM block */
if (!hw_cdm)
return;

if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
/* RGB output bypasses the CDM: detach it from the pingpong */
DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
dpu_fmt->base.pixel_format);
if (hw_cdm->ops.bind_pingpong_blk)
hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);

return;
}

/* start from a clean config; cdm_cfg persists in phys_enc */
memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));

cdm_cfg->output_width = phys_enc->cached_mode.hdisplay;
cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
cdm_cfg->output_fmt = dpu_fmt;
cdm_cfg->output_type = output_type;
cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;

/* enable 10 bit logic */
/* choose horizontal/vertical chroma downscale per the sampling scheme */
switch (cdm_cfg->output_fmt->chroma_sample) {
case DPU_CHROMA_RGB:
/* full-rate chroma: no downscaling in either direction */
cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
break;
case DPU_CHROMA_H2V1:
/* 4:2:2-style: halve horizontally only */
cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
break;
case DPU_CHROMA_420:
/* 4:2:0: halve both directions */
cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
break;
case DPU_CHROMA_H1V2:
default:
/* unsupported sampling: log and fall back to no downscaling */
DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
DRMID(phys_enc->parent));
cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
break;
}

DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
DRMID(phys_enc->parent), cdm_cfg->output_width,
cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
cdm_cfg->output_type, cdm_cfg->output_bit_depth,
cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);

if (hw_cdm->ops.enable) {
/* bind the CDM to this encoder's pingpong and commit the config */
cdm_cfg->pp_id = hw_pp->idx;
ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
if (ret < 0) {
DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
DRMID(phys_enc->parent), ret);
return;
}
}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
Expand Down Expand Up @@ -2402,10 +2530,18 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
return &dpu_enc->base;
}

int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
enum msm_event_wait event)
/**
* dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
* @drm_enc: encoder pointer
*
* Wait for hardware to have flushed the current pending changes to hardware at
* a vblank or CTL_START. Physical encoders will map this differently depending
* on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
*
* Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
*/
int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
{
int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
int i, ret = 0;

Expand All @@ -2419,23 +2555,51 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

switch (event) {
case MSM_ENC_COMMIT_DONE:
fn_wait = phys->ops.wait_for_commit_done;
break;
case MSM_ENC_TX_COMPLETE:
fn_wait = phys->ops.wait_for_tx_complete;
break;
default:
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
return -EINVAL;
if (phys->ops.wait_for_commit_done) {
DPU_ATRACE_BEGIN("wait_for_commit_done");
ret = phys->ops.wait_for_commit_done(phys);
DPU_ATRACE_END("wait_for_commit_done");
if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
dpu_enc->commit_done_timedout = true;
msm_disp_snapshot_state(drm_enc->dev);
}
if (ret)
return ret;
}
}

return ret;
}

/**
* dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
* @drm_enc: encoder pointer
*
* Wait for the hardware to transfer all the pixels to the panel. Physical
* encoders will map this differently depending on the type: vid mode -> vsync_irq,
* cmd mode -> pp_done.
*
* Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
*/
int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i, ret = 0;

if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");

for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

if (fn_wait) {
DPU_ATRACE_BEGIN("wait_for_completion_event");
ret = fn_wait(phys);
DPU_ATRACE_END("wait_for_completion_event");
if (phys->ops.wait_for_tx_complete) {
DPU_ATRACE_BEGIN("wait_for_tx_complete");
ret = phys->ops.wait_for_tx_complete(phys);
DPU_ATRACE_END("wait_for_tx_complete");
if (ret)
return ret;
}
Expand Down
Loading

0 comments on commit b9511c6

Please sign in to comment.