drm/amd/display: Add polling method to handle MST reply packet
[Why]
A specific TBT4 dock doesn't send out a short HPD pulse to notify the
source that the IRQ event DOWN_REP_MSG_RDY is set. This violates the
spec and prevents the source from sending out streams to MST sinks.

[How]
To cover this misbehavior, add an additional polling method that detects
when DOWN_REP_MSG_RDY is set. The HPD-driven handling method is still
kept; we simply hook our handler up to drm mgr->cbs->poll_hpd_irq().
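
For context, the callback pays off in the DRM MST core's reply-wait path:
when a driver supplies poll_hpd_irq(), the core no longer depends solely on
the short HPD pulse to learn that a down reply landed. The following is a
simplified, illustrative sketch of that idea, not the exact upstream helper;
txmsg_done() and the timeout values are placeholders:

/*
 * Sketch: while waiting for a sideband down reply, fall back to polling
 * when the driver implements poll_hpd_irq() (illustrative only; the real
 * logic lives in drivers/gpu/drm/display/drm_dp_mst_topology.c).
 */
static int wait_tx_reply_sketch(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_sideband_msg_tx *txmsg)
{
	for (;;) {
		/* Poll-capable drivers get a short timeout... */
		long ret = wait_event_timeout(mgr->tx_waitq,
					      txmsg_done(mgr, txmsg), /* placeholder */
					      mgr->cbs->poll_hpd_irq ?
						      msecs_to_jiffies(50) :
						      msecs_to_jiffies(4000));

		if (ret > 0)
			return 0;		/* reply arrived */
		if (!mgr->cbs->poll_hpd_irq)
			return -ETIMEDOUT;	/* nothing else to try */

		/* ...and on timeout we check DOWN_REP_MSG_RDY ourselves,
		 * covering docks that never raise the short HPD pulse. */
		mgr->cbs->poll_hpd_irq(mgr);
	}
}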

Cc: Mario Limonciello <mario.limonciello@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
Reviewed-by: Jerry Zuo <jerry.zuo@amd.com>
Acked-by: Alan Liu <haoping.liu@amd.com>
Signed-off-by: Wayne Lin <wayne.lin@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Wayne Lin authored and Alex Deucher committed Jul 18, 2023
1 parent c4e532f commit bb4fa52
Showing 4 changed files with 159 additions and 86 deletions.
117 changes: 31 additions & 86 deletions drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;

if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
goto skip;
}

mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
@@ -3232,87 +3241,6 @@ static void handle_hpd_irq(void *param)

}

static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
u8 dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;

const int max_process_count = 30;
int process_count = 0;

const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
/* DPCD 0x200 - 0x201 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT;
} else {
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT_ESI;
}

dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);

while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
u8 retry;

dret = 0;

process_count++;

DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
/* handle HPD short pulse irq */
if (aconnector->mst_mgr.mst_state)
drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
esi,
ack,
&new_irq_handled);

if (new_irq_handled) {
/* ACK at DPCD to notify down stream */
for (retry = 0; retry < 3; retry++) {
ssize_t wret;

wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
dpcd_addr + 1,
ack[1]);
if (wret == 1)
break;
}

if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
}

drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
/* check if there is new irq to be handled */
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);

new_irq_handled = false;
} else {
break;
}
}

if (process_count == max_process_count)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@@ -3374,7 +3302,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg(aconnector);
bool skip = false;

/*
* DOWN_REP_MSG_RDY is also handled by polling method
* mgr->cbs->poll_hpd_irq()
*/
spin_lock(&offload_wq->offload_lock);
skip = offload_wq->is_handling_mst_msg_rdy_event;

if (!skip)
offload_wq->is_handling_mst_msg_rdy_event = true;

spin_unlock(&offload_wq->offload_lock);

if (!skip)
schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

goto out;
}

@@ -3483,11 +3427,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);

if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
aconnector;
}

if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
}
}

@@ -7296,6 +7240,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);

/*
* configure support HPD hot plug connector_>polled default value is 0
7 changes: 7 additions & 0 deletions drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
* we're handling link loss
*/
bool is_handling_link_loss;
/**
* @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
* ready event when we're already handling mst message ready event
*/
bool is_handling_mst_msg_rdy_event;
/**
* @aconnector: The aconnector that this work queue is attached to
*/
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
struct mutex handle_mst_msg_ready;

/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;

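Before moving on to the new handler, note the concurrency pattern the two
files above establish: the HPD RX interrupt path sets a flag under the
offload spinlock so at most one MST-message-ready work item is queued at a
time, and the per-connector handle_mst_msg_ready mutex serializes the actual
DPCD processing between the IRQ-driven and polling paths. A condensed,
self-contained sketch of that pattern (names shortened from the diff;
illustrative only, not driver code):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct offload_wq {
	spinlock_t lock;
	bool handling_mst_msg_rdy;	/* prevents double-queueing */
	struct work_struct work;
};

/* Interrupt path: queue at most one work item at a time. */
static void irq_path(struct offload_wq *wq)
{
	bool skip;

	spin_lock(&wq->lock);
	skip = wq->handling_mst_msg_rdy;
	if (!skip)
		wq->handling_mst_msg_rdy = true;
	spin_unlock(&wq->lock);

	if (!skip)
		schedule_work(&wq->work);
}

/* Work path: handle the message, then re-arm the flag. */
static void work_path(struct work_struct *work)
{
	struct offload_wq *wq = container_of(work, struct offload_wq, work);
	unsigned long flags;

	/*
	 * dm_handle_mst_sideband_msg_ready_event() runs here; it is itself
	 * serialized against the poll_hpd_irq() path by the
	 * handle_mst_msg_ready mutex added to amdgpu_dm_connector.
	 */

	spin_lock_irqsave(&wq->lock, flags);
	wq->handling_mst_msg_rdy = false;
	spin_unlock_irqrestore(&wq->lock, flags);
}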
110 changes: 110 additions & 0 deletions drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -620,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return connector;
}

void dm_handle_mst_sideband_msg_ready_event(
struct drm_dp_mst_topology_mgr *mgr,
enum mst_msg_ready_type msg_rdy_type)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
bool new_irq_handled = false;
int dpcd_addr;
uint8_t dpcd_bytes_to_read;
const uint8_t max_process_count = 30;
uint8_t process_count = 0;
u8 retry;
struct amdgpu_dm_connector *aconnector =
container_of(mgr, struct amdgpu_dm_connector, mst_mgr);


const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
/* DPCD 0x200 - 0x201 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT;
} else {
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT_ESI;
}

mutex_lock(&aconnector->handle_mst_msg_ready);

while (process_count < max_process_count) {
u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};

process_count++;

dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);

if (dret != dpcd_bytes_to_read) {
DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
break;
}

DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);

switch (msg_rdy_type) {
case DOWN_REP_MSG_RDY_EVENT:
/* Only handle DOWN_REP_MSG_RDY case*/
esi[1] &= DP_DOWN_REP_MSG_RDY;
break;
case UP_REQ_MSG_RDY_EVENT:
/* Only handle UP_REQ_MSG_RDY case*/
esi[1] &= DP_UP_REQ_MSG_RDY;
break;
default:
/* Handle both cases*/
esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
break;
}

if (!esi[1])
break;

/* handle MST irq */
if (aconnector->mst_mgr.mst_state)
drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
esi,
ack,
&new_irq_handled);

if (new_irq_handled) {
/* ACK at DPCD to notify down stream */
for (retry = 0; retry < 3; retry++) {
ssize_t wret;

wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
dpcd_addr + 1,
ack[1]);
if (wret == 1)
break;
}

if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
}

drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);

new_irq_handled = false;
} else {
break;
}
}

mutex_unlock(&aconnector->handle_mst_msg_ready);

if (process_count == max_process_count)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
{
dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
11 changes: 11 additions & 0 deletions drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000

enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
UP_REQ_MSG_RDY_EVENT = 2,
DOWN_OR_UP_MSG_RDY_EVENT = 3
};

struct amdgpu_display_manager;
struct amdgpu_dm_connector;

@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);

void dm_handle_mst_sideband_msg_ready_event(
struct drm_dp_mst_topology_mgr *mgr,
enum mst_msg_ready_type msg_rdy_type);

struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
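As a closing note on the new enum: dm_handle_mst_sideband_msg_ready_event()
masks byte 1 of the ESI read (DPCD 0x2003 for DP 1.2+, 0x201 otherwise) so
it only reacts to the requested event class. A hypothetical helper
expressing that mapping, using the DP_* bit definitions from
include/drm/display/drm_dp.h (msg_rdy_mask() itself is not in the patch):

#include <drm/display/drm_dp.h>

/* Hypothetical helper: the mask the handler effectively applies to esi[1]
 * for each mst_msg_ready_type. */
static u8 msg_rdy_mask(enum mst_msg_ready_type type)
{
	switch (type) {
	case DOWN_REP_MSG_RDY_EVENT:
		return DP_DOWN_REP_MSG_RDY;	/* a down reply is ready */
	case UP_REQ_MSG_RDY_EVENT:
		return DP_UP_REQ_MSG_RDY;	/* an up request is pending */
	default:	/* DOWN_OR_UP_MSG_RDY_EVENT: handle both */
		return DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY;
	}
}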
