From 735ffae0c906307034314357814391a36e637b45 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Tue, 25 Feb 2025 14:51:13 +0100 Subject: [PATCH 01/77] drm/bridge: ti-sn65dsi83: Support negative DE polarity Polarity for DE is stored in bridge state. Use this flag for setting the DE polarity in the bridge. Signed-off-by: Alexander Stein Tested-by: Herve Codina Reviewed-by: Dmitry Baryshkov Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20250225135114.801884-1-alexander.stein@ew.tq-group.com --- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 54ad462d17ef..95563aa1b450 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -561,6 +561,8 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, REG_LVDS_FMT_HS_NEG_POLARITY : 0) | (mode->flags & DRM_MODE_FLAG_NVSYNC ? REG_LVDS_FMT_VS_NEG_POLARITY : 0); + val |= bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW ? + REG_LVDS_FMT_DE_NEG_POLARITY : 0; /* Set up bits-per-pixel, 18bpp or 24bpp. */ if (lvds_format_24bpp) { From fd0141d1a8a2a26675ee88df75615c05a55044de Mon Sep 17 00:00:00 2001 From: Sugar Zhang Date: Mon, 17 Feb 2025 16:47:40 -0500 Subject: [PATCH 02/77] drm/bridge: synopsys: Add audio support for dw-hdmi-qp Register the dw-hdmi-qp bridge driver as an HDMI audio codec. The register values computation functions (for n) are based on the downstream driver, as well as the register writing functions. The driver uses the generic HDMI Codec framework in order to implement the HDMI audio support. Signed-off-by: Sugar Zhang Reviewed-by: Dmitry Baryshkov Tested-by: Quentin Schulz Reviewed-by: Robert Foss Signed-off-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250217215641.372723-2-detlev.casanova@collabora.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 489 +++++++++++++++++++ 1 file changed, 489 insertions(+) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 1d39015f1533..6166f197e37b 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -36,6 +36,88 @@ #define SCRAMB_POLL_DELAY_MS 3000 +/* + * Unless otherwise noted, entries in this table are 100% optimization. + * Values can be obtained from dw_hdmi_qp_compute_n() but that function is + * slow so we pre-compute values we expect to see. + * + * The values for TMDS 25175, 25200, 27000, 54000, 74250 and 148500 kHz are + * the recommended N values specified in the Audio chapter of the HDMI + * specification. 
+ */ +static const struct dw_hdmi_audio_tmds_n { + unsigned long tmds; + unsigned int n_32k; + unsigned int n_44k1; + unsigned int n_48k; +} common_tmds_n_table[] = { + { .tmds = 25175000, .n_32k = 4576, .n_44k1 = 7007, .n_48k = 6864, }, + { .tmds = 25200000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 27000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, }, + { .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, }, + { .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, }, + { .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, }, + { .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, }, + { .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, }, + { .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, }, + { .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, }, + { .tmds = 54000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, + { .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, }, + { .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, + { .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, }, + { .tmds = 73250000, .n_32k = 11648, .n_44k1 = 14112, .n_48k = 6144, }, + { .tmds = 74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, }, + { .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, }, + { .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, }, + { .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, }, + { .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, + { .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, }, + { .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, }, + { .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, }, + { .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, }, + { .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, }, + { .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, }, + { .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, }, + { .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, }, + { .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, }, + { .tmds = 146250000, .n_32k = 11648, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 148500000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, }, + { .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, }, + { .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, }, + + /* For 297 MHz+ HDMI spec have some other rule for setting N */ + { .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, }, + { .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240,}, + + /* End of table */ + { .tmds = 0, .n_32k = 0, .n_44k1 = 0, .n_48k = 0, }, +}; + +/* + * These are the CTS values as recommended in the Audio chapter of the HDMI + * specification. 
+ */ +static const struct dw_hdmi_audio_tmds_cts { + unsigned long tmds; + unsigned int cts_32k; + unsigned int cts_44k1; + unsigned int cts_48k; +} common_tmds_cts_table[] = { + { .tmds = 25175000, .cts_32k = 28125, .cts_44k1 = 31250, .cts_48k = 28125, }, + { .tmds = 25200000, .cts_32k = 25200, .cts_44k1 = 28000, .cts_48k = 25200, }, + { .tmds = 27000000, .cts_32k = 27000, .cts_44k1 = 30000, .cts_48k = 27000, }, + { .tmds = 54000000, .cts_32k = 54000, .cts_44k1 = 60000, .cts_48k = 54000, }, + { .tmds = 74250000, .cts_32k = 74250, .cts_44k1 = 82500, .cts_48k = 74250, }, + { .tmds = 148500000, .cts_32k = 148500, .cts_44k1 = 165000, .cts_48k = 148500, }, + + /* End of table */ + { .tmds = 0, .cts_32k = 0, .cts_44k1 = 0, .cts_48k = 0, }, +}; + struct dw_hdmi_qp_i2c { struct i2c_adapter adap; @@ -60,6 +142,8 @@ struct dw_hdmi_qp { } phy; struct regmap *regm; + + unsigned long tmds_char_rate; }; static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val, @@ -83,6 +167,346 @@ static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data, regmap_update_bits(hdmi->regm, reg, mask, data); } +static struct dw_hdmi_qp *dw_hdmi_qp_from_bridge(struct drm_bridge *bridge) +{ + return container_of(bridge, struct dw_hdmi_qp, bridge); +} + +static void dw_hdmi_qp_set_cts_n(struct dw_hdmi_qp *hdmi, unsigned int cts, + unsigned int n) +{ + /* Set N */ + dw_hdmi_qp_mod(hdmi, n, AUDPKT_ACR_N_VALUE, AUDPKT_ACR_CONTROL0); + + /* Set CTS */ + if (cts) + dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_EN, AUDPKT_ACR_CTS_OVR_EN_MSK, + AUDPKT_ACR_CONTROL1); + else + dw_hdmi_qp_mod(hdmi, 0, AUDPKT_ACR_CTS_OVR_EN_MSK, + AUDPKT_ACR_CONTROL1); + + dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_VAL(cts), AUDPKT_ACR_CTS_OVR_VAL_MSK, + AUDPKT_ACR_CONTROL1); +} + +static int dw_hdmi_qp_match_tmds_n_table(struct dw_hdmi_qp *hdmi, + unsigned long pixel_clk, + unsigned long freq) +{ + const struct dw_hdmi_audio_tmds_n *tmds_n = NULL; + int i; + + for (i = 0; common_tmds_n_table[i].tmds != 0; i++) { + if (pixel_clk == common_tmds_n_table[i].tmds) { + tmds_n = &common_tmds_n_table[i]; + break; + } + } + + if (!tmds_n) + return -ENOENT; + + switch (freq) { + case 32000: + return tmds_n->n_32k; + case 44100: + case 88200: + case 176400: + return (freq / 44100) * tmds_n->n_44k1; + case 48000: + case 96000: + case 192000: + return (freq / 48000) * tmds_n->n_48k; + default: + return -ENOENT; + } +} + +static u32 dw_hdmi_qp_audio_math_diff(unsigned int freq, unsigned int n, + unsigned int pixel_clk) +{ + u64 cts = mul_u32_u32(pixel_clk, n); + + return do_div(cts, 128 * freq); +} + +static unsigned int dw_hdmi_qp_compute_n(struct dw_hdmi_qp *hdmi, + unsigned long pixel_clk, + unsigned long freq) +{ + unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500); + unsigned int max_n = (128 * freq) / 300; + unsigned int ideal_n = (128 * freq) / 1000; + unsigned int best_n_distance = ideal_n; + unsigned int best_n = 0; + u64 best_diff = U64_MAX; + int n; + + /* If the ideal N could satisfy the audio math, then just take it */ + if (dw_hdmi_qp_audio_math_diff(freq, ideal_n, pixel_clk) == 0) + return ideal_n; + + for (n = min_n; n <= max_n; n++) { + u64 diff = dw_hdmi_qp_audio_math_diff(freq, n, pixel_clk); + + if (diff < best_diff || + (diff == best_diff && abs(n - ideal_n) < best_n_distance)) { + best_n = n; + best_diff = diff; + best_n_distance = abs(best_n - ideal_n); + } + + /* + * The best N already satisfy the audio math, and also be + * the closest value to ideal N, so just cut the loop. 
+ */ + if (best_diff == 0 && (abs(n - ideal_n) > best_n_distance)) + break; + } + + return best_n; +} + +static unsigned int dw_hdmi_qp_find_n(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk, + unsigned long sample_rate) +{ + int n = dw_hdmi_qp_match_tmds_n_table(hdmi, pixel_clk, sample_rate); + + if (n > 0) + return n; + + dev_warn(hdmi->dev, "Rate %lu missing; compute N dynamically\n", + pixel_clk); + + return dw_hdmi_qp_compute_n(hdmi, pixel_clk, sample_rate); +} + +static unsigned int dw_hdmi_qp_find_cts(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk, + unsigned long sample_rate) +{ + const struct dw_hdmi_audio_tmds_cts *tmds_cts = NULL; + int i; + + for (i = 0; common_tmds_cts_table[i].tmds != 0; i++) { + if (pixel_clk == common_tmds_cts_table[i].tmds) { + tmds_cts = &common_tmds_cts_table[i]; + break; + } + } + + if (!tmds_cts) + return 0; + + switch (sample_rate) { + case 32000: + return tmds_cts->cts_32k; + case 44100: + case 88200: + case 176400: + return tmds_cts->cts_44k1; + case 48000: + case 96000: + case 192000: + return tmds_cts->cts_48k; + default: + return -ENOENT; + } +} + +static void dw_hdmi_qp_set_audio_interface(struct dw_hdmi_qp *hdmi, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + u32 conf0 = 0; + + /* Reset the audio data path of the AVP */ + dw_hdmi_qp_write(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWINIT_P, GLOBAL_SWRESET_REQUEST); + + /* Disable AUDS, ACR, AUDI */ + dw_hdmi_qp_mod(hdmi, 0, + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDS_TX_EN | PKTSCHED_AUDI_TX_EN, + PKTSCHED_PKT_EN); + + /* Clear the audio FIFO */ + dw_hdmi_qp_write(hdmi, AUDIO_FIFO_CLR_P, AUDIO_INTERFACE_CONTROL0); + + /* Select I2S interface as the audio source */ + dw_hdmi_qp_mod(hdmi, AUD_IF_I2S, AUD_IF_SEL_MSK, AUDIO_INTERFACE_CONFIG0); + + /* Enable the active i2s lanes */ + switch (hparms->channels) { + case 7 ... 8: + conf0 |= I2S_LINES_EN(3); + fallthrough; + case 5 ... 6: + conf0 |= I2S_LINES_EN(2); + fallthrough; + case 3 ... 4: + conf0 |= I2S_LINES_EN(1); + fallthrough; + default: + conf0 |= I2S_LINES_EN(0); + break; + } + + dw_hdmi_qp_mod(hdmi, conf0, I2S_LINES_EN_MSK, AUDIO_INTERFACE_CONFIG0); + + /* + * Enable bpcuv generated internally for L-PCM, or received + * from stream for NLPCM/HBR. + */ + switch (fmt->bit_fmt) { + case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: + conf0 = (hparms->channels == 8) ? AUD_HBR : AUD_ASP; + conf0 |= I2S_BPCUV_RCV_EN; + break; + default: + conf0 = AUD_ASP | I2S_BPCUV_RCV_DIS; + break; + } + + dw_hdmi_qp_mod(hdmi, conf0, I2S_BPCUV_RCV_MSK | AUD_FORMAT_MSK, + AUDIO_INTERFACE_CONFIG0); + + /* Enable audio FIFO auto clear when overflow */ + dw_hdmi_qp_mod(hdmi, AUD_FIFO_INIT_ON_OVF_EN, AUD_FIFO_INIT_ON_OVF_MSK, + AUDIO_INTERFACE_CONFIG0); +} + +/* + * When transmitting IEC60958 linear PCM audio, these registers allow to + * configure the channel status information of all the channel status + * bits in the IEC60958 frame. For the moment this configuration is only + * used when the I2S audio interface, General Purpose Audio (GPA), + * or AHB audio DMA (AHBAUDDMA) interface is active + * (for S/PDIF interface this information comes from the stream). 
+ */ +static void dw_hdmi_qp_set_channel_status(struct dw_hdmi_qp *hdmi, + u8 *channel_status, bool ref2stream) +{ + /* + * AUDPKT_CHSTATUS_OVR0: { RSV, RSV, CS1, CS0 } + * AUDPKT_CHSTATUS_OVR1: { CS6, CS5, CS4, CS3 } + * + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * CS0: | Mode | d | c | b | a | + * CS1: | Category Code | + * CS2: | Channel Number | Source Number | + * CS3: | Clock Accuracy | Sample Freq | + * CS4: | Ori Sample Freq | Word Length | + * CS5: | | CGMS-A | + * CS6~CS23: Reserved + * + * a: use of channel status block + * b: linear PCM identification: 0 for lpcm, 1 for nlpcm + * c: copyright information + * d: additional format information + */ + + if (ref2stream) + channel_status[0] |= IEC958_AES0_NONAUDIO; + + if ((dw_hdmi_qp_read(hdmi, AUDIO_INTERFACE_CONFIG0) & GENMASK(25, 24)) == AUD_HBR) { + /* fixup cs for HBR */ + channel_status[3] = (channel_status[3] & 0xf0) | IEC958_AES3_CON_FS_768000; + channel_status[4] = (channel_status[4] & 0x0f) | IEC958_AES4_CON_ORIGFS_NOTID; + } + + dw_hdmi_qp_write(hdmi, channel_status[0] | (channel_status[1] << 8), + AUDPKT_CHSTATUS_OVR0); + + regmap_bulk_write(hdmi->regm, AUDPKT_CHSTATUS_OVR1, &channel_status[3], 1); + + if (ref2stream) + dw_hdmi_qp_mod(hdmi, 0, + AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK, + AUDPKT_CONTROL0); + else + dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN, + AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK, + AUDPKT_CONTROL0); +} + +static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long long tmds_char_rate, + unsigned int sample_rate) +{ + unsigned int n, cts; + + n = dw_hdmi_qp_find_n(hdmi, tmds_char_rate, sample_rate); + cts = dw_hdmi_qp_find_cts(hdmi, tmds_char_rate, sample_rate); + + dw_hdmi_qp_set_cts_n(hdmi, cts, n); +} + +static int dw_hdmi_qp_audio_enable(struct drm_connector *connector, + struct drm_bridge *bridge) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + + if (hdmi->tmds_char_rate) + dw_hdmi_qp_mod(hdmi, 0, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE); + + return 0; +} + +static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + bool ref2stream = false; + + if (!hdmi->tmds_char_rate) + return -ENODEV; + + if (fmt->bit_clk_provider | fmt->frame_clk_provider) { + dev_err(hdmi->dev, "unsupported clock settings\n"); + return -EINVAL; + } + + if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE) + ref2stream = true; + + dw_hdmi_qp_set_audio_interface(hdmi, fmt, hparms); + dw_hdmi_qp_set_sample_rate(hdmi, hdmi->tmds_char_rate, hparms->sample_rate); + dw_hdmi_qp_set_channel_status(hdmi, hparms->iec.status, ref2stream); + drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, &hparms->cea); + + return 0; +} + +static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi) +{ + /* + * Keep ACR, AUDI, AUDS packet always on to make SINK device + * active for better compatibility and user experience. + * + * This also fix POP sound on some SINK devices which wakeup + * from suspend to active. 
+ */ + dw_hdmi_qp_mod(hdmi, I2S_BPCUV_RCV_DIS, I2S_BPCUV_RCV_MSK, + AUDIO_INTERFACE_CONFIG0); + dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN, + AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK, + AUDPKT_CONTROL0); + + dw_hdmi_qp_mod(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, + AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE); +} + +static void dw_hdmi_qp_audio_disable(struct drm_connector *connector, + struct drm_bridge *bridge) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + + drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector); + + if (hdmi->tmds_char_rate) + dw_hdmi_qp_audio_disable_regs(hdmi); +} + static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi, unsigned char *buf, unsigned int length) { @@ -361,6 +785,51 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi, return 0; } +/* + * Static values documented in the TRM + * Different values are only used for debug purposes + */ +#define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1 +#define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa + +static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + /* + * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV } + * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 } + * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 } + * + * PB0: CheckSum + * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 | + * PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 | + * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 | + * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 | + * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 | + * PB6~PB10: Reserved + * + * AUDI_CONTENTS0 default value defined by HDMI specification, + * and shall only be changed for debug purposes. + */ + u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) | + (DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16); + + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1); + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1); + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1); + + /* Enable ACR, AUDI, AMD */ + dw_hdmi_qp_mod(hdmi, + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, + PKTSCHED_PKT_EN); + + /* Enable AUDS */ + dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN); + + return 0; +} + static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { @@ -381,6 +850,7 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", __func__, conn_state->hdmi.tmds_char_rate); op_mode = 0; + hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; } else { dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__); op_mode = OPMODE_DVI; @@ -399,6 +869,8 @@ static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge, { struct dw_hdmi_qp *hdmi = bridge->driver_private; + hdmi->tmds_char_rate = 0; + hdmi->phy.ops->disable(hdmi, hdmi->phy.data); } @@ -454,6 +926,13 @@ static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge, dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); break; + case HDMI_INFOFRAME_TYPE_AUDIO: + dw_hdmi_qp_mod(hdmi, 0, + PKTSCHED_ACR_TX_EN | + PKTSCHED_AUDS_TX_EN | + PKTSCHED_AUDI_TX_EN, + PKTSCHED_PKT_EN); + break; default: dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); } @@ -476,6 +955,9 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge 
*bridge, case HDMI_INFOFRAME_TYPE_DRM: return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); + case HDMI_INFOFRAME_TYPE_AUDIO: + return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len); + default: dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); return 0; @@ -493,6 +975,9 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid, .hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe, .hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe, + .hdmi_audio_startup = dw_hdmi_qp_audio_enable, + .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable, + .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare, }; static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) @@ -602,6 +1087,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, if (IS_ERR(hdmi->bridge.ddc)) return ERR_CAST(hdmi->bridge.ddc); + hdmi->bridge.hdmi_audio_max_i2s_playback_channels = 8; + hdmi->bridge.hdmi_audio_dev = dev; + hdmi->bridge.hdmi_audio_dai_port = 1; + ret = devm_drm_bridge_add(dev, &hdmi->bridge); if (ret) return ERR_PTR(ret); From 2bf9f610494d75cfaf3c8a0cef93135ce83f7254 Mon Sep 17 00:00:00 2001 From: Damon Ding Date: Mon, 24 Feb 2025 16:13:11 +0800 Subject: [PATCH 03/77] drm/rockchip: analogix_dp: Use formalized struct definition for grf field The formalized struct definition will makes grf field operations more concise and easier to extend. Signed-off-by: Damon Ding Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250224081325.96724-2-damon.ding@rock-chips.com --- .../gpu/drm/rockchip/analogix_dp-rockchip.c | 77 +++++++++++-------- 1 file changed, 45 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 0844175c37c5..0d93df6b5144 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -32,26 +32,29 @@ #include "rockchip_drm_drv.h" -#define RK3288_GRF_SOC_CON6 0x25c -#define RK3288_EDP_LCDC_SEL BIT(5) -#define RK3399_GRF_SOC_CON20 0x6250 -#define RK3399_EDP_LCDC_SEL BIT(5) - -#define HIWORD_UPDATE(val, mask) (val | (mask) << 16) - #define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100 +#define GRF_REG_FIELD(_reg, _lsb, _msb) { \ + .reg = _reg, \ + .lsb = _lsb, \ + .msb = _msb, \ + .valid = true, \ + } + +struct rockchip_grf_reg_field { + u32 reg; + u32 lsb; + u32 msb; + bool valid; +}; + /** * struct rockchip_dp_chip_data - splite the grf setting of kind of chips - * @lcdsel_grf_reg: grf register offset of lcdc select - * @lcdsel_big: reg value of selecting vop big for eDP - * @lcdsel_lit: reg value of selecting vop little for eDP + * @lcdc_sel: grf register field of lcdc_sel * @chip_type: specific chip type */ struct rockchip_dp_chip_data { - u32 lcdsel_grf_reg; - u32 lcdsel_big; - u32 lcdsel_lit; + const struct rockchip_grf_reg_field lcdc_sel; u32 chip_type; }; @@ -84,6 +87,26 @@ static struct rockchip_dp_device *pdata_encoder_to_dp(struct analogix_dp_plat_da return container_of(plat_data, struct rockchip_dp_device, plat_data); } +static int rockchip_grf_write(struct regmap *grf, u32 reg, u32 mask, u32 val) +{ + return regmap_write(grf, reg, (mask << 16) | (val & mask)); +} + +static int rockchip_grf_field_write(struct regmap *grf, + const struct rockchip_grf_reg_field *field, + u32 val) +{ + u32 mask; + + if (!field->valid) + return 0; + + mask = GENMASK(field->msb, field->lsb); + val <<= field->lsb; + + return 
rockchip_grf_write(grf, field->reg, mask, val); +} + static int rockchip_dp_pre_init(struct rockchip_dp_device *dp) { reset_control_assert(dp->rst); @@ -181,7 +204,6 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int ret; - u32 val; crtc = rockchip_dp_drm_get_new_crtc(encoder, state); if (!crtc) @@ -192,24 +214,19 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder, if (old_crtc_state && old_crtc_state->self_refresh_active) return; - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); - if (ret < 0) - return; - - if (ret) - val = dp->data->lcdsel_lit; - else - val = dp->data->lcdsel_big; - - DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG"); - ret = clk_prepare_enable(dp->grfclk); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret); return; } - ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val); + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); + if (ret < 0) + return; + + DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG"); + + ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret); if (ret != 0) DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); @@ -448,16 +465,12 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rockchip_dp_pm_ops, rockchip_dp_suspend, rockchip_dp_resume, NULL); static const struct rockchip_dp_chip_data rk3399_edp = { - .lcdsel_grf_reg = RK3399_GRF_SOC_CON20, - .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL), - .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL), + .lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5), .chip_type = RK3399_EDP, }; static const struct rockchip_dp_chip_data rk3288_dp = { - .lcdsel_grf_reg = RK3288_GRF_SOC_CON6, - .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL), - .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL), + .lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5), .chip_type = RK3288_DP, }; From 718b3bb9c0ab87bc90914799e6999bf4b1ecc67b Mon Sep 17 00:00:00 2001 From: Damon Ding Date: Mon, 24 Feb 2025 16:13:12 +0800 Subject: [PATCH 04/77] drm/rockchip: analogix_dp: Expand device data to support multiple edp display There are two main modifications: one is expanding struct rockchip_dp_chip_data to an array, and the other is adding &rockchip_dp_chip_data.reg to separate different edp devices. 
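To illustrate where the array layout is meant to go, here is a hedged sketch of how a SoC with two eDP instances could be described once per-instance matching by register base is in place; the second entry's base address and GRF bit are illustrative assumptions, not values taken from this series, and the probe loop in the hunk below selects the entry whose .reg equals the device's MMIO resource start:

static const struct rockchip_dp_chip_data example_soc_edp[] = {
	{
		.lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5),
		.chip_type = RK3399_EDP,
		.reg = 0xff970000,		/* first eDP instance */
	},
	{
		/* hypothetical second instance: address and GRF field assumed */
		.lcdc_sel = GRF_REG_FIELD(0x6250, 6, 6),
		.chip_type = RK3399_EDP,
		.reg = 0xff980000,
	},
	{ /* sentinel: .reg == 0 terminates the probe-time lookup */ }
};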
Signed-off-by: Damon Ding Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250224081325.96724-3-damon.ding@rock-chips.com --- .../gpu/drm/rockchip/analogix_dp-rockchip.c | 41 +++++++++++++++---- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 0d93df6b5144..a8265a1bf9ff 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -52,10 +52,12 @@ struct rockchip_grf_reg_field { * struct rockchip_dp_chip_data - splite the grf setting of kind of chips * @lcdc_sel: grf register field of lcdc_sel * @chip_type: specific chip type + * @reg: register base address */ struct rockchip_dp_chip_data { const struct rockchip_grf_reg_field lcdc_sel; u32 chip_type; + u32 reg; }; struct rockchip_dp_device { @@ -396,6 +398,8 @@ static int rockchip_dp_probe(struct platform_device *pdev) const struct rockchip_dp_chip_data *dp_data; struct drm_panel *panel = NULL; struct rockchip_dp_device *dp; + struct resource *res; + int i; int ret; dp_data = of_device_get_match_data(dev); @@ -410,9 +414,24 @@ static int rockchip_dp_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + i = 0; + while (dp_data[i].reg) { + if (dp_data[i].reg == res->start) { + dp->data = &dp_data[i]; + break; + } + + i++; + } + + if (!dp->data) + return dev_err_probe(dev, -EINVAL, "no chip-data for %s node\n", + dev->of_node->name); + dp->dev = dev; dp->adp = ERR_PTR(-ENODEV); - dp->data = dp_data; dp->plat_data.panel = panel; dp->plat_data.dev_type = dp->data->chip_type; dp->plat_data.power_on = rockchip_dp_poweron; @@ -464,14 +483,22 @@ static int rockchip_dp_resume(struct device *dev) static DEFINE_RUNTIME_DEV_PM_OPS(rockchip_dp_pm_ops, rockchip_dp_suspend, rockchip_dp_resume, NULL); -static const struct rockchip_dp_chip_data rk3399_edp = { - .lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5), - .chip_type = RK3399_EDP, +static const struct rockchip_dp_chip_data rk3399_edp[] = { + { + .lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5), + .chip_type = RK3399_EDP, + .reg = 0xff970000, + }, + { /* sentinel */ } }; -static const struct rockchip_dp_chip_data rk3288_dp = { - .lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5), - .chip_type = RK3288_DP, +static const struct rockchip_dp_chip_data rk3288_dp[] = { + { + .lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5), + .chip_type = RK3288_DP, + .reg = 0xff970000, + }, + { /* sentinel */ } }; static const struct of_device_id rockchip_dp_dt_ids[] = { From f8dd7fc9ba88bc4a6ea85269287a51fb756440e2 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Sun, 23 Feb 2025 11:31:37 +0200 Subject: [PATCH 05/77] drm/rockchip: vop2: Improve display modes handling on RK3588 HDMI1 The RK3588 specific implementation is currently quite limited in terms of handling the full range of display modes supported by the connected screens, e.g. 2560x1440@75Hz, 2048x1152@60Hz, 1024x768@60Hz are just a few of them. Additionally, it doesn't cope well with non-integer refresh rates like 59.94, 29.97, 23.98, etc. Make use of HDMI1 PHY PLL as a more accurate DCLK source to handle all display modes up to 4K@60Hz. 
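The underlying mechanism is a plain clk parent switch: remember the CRU-provided parent the first time, re-parent the video port's DCLK to the HDMI PHY PLL while the pixel clock is within range, and put the original parent back on teardown. A minimal sketch of that pattern follows, with helper names invented here for illustration rather than taken from the driver:

static int example_switch_dclk_to_phy_pll(struct clk *dclk, struct clk *phy_pll,
					  struct clk **saved_parent)
{
	/* Remember the CRU parent so it can be restored later. */
	if (!*saved_parent)
		*saved_parent = clk_get_parent(dclk);

	/* Use the HDMI PHY PLL as the more accurate DCLK source. */
	return clk_set_parent(dclk, phy_pll);
}

static void example_restore_dclk_parent(struct clk *dclk, struct clk **saved_parent)
{
	if (*saved_parent) {
		clk_set_parent(dclk, *saved_parent);
		*saved_parent = NULL;
	}
}

The hunk below applies the same switch for the ROCKCHIP_VOP2_EP_HDMI1 endpoint that the existing code already performs for HDMI0.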
Signed-off-by: Cristian Ciocaltea Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250223-vop2-hdmi1-disp-modes-v2-1-f4cec5e06fbe@collabora.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 25 +++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index afc946ead870..14e039e9143a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -216,6 +216,7 @@ struct vop2 { struct clk *aclk; struct clk *pclk; struct clk *pll_hdmiphy0; + struct clk *pll_hdmiphy1; /* optional internal rgb encoder */ struct rockchip_rgb *rgb; @@ -2270,11 +2271,14 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, * Switch to HDMI PHY PLL as DCLK source for display modes up * to 4K@60Hz, if available, otherwise keep using the system CRU. */ - if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) { + if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) { drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { + if (!vop2->pll_hdmiphy0) + break; + if (!vp->dclk_src) vp->dclk_src = clk_get_parent(vp->dclk); @@ -2284,6 +2288,20 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, "Could not switch to HDMI0 PHY PLL: %d\n", ret); break; } + + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { + if (!vop2->pll_hdmiphy1) + break; + + if (!vp->dclk_src) + vp->dclk_src = clk_get_parent(vp->dclk); + + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); + if (ret < 0) + drm_warn(vop2->drm, + "Could not switch to HDMI1 PHY PLL: %d\n", ret); + break; + } } } @@ -3733,6 +3751,11 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(vop2->pll_hdmiphy0); } + vop2->pll_hdmiphy1 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy1"); + if (IS_ERR(vop2->pll_hdmiphy1)) + return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy1), + "failed to get pll_hdmiphy1\n"); + vop2->irq = platform_get_irq(pdev, 0); if (vop2->irq < 0) { drm_err(vop2->drm, "cannot find irq for vop2\n"); From b06d1ef3355571383cdb463cf0195b7a02efdfbf Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Sun, 23 Feb 2025 11:31:38 +0200 Subject: [PATCH 06/77] drm/rockchip: vop2: Consistently use dev_err_probe() Replace drm_err() calls in vop2_bind() and vop2_create_crtcs() with dev_err_probe(), to simplify error handling and improve consistency. Additionally, ensure the already existing dev_err_probe() invocations pass drm->dev instead of dev as their first argument, so that we get the actual reason in case of -EPROBE_DEFER errors: platform display-subsystem: deferred probe pending: (reason unknown) vs. platform display-subsystem: deferred probe pending: rockchip-drm: While at it, add the missing '\n' to some of the message strings. 
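The conversion follows the usual dev_err_probe() idiom: the error is reported (with -EPROBE_DEFER recorded as a deferral reason rather than printed as an error) and returned in a single statement. A condensed sketch of the before/after shape, using the hclk lookup from the hunk below as the example:

/* before: separate log + return, no deferral reason recorded */
vop2->hclk = devm_clk_get(vop2->dev, "hclk");
if (IS_ERR(vop2->hclk)) {
	drm_err(vop2->drm, "failed to get hclk source\n");
	return PTR_ERR(vop2->hclk);
}

/* after: one statement, reason visible in devices_deferred on -EPROBE_DEFER */
vop2->hclk = devm_clk_get(vop2->dev, "hclk");
if (IS_ERR(vop2->hclk))
	return dev_err_probe(drm->dev, PTR_ERR(vop2->hclk),
			     "failed to get hclk source\n");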
Signed-off-by: Cristian Ciocaltea Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250223-vop2-hdmi1-disp-modes-v2-2-f4cec5e06fbe@collabora.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 83 +++++++++----------- 1 file changed, 37 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 14e039e9143a..7b893b4447b6 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -3273,10 +3273,9 @@ static int vop2_create_crtcs(struct vop2 *vop2) snprintf(dclk_name, sizeof(dclk_name), "dclk_vp%d", vp->id); vp->dclk = devm_clk_get(vop2->dev, dclk_name); - if (IS_ERR(vp->dclk)) { - drm_err(vop2->drm, "failed to get %s\n", dclk_name); - return PTR_ERR(vp->dclk); - } + if (IS_ERR(vp->dclk)) + return dev_err_probe(drm->dev, PTR_ERR(vp->dclk), + "failed to get %s\n", dclk_name); np = of_graph_get_remote_node(dev->of_node, i, -1); if (!np) { @@ -3286,11 +3285,9 @@ static int vop2_create_crtcs(struct vop2 *vop2) of_node_put(np); port = of_graph_get_port_by_id(dev->of_node, i); - if (!port) { - drm_err(vop2->drm, "no port node found for video_port%d\n", i); - return -ENOENT; - } - + if (!port) + return dev_err_probe(drm->dev, -ENOENT, + "no port node found for video_port%d\n", i); vp->crtc.port = port; nvps++; } @@ -3330,11 +3327,9 @@ static int vop2_create_crtcs(struct vop2 *vop2) possible_crtcs = (1 << nvps) - 1; ret = vop2_plane_init(vop2, win, possible_crtcs); - if (ret) { - drm_err(vop2->drm, "failed to init plane %s: %d\n", - win->data->name, ret); - return ret; - } + if (ret) + return dev_err_probe(drm->dev, ret, "failed to init plane %s\n", + win->data->name); } for (i = 0; i < vop2_data->nr_vps; i++) { @@ -3348,10 +3343,9 @@ static int vop2_create_crtcs(struct vop2 *vop2) ret = drm_crtc_init_with_planes(drm, &vp->crtc, plane, NULL, &vop2_crtc_funcs, "video_port%d", vp->id); - if (ret) { - drm_err(vop2->drm, "crtc init for video_port%d failed\n", i); - return ret; - } + if (ret) + return dev_err_probe(drm->dev, ret, + "crtc init for video_port%d failed\n", i); drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs); if (vop2->lut_regs) { @@ -3678,10 +3672,9 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) dev_set_drvdata(dev, vop2); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vop"); - if (!res) { - drm_err(vop2->drm, "failed to get vop2 register byname\n"); - return -EINVAL; - } + if (!res) + return dev_err_probe(drm->dev, -EINVAL, + "failed to get vop2 register byname\n"); vop2->res = res; vop2->regs = devm_ioremap_resource(dev, res); @@ -3706,50 +3699,50 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_GRF) { vop2->sys_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); if (IS_ERR(vop2->sys_grf)) - return dev_err_probe(dev, PTR_ERR(vop2->sys_grf), "cannot get sys_grf"); + return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_grf), + "cannot get sys_grf\n"); } if (vop2_data->feature & VOP2_FEATURE_HAS_VOP_GRF) { vop2->vop_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vop-grf"); if (IS_ERR(vop2->vop_grf)) - return dev_err_probe(dev, PTR_ERR(vop2->vop_grf), "cannot get vop_grf"); + return dev_err_probe(drm->dev, PTR_ERR(vop2->vop_grf), + "cannot get vop_grf\n"); } if (vop2_data->feature & VOP2_FEATURE_HAS_VO1_GRF) { vop2->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node, 
"rockchip,vo1-grf"); if (IS_ERR(vop2->vo1_grf)) - return dev_err_probe(dev, PTR_ERR(vop2->vo1_grf), "cannot get vo1_grf"); + return dev_err_probe(drm->dev, PTR_ERR(vop2->vo1_grf), + "cannot get vo1_grf\n"); } if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_PMU) { vop2->sys_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,pmu"); if (IS_ERR(vop2->sys_pmu)) - return dev_err_probe(dev, PTR_ERR(vop2->sys_pmu), "cannot get sys_pmu"); + return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_pmu), + "cannot get sys_pmu\n"); } vop2->hclk = devm_clk_get(vop2->dev, "hclk"); - if (IS_ERR(vop2->hclk)) { - drm_err(vop2->drm, "failed to get hclk source\n"); - return PTR_ERR(vop2->hclk); - } + if (IS_ERR(vop2->hclk)) + return dev_err_probe(drm->dev, PTR_ERR(vop2->hclk), + "failed to get hclk source\n"); vop2->aclk = devm_clk_get(vop2->dev, "aclk"); - if (IS_ERR(vop2->aclk)) { - drm_err(vop2->drm, "failed to get aclk source\n"); - return PTR_ERR(vop2->aclk); - } + if (IS_ERR(vop2->aclk)) + return dev_err_probe(drm->dev, PTR_ERR(vop2->aclk), + "failed to get aclk source\n"); vop2->pclk = devm_clk_get_optional(vop2->dev, "pclk_vop"); - if (IS_ERR(vop2->pclk)) { - drm_err(vop2->drm, "failed to get pclk source\n"); - return PTR_ERR(vop2->pclk); - } + if (IS_ERR(vop2->pclk)) + return dev_err_probe(drm->dev, PTR_ERR(vop2->pclk), + "failed to get pclk source\n"); vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0"); - if (IS_ERR(vop2->pll_hdmiphy0)) { - drm_err(vop2->drm, "failed to get pll_hdmiphy0\n"); - return PTR_ERR(vop2->pll_hdmiphy0); - } + if (IS_ERR(vop2->pll_hdmiphy0)) + return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy0), + "failed to get pll_hdmiphy0\n"); vop2->pll_hdmiphy1 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy1"); if (IS_ERR(vop2->pll_hdmiphy1)) @@ -3757,10 +3750,8 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) "failed to get pll_hdmiphy1\n"); vop2->irq = platform_get_irq(pdev, 0); - if (vop2->irq < 0) { - drm_err(vop2->drm, "cannot find irq for vop2\n"); - return vop2->irq; - } + if (vop2->irq < 0) + return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n"); mutex_init(&vop2->vop2_lock); From a6ba2dad0aa4f623ab0def8b6e6888ac00639055 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Fri, 21 Feb 2025 00:41:40 +0100 Subject: [PATCH 07/77] drivers: base: component: add function to query the bound status The component helpers already expose the bound status in debugfs, but at times it might be necessary to also check that state in the kernel and act differently depending on the result. For example the shutdown handler of a drm-driver might need to stop a whole output pipeline if the drm device is up and running, but may run into problems if that drm-device has never been set up before, for example because the binding deferred. So add a little helper that returns the bound status for a componet device. 
Acked-by: Greg Kroah-Hartman Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250220234141.2788785-2-heiko@sntech.de --- drivers/base/component.c | 14 ++++++++++++++ include/linux/component.h | 4 +++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/base/component.c b/drivers/base/component.c index 741497324d78..d63e01f4851d 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -569,6 +569,20 @@ void component_master_del(struct device *parent, } EXPORT_SYMBOL_GPL(component_master_del); +bool component_master_is_bound(struct device *parent, + const struct component_master_ops *ops) +{ + struct aggregate_device *adev; + + guard(mutex)(&component_mutex); + adev = __aggregate_find(parent, ops); + if (!adev) + return 0; + + return adev->bound; +} +EXPORT_SYMBOL_GPL(component_master_is_bound); + static void component_unbind(struct component *component, struct aggregate_device *adev, void *data) { diff --git a/include/linux/component.h b/include/linux/component.h index df4aa75c9e7c..9d6c66401280 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -3,7 +3,7 @@ #define COMPONENT_H #include - +#include struct device; @@ -90,6 +90,8 @@ int component_compare_dev_name(struct device *dev, void *data); void component_master_del(struct device *, const struct component_master_ops *); +bool component_master_is_bound(struct device *parent, + const struct component_master_ops *ops); struct component_match; From 4444e4d789d64f053435713e5984f0ef31a7633b Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Fri, 21 Feb 2025 00:41:41 +0100 Subject: [PATCH 08/77] drm/rockchip: Fix shutdown when no drm-device is set up When the drm-driver probes, it mainly creates the component device, where all the sub-drivers (vops, hdmi, etc) hook into. This will cause the shutdown handler to get called on shutdown, even though the drm-device might not have been set up, or the component bind might have failed. So use the new component helper to check whether the drm-device is up and only then call the drm-atomic helper to release all the drm magic. This prevents failures when the drm-device is never set, or has been freed up already for example by a probe-defer during the component bind. 
Reviewed-by: Nicolas Frattaroli Tested-by: Nicolas Frattaroli Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250220234141.2788785-3-heiko@sntech.de --- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 439edc165ff6..180fad5d49ad 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -484,9 +484,11 @@ static void rockchip_drm_platform_remove(struct platform_device *pdev) static void rockchip_drm_platform_shutdown(struct platform_device *pdev) { - struct drm_device *drm = platform_get_drvdata(pdev); + if (component_master_is_bound(&pdev->dev, &rockchip_drm_ops)) { + struct drm_device *drm = platform_get_drvdata(pdev); - drm_atomic_helper_shutdown(drm); + drm_atomic_helper_shutdown(drm); + } } static const struct of_device_id rockchip_drm_dt_ids[] = { From cd740b873f8f6f5f4558723241ba9c09eb36d0ba Mon Sep 17 00:00:00 2001 From: Lizhi Hou Date: Wed, 26 Feb 2025 08:18:10 -0800 Subject: [PATCH 09/77] accel/amdxdna: Check interrupt register before mailbox_rx_worker exits There is a timeout failure been found during stress tests. If the firmware generates a mailbox response right after driver clears the mailbox channel interrupt register, the hardware will not generate an interrupt for the response. This causes the unexpected mailbox command timeout. To handle this failure, driver checks the interrupt register before exiting mailbox_rx_worker(). If there is a new response, driver goes back to process it. Signed-off-by: Lizhi Hou Reviewed-by: Jacek Lawrynowicz Signed-off-by: Mario Limonciello Link: https://patchwork.freedesktop.org/patch/msgid/20250226161810.4188334-1-lizhi.hou@amd.com --- drivers/accel/amdxdna/amdxdna_mailbox.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c index aa07e67400ef..da1ac89bb78f 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox.c +++ b/drivers/accel/amdxdna/amdxdna_mailbox.c @@ -349,8 +349,6 @@ static irqreturn_t mailbox_irq_handler(int irq, void *p) trace_mbox_irq_handle(MAILBOX_NAME, irq); /* Schedule a rx_work to call the callback functions */ queue_work(mb_chann->work_q, &mb_chann->rx_work); - /* Clear IOHUB register */ - mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); return IRQ_HANDLED; } @@ -367,6 +365,9 @@ static void mailbox_rx_worker(struct work_struct *rx_work) return; } +again: + mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); + while (1) { /* * If return is 0, keep consuming next message, until there is @@ -380,10 +381,18 @@ static void mailbox_rx_worker(struct work_struct *rx_work) if (unlikely(ret)) { MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret); WRITE_ONCE(mb_chann->bad_state, true); - disable_irq(mb_chann->msix_irq); - break; + return; } } + + /* + * The hardware will not generate interrupt if firmware creates a new + * response right after driver clears interrupt register. Check + * the interrupt register to make sure there is not any new response + * before exiting. + */ + if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr)) + goto again; } int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann, From 77f183d151bda791ac26ee479b6542ccd35acdac Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Wed, 12 Feb 2025 19:31:26 +1030 Subject: [PATCH 10/77] drm/nouveau: Avoid multiple -Wflex-array-member-not-at-end warnings -Wflex-array-member-not-at-end was introduced in GCC-14, and we are getting ready to enable it, globally. So, in order to avoid ending up with flexible-array members in the middle of other structs, we use the `struct_group_tagged()` helper to separate the flexible arrays from the rest of the members in the flexible structures. We then use the newly created tagged `struct nvif_ioctl_v0_hdr` and `struct nvif_ioctl_mthd_v0_hdr` to replace the type of the objects causing trouble in multiple structures. We also want to ensure that when new members need to be added to the flexible structures, they are always included within the newly created tagged structs. For this, we use `static_assert()`. This ensures that the memory layout for both the flexible structure and the new tagged struct is the same after any changes. So, with these changes, fix the following warnings: drivers/gpu/drm/nouveau/nvif/object.c:60:38: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nvif/object.c:233:38: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nvif/object.c:214:38: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nvif/object.c:152:38: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nvif/object.c:138:38: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nvif/object.c:104:38: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nouveau_svm.c:83:35: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/gpu/drm/nouveau/nouveau_svm.c:82:30: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] Signed-off-by: Gustavo A. R. Silva Acked-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/Z6xjZhHxRp4Bu_SX@kspp --- drivers/gpu/drm/nouveau/include/nvif/ioctl.h | 32 +++++++++++++------- drivers/gpu/drm/nouveau/nouveau_svm.c | 4 +-- drivers/gpu/drm/nouveau/nvif/object.c | 12 ++++---- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h index e825c8a1d9ca..00015412cb3e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h +++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h @@ -3,25 +3,30 @@ #define __NVIF_IOCTL_H__ struct nvif_ioctl_v0 { - __u8 version; + /* New members MUST be added within the struct_group() macro below. 
*/ + struct_group_tagged(nvif_ioctl_v0_hdr, __hdr, + __u8 version; #define NVIF_IOCTL_V0_SCLASS 0x01 #define NVIF_IOCTL_V0_NEW 0x02 #define NVIF_IOCTL_V0_DEL 0x03 #define NVIF_IOCTL_V0_MTHD 0x04 #define NVIF_IOCTL_V0_MAP 0x07 #define NVIF_IOCTL_V0_UNMAP 0x08 - __u8 type; - __u8 pad02[4]; + __u8 type; + __u8 pad02[4]; #define NVIF_IOCTL_V0_OWNER_NVIF 0x00 #define NVIF_IOCTL_V0_OWNER_ANY 0xff - __u8 owner; + __u8 owner; #define NVIF_IOCTL_V0_ROUTE_NVIF 0x00 #define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff - __u8 route; - __u64 token; - __u64 object; + __u8 route; + __u64 token; + __u64 object; + ); __u8 data[]; /* ioctl data (below) */ }; +static_assert(offsetof(struct nvif_ioctl_v0, data) == sizeof(struct nvif_ioctl_v0_hdr), + "struct member likely outside of struct_group()"); struct nvif_ioctl_sclass_v0 { /* nvif_ioctl ... */ @@ -51,12 +56,17 @@ struct nvif_ioctl_del { }; struct nvif_ioctl_mthd_v0 { - /* nvif_ioctl ... */ - __u8 version; - __u8 method; - __u8 pad02[6]; + /* New members MUST be added within the struct_group() macro below. */ + struct_group_tagged(nvif_ioctl_mthd_v0_hdr, __hdr, + /* nvif_ioctl ... */ + __u8 version; + __u8 method; + __u8 pad02[6]; + ); __u8 data[]; /* method data (class.h) */ }; +static_assert(offsetof(struct nvif_ioctl_mthd_v0, data) == sizeof(struct nvif_ioctl_mthd_v0_hdr), + "struct member likely outside of struct_group()"); struct nvif_ioctl_map_v0 { /* nvif_ioctl ... */ diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 8ea98f06d39a..825c867eba7c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -79,8 +79,8 @@ struct nouveau_svm { #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a) struct nouveau_pfnmap_args { - struct nvif_ioctl_v0 i; - struct nvif_ioctl_mthd_v0 m; + struct nvif_ioctl_v0_hdr i; + struct nvif_ioctl_mthd_v0_hdr m; struct nvif_vmm_pfnmap_v0 p; }; diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c index 0b87278ac0f8..70af63d70976 100644 --- a/drivers/gpu/drm/nouveau/nvif/object.c +++ b/drivers/gpu/drm/nouveau/nvif/object.c @@ -57,7 +57,7 @@ int nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass) { struct { - struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_v0_hdr ioctl; struct nvif_ioctl_sclass_v0 sclass; } *args = NULL; int ret, cnt = 0, i; @@ -101,7 +101,7 @@ int nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size) { struct { - struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_v0_hdr ioctl; struct nvif_ioctl_mthd_v0 mthd; } *args; u32 args_size; @@ -135,7 +135,7 @@ void nvif_object_unmap_handle(struct nvif_object *object) { struct { - struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_v0_hdr ioctl; struct nvif_ioctl_unmap unmap; } args = { .ioctl.type = NVIF_IOCTL_V0_UNMAP, @@ -149,7 +149,7 @@ nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc, u64 *handle, u64 *length) { struct { - struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_v0_hdr ioctl; struct nvif_ioctl_map_v0 map; } *args; u32 argn = sizeof(*args) + argc; @@ -211,7 +211,7 @@ void nvif_object_dtor(struct nvif_object *object) { struct { - struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_v0_hdr ioctl; struct nvif_ioctl_del del; } args = { .ioctl.type = NVIF_IOCTL_V0_DEL, @@ -230,7 +230,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle, s32 oclass, void *data, u32 size, struct nvif_object *object) { struct { - struct nvif_ioctl_v0 ioctl; + struct 
nvif_ioctl_v0_hdr ioctl; struct nvif_ioctl_new_v0 new; } *args; int ret = 0; From 7a4c93cadd58728cd2d2d1c63093860e4af7b160 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sun, 15 Dec 2024 22:00:14 +0000 Subject: [PATCH 11/77] drm/vboxvideo: Remove unused hgsmi_cursor_position hgsmi_cursor_position() has been unused since 2018's commit 35f3288c453e ("staging: vboxvideo: Atomic phase 1: convert cursor to universal plane") Remove it. Signed-off-by: Dr. David Alan Gilbert Reviewed-by: Hans de Goede Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20241215220014.452537-1-linux@treblig.org --- drivers/gpu/drm/vboxvideo/hgsmi_base.c | 37 --------------------- drivers/gpu/drm/vboxvideo/vboxvideo_guest.h | 2 -- 2 files changed, 39 deletions(-) diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c index 87dccaecc3e5..db994aeaa0f9 100644 --- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c +++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c @@ -181,40 +181,3 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, return rc; } - -/** - * hgsmi_cursor_position - Report the guest cursor position. The host may - * wish to use this information to re-position its - * own cursor (though this is currently unlikely). - * The current host cursor position is returned. - * Return: 0 or negative errno value. - * @ctx: The context containing the heap used. - * @report_position: Are we reporting a position? - * @x: Guest cursor X position. - * @y: Guest cursor Y position. - * @x_host: Host cursor X position is stored here. Optional. - * @y_host: Host cursor Y position is stored here. Optional. - */ -int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, - u32 x, u32 y, u32 *x_host, u32 *y_host) -{ - struct vbva_cursor_position *p; - - p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, - VBVA_CURSOR_POSITION); - if (!p) - return -ENOMEM; - - p->report_position = report_position; - p->x = x; - p->y = y; - - hgsmi_buffer_submit(ctx, p); - - *x_host = p->x; - *y_host = p->y; - - hgsmi_buffer_free(ctx, p); - - return 0; -} diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h index 55fcee3a6470..643c4448bdcb 100644 --- a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h +++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h @@ -34,8 +34,6 @@ int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret); int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, u32 hot_x, u32 hot_y, u32 width, u32 height, u8 *pixels, u32 len); -int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, - u32 x, u32 y, u32 *x_host, u32 *y_host); bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, struct vbva_buffer *vbva, s32 screen); From 3cfae15302b398fd2d9884dcea0a1d2188e6513d Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sun, 15 Dec 2024 21:47:50 +0000 Subject: [PATCH 12/77] gpu: host1x: Remove unused host1x_debug_dump_syncpts host1x_debug_dump_syncpts() has been unused since commit f0fb260a0cdb ("gpu: host1x: Implement syncpoint wait using DMA fences") Remove it. Signed-off-by: Dr. 
David Alan Gilbert Acked-by: Mikko Perttunen Signed-off-by: Thierry Reding Link: https://patchwork.freedesktop.org/patch/msgid/20241215214750.448209-1-linux@treblig.org --- drivers/gpu/host1x/debug.c | 9 --------- drivers/gpu/host1x/debug.h | 1 - 2 files changed, 10 deletions(-) diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index a18cc8d8caf5..6433c00d5d7e 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -216,12 +216,3 @@ void host1x_debug_dump(struct host1x *host1x) show_all(host1x, &o, true); } - -void host1x_debug_dump_syncpts(struct host1x *host1x) -{ - struct output o = { - .fn = write_to_printk - }; - - show_syncpts(host1x, &o, false); -} diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h index 62bd8a091fa7..c43c61d876a9 100644 --- a/drivers/gpu/host1x/debug.h +++ b/drivers/gpu/host1x/debug.h @@ -41,6 +41,5 @@ extern unsigned int host1x_debug_trace_cmdbuf; void host1x_debug_init(struct host1x *host1x); void host1x_debug_deinit(struct host1x *host1x); void host1x_debug_dump(struct host1x *host1x); -void host1x_debug_dump_syncpts(struct host1x *host1x); #endif From 049e7ac203d51fdc3a739f5f28906788e8eeea03 Mon Sep 17 00:00:00 2001 From: Kever Yang Date: Thu, 27 Feb 2025 19:19:03 +0800 Subject: [PATCH 13/77] dt-bindings: gpu: Add rockchip,rk3562-mali compatible The Rockchip RK3562 GPU is ARM Mali-G52, use the same driver with "arm,mali-bifrost". Extend the binding accordingly to allow compatible = "rockchip,rk3562-mali", "arm,mali-bifrost"; Signed-off-by: Kever Yang Acked-by: "Rob Herring (Arm)" Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250227111913.2344207-6-kever.yang@rock-chips.com --- Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml index 735c7f06c24e..fc8e82cb28a9 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml @@ -25,6 +25,7 @@ properties: - renesas,r9a07g044-mali - renesas,r9a07g054-mali - rockchip,px30-mali + - rockchip,rk3562-mali - rockchip,rk3568-mali - rockchip,rk3576-mali - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable From 16e3bf497fb2d379f3d461fa0c85d14de0a3d183 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:46 +0000 Subject: [PATCH 14/77] gpu: ipu-v3: ipu-ic: Remove unused ipu_ic_task_graphics_init ipu_ic_task_graphics_init() was added in 2014 by commit 1aa8ea0d2bd5 ("gpu: ipu-v3: Add Image Converter unit") but has been unused. Remove it. Signed-off-by: Dr. 
David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-2-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-ic.c | 73 ------------------------------------- include/video/imx-ipu-v3.h | 4 -- 2 files changed, 77 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c index 846461bac70d..acd76ecc5221 100644 --- a/drivers/gpu/ipu-v3/ipu-ic.c +++ b/drivers/gpu/ipu-v3/ipu-ic.c @@ -321,79 +321,6 @@ void ipu_ic_task_disable(struct ipu_ic *ic) } EXPORT_SYMBOL_GPL(ipu_ic_task_disable); -int ipu_ic_task_graphics_init(struct ipu_ic *ic, - const struct ipu_ic_colorspace *g_in_cs, - bool galpha_en, u32 galpha, - bool colorkey_en, u32 colorkey) -{ - struct ipu_ic_priv *priv = ic->priv; - struct ipu_ic_csc csc2; - unsigned long flags; - u32 reg, ic_conf; - int ret = 0; - - if (ic->task == IC_TASK_ENCODER) - return -EINVAL; - - spin_lock_irqsave(&priv->lock, flags); - - ic_conf = ipu_ic_read(ic, IC_CONF); - - if (!(ic_conf & ic->bit->ic_conf_csc1_en)) { - struct ipu_ic_csc csc1; - - ret = ipu_ic_calc_csc(&csc1, - V4L2_YCBCR_ENC_601, - V4L2_QUANTIZATION_FULL_RANGE, - IPUV3_COLORSPACE_RGB, - V4L2_YCBCR_ENC_601, - V4L2_QUANTIZATION_FULL_RANGE, - IPUV3_COLORSPACE_RGB); - if (ret) - goto unlock; - - /* need transparent CSC1 conversion */ - ret = init_csc(ic, &csc1, 0); - if (ret) - goto unlock; - } - - ic->g_in_cs = *g_in_cs; - csc2.in_cs = ic->g_in_cs; - csc2.out_cs = ic->out_cs; - - ret = __ipu_ic_calc_csc(&csc2); - if (ret) - goto unlock; - - ret = init_csc(ic, &csc2, 1); - if (ret) - goto unlock; - - if (galpha_en) { - ic_conf |= IC_CONF_IC_GLB_LOC_A; - reg = ipu_ic_read(ic, IC_CMBP_1); - reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit); - reg |= (galpha << ic->bit->ic_cmb_galpha_bit); - ipu_ic_write(ic, reg, IC_CMBP_1); - } else - ic_conf &= ~IC_CONF_IC_GLB_LOC_A; - - if (colorkey_en) { - ic_conf |= IC_CONF_KEY_COLOR_EN; - ipu_ic_write(ic, colorkey, IC_CMBP_2); - } else - ic_conf &= ~IC_CONF_KEY_COLOR_EN; - - ipu_ic_write(ic, ic_conf, IC_CONF); - - ic->graphics = true; -unlock: - spin_unlock_irqrestore(&priv->lock, flags); - return ret; -} -EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init); - int ipu_ic_task_init_rsc(struct ipu_ic *ic, const struct ipu_ic_csc *csc, int in_width, int in_height, diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index c422a403c099..0bb1d714cbf5 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -445,10 +445,6 @@ int ipu_ic_task_init_rsc(struct ipu_ic *ic, int in_width, int in_height, int out_width, int out_height, u32 rsc); -int ipu_ic_task_graphics_init(struct ipu_ic *ic, - const struct ipu_ic_colorspace *g_in_cs, - bool galpha_en, u32 galpha, - bool colorkey_en, u32 colorkey); void ipu_ic_task_enable(struct ipu_ic *ic); void ipu_ic_task_disable(struct ipu_ic *ic); int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel, From a52ba18c254c0a3819e632e6371554f1c6f5bd16 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:47 +0000 Subject: [PATCH 15/77] gpu: ipu-v3: Remove unused ipu_rot_mode_to_degrees ipu_rot_mode_to_degrees() was added in 2014 by commit f835f386a119 ("gpu: ipu-v3: Add rotation mode conversion utilities") but has remained unused. Remove it. Signed-off-by: Dr. 
David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-3-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-common.c | 32 -------------------------------- include/video/imx-ipu-v3.h | 2 -- 2 files changed, 34 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 947323f4a234..a8570e1bdf28 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -165,38 +165,6 @@ int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, } EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode); -int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, - bool hflip, bool vflip) -{ - u32 r90, vf, hf; - - r90 = ((u32)mode >> 2) & 0x1; - hf = ((u32)mode >> 1) & 0x1; - vf = ((u32)mode >> 0) & 0x1; - hf ^= (u32)hflip; - vf ^= (u32)vflip; - - switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) { - case IPU_ROTATE_NONE: - *degrees = 0; - break; - case IPU_ROTATE_90_RIGHT: - *degrees = 90; - break; - case IPU_ROTATE_180: - *degrees = 180; - break; - case IPU_ROTATE_90_LEFT: - *degrees = 270; - break; - default: - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees); - struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num) { struct ipuv3_channel *channel; diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 0bb1d714cbf5..8870f65c9a8b 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -484,8 +484,6 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, bool hflip, bool vflip); -int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, - bool hflip, bool vflip); struct ipu_client_platformdata { int csi; From 4f9c64e95c3510f4a5192bd401de5611c1dd5637 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:48 +0000 Subject: [PATCH 16/77] gpu: ipu-v3: Remove unused ipu_idmac_channel_busy The last use of ipu_idmac_channel_busy() was removed in 2017 by commit eb8c88808c83 ("drm/imx: add deferred plane disabling") Remove it. Signed-off-by: Dr. 
David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-4-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-common.c | 6 ------ drivers/gpu/ipu-v3/ipu-prv.h | 2 -- 2 files changed, 8 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index a8570e1bdf28..fa77e4e64f12 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -484,12 +484,6 @@ int ipu_idmac_enable_channel(struct ipuv3_channel *channel) } EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel); -bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno) -{ - return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno)); -} -EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy); - int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms) { struct ipu_soc *ipu = channel->ipu; diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h index 3884acb7995a..16322b2137f8 100644 --- a/drivers/gpu/ipu-v3/ipu-prv.h +++ b/drivers/gpu/ipu-v3/ipu-prv.h @@ -216,8 +216,6 @@ void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync); int ipu_module_enable(struct ipu_soc *ipu, u32 mask); int ipu_module_disable(struct ipu_soc *ipu, u32 mask); -bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno); - int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id, unsigned long base, u32 module, struct clk *clk_ipu); void ipu_csi_exit(struct ipu_soc *ipu, int id); From 96e9d754b35e87a5be2de7dce3c810ffdd769c84 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:49 +0000 Subject: [PATCH 17/77] gpu: ipu-v3: Remove unused ipu_image_convert_* functions ipu_image_convert_enum_format() and ipu_image_convert_sync() were both added in 2016 by commit cd98e85a6b78 ("gpu: ipu-v3: Add queued image conversion support") but have remained unused. Remove them. ipu_image_convert_sync() was the last user of image_convert_sync_complete(). Remove it. Signed-off-by: Dr. 
David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-5-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-image-convert.c | 48 -------------------------- include/video/imx-ipu-image-convert.h | 32 ----------------- 2 files changed, 80 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index 841316582ea9..3c33b4defab5 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c @@ -355,20 +355,6 @@ static void dump_format(struct ipu_image_convert_ctx *ctx, (ic_image->fmt->fourcc >> 24) & 0xff); } -int ipu_image_convert_enum_format(int index, u32 *fourcc) -{ - const struct ipu_image_pixfmt *fmt; - - if (index >= (int)ARRAY_SIZE(image_convert_formats)) - return -EINVAL; - - /* Format found */ - fmt = &image_convert_formats[index]; - *fourcc = fmt->fourcc; - return 0; -} -EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format); - static void free_dma_buf(struct ipu_image_convert_priv *priv, struct ipu_image_convert_dma_buf *buf) { @@ -2437,40 +2423,6 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, } EXPORT_SYMBOL_GPL(ipu_image_convert); -/* "Canned" synchronous single image conversion */ -static void image_convert_sync_complete(struct ipu_image_convert_run *run, - void *data) -{ - struct completion *comp = data; - - complete(comp); -} - -int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, - struct ipu_image *in, struct ipu_image *out, - enum ipu_rotate_mode rot_mode) -{ - struct ipu_image_convert_run *run; - struct completion comp; - int ret; - - init_completion(&comp); - - run = ipu_image_convert(ipu, ic_task, in, out, rot_mode, - image_convert_sync_complete, &comp); - if (IS_ERR(run)) - return PTR_ERR(run); - - ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000)); - ret = (ret == 0) ? -ETIMEDOUT : 0; - - ipu_image_convert_unprepare(run->ctx); - kfree(run); - - return ret; -} -EXPORT_SYMBOL_GPL(ipu_image_convert_sync); - int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) { struct ipu_image_convert_priv *priv; diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h index 3c71b8b94b33..003b3927ede5 100644 --- a/include/video/imx-ipu-image-convert.h +++ b/include/video/imx-ipu-image-convert.h @@ -40,19 +40,6 @@ struct ipu_image_convert_run { typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run, void *ctx); -/** - * ipu_image_convert_enum_format() - enumerate the image converter's - * supported input and output pixel formats. - * - * @index: pixel format index - * @fourcc: v4l2 fourcc for this index - * - * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise. - * - * In V4L2, drivers can call ipu_image_enum_format() in .enum_fmt. - */ -int ipu_image_convert_enum_format(int index, u32 *fourcc); - /** * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions. * @@ -176,23 +163,4 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, ipu_image_convert_cb_t complete, void *complete_context); -/** - * ipu_image_convert_sync() - synchronous single image conversion request - * - * @ipu: the IPU handle to use for the conversion - * @ic_task: the IC task to use for the conversion - * @in: input image format - * @out: output image format - * @rot_mode: rotation mode - * - * Carry out a single image conversion. Returns when the conversion - * completes. 
The input/output formats and rotation mode must already - * meet IPU retrictions. The created context is automatically unprepared - * and the run freed on return. - */ -int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, - struct ipu_image *in, struct ipu_image *out, - enum ipu_rotate_mode rot_mode); - - #endif /* __IMX_IPU_IMAGE_CONVERT_H__ */ From 27985c86e283e1e5ac8a9809f189f03643a6f5f2 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:50 +0000 Subject: [PATCH 18/77] gpu: ipu-v3: Remove unused ipu_vdi_unsetup ipu_vdi_unsetup() was added in 2016 by commit 2d2ead453077 ("gpu: ipu-v3: Add Video Deinterlacer unit") but has remained unused. Remove it. Signed-off-by: Dr. David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-6-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-vdi.c | 11 ----------- include/video/imx-ipu-v3.h | 1 - 2 files changed, 12 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c index a593b232b6d3..af9631fd4256 100644 --- a/drivers/gpu/ipu-v3/ipu-vdi.c +++ b/drivers/gpu/ipu-v3/ipu-vdi.c @@ -150,17 +150,6 @@ void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres) } EXPORT_SYMBOL_GPL(ipu_vdi_setup); -void ipu_vdi_unsetup(struct ipu_vdi *vdi) -{ - unsigned long flags; - - spin_lock_irqsave(&vdi->lock, flags); - ipu_vdi_write(vdi, 0, VDI_FSIZE); - ipu_vdi_write(vdi, 0, VDI_C); - spin_unlock_irqrestore(&vdi->lock, flags); -} -EXPORT_SYMBOL_GPL(ipu_vdi_unsetup); - int ipu_vdi_enable(struct ipu_vdi *vdi) { unsigned long flags; diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 8870f65c9a8b..c9ed4b6a408b 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -463,7 +463,6 @@ struct ipu_vdi; void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field); void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel); void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres); -void ipu_vdi_unsetup(struct ipu_vdi *vdi); int ipu_vdi_enable(struct ipu_vdi *vdi); int ipu_vdi_disable(struct ipu_vdi *vdi); struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu); From c687c3147d5de801ed835b077802b68fe85d8a3d Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:51 +0000 Subject: [PATCH 19/77] gpu: ipu-v3: ipu-csi: Remove unused functions ipu_csi_get_window(), ipu_csi_is_interlaced() and ipu_csi_set_test_generator() were added in 2014 by commit 2ffd48f2e7ae ("gpu: ipu-v3: Add Camera Sensor Interface unit") but have remained unused. Remove them. ipu_csi_set_testgen_mclk() is now unused. Remove it. Signed-off-by: Dr. David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-7-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-csi.c | 108 ----------------------------------- include/video/imx-ipu-v3.h | 5 -- 2 files changed, 113 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 778bc26d3ba5..d576b7d28437 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -185,32 +185,6 @@ static inline void ipu_csi_write(struct ipu_csi *csi, u32 value, writel(value, csi->base + offset); } -/* - * Set mclk division ratio for generating test mode mclk. Only used - * for test generator. 
- */ -static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk, - u32 ipu_clk) -{ - u32 temp; - int div_ratio; - - div_ratio = (ipu_clk / pixel_clk) - 1; - - if (div_ratio > 0xFF || div_ratio < 0) { - dev_err(csi->ipu->dev, - "value of pixel_clk extends normal range\n"); - return -EINVAL; - } - - temp = ipu_csi_read(csi, CSI_SENS_CONF); - temp &= ~CSI_SENS_CONF_DIVRATIO_MASK; - ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT), - CSI_SENS_CONF); - - return 0; -} - /* * Find the CSI data format and data width for the given V4L2 media * bus pixel format code. @@ -538,56 +512,6 @@ int ipu_csi_init_interface(struct ipu_csi *csi, } EXPORT_SYMBOL_GPL(ipu_csi_init_interface); -bool ipu_csi_is_interlaced(struct ipu_csi *csi) -{ - unsigned long flags; - u32 sensor_protocol; - - spin_lock_irqsave(&csi->lock, flags); - sensor_protocol = - (ipu_csi_read(csi, CSI_SENS_CONF) & - CSI_SENS_CONF_SENS_PRTCL_MASK) >> - CSI_SENS_CONF_SENS_PRTCL_SHIFT; - spin_unlock_irqrestore(&csi->lock, flags); - - switch (sensor_protocol) { - case IPU_CSI_CLK_MODE_GATED_CLK: - case IPU_CSI_CLK_MODE_NONGATED_CLK: - case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE: - case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR: - case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR: - return false; - case IPU_CSI_CLK_MODE_CCIR656_INTERLACED: - case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR: - case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR: - return true; - default: - dev_err(csi->ipu->dev, - "CSI %d sensor protocol unsupported\n", csi->id); - return false; - } -} -EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced); - -void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w) -{ - unsigned long flags; - u32 reg; - - spin_lock_irqsave(&csi->lock, flags); - - reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE); - w->width = (reg & 0xFFFF) + 1; - w->height = (reg >> 16 & 0xFFFF) + 1; - - reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL); - w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT; - w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT; - - spin_unlock_irqrestore(&csi->lock, flags); -} -EXPORT_SYMBOL_GPL(ipu_csi_get_window); - void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w) { unsigned long flags; @@ -624,38 +548,6 @@ void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert) } EXPORT_SYMBOL_GPL(ipu_csi_set_downsize); -void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active, - u32 r_value, u32 g_value, u32 b_value, - u32 pix_clk) -{ - unsigned long flags; - u32 ipu_clk = clk_get_rate(csi->clk_ipu); - u32 temp; - - spin_lock_irqsave(&csi->lock, flags); - - temp = ipu_csi_read(csi, CSI_TST_CTRL); - - if (!active) { - temp &= ~CSI_TEST_GEN_MODE_EN; - ipu_csi_write(csi, temp, CSI_TST_CTRL); - } else { - /* Set sensb_mclk div_ratio */ - ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk); - - temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK | - CSI_TEST_GEN_B_MASK); - temp |= CSI_TEST_GEN_MODE_EN; - temp |= (r_value << CSI_TEST_GEN_R_SHIFT) | - (g_value << CSI_TEST_GEN_G_SHIFT) | - (b_value << CSI_TEST_GEN_B_SHIFT); - ipu_csi_write(csi, temp, CSI_TST_CTRL); - } - - spin_unlock_irqrestore(&csi->lock, flags); -} -EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator); - int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, struct v4l2_mbus_framefmt *mbus_fmt) { diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index c9ed4b6a408b..29e142e95ba3 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -361,13 +361,8 @@ int ipu_csi_init_interface(struct ipu_csi *csi, const struct 
v4l2_mbus_config *mbus_cfg, const struct v4l2_mbus_framefmt *infmt, const struct v4l2_mbus_framefmt *outfmt); -bool ipu_csi_is_interlaced(struct ipu_csi *csi); -void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w); void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w); void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert); -void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active, - u32 r_value, u32 g_value, u32 b_value, - u32 pix_clk); int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, struct v4l2_mbus_framefmt *mbus_fmt); int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip, From 2800028d5bdee8e9a3cda2fec782dadc32225d8d Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 26 Dec 2024 02:27:52 +0000 Subject: [PATCH 20/77] gpu: ipu-v3 ipu-cpmem: Remove unused functions ipu_cpmem_set_yuv_interleaved() was added in 2012 by commit 0125f21b2baf ("staging: drm/imx: Add ipu_cpmem_set_yuv_interleaved()") but has remained unused. ipu_cpmem_get_burstsize() was added in 2016 by commit 03085911d7bb ("gpu: ipu-cpmem: Add ipu_cpmem_get_burstsize()") but has remained unused. Remove them. Signed-off-by: Dr. David Alan Gilbert Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20241226022752.219399-8-linux@treblig.org Signed-off-by: Dmitry Baryshkov --- drivers/gpu/ipu-v3/ipu-cpmem.c | 23 ----------------------- include/video/imx-ipu-v3.h | 2 -- 2 files changed, 25 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index 82b244cb313e..07866b1369c6 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c @@ -337,12 +337,6 @@ void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id) } EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id); -int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch) -{ - return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1; -} -EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize); - void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize) { ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1); @@ -452,23 +446,6 @@ int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width) } EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough); -void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format) -{ - switch (pixel_format) { - case V4L2_PIX_FMT_UYVY: - ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */ - ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */ - ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */ - break; - case V4L2_PIX_FMT_YUYV: - ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */ - ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */ - ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */ - break; - } -} -EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved); - void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, unsigned int uv_stride, unsigned int u_offset, unsigned int v_offset) diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 29e142e95ba3..c89574b6f527 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -262,7 +262,6 @@ void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off); void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride, u32 pixelformat); void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id); -int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch); void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, 
int burstsize); void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch); void ipu_cpmem_set_rotation(struct ipuv3_channel *ch, @@ -270,7 +269,6 @@ void ipu_cpmem_set_rotation(struct ipuv3_channel *ch, int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch, const struct ipu_rgb *rgb); int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width); -void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format); void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, unsigned int uv_stride, unsigned int u_offset, From ff0b6c031ed3ed31024618340c795523a86e6688 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Tue, 18 Feb 2025 19:27:28 +0800 Subject: [PATCH 21/77] drm/rockchip: vop2: use devm_regmap_field_alloc for cluster-regs Right now vop2_cluster_init() copies the base vop2_cluster_regs and adapts the reg value with the current window's offset before adding the fields to the regmap. This conflicts with the notion of reg_fields being const, see https://lore.kernel.org/all/20240706-regmap-const-structs-v1-1-d08c776da787@weissschuh.net/ for reference, which now causes checkpatch to actually warn about that. So instead of creating one big copy and changing it afterwards, add the reg_fields individually using devm_regmap_field_alloc(). Functional it is the same, just that the reg_field we're handling can stay const. Signed-off-by: Andy Yan Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250218112744.34433-2-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 66 +++++++++----------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 7b893b4447b6..b84e92a9a25a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -3412,7 +3412,7 @@ static int vop2_find_rgb_encoder(struct vop2 *vop2) return -ENOENT; } -static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { +static const struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), @@ -3483,28 +3483,26 @@ static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { static int vop2_cluster_init(struct vop2_win *win) { struct vop2 *vop2 = win->vop2; - struct reg_field *cluster_regs; - int ret, i; - - cluster_regs = kmemdup(vop2_cluster_regs, sizeof(vop2_cluster_regs), - GFP_KERNEL); - if (!cluster_regs) - return -ENOMEM; - - for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++) - if (cluster_regs[i].reg != 0xffffffff) - cluster_regs[i].reg += win->offset; + int i; - ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg, - cluster_regs, - ARRAY_SIZE(vop2_cluster_regs)); + for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++) { + const struct reg_field field = { + .reg = (vop2_cluster_regs[i].reg != 0xffffffff) ? 
+ vop2_cluster_regs[i].reg + win->offset : + vop2_cluster_regs[i].reg, + .lsb = vop2_cluster_regs[i].lsb, + .msb = vop2_cluster_regs[i].msb + }; - kfree(cluster_regs); + win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); + if (IS_ERR(win->reg[i])) + return PTR_ERR(win->reg[i]); + } - return ret; + return 0; }; -static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = { +static const struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = { [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), @@ -3571,26 +3569,24 @@ static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = { static int vop2_esmart_init(struct vop2_win *win) { struct vop2 *vop2 = win->vop2; - struct reg_field *esmart_regs; - int ret, i; - - esmart_regs = kmemdup(vop2_esmart_regs, sizeof(vop2_esmart_regs), - GFP_KERNEL); - if (!esmart_regs) - return -ENOMEM; - - for (i = 0; i < ARRAY_SIZE(vop2_esmart_regs); i++) - if (esmart_regs[i].reg != 0xffffffff) - esmart_regs[i].reg += win->offset; + int i; - ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg, - esmart_regs, - ARRAY_SIZE(vop2_esmart_regs)); + for (i = 0; i < ARRAY_SIZE(vop2_esmart_regs); i++) { + const struct reg_field field = { + .reg = (vop2_esmart_regs[i].reg != 0xffffffff) ? + vop2_esmart_regs[i].reg + win->offset : + vop2_esmart_regs[i].reg, + .lsb = vop2_esmart_regs[i].lsb, + .msb = vop2_esmart_regs[i].msb + }; - kfree(esmart_regs); + win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); + if (IS_ERR(win->reg[i])) + return PTR_ERR(win->reg[i]); + } - return ret; -}; + return 0; +} static int vop2_win_init(struct vop2 *vop2) { From 838a871a4d51b59fe56ac0422b97443203bfa55c Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Tue, 18 Feb 2025 19:27:29 +0800 Subject: [PATCH 22/77] drm/rockchip: vop2: Remove AFBC from TRANSFORM_OFFSET register macro This TRANSFORM_OFFSET register needs to be configured not only in AFBC mode, but also in tile mode, so remove the AFBC/AFBCD prefix. This also help avoid "exceeds 100 columns" warning from checkpatch. 
Signed-off-by: Andy Yan Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250218112744.34433-3-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 8 ++++---- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index b84e92a9a25a..d0cfde532f0e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1525,7 +1525,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, transform_offset = vop2_afbc_transform_offset(pstate, half_block_en); vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst); vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info); - vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset); + vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, transform_offset); vop2_win_write(win, VOP2_WIN_AFBC_PIC_OFFSET, ((src->x1 >> 16) | src->y1)); vop2_win_write(win, VOP2_WIN_AFBC_DSP_OFFSET, (dest->x1 | (dest->y1 << 16))); vop2_win_write(win, VOP2_WIN_AFBC_PIC_VIR_WIDTH, stride); @@ -1536,7 +1536,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, } else { if (vop2_cluster_window(win)) { vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0); - vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0); + vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, 0); } vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4)); @@ -3460,7 +3460,7 @@ static const struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), - [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET, 0, 31), + [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31), [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), @@ -3559,7 +3559,7 @@ static const struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = { [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index 29cc7fb8f6d8..156a272480f3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -118,7 +118,7 @@ enum vop2_win_regs { VOP2_WIN_AFBC_PIC_OFFSET, VOP2_WIN_AFBC_PIC_SIZE, VOP2_WIN_AFBC_DSP_OFFSET, - VOP2_WIN_AFBC_TRANSFORM_OFFSET, + VOP2_WIN_TRANSFORM_OFFSET, VOP2_WIN_AFBC_HDR_PTR, VOP2_WIN_AFBC_HALF_BLOCK_EN, VOP2_WIN_AFBC_ROTATE_270, @@ -335,7 +335,7 @@ enum dst_factor_mode { #define RK3568_CLUSTER_WIN_DSP_INFO 0x24 #define RK3568_CLUSTER_WIN_DSP_ST 0x28 #define RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB 0x30 -#define RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET 0x3C 
+#define RK3568_CLUSTER_WIN_TRANSFORM_OFFSET 0x3C #define RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL 0x50 #define RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE 0x54 #define RK3568_CLUSTER_WIN_AFBCD_HDR_PTR 0x58 From 328e6885996ca2c6eb8b07d3c9bb1439fdcb088f Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Tue, 18 Feb 2025 19:27:30 +0800 Subject: [PATCH 23/77] drm/rockchip: vop2: Add platform specific callback The VOP interface mux, overlay, background delay cycle configuration of different SOC are much different. Add platform specific callback ops to let the core driver look cleaner and more refined. Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250218112744.34433-4-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 1143 +----------------- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 171 +++ drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 992 +++++++++++++++ 3 files changed, 1180 insertions(+), 1126 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index d0cfde532f0e..efd6b4f91156 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -33,7 +33,6 @@ #include #include -#include #include "rockchip_drm_gem.h" #include "rockchip_drm_vop2.h" @@ -102,148 +101,8 @@ enum vop2_afbc_format { VOP2_AFBC_FMT_INVALID = -1, }; -union vop2_alpha_ctrl { - u32 val; - struct { - /* [0:1] */ - u32 color_mode:1; - u32 alpha_mode:1; - /* [2:3] */ - u32 blend_mode:2; - u32 alpha_cal_mode:1; - /* [5:7] */ - u32 factor_mode:3; - /* [8:9] */ - u32 alpha_en:1; - u32 src_dst_swap:1; - u32 reserved:6; - /* [16:23] */ - u32 glb_alpha:8; - } bits; -}; - -struct vop2_alpha { - union vop2_alpha_ctrl src_color_ctrl; - union vop2_alpha_ctrl dst_color_ctrl; - union vop2_alpha_ctrl src_alpha_ctrl; - union vop2_alpha_ctrl dst_alpha_ctrl; -}; - -struct vop2_alpha_config { - bool src_premulti_en; - bool dst_premulti_en; - bool src_pixel_alpha_en; - bool dst_pixel_alpha_en; - u16 src_glb_alpha_value; - u16 dst_glb_alpha_value; -}; - -struct vop2_win { - struct vop2 *vop2; - struct drm_plane base; - const struct vop2_win_data *data; - struct regmap_field *reg[VOP2_WIN_MAX_REG]; - - /** - * @win_id: graphic window id, a cluster may be split into two - * graphics windows. - */ - u8 win_id; - u8 delay; - u32 offset; - - enum drm_plane_type type; -}; - -struct vop2_video_port { - struct drm_crtc crtc; - struct vop2 *vop2; - struct clk *dclk; - struct clk *dclk_src; - unsigned int id; - const struct vop2_video_port_data *data; - - struct completion dsp_hold_completion; - - /** - * @win_mask: Bitmask of windows attached to the video port; - */ - u32 win_mask; - - struct vop2_win *primary_plane; - struct drm_pending_vblank_event *event; - - unsigned int nlayers; -}; - -struct vop2 { - struct device *dev; - struct drm_device *drm; - struct vop2_video_port vps[ROCKCHIP_MAX_CRTC]; - - const struct vop2_data *data; - /* - * Number of windows that are registered as plane, may be less than the - * total number of hardware windows. 
- */ - u32 registered_num_wins; - - struct resource *res; - void __iomem *regs; - struct regmap *map; - - struct regmap *sys_grf; - struct regmap *vop_grf; - struct regmap *vo1_grf; - struct regmap *sys_pmu; - - /* physical map length of vop2 register */ - u32 len; - - void __iomem *lut_regs; - - /* protects crtc enable/disable */ - struct mutex vop2_lock; - - int irq; - - /* - * Some global resources are shared between all video ports(crtcs), so - * we need a ref counter here. - */ - unsigned int enable_count; - struct clk *hclk; - struct clk *aclk; - struct clk *pclk; - struct clk *pll_hdmiphy0; - struct clk *pll_hdmiphy1; - - /* optional internal rgb encoder */ - struct rockchip_rgb *rgb; - - /* must be put at the end of the struct */ - struct vop2_win win[]; -}; - #define VOP2_MAX_DCLK_RATE 600000000 -#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \ - (x) == ROCKCHIP_VOP2_EP_HDMI1) - -#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \ - (x) == ROCKCHIP_VOP2_EP_DP1) - -#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \ - (x) == ROCKCHIP_VOP2_EP_EDP1) - -#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \ - (x) == ROCKCHIP_VOP2_EP_MIPI1) - -#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \ - (x) == ROCKCHIP_VOP2_EP_LVDS1) - -#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0) - /* * bus-format types. */ @@ -277,16 +136,6 @@ static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list) static const struct regmap_config vop2_regmap_config; -static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc) -{ - return container_of(crtc, struct vop2_video_port, crtc); -} - -static struct vop2_win *to_vop2_win(struct drm_plane *p) -{ - return container_of(p, struct vop2_win, base); -} - static void vop2_lock(struct vop2 *vop2) { mutex_lock(&vop2->vop2_lock); @@ -297,44 +146,6 @@ static void vop2_unlock(struct vop2 *vop2) mutex_unlock(&vop2->vop2_lock); } -static void vop2_writel(struct vop2 *vop2, u32 offset, u32 v) -{ - regmap_write(vop2->map, offset, v); -} - -static void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v) -{ - regmap_write(vp->vop2->map, vp->data->offset + offset, v); -} - -static u32 vop2_readl(struct vop2 *vop2, u32 offset) -{ - u32 val; - - regmap_read(vop2->map, offset, &val); - - return val; -} - -static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset) -{ - u32 val; - - regmap_read(vp->vop2->map, vp->data->offset + offset, &val); - - return val; -} - -static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v) -{ - regmap_field_write(win->reg[reg], v); -} - -static bool vop2_cluster_window(const struct vop2_win *win) -{ - return win->data->feature & WIN_FEATURE_CLUSTER; -} - /* * Note: * The write mask function is documented but missing on rk3566/8, writes @@ -1732,6 +1543,7 @@ static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl) static void vop2_post_config(struct drm_crtc *crtc) { struct vop2_video_port *vp = to_vop2_video_port(crtc); + struct vop2 *vop2 = vp->vop2; struct drm_display_mode *mode = &crtc->state->adjusted_mode; u16 vtotal = mode->crtc_vtotal; u16 hdisplay = mode->crtc_hdisplay; @@ -1742,18 +1554,10 @@ static void vop2_post_config(struct drm_crtc *crtc) u32 top_margin = 100, bottom_margin = 100; u16 hsize = hdisplay * (left_margin + right_margin) / 200; u16 vsize = vdisplay * (top_margin + bottom_margin) / 200; - u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; u16 
hact_end, vact_end; u32 val; - u32 bg_dly; - u32 pre_scan_dly; - bg_dly = vp->data->pre_scan_max_dly[3]; - vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id), - FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly)); - - pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len; - vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); + vop2->ops->setup_bg_dly(vp); vsize = rounddown(vsize, 2); hsize = rounddown(hsize, 2); @@ -1789,347 +1593,6 @@ static void vop2_post_config(struct drm_crtc *crtc) vop2_vp_write(vp, RK3568_VP_DSP_BG, 0); } -static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) -{ - struct vop2 *vop2 = vp->vop2; - struct drm_crtc *crtc = &vp->crtc; - u32 die, dip; - - die = vop2_readl(vop2, RK3568_DSP_IF_EN); - dip = vop2_readl(vop2, RK3568_DSP_IF_POL); - - switch (id) { - case ROCKCHIP_VOP2_EP_RGB0: - die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_RGB | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); - if (polflags & POLFLAG_DCLK_INV) - regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3)); - else - regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16)); - break; - case ROCKCHIP_VOP2_EP_HDMI0: - die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_HDMI | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_EDP0: - die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_EDP | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_MIPI0: - die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_MIPI1: - die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_LVDS0: - die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_LVDS1: - die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id); - dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; - dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); - break; - default: - drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); - return 0; - } - - dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; - - vop2_writel(vop2, RK3568_DSP_IF_EN, die); - vop2_writel(vop2, RK3568_DSP_IF_POL, dip); - - return crtc->state->adjusted_mode.crtc_clock * 1000LL; -} - -/* - * calc the dclk on rk3588 - * the available div of dclk is 1, 2, 4 - */ -static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk) -{ - if 
(child_clk * 4 <= max_dclk) - return child_clk * 4; - else if (child_clk * 2 <= max_dclk) - return child_clk * 2; - else if (child_clk <= max_dclk) - return child_clk; - else - return 0; -} - -/* - * 4 pixclk/cycle on rk3588 - * RGB/eDP/HDMI: if_pixclk >= dclk_core - * DP: dp_pixclk = dclk_out <= dclk_core - * DSI: mipi_pixclk <= dclk_out <= dclk_core - */ -static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id, - int *dclk_core_div, int *dclk_out_div, - int *if_pixclk_div, int *if_dclk_div) -{ - struct vop2 *vop2 = vp->vop2; - struct drm_crtc *crtc = &vp->crtc; - struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; - struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); - int output_mode = vcstate->output_mode; - unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */ - unsigned long dclk_core_rate = v_pixclk >> 2; - unsigned long dclk_rate = v_pixclk; - unsigned long dclk_out_rate; - unsigned long if_pixclk_rate; - int K = 1; - - if (vop2_output_if_is_hdmi(id)) { - /* - * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate - * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate - */ - if (output_mode == ROCKCHIP_OUT_MODE_YUV420) { - dclk_rate = dclk_rate >> 1; - K = 2; - } - - /* - * if_pixclk_rate = (dclk_core_rate << 1) / K; - * if_dclk_rate = dclk_core_rate / K; - * *if_pixclk_div = dclk_rate / if_pixclk_rate; - * *if_dclk_div = dclk_rate / if_dclk_rate; - */ - *if_pixclk_div = 2; - *if_dclk_div = 4; - } else if (vop2_output_if_is_edp(id)) { - /* - * edp_pixclk = edp_dclk > dclk_core - */ - if_pixclk_rate = v_pixclk / K; - dclk_rate = if_pixclk_rate * K; - /* - * *if_pixclk_div = dclk_rate / if_pixclk_rate; - * *if_dclk_div = *if_pixclk_div; - */ - *if_pixclk_div = K; - *if_dclk_div = K; - } else if (vop2_output_if_is_dp(id)) { - if (output_mode == ROCKCHIP_OUT_MODE_YUV420) - dclk_out_rate = v_pixclk >> 3; - else - dclk_out_rate = v_pixclk >> 2; - - dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); - if (!dclk_rate) { - drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n", - dclk_out_rate); - return 0; - } - *dclk_out_div = dclk_rate / dclk_out_rate; - } else if (vop2_output_if_is_mipi(id)) { - if_pixclk_rate = dclk_core_rate / K; - /* - * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4 - */ - dclk_out_rate = if_pixclk_rate; - /* - * dclk_rate = N * dclk_core_rate N = (1,2,4 ), - * we get a little factor here - */ - dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); - if (!dclk_rate) { - drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n", - dclk_out_rate); - return 0; - } - *dclk_out_div = dclk_rate / dclk_out_rate; - /* - * mipi pixclk == dclk_out - */ - *if_pixclk_div = 1; - } else if (vop2_output_if_is_dpi(id)) { - dclk_rate = v_pixclk; - } - - *dclk_core_div = dclk_rate / dclk_core_rate; - *if_pixclk_div = ilog2(*if_pixclk_div); - *if_dclk_div = ilog2(*if_dclk_div); - *dclk_core_div = ilog2(*dclk_core_div); - *dclk_out_div = ilog2(*dclk_out_div); - - drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n", - dclk_rate, *if_pixclk_div, *if_dclk_div); - - return dclk_rate; -} - -/* - * MIPI port mux on rk3588: - * 0: Video Port2 - * 1: Video Port3 - * 3: Video Port 1(MIPI1 only) - */ -static u32 rk3588_get_mipi_port_mux(int vp_id) -{ - if (vp_id == 1) - return 3; - else if (vp_id == 3) - return 1; - else - return 0; -} - -static u32 rk3588_get_hdmi_pol(u32 flags) -{ - u32 val; - - val = (flags & DRM_MODE_FLAG_NHSYNC) ? 
BIT(HSYNC_POSITIVE) : 0; - val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0; - - return val; -} - -static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) -{ - struct vop2 *vop2 = vp->vop2; - int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div; - unsigned long clock; - u32 die, dip, div, vp_clk_div, val; - - clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div, - &if_pixclk_div, &if_dclk_div); - if (!clock) - return 0; - - vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div); - vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div); - - die = vop2_readl(vop2, RK3568_DSP_IF_EN); - dip = vop2_readl(vop2, RK3568_DSP_IF_POL); - div = vop2_readl(vop2, RK3568_DSP_IF_CTRL); - - switch (id) { - case ROCKCHIP_VOP2_EP_HDMI0: - div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; - div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); - val = rk3588_get_hdmi_pol(polflags); - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1)); - regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5)); - break; - case ROCKCHIP_VOP2_EP_HDMI1: - div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; - div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div); - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div); - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); - val = rk3588_get_hdmi_pol(polflags); - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4)); - regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7)); - break; - case ROCKCHIP_VOP2_EP_EDP0: - div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; - div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_EDP0 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0)); - break; - case ROCKCHIP_VOP2_EP_EDP1: - div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; - div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); - div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); - die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_EDP1 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); - regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3)); - break; - case ROCKCHIP_VOP2_EP_MIPI0: - div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV; - div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div); - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX; - val = rk3588_get_mipi_port_mux(vp->id); - die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val); - break; - case ROCKCHIP_VOP2_EP_MIPI1: - div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV; - div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div); - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; - val = 
rk3588_get_mipi_port_mux(vp->id); - die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val); - break; - case ROCKCHIP_VOP2_EP_DP0: - die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_DP0 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id); - dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL; - dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_DP1: - die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; - die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | - FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); - dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL; - dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags); - break; - default: - drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); - return 0; - } - - dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; - - vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div); - vop2_writel(vop2, RK3568_DSP_IF_EN, die); - vop2_writel(vop2, RK3568_DSP_IF_CTRL, div); - vop2_writel(vop2, RK3568_DSP_IF_POL, dip); - - return clock; -} - -static unsigned long vop2_set_intf_mux(struct vop2_video_port *vp, int ep_id, u32 polflags) -{ - struct vop2 *vop2 = vp->vop2; - - if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568) - return rk3568_set_intf_mux(vp, ep_id, polflags); - else if (vop2->data->soc_id == 3588) - return rk3588_set_intf_mux(vp, ep_id, polflags); - else - return 0; -} - static int us_to_vertical_line(struct drm_display_mode *mode, int us) { return us * mode->clock / mode->htotal / 1000; @@ -2202,7 +1665,7 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, * process multi(1/2/4/8) pixels per cycle, so the dclk feed by the * system cru may be the 1/2 or 1/4 of mode->clock. */ - clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); + clock = vop2->ops->setup_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); } if (!clock) { @@ -2369,454 +1832,13 @@ static int vop2_crtc_atomic_check(struct drm_crtc *crtc, return 0; } -static bool is_opaque(u16 alpha) -{ - return (alpha >> 8) == 0xff; -} - -static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config, - struct vop2_alpha *alpha) -{ - int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1; - int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1; - int src_color_mode = alpha_config->src_premulti_en ? - ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; - int dst_color_mode = alpha_config->dst_premulti_en ? 
- ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; - - alpha->src_color_ctrl.val = 0; - alpha->dst_color_ctrl.val = 0; - alpha->src_alpha_ctrl.val = 0; - alpha->dst_alpha_ctrl.val = 0; - - if (!alpha_config->src_pixel_alpha_en) - alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; - else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en) - alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX; - else - alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; - - alpha->src_color_ctrl.bits.alpha_en = 1; - - if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) { - alpha->src_color_ctrl.bits.color_mode = src_color_mode; - alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; - } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) { - alpha->src_color_ctrl.bits.color_mode = src_color_mode; - alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE; - } else { - alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL; - alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; - } - alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8; - alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; - alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; - - alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; - alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; - alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; - alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8; - alpha->dst_color_ctrl.bits.color_mode = dst_color_mode; - alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; - - alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; - alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode; - alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; - alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE; - - alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; - if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en) - alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX; - else - alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; - alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION; - alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; -} - -static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) -{ - struct vop2_video_port *vp; - int used_layer = 0; - int i; - - for (i = 0; i < port_id; i++) { - vp = &vop2->vps[i]; - used_layer += hweight32(vp->win_mask); - } - - return used_layer; -} - -static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win) -{ - struct vop2_alpha_config alpha_config; - struct vop2_alpha alpha; - struct drm_plane_state *bottom_win_pstate; - bool src_pixel_alpha_en = false; - u16 src_glb_alpha_val, dst_glb_alpha_val; - bool premulti_en = false; - bool swap = false; - u32 offset = 0; - - /* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */ - bottom_win_pstate = main_win->base.state; - src_glb_alpha_val = 0; - dst_glb_alpha_val = main_win->base.state->alpha; - - if (!bottom_win_pstate->fb) - return; - - alpha_config.src_premulti_en = premulti_en; - alpha_config.dst_premulti_en = false; - alpha_config.src_pixel_alpha_en = src_pixel_alpha_en; - alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ - alpha_config.src_glb_alpha_value = src_glb_alpha_val; - alpha_config.dst_glb_alpha_value = dst_glb_alpha_val; - vop2_parse_alpha(&alpha_config, &alpha); - - 
alpha.src_color_ctrl.bits.src_dst_swap = swap; - - switch (main_win->data->phys_id) { - case ROCKCHIP_VOP2_CLUSTER0: - offset = 0x0; - break; - case ROCKCHIP_VOP2_CLUSTER1: - offset = 0x10; - break; - case ROCKCHIP_VOP2_CLUSTER2: - offset = 0x20; - break; - case ROCKCHIP_VOP2_CLUSTER3: - offset = 0x30; - break; - } - - vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset, - alpha.src_color_ctrl.val); - vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset, - alpha.dst_color_ctrl.val); - vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset, - alpha.src_alpha_ctrl.val); - vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset, - alpha.dst_alpha_ctrl.val); -} - -static void vop2_setup_alpha(struct vop2_video_port *vp) -{ - struct vop2 *vop2 = vp->vop2; - struct drm_framebuffer *fb; - struct vop2_alpha_config alpha_config; - struct vop2_alpha alpha; - struct drm_plane *plane; - int pixel_alpha_en; - int premulti_en, gpremulti_en = 0; - int mixer_id; - u32 offset; - bool bottom_layer_alpha_en = false; - u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; - - mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); - alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ - - drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { - struct vop2_win *win = to_vop2_win(plane); - - if (plane->state->normalized_zpos == 0 && - !is_opaque(plane->state->alpha) && - !vop2_cluster_window(win)) { - /* - * If bottom layer have global alpha effect [except cluster layer, - * because cluster have deal with bottom layer global alpha value - * at cluster mix], bottom layer mix need deal with global alpha. - */ - bottom_layer_alpha_en = true; - dst_global_alpha = plane->state->alpha; - } - } - - drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { - struct vop2_win *win = to_vop2_win(plane); - int zpos = plane->state->normalized_zpos; - - /* - * Need to configure alpha from second layer. 
- */ - if (zpos == 0) - continue; - - if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) - premulti_en = 1; - else - premulti_en = 0; - - plane = &win->base; - fb = plane->state->fb; - - pixel_alpha_en = fb->format->has_alpha; - - alpha_config.src_premulti_en = premulti_en; - - if (bottom_layer_alpha_en && zpos == 1) { - gpremulti_en = premulti_en; - /* Cd = Cs + (1 - As) * Cd * Agd */ - alpha_config.dst_premulti_en = false; - alpha_config.src_pixel_alpha_en = pixel_alpha_en; - alpha_config.src_glb_alpha_value = plane->state->alpha; - alpha_config.dst_glb_alpha_value = dst_global_alpha; - } else if (vop2_cluster_window(win)) { - /* Mix output data only have pixel alpha */ - alpha_config.dst_premulti_en = true; - alpha_config.src_pixel_alpha_en = true; - alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; - alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; - } else { - /* Cd = Cs + (1 - As) * Cd */ - alpha_config.dst_premulti_en = true; - alpha_config.src_pixel_alpha_en = pixel_alpha_en; - alpha_config.src_glb_alpha_value = plane->state->alpha; - alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; - } - - vop2_parse_alpha(&alpha_config, &alpha); - - offset = (mixer_id + zpos - 1) * 0x10; - vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset, - alpha.src_color_ctrl.val); - vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset, - alpha.dst_color_ctrl.val); - vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset, - alpha.src_alpha_ctrl.val); - vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset, - alpha.dst_alpha_ctrl.val); - } - - if (vp->id == 0) { - if (bottom_layer_alpha_en) { - /* Transfer pixel alpha to hdr mix */ - alpha_config.src_premulti_en = gpremulti_en; - alpha_config.dst_premulti_en = true; - alpha_config.src_pixel_alpha_en = true; - alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; - alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; - vop2_parse_alpha(&alpha_config, &alpha); - - vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, - alpha.src_color_ctrl.val); - vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL, - alpha.dst_color_ctrl.val); - vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL, - alpha.src_alpha_ctrl.val); - vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL, - alpha.dst_alpha_ctrl.val); - } else { - vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0); - } - } -} - -static void vop2_setup_layer_mixer(struct vop2_video_port *vp) -{ - struct vop2 *vop2 = vp->vop2; - struct drm_plane *plane; - u32 layer_sel = 0; - u32 port_sel; - u8 layer_id; - u8 old_layer_id; - u8 layer_sel_id; - unsigned int ofs; - u32 ovl_ctrl; - int i; - struct vop2_video_port *vp0 = &vop2->vps[0]; - struct vop2_video_port *vp1 = &vop2->vps[1]; - struct vop2_video_port *vp2 = &vop2->vps[2]; - struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); - - ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); - ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; - if (vcstate->yuv_overlay) - ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); - else - ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id); - - vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); - - port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); - port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT; - - if (vp0->nlayers) - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, - vp0->nlayers - 1); - else - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8); - - if (vp1->nlayers) - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, - (vp0->nlayers + vp1->nlayers - 1)); - else - port_sel |= 
FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8); - - if (vp2->nlayers) - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, - (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1)); - else - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); - - layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); - - ofs = 0; - for (i = 0; i < vp->id; i++) - ofs += vop2->vps[i].nlayers; - - drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { - struct vop2_win *win = to_vop2_win(plane); - struct vop2_win *old_win; - - layer_id = (u8)(plane->state->normalized_zpos + ofs); - - /* - * Find the layer this win bind in old state. - */ - for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) { - layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf; - if (layer_sel_id == win->data->layer_sel_id) - break; - } - - /* - * Find the win bind to this layer in old state - */ - for (i = 0; i < vop2->data->win_size; i++) { - old_win = &vop2->win[i]; - layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf; - if (layer_sel_id == old_win->data->layer_sel_id) - break; - } - - switch (win->data->phys_id) { - case ROCKCHIP_VOP2_CLUSTER0: - port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0; - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id); - break; - case ROCKCHIP_VOP2_CLUSTER1: - port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1; - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id); - break; - case ROCKCHIP_VOP2_CLUSTER2: - port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2; - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id); - break; - case ROCKCHIP_VOP2_CLUSTER3: - port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3; - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id); - break; - case ROCKCHIP_VOP2_ESMART0: - port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0; - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id); - break; - case ROCKCHIP_VOP2_ESMART1: - port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1; - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id); - break; - case ROCKCHIP_VOP2_ESMART2: - port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2; - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id); - break; - case ROCKCHIP_VOP2_ESMART3: - port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3; - port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id); - break; - case ROCKCHIP_VOP2_SMART0: - port_sel &= ~RK3568_OVL_PORT_SEL__SMART0; - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id); - break; - case ROCKCHIP_VOP2_SMART1: - port_sel &= ~RK3568_OVL_PORT_SEL__SMART1; - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id); - break; - } - - layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7); - layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id); - /* - * When we bind a window from layerM to layerN, we also need to move the old - * window on layerN to layerM to avoid one window selected by two or more layers. 
- */ - layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7); - layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id); - } - - vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); - vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); -} - -static void vop2_setup_dly_for_windows(struct vop2 *vop2) -{ - struct vop2_win *win; - int i = 0; - u32 cdly = 0, sdly = 0; - - for (i = 0; i < vop2->data->win_size; i++) { - u32 dly; - - win = &vop2->win[i]; - dly = win->delay; - - switch (win->data->phys_id) { - case ROCKCHIP_VOP2_CLUSTER0: - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly); - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly); - break; - case ROCKCHIP_VOP2_CLUSTER1: - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly); - cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly); - break; - case ROCKCHIP_VOP2_ESMART0: - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly); - break; - case ROCKCHIP_VOP2_ESMART1: - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly); - break; - case ROCKCHIP_VOP2_SMART0: - case ROCKCHIP_VOP2_ESMART2: - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly); - break; - case ROCKCHIP_VOP2_SMART1: - case ROCKCHIP_VOP2_ESMART3: - sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly); - break; - } - } - - vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly); - vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly); -} - static void vop2_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct vop2_video_port *vp = to_vop2_video_port(crtc); struct vop2 *vop2 = vp->vop2; - struct drm_plane *plane; - - vp->win_mask = 0; - - drm_atomic_crtc_for_each_plane(plane, crtc) { - struct vop2_win *win = to_vop2_win(plane); - - win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT]; - - vp->win_mask |= BIT(win->data->phys_id); - if (vop2_cluster_window(win)) - vop2_setup_cluster_alpha(vop2, win); - } - - if (!vp->win_mask) - return; - - vop2_setup_layer_mixer(vp); - vop2_setup_alpha(vp); - vop2_setup_dly_for_windows(vop2); + vop2->ops->setup_overlay(vp); } static void vop2_crtc_atomic_flush(struct drm_crtc *crtc, @@ -3412,86 +2434,18 @@ static int vop2_find_rgb_encoder(struct vop2 *vop2) return -ENOENT; } -static const struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { - [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), - [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), - [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), - [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18), - [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31), - [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31), - [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31), - [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31), - [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31), - [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19), - [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15), - [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31), - [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8), - [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9), - [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11), - [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3), - [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8), - /* RK3588 only, reserved bit on rk3568*/ - [VOP2_WIN_AXI_BUS_ID] = 
REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13), - - /* Scale */ - [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15), - [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31), - [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15), - [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13), - [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3), - [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28), - [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29), - - /* cluster regs */ - [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1), - [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0), - [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7), - - /* afbc regs */ - [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6), - [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9), - [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10), - [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4), - [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7), - [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8), - [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31), - [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31), - [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15), - [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), - [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), - [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), - [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31), - [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), - [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), - [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), - [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3), - [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff }, - [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff }, - [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff }, - [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, - [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, - [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff }, - [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff }, -}; - static int vop2_cluster_init(struct vop2_win *win) { struct vop2 *vop2 = win->vop2; int i; - for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++) { + for (i = 0; i < vop2->data->nr_cluster_regs; i++) { const struct reg_field field = { - .reg = (vop2_cluster_regs[i].reg != 0xffffffff) ? - vop2_cluster_regs[i].reg + win->offset : - vop2_cluster_regs[i].reg, - .lsb = vop2_cluster_regs[i].lsb, - .msb = vop2_cluster_regs[i].msb + .reg = (vop2->data->cluster_reg[i].reg != 0xffffffff) ? 
+ vop2->data->cluster_reg[i].reg + win->offset : + vop2->data->cluster_reg[i].reg, + .lsb = vop2->data->cluster_reg[i].lsb, + .msb = vop2->data->cluster_reg[i].msb }; win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); @@ -3502,82 +2456,18 @@ static int vop2_cluster_init(struct vop2_win *win) return 0; }; -static const struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = { - [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), - [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), - [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), - [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14), - [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16), - [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31), - [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31), - [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28), - [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31), - [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31), - [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17), - [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15), - [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31), - [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0), - [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1), - [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3), - [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31), - [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29), - [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31), - [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8), - [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16), - /* RK3588 only, reserved register on rk3568 */ - [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1), - - /* Scale */ - [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15), - [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31), - [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15), - [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31), - [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1), - [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3), - [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5), - [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7), - [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9), - [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11), - [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13), - [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15), - [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17), - [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8), - [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9), - [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10), - [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11), - [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, - [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff }, - [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_FORMAT] = 
{ .reg = 0xffffffff }, - [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, - [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, - [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, -}; - static int vop2_esmart_init(struct vop2_win *win) { struct vop2 *vop2 = win->vop2; int i; - for (i = 0; i < ARRAY_SIZE(vop2_esmart_regs); i++) { + for (i = 0; i < vop2->data->nr_smart_regs; i++) { const struct reg_field field = { - .reg = (vop2_esmart_regs[i].reg != 0xffffffff) ? - vop2_esmart_regs[i].reg + win->offset : - vop2_esmart_regs[i].reg, - .lsb = vop2_esmart_regs[i].lsb, - .msb = vop2_esmart_regs[i].msb + .reg = (vop2->data->smart_reg[i].reg != 0xffffffff) ? + vop2->data->smart_reg[i].reg + win->offset : + vop2->data->smart_reg[i].reg, + .lsb = vop2->data->smart_reg[i].lsb, + .msb = vop2->data->smart_reg[i].msb }; win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); @@ -3663,6 +2553,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) vop2->dev = dev; vop2->data = vop2_data; + vop2->ops = vop2_data->ops; vop2->drm = drm; dev_set_drvdata(dev, vop2); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index 156a272480f3..904ce2008870 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -9,6 +9,7 @@ #include #include +#include #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -58,6 +59,23 @@ enum vop2_scale_down_mode { #define VOP2_PD_DSC_4K BIT(6) #define VOP2_PD_ESMART BIT(7) +#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \ + (x) == ROCKCHIP_VOP2_EP_HDMI1) + +#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \ + (x) == ROCKCHIP_VOP2_EP_DP1) + +#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \ + (x) == ROCKCHIP_VOP2_EP_EDP1) + +#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \ + (x) == ROCKCHIP_VOP2_EP_MIPI1) + +#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \ + (x) == ROCKCHIP_VOP2_EP_LVDS1) + +#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0) + enum vop2_win_regs { VOP2_WIN_ENABLE, VOP2_WIN_FORMAT, @@ -162,6 +180,23 @@ struct vop2_win_data { const u8 dly[VOP2_DLY_MODE_MAX]; }; +struct vop2_win { + struct vop2 *vop2; + struct drm_plane base; + const struct vop2_win_data *data; + struct regmap_field *reg[VOP2_WIN_MAX_REG]; + + /** + * @win_id: graphic window id, a cluster may be split into two + * graphics windows. 
+ */ + u8 win_id; + u8 delay; + u32 offset; + + enum drm_plane_type type; +}; + struct vop2_video_port_data { unsigned int id; u32 feature; @@ -172,20 +207,108 @@ struct vop2_video_port_data { unsigned int offset; }; +struct vop2_video_port { + struct drm_crtc crtc; + struct vop2 *vop2; + struct clk *dclk; + struct clk *dclk_src; + unsigned int id; + const struct vop2_video_port_data *data; + + struct completion dsp_hold_completion; + + /** + * @win_mask: Bitmask of windows attached to the video port; + */ + u32 win_mask; + + struct vop2_win *primary_plane; + struct drm_pending_vblank_event *event; + + unsigned int nlayers; +}; + +/** + * struct vop2_ops - helper operations for vop2 hardware + * + * These hooks are used by the common part of the vop2 driver to + * implement the proper behaviour of different variants. + */ +struct vop2_ops { + unsigned long (*setup_intf_mux)(struct vop2_video_port *vp, int ep_id, u32 polflags); + void (*setup_bg_dly)(struct vop2_video_port *vp); + void (*setup_overlay)(struct vop2_video_port *vp); +}; + struct vop2_data { u8 nr_vps; u64 feature; + const struct vop2_ops *ops; const struct vop2_win_data *win; const struct vop2_video_port_data *vp; + const struct reg_field *cluster_reg; + const struct reg_field *smart_reg; const struct vop2_regs_dump *regs_dump; struct vop_rect max_input; struct vop_rect max_output; + unsigned int nr_cluster_regs; + unsigned int nr_smart_regs; unsigned int win_size; unsigned int regs_dump_size; unsigned int soc_id; }; +struct vop2 { + struct device *dev; + struct drm_device *drm; + struct vop2_video_port vps[ROCKCHIP_MAX_CRTC]; + + const struct vop2_data *data; + const struct vop2_ops *ops; + /* + * Number of windows that are registered as plane, may be less than the + * total number of hardware windows. + */ + u32 registered_num_wins; + + struct resource *res; + void __iomem *regs; + struct regmap *map; + + struct regmap *sys_grf; + struct regmap *vop_grf; + struct regmap *vo1_grf; + struct regmap *sys_pmu; + + /* physical map length of vop2 register */ + u32 len; + + void __iomem *lut_regs; + + /* protects crtc enable/disable */ + struct mutex vop2_lock; + + int irq; + + /* + * Some global resources are shared between all video ports(crtcs), so + * we need a ref counter here. 
+ */ + unsigned int enable_count; + struct clk *hclk; + struct clk *aclk; + struct clk *pclk; + struct clk *pll_hdmiphy0; + struct clk *pll_hdmiphy1; + + /* optional internal rgb encoder */ + struct rockchip_rgb *rgb; + + /* must be put at the end of the struct */ + struct vop2_win win[]; +}; + /* interrupt define */ #define FS_NEW_INTR BIT(4) #define ADDR_SAME_INTR BIT(5) @@ -560,4 +683,52 @@ enum vop2_layer_phy_id { extern const struct component_ops vop2_component_ops; +static inline void vop2_writel(struct vop2 *vop2, u32 offset, u32 v) +{ + regmap_write(vop2->map, offset, v); +} + +static inline void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v) +{ + regmap_write(vp->vop2->map, vp->data->offset + offset, v); +} + +static inline u32 vop2_readl(struct vop2 *vop2, u32 offset) +{ + u32 val; + + regmap_read(vop2->map, offset, &val); + + return val; +} + +static inline u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset) +{ + u32 val; + + regmap_read(vp->vop2->map, vp->data->offset + offset, &val); + + return val; +} + +static inline void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v) +{ + regmap_field_write(win->reg[reg], v); +} + +static inline bool vop2_cluster_window(const struct vop2_win *win) +{ + return win->data->feature & WIN_FEATURE_CLUSTER; +} + +static inline struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc) +{ + return container_of(crtc, struct vop2_video_port, crtc); +} + +static inline struct vop2_win *to_vop2_win(struct drm_plane *p) +{ + return container_of(p, struct vop2_win, base); +} + #endif /* _ROCKCHIP_DRM_VOP2_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 65a88f489693..56d72f35b57f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -9,12 +9,50 @@ #include #include #include +#include #include +#include #include #include #include "rockchip_drm_vop2.h" +union vop2_alpha_ctrl { + u32 val; + struct { + /* [0:1] */ + u32 color_mode:1; + u32 alpha_mode:1; + /* [2:3] */ + u32 blend_mode:2; + u32 alpha_cal_mode:1; + /* [5:7] */ + u32 factor_mode:3; + /* [8:9] */ + u32 alpha_en:1; + u32 src_dst_swap:1; + u32 reserved:6; + /* [16:23] */ + u32 glb_alpha:8; + } bits; +}; + +struct vop2_alpha { + union vop2_alpha_ctrl src_color_ctrl; + union vop2_alpha_ctrl dst_color_ctrl; + union vop2_alpha_ctrl src_alpha_ctrl; + union vop2_alpha_ctrl dst_alpha_ctrl; +}; + +struct vop2_alpha_config { + bool src_premulti_en; + bool dst_premulti_en; + bool src_pixel_alpha_en; + bool dst_pixel_alpha_en; + u16 src_glb_alpha_value; + u16 dst_glb_alpha_value; +}; + static const uint32_t formats_cluster[] = { DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, @@ -131,6 +169,138 @@ static const uint64_t format_modifiers_afbc[] = { DRM_FORMAT_MOD_INVALID, }; +static const struct reg_field rk3568_vop_cluster_regs[VOP2_WIN_MAX_REG] = { + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18), + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31), + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31), + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31), + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31), + [VOP2_WIN_UV_MST] = 
REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31), + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19), + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15), + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31), + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8), + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9), + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11), + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3), + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8), + /* RK3588 only, reserved bit on rk3568*/ + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13), + + /* Scale */ + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15), + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31), + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15), + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13), + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3), + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28), + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29), + + /* cluster regs */ + [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1), + [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0), + [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7), + + /* afbc regs */ + [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6), + [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9), + [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10), + [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4), + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7), + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8), + [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31), + [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31), + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15), + [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), + [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), + [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), + [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31), + [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), + [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), + [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3), + [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff }, + [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff }, + [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, + [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff }, + 
[VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff }, +}; + +static const struct reg_field rk3568_vop_smart_regs[VOP2_WIN_MAX_REG] = { + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14), + [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16), + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31), + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31), + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28), + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31), + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31), + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17), + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15), + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31), + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0), + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1), + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3), + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31), + [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29), + [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31), + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8), + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16), + /* RK3588 only, reserved register on rk3568 */ + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1), + + /* Scale */ + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15), + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31), + [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15), + [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31), + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1), + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3), + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5), + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7), + [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9), + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11), + [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13), + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15), + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17), + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8), + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9), + [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10), + [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11), + [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, + [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff }, + [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff }, + 
[VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, +}; + static const struct vop2_video_port_data rk3568_vop_video_ports[] = { { .id = 0, @@ -647,6 +817,813 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = { }, }; +static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_crtc *crtc = &vp->crtc; + u32 die, dip; + + die = vop2_readl(vop2, RK3568_DSP_IF_EN); + dip = vop2_readl(vop2, RK3568_DSP_IF_POL); + + switch (id) { + case ROCKCHIP_VOP2_EP_RGB0: + die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_RGB | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); + if (polflags & POLFLAG_DCLK_INV) + regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3)); + else + regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16)); + break; + case ROCKCHIP_VOP2_EP_HDMI0: + die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_HDMI | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags); + break; + case ROCKCHIP_VOP2_EP_EDP0: + die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_EDP | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags); + break; + case ROCKCHIP_VOP2_EP_MIPI0: + die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); + break; + case ROCKCHIP_VOP2_EP_MIPI1: + die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags); + break; + case ROCKCHIP_VOP2_EP_LVDS0: + die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); + break; + case ROCKCHIP_VOP2_EP_LVDS1: + die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX; + die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 | + FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags); + break; + default: + drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); + return 0; + } + + dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; + + vop2_writel(vop2, RK3568_DSP_IF_EN, die); + vop2_writel(vop2, RK3568_DSP_IF_POL, dip); + + return crtc->state->adjusted_mode.crtc_clock * 1000LL; +} + +/* + * calc the dclk 
on rk3588 + * the available div of dclk is 1, 2, 4 + */ +static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk) +{ + if (child_clk * 4 <= max_dclk) + return child_clk * 4; + else if (child_clk * 2 <= max_dclk) + return child_clk * 2; + else if (child_clk <= max_dclk) + return child_clk; + else + return 0; +} + +/* + * 4 pixclk/cycle on rk3588 + * RGB/eDP/HDMI: if_pixclk >= dclk_core + * DP: dp_pixclk = dclk_out <= dclk_core + * DSI: mipi_pixclk <= dclk_out <= dclk_core + */ +static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id, + int *dclk_core_div, int *dclk_out_div, + int *if_pixclk_div, int *if_dclk_div) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_crtc *crtc = &vp->crtc; + struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); + int output_mode = vcstate->output_mode; + unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */ + unsigned long dclk_core_rate = v_pixclk >> 2; + unsigned long dclk_rate = v_pixclk; + unsigned long dclk_out_rate; + unsigned long if_pixclk_rate; + int K = 1; + + if (vop2_output_if_is_hdmi(id)) { + /* + * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate + * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate + */ + if (output_mode == ROCKCHIP_OUT_MODE_YUV420) { + dclk_rate = dclk_rate >> 1; + K = 2; + } + + /* + * if_pixclk_rate = (dclk_core_rate << 1) / K; + * if_dclk_rate = dclk_core_rate / K; + * *if_pixclk_div = dclk_rate / if_pixclk_rate; + * *if_dclk_div = dclk_rate / if_dclk_rate; + */ + *if_pixclk_div = 2; + *if_dclk_div = 4; + } else if (vop2_output_if_is_edp(id)) { + /* + * edp_pixclk = edp_dclk > dclk_core + */ + if_pixclk_rate = v_pixclk / K; + dclk_rate = if_pixclk_rate * K; + /* + * *if_pixclk_div = dclk_rate / if_pixclk_rate; + * *if_dclk_div = *if_pixclk_div; + */ + *if_pixclk_div = K; + *if_dclk_div = K; + } else if (vop2_output_if_is_dp(id)) { + if (output_mode == ROCKCHIP_OUT_MODE_YUV420) + dclk_out_rate = v_pixclk >> 3; + else + dclk_out_rate = v_pixclk >> 2; + + dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); + if (!dclk_rate) { + drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n", + dclk_out_rate); + return 0; + } + *dclk_out_div = dclk_rate / dclk_out_rate; + } else if (vop2_output_if_is_mipi(id)) { + if_pixclk_rate = dclk_core_rate / K; + /* + * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4 + */ + dclk_out_rate = if_pixclk_rate; + /* + * dclk_rate = N * dclk_core_rate N = (1,2,4 ), + * we get a little factor here + */ + dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); + if (!dclk_rate) { + drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n", + dclk_out_rate); + return 0; + } + *dclk_out_div = dclk_rate / dclk_out_rate; + /* + * mipi pixclk == dclk_out + */ + *if_pixclk_div = 1; + } else if (vop2_output_if_is_dpi(id)) { + dclk_rate = v_pixclk; + } + + *dclk_core_div = dclk_rate / dclk_core_rate; + *if_pixclk_div = ilog2(*if_pixclk_div); + *if_dclk_div = ilog2(*if_dclk_div); + *dclk_core_div = ilog2(*dclk_core_div); + *dclk_out_div = ilog2(*dclk_out_div); + + drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n", + dclk_rate, *if_pixclk_div, *if_dclk_div); + + return dclk_rate; +} + +/* + * MIPI port mux on rk3588: + * 0: Video Port2 + * 1: Video Port3 + * 3: Video Port 1(MIPI1 only) + */ +static u32 rk3588_get_mipi_port_mux(int vp_id) +{ + if (vp_id == 1) + return 3; + else if (vp_id == 
3) + return 1; + else + return 0; +} + +static u32 rk3588_get_hdmi_pol(u32 flags) +{ + u32 val; + + val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0; + val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0; + + return val; +} + +static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) +{ + struct vop2 *vop2 = vp->vop2; + int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div; + unsigned long clock; + u32 die, dip, div, vp_clk_div, val; + + clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div, + &if_pixclk_div, &if_dclk_div); + if (!clock) + return 0; + + vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div); + vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div); + + die = vop2_readl(vop2, RK3568_DSP_IF_EN); + dip = vop2_readl(vop2, RK3568_DSP_IF_POL); + div = vop2_readl(vop2, RK3568_DSP_IF_CTRL); + + switch (id) { + case ROCKCHIP_VOP2_EP_HDMI0: + div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; + div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); + val = rk3588_get_hdmi_pol(polflags); + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1)); + regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5)); + break; + case ROCKCHIP_VOP2_EP_HDMI1: + div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; + div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div); + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div); + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); + val = rk3588_get_hdmi_pol(polflags); + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4)); + regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7)); + break; + case ROCKCHIP_VOP2_EP_EDP0: + div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV; + div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV; + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_EDP0 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id); + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0)); + break; + case ROCKCHIP_VOP2_EP_EDP1: + div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV; + div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV; + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div); + div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div); + die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_EDP1 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id); + regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3)); + break; + case ROCKCHIP_VOP2_EP_MIPI0: + div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV; + div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div); + die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX; + val = rk3588_get_mipi_port_mux(vp->id); + die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val); + break; + case ROCKCHIP_VOP2_EP_MIPI1: + div &= 
~RK3588_DSP_IF_MIPI1_PCLK_DIV; + div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div); + die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; + val = rk3588_get_mipi_port_mux(vp->id); + die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val); + break; + case ROCKCHIP_VOP2_EP_DP0: + die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_DP0 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id); + dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL; + dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags); + break; + case ROCKCHIP_VOP2_EP_DP1: + die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX; + die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 | + FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id); + dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL; + dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags); + break; + default: + drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); + return 0; + } + + dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD; + + vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div); + vop2_writel(vop2, RK3568_DSP_IF_EN, die); + vop2_writel(vop2, RK3568_DSP_IF_CTRL, div); + vop2_writel(vop2, RK3568_DSP_IF_POL, dip); + + return clock; +} + +static bool is_opaque(u16 alpha) +{ + return (alpha >> 8) == 0xff; +} + +static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config, + struct vop2_alpha *alpha) +{ + int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1; + int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1; + int src_color_mode = alpha_config->src_premulti_en ? + ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; + int dst_color_mode = alpha_config->dst_premulti_en ? + ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL; + + alpha->src_color_ctrl.val = 0; + alpha->dst_color_ctrl.val = 0; + alpha->src_alpha_ctrl.val = 0; + alpha->dst_alpha_ctrl.val = 0; + + if (!alpha_config->src_pixel_alpha_en) + alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; + else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en) + alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX; + else + alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; + + alpha->src_color_ctrl.bits.alpha_en = 1; + + if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) { + alpha->src_color_ctrl.bits.color_mode = src_color_mode; + alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; + } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) { + alpha->src_color_ctrl.bits.color_mode = src_color_mode; + alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE; + } else { + alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL; + alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL; + } + alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8; + alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; + alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; + + alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; + alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; + alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL; + alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8; + alpha->dst_color_ctrl.bits.color_mode = dst_color_mode; + alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; + + alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; + alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode; + alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION; + 
alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE; + + alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT; + if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en) + alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX; + else + alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL; + alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION; + alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; +} + +static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) +{ + struct vop2_video_port *vp; + int used_layer = 0; + int i; + + for (i = 0; i < port_id; i++) { + vp = &vop2->vps[i]; + used_layer += hweight32(vp->win_mask); + } + + return used_layer; +} + +static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win) +{ + struct vop2_alpha_config alpha_config; + struct vop2_alpha alpha; + struct drm_plane_state *bottom_win_pstate; + bool src_pixel_alpha_en = false; + u16 src_glb_alpha_val, dst_glb_alpha_val; + u32 offset = 0; + bool premulti_en = false; + bool swap = false; + + /* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */ + bottom_win_pstate = main_win->base.state; + src_glb_alpha_val = 0; + dst_glb_alpha_val = main_win->base.state->alpha; + + if (!bottom_win_pstate->fb) + return; + + alpha_config.src_premulti_en = premulti_en; + alpha_config.dst_premulti_en = false; + alpha_config.src_pixel_alpha_en = src_pixel_alpha_en; + alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ + alpha_config.src_glb_alpha_value = src_glb_alpha_val; + alpha_config.dst_glb_alpha_value = dst_glb_alpha_val; + vop2_parse_alpha(&alpha_config, &alpha); + + alpha.src_color_ctrl.bits.src_dst_swap = swap; + + switch (main_win->data->phys_id) { + case ROCKCHIP_VOP2_CLUSTER0: + offset = 0x0; + break; + case ROCKCHIP_VOP2_CLUSTER1: + offset = 0x10; + break; + case ROCKCHIP_VOP2_CLUSTER2: + offset = 0x20; + break; + case ROCKCHIP_VOP2_CLUSTER3: + offset = 0x30; + break; + } + + vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset, + alpha.src_color_ctrl.val); + vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset, + alpha.dst_color_ctrl.val); + vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset, + alpha.src_alpha_ctrl.val); + vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset, + alpha.dst_alpha_ctrl.val); +} + +static void vop2_setup_alpha(struct vop2_video_port *vp) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_framebuffer *fb; + struct vop2_alpha_config alpha_config; + struct vop2_alpha alpha; + struct drm_plane *plane; + int pixel_alpha_en; + int premulti_en, gpremulti_en = 0; + int mixer_id; + u32 offset; + bool bottom_layer_alpha_en = false; + u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; + + mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); + alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ + + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { + struct vop2_win *win = to_vop2_win(plane); + + if (plane->state->normalized_zpos == 0 && + !is_opaque(plane->state->alpha) && + !vop2_cluster_window(win)) { + /* + * If bottom layer have global alpha effect [except cluster layer, + * because cluster have deal with bottom layer global alpha value + * at cluster mix], bottom layer mix need deal with global alpha. 
+ */ + bottom_layer_alpha_en = true; + dst_global_alpha = plane->state->alpha; + } + } + + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { + struct vop2_win *win = to_vop2_win(plane); + int zpos = plane->state->normalized_zpos; + + /* + * Need to configure alpha from second layer. + */ + if (zpos == 0) + continue; + + if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) + premulti_en = 1; + else + premulti_en = 0; + + plane = &win->base; + fb = plane->state->fb; + + pixel_alpha_en = fb->format->has_alpha; + + alpha_config.src_premulti_en = premulti_en; + + if (bottom_layer_alpha_en && zpos == 1) { + gpremulti_en = premulti_en; + /* Cd = Cs + (1 - As) * Cd * Agd */ + alpha_config.dst_premulti_en = false; + alpha_config.src_pixel_alpha_en = pixel_alpha_en; + alpha_config.src_glb_alpha_value = plane->state->alpha; + alpha_config.dst_glb_alpha_value = dst_global_alpha; + } else if (vop2_cluster_window(win)) { + /* Mix output data only have pixel alpha */ + alpha_config.dst_premulti_en = true; + alpha_config.src_pixel_alpha_en = true; + alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; + alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; + } else { + /* Cd = Cs + (1 - As) * Cd */ + alpha_config.dst_premulti_en = true; + alpha_config.src_pixel_alpha_en = pixel_alpha_en; + alpha_config.src_glb_alpha_value = plane->state->alpha; + alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; + } + + vop2_parse_alpha(&alpha_config, &alpha); + + offset = (mixer_id + zpos - 1) * 0x10; + vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset, + alpha.src_color_ctrl.val); + vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset, + alpha.dst_color_ctrl.val); + vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset, + alpha.src_alpha_ctrl.val); + vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset, + alpha.dst_alpha_ctrl.val); + } + + if (vp->id == 0) { + if (bottom_layer_alpha_en) { + /* Transfer pixel alpha to hdr mix */ + alpha_config.src_premulti_en = gpremulti_en; + alpha_config.dst_premulti_en = true; + alpha_config.src_pixel_alpha_en = true; + alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; + alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; + vop2_parse_alpha(&alpha_config, &alpha); + + vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, + alpha.src_color_ctrl.val); + vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL, + alpha.dst_color_ctrl.val); + vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL, + alpha.src_alpha_ctrl.val); + vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL, + alpha.dst_alpha_ctrl.val); + } else { + vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0); + } + } +} + +static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_plane *plane; + u32 layer_sel = 0; + u32 port_sel; + u8 layer_id; + u8 old_layer_id; + u8 layer_sel_id; + unsigned int ofs; + u32 ovl_ctrl; + int i; + struct vop2_video_port *vp0 = &vop2->vps[0]; + struct vop2_video_port *vp1 = &vop2->vps[1]; + struct vop2_video_port *vp2 = &vop2->vps[2]; + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); + + ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); + ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; + if (vcstate->yuv_overlay) + ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); + else + ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id); + + vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); + + port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); + port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT; + + if (vp0->nlayers) + port_sel 
|= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, + vp0->nlayers - 1); + else + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8); + + if (vp1->nlayers) + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, + (vp0->nlayers + vp1->nlayers - 1)); + else + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8); + + if (vp2->nlayers) + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, + (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1)); + else + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); + + layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); + + ofs = 0; + for (i = 0; i < vp->id; i++) + ofs += vop2->vps[i].nlayers; + + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { + struct vop2_win *win = to_vop2_win(plane); + struct vop2_win *old_win; + + layer_id = (u8)(plane->state->normalized_zpos + ofs); + /* + * Find the layer this win bind in old state. + */ + for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) { + layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf; + if (layer_sel_id == win->data->layer_sel_id) + break; + } + + /* + * Find the win bind to this layer in old state + */ + for (i = 0; i < vop2->data->win_size; i++) { + old_win = &vop2->win[i]; + layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf; + if (layer_sel_id == old_win->data->layer_sel_id) + break; + } + + switch (win->data->phys_id) { + case ROCKCHIP_VOP2_CLUSTER0: + port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0; + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id); + break; + case ROCKCHIP_VOP2_CLUSTER1: + port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1; + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id); + break; + case ROCKCHIP_VOP2_CLUSTER2: + port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2; + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id); + break; + case ROCKCHIP_VOP2_CLUSTER3: + port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3; + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id); + break; + case ROCKCHIP_VOP2_ESMART0: + port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0; + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id); + break; + case ROCKCHIP_VOP2_ESMART1: + port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1; + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id); + break; + case ROCKCHIP_VOP2_ESMART2: + port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2; + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id); + break; + case ROCKCHIP_VOP2_ESMART3: + port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3; + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id); + break; + case ROCKCHIP_VOP2_SMART0: + port_sel &= ~RK3568_OVL_PORT_SEL__SMART0; + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id); + break; + case ROCKCHIP_VOP2_SMART1: + port_sel &= ~RK3568_OVL_PORT_SEL__SMART1; + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id); + break; + } + + layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7); + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id); + /* + * When we bind a window from layerM to layerN, we also need to move the old + * window on layerN to layerM to avoid one window selected by two or more layers. 
+ */ + layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7); + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id); + } + + vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); + vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); +} + +static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp) +{ + struct vop2 *vop2 = vp->vop2; + struct vop2_win *win; + int i = 0; + u32 cdly = 0, sdly = 0; + + for (i = 0; i < vop2->data->win_size; i++) { + u32 dly; + + win = &vop2->win[i]; + dly = win->delay; + + switch (win->data->phys_id) { + case ROCKCHIP_VOP2_CLUSTER0: + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly); + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly); + break; + case ROCKCHIP_VOP2_CLUSTER1: + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly); + cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly); + break; + case ROCKCHIP_VOP2_ESMART0: + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly); + break; + case ROCKCHIP_VOP2_ESMART1: + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly); + break; + case ROCKCHIP_VOP2_SMART0: + case ROCKCHIP_VOP2_ESMART2: + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly); + break; + case ROCKCHIP_VOP2_SMART1: + case ROCKCHIP_VOP2_ESMART3: + sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly); + break; + } + } + + vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly); + vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly); +} + +static void rk3568_vop2_setup_overlay(struct vop2_video_port *vp) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_crtc *crtc = &vp->crtc; + struct drm_plane *plane; + + vp->win_mask = 0; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + struct vop2_win *win = to_vop2_win(plane); + + win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT]; + + vp->win_mask |= BIT(win->data->phys_id); + + if (vop2_cluster_window(win)) + vop2_setup_cluster_alpha(vop2, win); + } + + if (!vp->win_mask) + return; + + rk3568_vop2_setup_layer_mixer(vp); + vop2_setup_alpha(vp); + rk3568_vop2_setup_dly_for_windows(vp); +} + +static void rk3568_vop2_setup_bg_dly(struct vop2_video_port *vp) +{ + struct drm_crtc *crtc = &vp->crtc; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + u16 hdisplay = mode->crtc_hdisplay; + u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; + u32 bg_dly; + u32 pre_scan_dly; + + bg_dly = vp->data->pre_scan_max_dly[3]; + vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id), + FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly)); + + pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len; + vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); +} + +static const struct vop2_ops rk3568_vop_ops = { + .setup_intf_mux = rk3568_set_intf_mux, + .setup_bg_dly = rk3568_vop2_setup_bg_dly, + .setup_overlay = rk3568_vop2_setup_overlay, +}; + +static const struct vop2_ops rk3588_vop_ops = { + .setup_intf_mux = rk3588_set_intf_mux, + .setup_bg_dly = rk3568_vop2_setup_bg_dly, + .setup_overlay = rk3568_vop2_setup_overlay, +}; + static const struct vop2_data rk3566_vop = { .feature = VOP2_FEATURE_HAS_SYS_GRF, .nr_vps = 3, @@ -655,8 +1632,13 @@ static const struct vop2_data rk3566_vop = { .vp = rk3568_vop_video_ports, .win = rk3568_vop_win_data, .win_size = ARRAY_SIZE(rk3568_vop_win_data), + .cluster_reg = rk3568_vop_cluster_regs, + .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs), + .smart_reg = rk3568_vop_smart_regs, + .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs), .regs_dump = rk3568_regs_dump, .regs_dump_size = 
ARRAY_SIZE(rk3568_regs_dump), + .ops = &rk3568_vop_ops, .soc_id = 3566, }; @@ -668,8 +1650,13 @@ static const struct vop2_data rk3568_vop = { .vp = rk3568_vop_video_ports, .win = rk3568_vop_win_data, .win_size = ARRAY_SIZE(rk3568_vop_win_data), + .cluster_reg = rk3568_vop_cluster_regs, + .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs), + .smart_reg = rk3568_vop_smart_regs, + .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs), .regs_dump = rk3568_regs_dump, .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump), + .ops = &rk3568_vop_ops, .soc_id = 3568, }; @@ -682,8 +1669,13 @@ static const struct vop2_data rk3588_vop = { .vp = rk3588_vop_video_ports, .win = rk3588_vop_win_data, .win_size = ARRAY_SIZE(rk3588_vop_win_data), + .cluster_reg = rk3568_vop_cluster_regs, + .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs), + .smart_reg = rk3568_vop_smart_regs, + .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs), .regs_dump = rk3588_regs_dump, .regs_dump_size = ARRAY_SIZE(rk3588_regs_dump), + .ops = &rk3588_vop_ops, .soc_id = 3588, }; From 145c9b36892a07bf5e2525b4938e1a6cc9b41b7a Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Tue, 18 Feb 2025 19:27:31 +0800 Subject: [PATCH 24/77] drm/rockchip: vop2: Merge vop2_cluster/esmart_init function Now that these two functions share the same logic, they can be merged into one. Signed-off-by: Andy Yan Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250218112744.34433-5-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 42 +++++--------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index efd6b4f91156..1f13b942064e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2434,18 +2434,18 @@ static int vop2_find_rgb_encoder(struct vop2 *vop2) return -ENOENT; } -static int vop2_cluster_init(struct vop2_win *win) +static int vop2_regmap_init(struct vop2_win *win, const struct reg_field *regs, + int nr_regs) { struct vop2 *vop2 = win->vop2; int i; - for (i = 0; i < vop2->data->nr_cluster_regs; i++) { + for (i = 0; i < nr_regs; i++) { const struct reg_field field = { - .reg = (vop2->data->cluster_reg[i].reg != 0xffffffff) ? - vop2->data->cluster_reg[i].reg + win->offset : - vop2->data->cluster_reg[i].reg, - .lsb = vop2->data->cluster_reg[i].lsb, - .msb = vop2->data->cluster_reg[i].msb + .reg = (regs[i].reg != 0xffffffff) ? + regs[i].reg + win->offset : regs[i].reg, + .lsb = regs[i].lsb, + .msb = regs[i].msb }; win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); @@ -2456,28 +2456,6 @@ static int vop2_cluster_init(struct vop2_win *win) return 0; }; -static int vop2_esmart_init(struct vop2_win *win) -{ - struct vop2 *vop2 = win->vop2; - int i; - - for (i = 0; i < vop2->data->nr_smart_regs; i++) { - const struct reg_field field = { - .reg = (vop2->data->smart_reg[i].reg != 0xffffffff) ? 
- vop2->data->smart_reg[i].reg + win->offset : - vop2->data->smart_reg[i].reg, - .lsb = vop2->data->smart_reg[i].lsb, - .msb = vop2->data->smart_reg[i].msb - }; - - win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field); - if (IS_ERR(win->reg[i])) - return PTR_ERR(win->reg[i]); - } - - return 0; -} - static int vop2_win_init(struct vop2 *vop2) { const struct vop2_data *vop2_data = vop2->data; @@ -2494,9 +2472,11 @@ static int vop2_win_init(struct vop2 *vop2) win->win_id = i; win->vop2 = vop2; if (vop2_cluster_window(win)) - ret = vop2_cluster_init(win); + ret = vop2_regmap_init(win, vop2->data->cluster_reg, + vop2->data->nr_cluster_regs); else - ret = vop2_esmart_init(win); + ret = vop2_regmap_init(win, vop2->data->smart_reg, + vop2->data->nr_smart_regs); if (ret) return ret; } From 5439c4f3cb0ec11a3f3cb70be2b019770f6d183c Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Tue, 18 Feb 2025 19:27:32 +0800 Subject: [PATCH 25/77] drm/rockchip: vop2: Support for different layer select configuration between VPs In the upcoming VOP for rk3576, every VP has its own LAYER_SEL register, and the configuration value for the same window may differ between VPs, so extend layer_sel_id to an array so that it can describe the layer select configuration value for each VP. Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250218112744.34433-6-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 4 +-- drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 38 ++++++++++---------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index 904ce2008870..cae211a558bd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -166,9 +166,9 @@ struct vop2_win_data { const unsigned int supported_rotations; /** - * @layer_sel_id: defined by register OVERLAY_LAYER_SEL of VOP2 + * @layer_sel_id: defined by register OVERLAY_LAYER_SEL or PORTn_LAYER_SEL */ - unsigned int layer_sel_id; + unsigned int layer_sel_id[ROCKCHIP_MAX_CRTC]; uint64_t feature; uint8_t axi_bus_id; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 56d72f35b57f..31edde7ae600 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -350,7 +350,8 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .formats = formats_smart, .nformats = ARRAY_SIZE(formats_smart), .format_modifiers = format_modifiers, - .layer_sel_id = 3, + /* 0xf means this layer can't attached to this VP */ + .layer_sel_id = { 3, 3, 3, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, .max_upscale_factor = 8, @@ -363,7 +364,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .nformats = ARRAY_SIZE(formats_smart), .format_modifiers = format_modifiers, .base = 0x1e00, - .layer_sel_id = 7, + .layer_sel_id = { 7, 7, 7, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, .max_upscale_factor = 8, @@ -376,7 +377,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .nformats = ARRAY_SIZE(formats_rk356x_esmart), .format_modifiers = format_modifiers, .base = 0x1a00, - .layer_sel_id = 6, + .layer_sel_id = { 6, 6, 6, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, .type =
DRM_PLANE_TYPE_PRIMARY, .max_upscale_factor = 8, @@ -389,7 +390,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .nformats = ARRAY_SIZE(formats_rk356x_esmart), .format_modifiers = format_modifiers, .base = 0x1800, - .layer_sel_id = 2, + .layer_sel_id = { 2, 2, 2, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, .max_upscale_factor = 8, @@ -402,7 +403,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, - .layer_sel_id = 0, + .layer_sel_id = { 0, 0, 0, 0xf }, .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, .max_upscale_factor = 4, @@ -417,7 +418,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, - .layer_sel_id = 1, + .layer_sel_id = { 1, 1, 1, 0xf }, .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_OVERLAY, @@ -582,7 +583,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, - .layer_sel_id = 0, + .layer_sel_id = { 0, 0, 0, 0 }, .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, .axi_bus_id = 0, @@ -600,7 +601,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, - .layer_sel_id = 1, + .layer_sel_id = { 1, 1, 1, 1 }, .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, @@ -618,7 +619,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, - .layer_sel_id = 4, + .layer_sel_id = { 4, 4, 4, 4 }, .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, @@ -636,7 +637,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, - .layer_sel_id = 5, + .layer_sel_id = { 5, 5, 5, 5 }, .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, @@ -654,7 +655,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, .base = 0x1800, - .layer_sel_id = 2, + .layer_sel_id = { 2, 2, 2, 2 }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_OVERLAY, .axi_bus_id = 0, @@ -670,7 +671,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, .base = 0x1a00, - .layer_sel_id = 3, + .layer_sel_id = { 3, 3, 3, 3 }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_OVERLAY, .axi_bus_id = 0, @@ -686,7 +687,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .formats = formats_esmart, .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, - .layer_sel_id = 6, + .layer_sel_id = { 
6, 6, 6, 6 }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_OVERLAY, .axi_bus_id = 1, @@ -702,7 +703,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, .base = 0x1e00, - .layer_sel_id = 7, + .layer_sel_id = { 7, 7, 7, 7 }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_OVERLAY, .axi_bus_id = 1, @@ -1454,7 +1455,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) */ for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) { layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf; - if (layer_sel_id == win->data->layer_sel_id) + if (layer_sel_id == win->data->layer_sel_id[vp->id]) break; } @@ -1464,7 +1465,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) for (i = 0; i < vop2->data->win_size; i++) { old_win = &vop2->win[i]; layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf; - if (layer_sel_id == old_win->data->layer_sel_id) + if (layer_sel_id == old_win->data->layer_sel_id[vp->id]) break; } @@ -1512,13 +1513,14 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) } layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7); - layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id); + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id[vp->id]); /* * When we bind a window from layerM to layerN, we also need to move the old * window on layerN to layerM to avoid one window selected by two or more layers. */ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7); - layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id); + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, + old_win->data->layer_sel_id[vp->id]); } vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); From 301618ed1d8ab7cfaec39b107eded9f263da2299 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Tue, 18 Feb 2025 19:27:33 +0800 Subject: [PATCH 26/77] drm/rockchip: vop2: Introduce vop hardware version There is a version number hardcoded in the VOP VERSION_INFO register, and the version number increments sequentially based on the production order of the SoC. So using this version number to distinguish different VOP features will simplify the code. 
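As a rough sketch of the resulting pattern (the VOP2_VERSION() macro, the VOP_VERSION_* constants and the vop2->version field all come from the hunks below; the wrapper function itself is only illustrative and not part of the patch):

#define VOP2_VERSION(major, minor, build) ((major) << 24 | (minor) << 16 | (build))
#define VOP_VERSION_RK3588 VOP2_VERSION(0x40, 0x17, 0x6786)

/* Illustrative only: feature checks compare the version read back from
 * RK3568_VERSION_INFO instead of switching on vop2->data->soc_id.
 */
static inline bool vop2_is_rk3588(const struct vop2 *vop2)
{
	return vop2->version == VOP_VERSION_RK3588;
}

Because newer SoCs get larger version values, range checks such as "vop2->version > VOP_VERSION_RK3568" work as well.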
Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250218112744.34433-7-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 26 ++++++++++++++------ drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 11 +++++++++ drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 3 +++ 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 1f13b942064e..bebe5bd70b90 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -355,7 +355,7 @@ static bool vop2_output_uv_swap(u32 bus_format, u32 output_mode) static bool vop2_output_rg_swap(struct vop2 *vop2, u32 bus_format) { - if (vop2->data->soc_id == 3588) { + if (vop2->version == VOP_VERSION_RK3588) { if (bus_format == MEDIA_BUS_FMT_YUV8_1X24 || bus_format == MEDIA_BUS_FMT_YUV10_1X30) return true; @@ -408,7 +408,7 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format, if (modifier == DRM_FORMAT_MOD_INVALID) return false; - if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) { + if (vop2->version == VOP_VERSION_RK3568) { if (vop2_cluster_window(win)) { if (modifier == DRM_FORMAT_MOD_LINEAR) { drm_dbg_kms(vop2->drm, @@ -419,7 +419,7 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format, } if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) { - if (vop2->data->soc_id == 3588) { + if (vop2->version == VOP_VERSION_RK3588) { if (!rockchip_afbc(plane, modifier)) { drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n"); return false; @@ -818,6 +818,7 @@ static void rk3588_vop2_power_domain_enable_all(struct vop2 *vop2) static void vop2_enable(struct vop2 *vop2) { int ret; + u32 version; ret = pm_runtime_resume_and_get(vop2->dev); if (ret < 0) { @@ -837,10 +838,20 @@ static void vop2_enable(struct vop2 *vop2) return; } + version = vop2_readl(vop2, RK3568_VERSION_INFO); + if (version != vop2->version) { + drm_err(vop2->drm, "Hardware version(0x%08x) mismatch\n", version); + return; + } + + /* + * rk3566 share the same vop version with rk3568, so + * we need to use soc_id for identification here. + */ if (vop2->data->soc_id == 3566) vop2_writel(vop2, RK3568_OTP_WIN_EN, 1); - if (vop2->data->soc_id == 3588) + if (vop2->version == VOP_VERSION_RK3588) rk3588_vop2_power_domain_enable_all(vop2); vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN); @@ -921,7 +932,7 @@ static void vop2_vp_dsp_lut_update_enable(struct vop2_video_port *vp) static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2) { - return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568); + return vop2->version != VOP_VERSION_RK3568; } static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp) @@ -1263,7 +1274,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, &fb->format->format, afbc_en ? 
"AFBC" : "", &yrgb_mst); - if (vop2->data->soc_id > 3568) { + if (vop2->version > VOP_VERSION_RK3568) { vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id); vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id); vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id); @@ -1323,7 +1334,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, * this bit is gating disable, we should write 1 to * disable gating when enable afbc. */ - if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568) + if (vop2->version == VOP_VERSION_RK3568) vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0); else vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1); @@ -2534,6 +2545,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) vop2->dev = dev; vop2->data = vop2_data; vop2->ops = vop2_data->ops; + vop2->version = vop2_data->version; vop2->drm = drm; dev_set_drvdata(dev, vop2); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index cae211a558bd..a309042aa8e6 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -13,6 +13,15 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" +#define VOP2_VERSION(major, minor, build) ((major) << 24 | (minor) << 16 | (build)) + +/* The VOP version of new SoC is bigger than the old */ +#define VOP_VERSION_RK3568 VOP2_VERSION(0x40, 0x15, 0x8023) +#define VOP_VERSION_RK3588 VOP2_VERSION(0x40, 0x17, 0x6786) +#define VOP_VERSION_RK3528 VOP2_VERSION(0x50, 0x17, 0x1263) +#define VOP_VERSION_RK3562 VOP2_VERSION(0x50, 0x17, 0x4350) +#define VOP_VERSION_RK3576 VOP2_VERSION(0x50, 0x19, 0x9765) + #define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0) #define VOP2_FEATURE_HAS_SYS_GRF BIT(0) @@ -243,6 +252,7 @@ struct vop2_ops { struct vop2_data { u8 nr_vps; u64 feature; + u32 version; const struct vop2_ops *ops; const struct vop2_win_data *win; const struct vop2_video_port_data *vp; @@ -260,6 +270,7 @@ struct vop2_data { }; struct vop2 { + u32 version; struct device *dev; struct drm_device *drm; struct vop2_video_port vps[ROCKCHIP_MAX_CRTC]; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 31edde7ae600..0afef24db144 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -1627,6 +1627,7 @@ static const struct vop2_ops rk3588_vop_ops = { }; static const struct vop2_data rk3566_vop = { + .version = VOP_VERSION_RK3568, .feature = VOP2_FEATURE_HAS_SYS_GRF, .nr_vps = 3, .max_input = { 4096, 2304 }, @@ -1645,6 +1646,7 @@ static const struct vop2_data rk3566_vop = { }; static const struct vop2_data rk3568_vop = { + .version = VOP_VERSION_RK3568, .feature = VOP2_FEATURE_HAS_SYS_GRF, .nr_vps = 3, .max_input = { 4096, 2304 }, @@ -1663,6 +1665,7 @@ static const struct vop2_data rk3568_vop = { }; static const struct vop2_data rk3588_vop = { + .version = VOP_VERSION_RK3588, .feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF | VOP2_FEATURE_HAS_VOP_GRF | VOP2_FEATURE_HAS_SYS_PMU, .nr_vps = 4, From db505ff68c122734ed0e944f9f8430eb89947fdb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 28 Feb 2025 08:32:47 +0000 Subject: [PATCH 27/77] drm/bridge: Fix spelling mistake "gettin" -> "getting" There is a spelling mistake in a dev_err message. Fix it. 
Signed-off-by: Colin Ian King Fixes: ff5781634c41 ("drm/bridge: sii902x: Implement HDMI audio support") Reviewed-by: Dmitry Baryshkov Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20250228083248.676473-1-colin.i.king@gmail.com --- drivers/gpu/drm/bridge/sii902x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 2100a687096e..914a2609a685 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -887,7 +887,7 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x, lanes[0] = 0; } else if (num_lanes < 0) { dev_err(dev, - "%s: Error gettin \"sil,i2s-data-lanes\": %d\n", + "%s: Error getting \"sil,i2s-data-lanes\": %d\n", __func__, num_lanes); return num_lanes; } From 30188df0c387d9837562bbcc38ede98f6a1f0d46 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 20 Feb 2025 14:25:37 +0100 Subject: [PATCH 28/77] drm/tests: Drop drm_kunit_helper_acquire_ctx_alloc() lockdep complains when a lock is released in a separate thread the lock is taken in, and it turns out that kunit does run its actions in a separate thread than the test ran in. This means that drm_kunit_helper_acquire_ctx_alloc() just cannot work as it's supposed to, so let's just get rid of it. Suggested-by: Simona Vetter Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250220132537.2834168-1-mripard@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/drm_atomic_state_test.c | 24 +- .../drm/tests/drm_hdmi_state_helper_test.c | 254 +++++++++++------- drivers/gpu/drm/tests/drm_kunit_helpers.c | 41 --- .../gpu/drm/vc4/tests/vc4_test_pv_muxing.c | 46 ++-- include/drm/drm_kunit_helpers.h | 2 - 5 files changed, 191 insertions(+), 176 deletions(-) diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c index 5945c3298901..2f6ac7a09f44 100644 --- a/drivers/gpu/drm/tests/drm_atomic_state_test.c +++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c @@ -189,7 +189,7 @@ static int set_up_atomic_state(struct kunit *test, static void drm_test_check_connector_changed_modeset(struct kunit *test) { struct drm_atomic_test_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector *old_conn, *new_conn; struct drm_atomic_state *state; struct drm_device *drm; @@ -203,14 +203,13 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test) old_conn = &priv->connectors[0]; new_conn = &priv->connectors[1]; - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); // first modeset to enable - ret = set_up_atomic_state(test, priv, old_conn, ctx); + ret = set_up_atomic_state(test, priv, old_conn, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); new_conn_state = drm_atomic_get_connector_state(state, new_conn); @@ -231,6 +230,9 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test) ret = drm_atomic_commit(state); KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_ASSERT_EQ(test, modeset_counter, initial_modeset_count + 1); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -263,7 +265,7 @@ static void drm_test_check_valid_clones(struct kunit *test) int ret; const struct 
drm_clone_mode_test *param = test->param_value; struct drm_atomic_test_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_device *drm; struct drm_atomic_state *state; struct drm_crtc_state *crtc_state; @@ -273,13 +275,12 @@ static void drm_test_check_valid_clones(struct kunit *test) drm = &priv->drm; - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = set_up_atomic_state(test, priv, NULL, ctx); + ret = set_up_atomic_state(test, priv, NULL, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); crtc_state = drm_atomic_get_crtc_state(state, priv->crtc); @@ -292,6 +293,9 @@ static void drm_test_check_valid_clones(struct kunit *test) ret = drm_atomic_helper_check_modeset(drm, state); KUNIT_ASSERT_EQ(test, ret, param->expected_result); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } static void drm_check_in_clone_mode_desc(const struct drm_clone_mode_test *t, diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index 23ecc00accb2..e97efd3af9ed 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -273,7 +273,7 @@ drm_kunit_helper_connector_hdmi_init(struct kunit *test, static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; struct drm_crtc_state *crtc_state; @@ -296,13 +296,12 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); new_conn_state = drm_atomic_get_connector_state(state, conn); @@ -327,6 +326,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) crtc_state = drm_atomic_get_new_crtc_state(state, crtc); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -337,7 +339,7 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; struct drm_crtc_state *crtc_state; @@ -360,13 +362,12 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - ctx = 
drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); new_conn_state = drm_atomic_get_connector_state(state, conn); @@ -393,6 +394,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes crtc_state = drm_atomic_get_new_crtc_state(state, crtc); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -403,7 +407,7 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_atomic_state *state; struct drm_display_mode *preferred; @@ -426,13 +430,12 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); conn_state = drm_atomic_get_connector_state(state, conn); @@ -449,6 +452,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -459,7 +465,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_atomic_state *state; struct drm_display_mode *mode; @@ -477,17 +483,16 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) conn = &priv->connector; KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); conn_state = drm_atomic_get_connector_state(state, conn); @@ -504,6 +509,9 @@ static void 
drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -514,7 +522,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_atomic_state *state; struct drm_display_mode *preferred; @@ -537,13 +545,12 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); conn_state = drm_atomic_get_connector_state(state, conn); @@ -562,6 +569,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) DRM_HDMI_BROADCAST_RGB_FULL); KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -572,7 +582,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_atomic_state *state; struct drm_display_mode *mode; @@ -590,17 +600,16 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) conn = &priv->connector; KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); conn_state = drm_atomic_get_connector_state(state, conn); @@ -619,6 +628,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) DRM_HDMI_BROADCAST_RGB_FULL); KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -629,7 +641,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_atomic_state *state; struct drm_display_mode *preferred; @@ -652,13 
+664,12 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); conn_state = drm_atomic_get_connector_state(state, conn); @@ -677,6 +688,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) DRM_HDMI_BROADCAST_RGB_LIMITED); KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -687,7 +701,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_atomic_state *state; struct drm_display_mode *mode; @@ -705,17 +719,16 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te conn = &priv->connector; KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); conn_state = drm_atomic_get_connector_state(state, conn); @@ -734,6 +747,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te DRM_HDMI_BROADCAST_RGB_LIMITED); KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -744,7 +760,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; struct drm_crtc_state *crtc_state; @@ -771,13 +787,12 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); new_conn_state = 
drm_atomic_get_connector_state(state, conn); @@ -808,6 +823,9 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) crtc_state = drm_atomic_get_new_crtc_state(state, crtc); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -818,7 +836,7 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; struct drm_crtc_state *crtc_state; @@ -845,13 +863,12 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); new_conn_state = drm_atomic_get_connector_state(state, conn); @@ -880,6 +897,9 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) crtc_state = drm_atomic_get_new_crtc_state(state, crtc); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -889,7 +909,7 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) static void drm_test_check_output_bpc_dvi(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -919,10 +939,9 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -930,6 +949,9 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -939,7 +961,7 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_mode *preferred; struct drm_connector *conn; @@ -964,10 +986,9 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, 
preferred->flags & DRM_MODE_FLAG_DBLCLK); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -976,6 +997,9 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 8); KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1000); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -986,7 +1010,7 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_mode *preferred; struct drm_connector *conn; @@ -1011,10 +1035,9 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1023,6 +1046,9 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 10); KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1033,7 +1059,7 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_mode *preferred; struct drm_connector *conn; @@ -1058,10 +1084,9 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1070,6 +1095,9 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 12); KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1500); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1083,7 +1111,7 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) { struct 
drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct drm_display_mode *preferred; struct drm_crtc_state *crtc_state; @@ -1104,16 +1132,15 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); /* You shouldn't be doing that at home. */ conn->hdmi.funcs = &reject_connector_hdmi_funcs; - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); crtc_state = drm_atomic_get_crtc_state(state, crtc); @@ -1123,6 +1150,9 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) ret = drm_atomic_check_only(state); KUNIT_EXPECT_LT(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1139,7 +1169,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -1176,10 +1206,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1188,6 +1217,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1206,7 +1238,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -1248,10 +1280,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1259,6 
+1290,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1269,7 +1303,7 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *mode; @@ -1310,11 +1344,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) rate = mode->clock * 1500; KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, ctx); + ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1322,6 +1355,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1331,7 +1367,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -1376,10 +1412,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1387,6 +1422,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1396,7 +1434,7 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -1443,10 +1481,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + 
drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1454,6 +1491,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1464,7 +1504,7 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -1501,10 +1541,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1512,6 +1551,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -1522,7 +1564,7 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_display_info *info; struct drm_display_mode *preferred; @@ -1561,10 +1603,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1572,13 +1613,16 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8); KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* Test that atomic check succeeds when disabling a connector. 
*/ static void drm_test_check_disable_connector(struct kunit *test) { struct drm_atomic_helper_connector_hdmi_priv *priv; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; struct drm_atomic_state *state; @@ -1593,8 +1637,7 @@ static void drm_test_check_disable_connector(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); conn = &priv->connector; preferred = find_preferred_mode(conn); @@ -1602,10 +1645,10 @@ static void drm_test_check_disable_connector(struct kunit *test) drm = &priv->drm; crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); crtc_state = drm_atomic_get_crtc_state(state, crtc); @@ -1623,6 +1666,9 @@ static void drm_test_check_disable_connector(struct kunit *test) ret = drm_atomic_check_only(state); KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = { diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index 3c0b7824c0be..a4eb68f0decc 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -80,47 +80,6 @@ __drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test, } EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver); -static void action_drm_release_context(void *ptr) -{ - struct drm_modeset_acquire_ctx *ctx = ptr; - - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); -} - -/** - * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context - * @test: The test context object - * - * Allocates and initializes a modeset acquire context. - * - * The context is tied to the kunit test context, so we must not call - * drm_modeset_acquire_fini() on it, it will be done so automatically. 
- * - * Returns: - * An ERR_PTR on error, a pointer to the newly allocated context otherwise - */ -struct drm_modeset_acquire_ctx * -drm_kunit_helper_acquire_ctx_alloc(struct kunit *test) -{ - struct drm_modeset_acquire_ctx *ctx; - int ret; - - ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); - KUNIT_ASSERT_NOT_NULL(test, ctx); - - drm_modeset_acquire_init(ctx, 0); - - ret = kunit_add_action_or_reset(test, - action_drm_release_context, - ctx); - if (ret) - return ERR_PTR(ret); - - return ctx; -} -EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc); - static void kunit_action_drm_atomic_state_put(void *ptr) { struct drm_atomic_state *state = ptr; diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c index 40a05869a50e..992e8f5c5c6e 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c +++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c @@ -724,7 +724,7 @@ static void drm_vc4_test_pv_muxing_invalid(struct kunit *test) static int vc4_pv_muxing_test_init(struct kunit *test) { const struct pv_muxing_param *params = test->param_value; - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct pv_muxing_priv *priv; struct drm_device *drm; struct vc4_dev *vc4; @@ -737,13 +737,15 @@ static int vc4_pv_muxing_test_init(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); priv->vc4 = vc4; - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; - priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state); + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return 0; } @@ -782,7 +784,7 @@ static struct kunit_suite vc5_pv_muxing_test_suite = { */ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test) { - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct vc4_crtc_state *new_vc4_crtc_state; struct vc4_hvs_state *new_hvs_state; @@ -795,11 +797,10 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes vc4 = vc5_mock_device(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); @@ -822,7 +823,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes ret = drm_atomic_helper_swap_state(state, false); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); @@ -843,6 +844,9 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use); KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -854,7 +858,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes 
*/ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) { - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct vc4_crtc_state *new_vc4_crtc_state; struct vc4_hvs_state *new_hvs_state; @@ -867,11 +871,10 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) vc4 = vc5_mock_device(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); @@ -905,7 +908,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) ret = drm_atomic_helper_swap_state(state, false); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0); @@ -929,6 +932,9 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel); } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } /* @@ -949,7 +955,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test) { - struct drm_modeset_acquire_ctx *ctx; + struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct vc4_crtc_state *new_vc4_crtc_state; struct drm_device *drm; @@ -959,11 +965,10 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku vc4 = vc5_mock_device(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); @@ -975,7 +980,7 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku ret = drm_atomic_helper_swap_state(state, false); KUNIT_ASSERT_EQ(test, ret, 0); - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); @@ -987,6 +992,9 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state, VC4_ENCODER_TYPE_HDMI0); KUNIT_EXPECT_NULL(test, new_vc4_crtc_state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } static struct kunit_case vc5_pv_muxing_bugs_tests[] = { diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index afdd46ef04f7..11d59ce0bac0 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -95,8 +95,6 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, sizeof(_type), \ offsetof(_type, _member), \ _feat)) -struct drm_modeset_acquire_ctx * 
-drm_kunit_helper_acquire_ctx_alloc(struct kunit *test); struct drm_atomic_state * drm_kunit_helper_atomic_state_alloc(struct kunit *test, From 16e57a72780931c3c70dbc928aeee4a0518075de Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 28 Feb 2025 12:38:17 +0300 Subject: [PATCH 29/77] drm/vc4: hdmi: Fix some NULL vs IS_ERR() bugs The devm_platform_ioremap_resource_byname() function doesn't return NULL, it returns error pointers. Update the checking to match. Fixes: b93f07cf090a ("drm/vc4: move to devm_platform_ioremap_resource() usage") Signed-off-by: Dan Carpenter Link: https://patchwork.freedesktop.org/patch/msgid/a952e2b4-d4b8-49ac-abd9-9967c50f4a80@stanley.mountain Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vc4/vc4_hdmi.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index d20e5c53ba75..37238a12baa5 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -2928,8 +2928,8 @@ static int vc5_hdmi_init_resources(struct drm_device *drm, vc4_hdmi->hdmicore_regs = devm_platform_ioremap_resource_byname(pdev, "hdmi"); - if (!vc4_hdmi->hdmicore_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->hdmicore_regs)) + return PTR_ERR(vc4_hdmi->hdmicore_regs); /* This is shared between both HDMI controllers. Cannot * claim for both instances. Lets not convert to using @@ -2946,33 +2946,33 @@ static int vc5_hdmi_init_resources(struct drm_device *drm, vc4_hdmi->cec_regs = devm_platform_ioremap_resource_byname(pdev, "cec"); - if (!vc4_hdmi->cec_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->cec_regs)) + return PTR_ERR(vc4_hdmi->cec_regs); vc4_hdmi->csc_regs = devm_platform_ioremap_resource_byname(pdev, "csc"); - if (!vc4_hdmi->csc_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->csc_regs)) + return PTR_ERR(vc4_hdmi->csc_regs); vc4_hdmi->dvp_regs = devm_platform_ioremap_resource_byname(pdev, "dvp"); - if (!vc4_hdmi->dvp_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->dvp_regs)) + return PTR_ERR(vc4_hdmi->dvp_regs); vc4_hdmi->phy_regs = devm_platform_ioremap_resource_byname(pdev, "phy"); - if (!vc4_hdmi->phy_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->phy_regs)) + return PTR_ERR(vc4_hdmi->phy_regs); vc4_hdmi->ram_regs = devm_platform_ioremap_resource_byname(pdev, "packet"); - if (!vc4_hdmi->ram_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->ram_regs)) + return PTR_ERR(vc4_hdmi->ram_regs); vc4_hdmi->rm_regs = devm_platform_ioremap_resource_byname(pdev, "rm"); - if (!vc4_hdmi->rm_regs) - return -ENOMEM; + if (IS_ERR(vc4_hdmi->rm_regs)) + return PTR_ERR(vc4_hdmi->rm_regs); vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); if (IS_ERR(vc4_hdmi->hsm_clock)) { From c9043706cb11b8005e145debe0a3211acd08e2c1 Mon Sep 17 00:00:00 2001 From: Kerem Karabay Date: Wed, 26 Feb 2025 16:03:47 +0000 Subject: [PATCH 30/77] drm/format-helper: Add conversion from XRGB8888 to BGR888 Add XRGB8888 emulation helper for devices that only support BGR888. 
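
As a point of reference, the per-pixel mapping the new helper performs is
simple enough to sketch stand-alone (illustrative only, not part of the
patch; the helper name below is made up): an XRGB8888 pixel is a
little-endian 32-bit word with red in bits 23:16, green in bits 15:8 and
blue in bits 7:0, and BGR888 stores the red, green and blue bytes in that
order in memory:

	/* Illustrative sketch of the per-pixel XRGB8888 -> BGR888 conversion. */
	static void xrgb8888_to_bgr888_pixel(u32 pix, u8 out[3])
	{
		out[0] = (pix >> 16) & 0xff;	/* red   */
		out[1] = (pix >> 8) & 0xff;	/* green */
		out[2] = pix & 0xff;		/* blue  */
	}

A pure-red pixel 0xffff0000 therefore becomes { 0xff, 0x00, 0x00 } in
memory, which is what the new KUnit expectations below check. Drivers can
call drm_fb_xrgb8888_to_bgr888() directly per damage clip or go through
drm_fb_blit() with DRM_FORMAT_BGR888 as the destination format.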
Signed-off-by: Kerem Karabay Signed-off-by: Aditya Garg Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/9A67EA95-9BC7-4D56-8F87-05EAC1C166AD@live.com --- drivers/gpu/drm/drm_format_helper.c | 54 +++++++++++++ .../gpu/drm/tests/drm_format_helper_test.c | 81 +++++++++++++++++++ include/drm/drm_format_helper.h | 3 + 3 files changed, 138 insertions(+) diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index ecb278b63e8c..01d3ab307ac3 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -702,6 +702,57 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); +static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels) +{ + u8 *dbuf8 = dbuf; + const __le32 *sbuf32 = sbuf; + unsigned int x; + u32 pix; + + for (x = 0; x < pixels; x++) { + pix = le32_to_cpu(sbuf32[x]); + /* write red-green-blue to output in little endianness */ + *dbuf8++ = (pix & 0x00ff0000) >> 16; + *dbuf8++ = (pix & 0x0000ff00) >> 8; + *dbuf8++ = (pix & 0x000000ff) >> 0; + } +} + +/** + * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer + * @dst: Array of BGR888 destination buffers + * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines + * within @dst; can be NULL if scanlines are stored next to each other. + * @src: Array of XRGB8888 source buffers + * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy + * @state: Transform and conversion state + * + * This function copies parts of a framebuffer to display memory and converts the + * color format during the process. Destination and framebuffer formats must match. The + * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at + * least as many entries as there are planes in @fb's format. Each entry stores the + * value for the format's respective color plane at the same index. + * + * This function does not apply clipping on @dst (i.e. the destination is at the + * top-left corner). + * + * Drivers can use this function for BGR888 devices that don't natively + * support XRGB8888. 
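+ *
+ * A minimal calling sketch (illustrative only): a shadow-plane driver can
+ * convert each damage clip in its plane-update path, e.g.
+ *
+ *	drm_fb_xrgb8888_to_bgr888(&dst, NULL, &shadow_plane_state->data[0],
+ *				  fb, &damage, &shadow_plane_state->fmtcnv_state);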
+ */ +void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state) +{ + static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { + 3, + }; + + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, + drm_fb_xrgb8888_to_bgr888_line); +} +EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888); + static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels) { __le32 *dbuf32 = dbuf; @@ -1104,6 +1155,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d } else if (dst_format == DRM_FORMAT_RGB888) { drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); return 0; + } else if (dst_format == DRM_FORMAT_BGR888) { + drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); + return 0; } else if (dst_format == DRM_FORMAT_ARGB8888) { drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); return 0; diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 08992636ec05..35cd3405d045 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -60,6 +60,11 @@ struct convert_to_rgb888_result { const u8 expected[TEST_BUF_SIZE]; }; +struct convert_to_bgr888_result { + unsigned int dst_pitch; + const u8 expected[TEST_BUF_SIZE]; +}; + struct convert_to_argb8888_result { unsigned int dst_pitch; const u32 expected[TEST_BUF_SIZE]; @@ -107,6 +112,7 @@ struct convert_xrgb8888_case { struct convert_to_argb1555_result argb1555_result; struct convert_to_rgba5551_result rgba5551_result; struct convert_to_rgb888_result rgb888_result; + struct convert_to_bgr888_result bgr888_result; struct convert_to_argb8888_result argb8888_result; struct convert_to_xrgb2101010_result xrgb2101010_result; struct convert_to_argb2101010_result argb2101010_result; @@ -151,6 +157,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { 0x00, 0x00, 0xFF }, }, + .bgr888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0xFF, 0x00, 0x00 }, + }, .argb8888_result = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { 0xFFFF0000 }, @@ -217,6 +227,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { 0x00, 0x00, 0xFF }, }, + .bgr888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0xFF, 0x00, 0x00 }, + }, .argb8888_result = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { 0xFFFF0000 }, @@ -330,6 +344,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, }, }, + .bgr888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { + 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, + 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00, + 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, + 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, + }, + }, .argb8888_result = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { @@ -468,6 +491,17 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, }, + .bgr888_result = { + .dst_pitch = 15, + .expected = { + 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 
0x44, 0x9C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, .argb8888_result = { .dst_pitch = 20, .expected = { @@ -914,6 +948,52 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } +static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_bgr888_result *result = ¶ms->bgr888_result; + size_t dst_size; + u8 *buf = NULL; + __le32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch, + ¶ms->clip, 0); + KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + /* + * BGR888 expected results are already in little-endian + * order, so there's no need to convert the test output. + */ + drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, + &fmtcnv_state); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, ¶ms->clip, + &fmtcnv_state); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); +} + static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) { const struct convert_xrgb8888_case *params = test->param_value; @@ -1851,6 +1931,7 @@ static struct kunit_case drm_format_helper_test_cases[] = { KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params), diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index a1347e47e9d5..d8539174ca11 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -96,6 +96,9 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, const struct drm_rect *clip, struct drm_format_conv_state *state); From 0670c2f56e45b3f4541985a9ebe06d04308e43b0 Mon Sep 17 00:00:00 2001 From: Kerem Karabay Date: Wed, 26 Feb 2025 
16:04:36 +0000 Subject: [PATCH 31/77] drm/tiny: add driver for Apple Touch Bars in x86 Macs The Touch Bars found on x86 Macs support two USB configurations: one where the device presents itself as a HID keyboard and can display predefined sets of keys, and one where the operating system has full control over what is displayed. This commit adds support for the display functionality of the second configuration. Functionality for the first configuration has been merged in the HID tree. Note that this driver has only been tested on T2 Macs, and only includes the USB device ID for these devices. Testing on T1 Macs would be appreciated. Credit goes to Ben (Bingxing) Wang on GitHub for reverse engineering most of the protocol. Also, as requested by Andy, I would like to clarify the use of __packed structs in this driver: - All the packed structs are aligned except for appletbdrm_msg_information. - We have to pack appletbdrm_msg_information since it is requirement of the protocol. - We compared binaries compiled by keeping the rest structs __packed and not __packed using bloat-o-meter, and __packed was not affecting code generation. - To maintain consistency, rest structs have been kept __packed. I would also like to point out that since the driver was reverse-engineered the actual data types of the protocol might be different, including, but not limited to, endianness. Link: https://github.com/imbushuo/DFRDisplayKm Signed-off-by: Kerem Karabay Co-developed-by: Atharva Tiwari Signed-off-by: Atharva Tiwari Co-developed-by: Aditya Garg Signed-off-by: Aditya Garg Signed-off-by: Aun-Ali Zaidi Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/FCAC702C-F84A-47F9-8C78-BBBB34D08500@live.com --- MAINTAINERS | 8 + drivers/gpu/drm/tiny/Kconfig | 12 + drivers/gpu/drm/tiny/Makefile | 1 + drivers/gpu/drm/tiny/appletbdrm.c | 841 ++++++++++++++++++++++++++++++ 4 files changed, 862 insertions(+) create mode 100644 drivers/gpu/drm/tiny/appletbdrm.c diff --git a/MAINTAINERS b/MAINTAINERS index 1d8a7937bb1d..2b20daaf7077 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7149,6 +7149,14 @@ S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/gpu/drm/sun4i/sun8i* +DRM DRIVER FOR APPLE TOUCH BARS +M: Aun-Ali Zaidi +M: Aditya Garg +L: dri-devel@lists.freedesktop.org +S: Maintained +T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: drivers/gpu/drm/tiny/appletbdrm.c + DRM DRIVER FOR ARM PL111 CLCD M: Linus Walleij S: Maintained diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 94cbdb1337c0..54c84c9801c1 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -1,5 +1,17 @@ # SPDX-License-Identifier: GPL-2.0-only +config DRM_APPLETBDRM + tristate "DRM support for Apple Touch Bars" + depends on DRM && USB && MMU + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + help + Say Y here if you want support for the display of Touch Bars on x86 + MacBook Pros. + + To compile this driver as a module, choose M here: the + module will be called appletbdrm. 
+ config DRM_ARCPGU tristate "ARC PGU" depends on DRM && OF diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 60816d2eb4ff..0a3a7837a58b 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DRM_APPLETBDRM) += appletbdrm.o obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o obj-$(CONFIG_DRM_BOCHS) += bochs.o obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c new file mode 100644 index 000000000000..f5d177e234e4 --- /dev/null +++ b/drivers/gpu/drm/tiny/appletbdrm.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple Touch Bar DRM Driver + * + * Copyright (c) 2023 Kerem Karabay + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define APPLETBDRM_PIXEL_FORMAT cpu_to_le32(0x52474241) /* RGBA, the actual format is BGR888 */ +#define APPLETBDRM_BITS_PER_PIXEL 24 + +#define APPLETBDRM_MSG_CLEAR_DISPLAY cpu_to_le32(0x434c5244) /* CLRD */ +#define APPLETBDRM_MSG_GET_INFORMATION cpu_to_le32(0x47494e46) /* GINF */ +#define APPLETBDRM_MSG_UPDATE_COMPLETE cpu_to_le32(0x5544434c) /* UDCL */ +#define APPLETBDRM_MSG_SIGNAL_READINESS cpu_to_le32(0x52454459) /* REDY */ + +#define APPLETBDRM_BULK_MSG_TIMEOUT 1000 + +#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm) +#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev)) + +struct appletbdrm_msg_request_header { + __le16 unk_00; + __le16 unk_02; + __le32 unk_04; + __le32 unk_08; + __le32 size; +} __packed; + +struct appletbdrm_msg_response_header { + u8 unk_00[16]; + __le32 msg; +} __packed; + +struct appletbdrm_msg_simple_request { + struct appletbdrm_msg_request_header header; + __le32 msg; + u8 unk_14[8]; + __le32 size; +} __packed; + +struct appletbdrm_msg_information { + struct appletbdrm_msg_response_header header; + u8 unk_14[12]; + __le32 width; + __le32 height; + u8 bits_per_pixel; + __le32 bytes_per_row; + __le32 orientation; + __le32 bitmap_info; + __le32 pixel_format; + __le32 width_inches; /* floating point */ + __le32 height_inches; /* floating point */ +} __packed; + +struct appletbdrm_frame { + __le16 begin_x; + __le16 begin_y; + __le16 width; + __le16 height; + __le32 buf_size; + u8 buf[]; +} __packed; + +struct appletbdrm_fb_request_footer { + u8 unk_00[12]; + __le32 unk_0c; + u8 unk_10[12]; + __le32 unk_1c; + __le64 timestamp; + u8 unk_28[12]; + __le32 unk_34; + u8 unk_38[20]; + __le32 unk_4c; +} __packed; + +struct appletbdrm_fb_request { + struct appletbdrm_msg_request_header header; + __le16 unk_10; + u8 msg_id; + u8 unk_13[29]; + /* + * Contents of `data`: + * - struct appletbdrm_frame frames[]; + * - struct appletbdrm_fb_request_footer footer; + * - padding to make the total size a multiple of 16 + */ + u8 data[]; +} __packed; + +struct appletbdrm_fb_request_response { + struct appletbdrm_msg_response_header header; + u8 unk_14[12]; + __le64 timestamp; +} __packed; + +struct appletbdrm_device { + struct device *dmadev; + + unsigned int in_ep; + unsigned int out_ep; + + unsigned int width; + unsigned int height; + + struct drm_device drm; + struct drm_display_mode mode; + struct drm_connector connector; + struct drm_plane primary_plane; + struct drm_crtc 
crtc; + struct drm_encoder encoder; +}; + +struct appletbdrm_plane_state { + struct drm_shadow_plane_state base; + struct appletbdrm_fb_request *request; + struct appletbdrm_fb_request_response *response; + size_t request_size; + size_t frames_size; +}; + +static inline struct appletbdrm_plane_state *to_appletbdrm_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct appletbdrm_plane_state, base.base); +} + +static int appletbdrm_send_request(struct appletbdrm_device *adev, + struct appletbdrm_msg_request_header *request, size_t size) +{ + struct usb_device *udev = adev_to_udev(adev); + struct drm_device *drm = &adev->drm; + int ret, actual_size; + + ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, adev->out_ep), + request, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT); + if (ret) { + drm_err(drm, "Failed to send message (%d)\n", ret); + return ret; + } + + if (actual_size != size) { + drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", + actual_size, size); + return -EIO; + } + + return 0; +} + +static int appletbdrm_read_response(struct appletbdrm_device *adev, + struct appletbdrm_msg_response_header *response, + size_t size, __le32 expected_response) +{ + struct usb_device *udev = adev_to_udev(adev); + struct drm_device *drm = &adev->drm; + int ret, actual_size; + bool readiness_signal_received = false; + +retry: + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, adev->in_ep), + response, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT); + if (ret) { + drm_err(drm, "Failed to read response (%d)\n", ret); + return ret; + } + + /* + * The device responds to the first request sent in a particular + * timeframe after the USB device configuration is set with a readiness + * signal, in which case the response should be read again + */ + if (response->msg == APPLETBDRM_MSG_SIGNAL_READINESS) { + if (!readiness_signal_received) { + readiness_signal_received = true; + goto retry; + } + + drm_err(drm, "Encountered unexpected readiness signal\n"); + return -EINTR; + } + + if (actual_size != size) { + drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", + actual_size, size); + return -EBADMSG; + } + + if (response->msg != expected_response) { + drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n", + &expected_response, &response->msg); + return -EIO; + } + + return 0; +} + +static int appletbdrm_send_msg(struct appletbdrm_device *adev, __le32 msg) +{ + struct appletbdrm_msg_simple_request *request; + int ret; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (!request) + return -ENOMEM; + + request->header.unk_00 = cpu_to_le16(2); + request->header.unk_02 = cpu_to_le16(0x1512); + request->header.size = cpu_to_le32(sizeof(*request) - sizeof(request->header)); + request->msg = msg; + request->size = request->header.size; + + ret = appletbdrm_send_request(adev, &request->header, sizeof(*request)); + + kfree(request); + + return ret; +} + +static int appletbdrm_clear_display(struct appletbdrm_device *adev) +{ + return appletbdrm_send_msg(adev, APPLETBDRM_MSG_CLEAR_DISPLAY); +} + +static int appletbdrm_signal_readiness(struct appletbdrm_device *adev) +{ + return appletbdrm_send_msg(adev, APPLETBDRM_MSG_SIGNAL_READINESS); +} + +static int appletbdrm_get_information(struct appletbdrm_device *adev) +{ + struct appletbdrm_msg_information *info; + struct drm_device *drm = &adev->drm; + u8 bits_per_pixel; + __le32 pixel_format; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + 
ret = appletbdrm_send_msg(adev, APPLETBDRM_MSG_GET_INFORMATION); + if (ret) + return ret; + + ret = appletbdrm_read_response(adev, &info->header, sizeof(*info), + APPLETBDRM_MSG_GET_INFORMATION); + if (ret) + goto free_info; + + bits_per_pixel = info->bits_per_pixel; + pixel_format = get_unaligned(&info->pixel_format); + + adev->width = get_unaligned_le32(&info->width); + adev->height = get_unaligned_le32(&info->height); + + if (bits_per_pixel != APPLETBDRM_BITS_PER_PIXEL) { + drm_err(drm, "Encountered unexpected bits per pixel value (%d)\n", bits_per_pixel); + ret = -EINVAL; + goto free_info; + } + + if (pixel_format != APPLETBDRM_PIXEL_FORMAT) { + drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format); + ret = -EINVAL; + goto free_info; + } + +free_info: + kfree(info); + + return ret; +} + +static u32 rect_size(struct drm_rect *rect) +{ + return drm_rect_width(rect) * drm_rect_height(rect) * + (BITS_TO_BYTES(APPLETBDRM_BITS_PER_PIXEL)); +} + +static int appletbdrm_connector_helper_get_modes(struct drm_connector *connector) +{ + struct appletbdrm_device *adev = drm_to_adev(connector->dev); + + return drm_connector_helper_get_modes_fixed(connector, &adev->mode); +} + +static const u32 appletbdrm_primary_plane_formats[] = { + DRM_FORMAT_BGR888, + DRM_FORMAT_XRGB8888, /* emulated */ +}; + +static int appletbdrm_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(new_plane_state); + struct drm_atomic_helper_damage_iter iter; + struct drm_rect damage; + size_t frames_size = 0; + size_t request_size; + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; + + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, new_plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + frames_size += struct_size((struct appletbdrm_frame *)0, buf, rect_size(&damage)); + } + + if (!frames_size) + return 0; + + request_size = ALIGN(sizeof(struct appletbdrm_fb_request) + + frames_size + + sizeof(struct appletbdrm_fb_request_footer), 16); + + appletbdrm_state->request = kzalloc(request_size, GFP_KERNEL); + + if (!appletbdrm_state->request) + return -ENOMEM; + + appletbdrm_state->response = kzalloc(sizeof(*appletbdrm_state->response), GFP_KERNEL); + + if (!appletbdrm_state->response) + return -ENOMEM; + + appletbdrm_state->request_size = request_size; + appletbdrm_state->frames_size = frames_size; + + return 0; +} + +static int appletbdrm_flush_damage(struct appletbdrm_device *adev, + struct drm_plane_state *old_state, + struct drm_plane_state *state) +{ + struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); + struct appletbdrm_fb_request_response *response = appletbdrm_state->response; + struct appletbdrm_fb_request_footer *footer; + struct drm_atomic_helper_damage_iter iter; + struct drm_framebuffer *fb = 
state->fb; + struct appletbdrm_fb_request *request = appletbdrm_state->request; + struct drm_device *drm = &adev->drm; + struct appletbdrm_frame *frame; + u64 timestamp = ktime_get_ns(); + struct drm_rect damage; + size_t frames_size = appletbdrm_state->frames_size; + size_t request_size = appletbdrm_state->request_size; + int ret; + + if (!frames_size) + return 0; + + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) { + drm_err(drm, "Failed to start CPU framebuffer access (%d)\n", ret); + goto end_fb_cpu_access; + } + + request->header.unk_00 = cpu_to_le16(2); + request->header.unk_02 = cpu_to_le16(0x12); + request->header.unk_04 = cpu_to_le32(9); + request->header.size = cpu_to_le32(request_size - sizeof(request->header)); + request->unk_10 = cpu_to_le16(1); + request->msg_id = timestamp; + + frame = (struct appletbdrm_frame *)request->data; + + drm_atomic_helper_damage_iter_init(&iter, old_state, state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + struct drm_rect dst_clip = state->dst; + struct iosys_map dst = IOSYS_MAP_INIT_VADDR(frame->buf); + u32 buf_size = rect_size(&damage); + + if (!drm_rect_intersect(&dst_clip, &damage)) + continue; + + /* + * The coordinates need to be translated to the coordinate + * system the device expects, see the comment in + * appletbdrm_setup_mode_config + */ + frame->begin_x = cpu_to_le16(damage.y1); + frame->begin_y = cpu_to_le16(adev->height - damage.x2); + frame->width = cpu_to_le16(drm_rect_height(&damage)); + frame->height = cpu_to_le16(drm_rect_width(&damage)); + frame->buf_size = cpu_to_le32(buf_size); + + switch (fb->format->format) { + case DRM_FORMAT_XRGB8888: + drm_fb_xrgb8888_to_bgr888(&dst, NULL, &shadow_plane_state->data[0], fb, &damage, &shadow_plane_state->fmtcnv_state); + break; + default: + drm_fb_memcpy(&dst, NULL, &shadow_plane_state->data[0], fb, &damage); + break; + } + + frame = (void *)frame + struct_size(frame, buf, buf_size); + } + + footer = (struct appletbdrm_fb_request_footer *)&request->data[frames_size]; + + footer->unk_0c = cpu_to_le32(0xfffe); + footer->unk_1c = cpu_to_le32(0x80001); + footer->unk_34 = cpu_to_le32(0x80002); + footer->unk_4c = cpu_to_le32(0xffff); + footer->timestamp = cpu_to_le64(timestamp); + + ret = appletbdrm_send_request(adev, &request->header, request_size); + if (ret) + goto end_fb_cpu_access; + + ret = appletbdrm_read_response(adev, &response->header, sizeof(*response), + APPLETBDRM_MSG_UPDATE_COMPLETE); + if (ret) + goto end_fb_cpu_access; + + if (response->timestamp != footer->timestamp) { + drm_err(drm, "Response timestamp (%llu) doesn't match request timestamp (%llu)\n", + le64_to_cpu(response->timestamp), timestamp); + goto end_fb_cpu_access; + } + +end_fb_cpu_access: + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); + + return ret; +} + +static void appletbdrm_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *old_state) +{ + struct appletbdrm_device *adev = drm_to_adev(plane->dev); + struct drm_device *drm = plane->dev; + struct drm_plane_state *plane_state = plane->state; + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane); + int idx; + + if (!drm_dev_enter(drm, &idx)) + return; + + appletbdrm_flush_damage(adev, old_plane_state, plane_state); + + drm_dev_exit(idx); +} + +static void appletbdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct appletbdrm_device *adev = drm_to_adev(dev); + int 
idx; + + if (!drm_dev_enter(dev, &idx)) + return; + + appletbdrm_clear_display(adev); + + drm_dev_exit(idx); +} + +static void appletbdrm_primary_plane_reset(struct drm_plane *plane) +{ + struct appletbdrm_plane_state *appletbdrm_state; + + WARN_ON(plane->state); + + appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL); + if (!appletbdrm_state) + return; + + __drm_gem_reset_shadow_plane(plane, &appletbdrm_state->base); +} + +static struct drm_plane_state *appletbdrm_primary_plane_duplicate_state(struct drm_plane *plane) +{ + struct drm_shadow_plane_state *new_shadow_plane_state; + struct appletbdrm_plane_state *appletbdrm_state; + + if (WARN_ON(!plane->state)) + return NULL; + + appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL); + if (!appletbdrm_state) + return NULL; + + /* Request and response are not duplicated and are allocated in .atomic_check */ + appletbdrm_state->request = NULL; + appletbdrm_state->response = NULL; + + appletbdrm_state->request_size = 0; + appletbdrm_state->frames_size = 0; + + new_shadow_plane_state = &appletbdrm_state->base; + + __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); + + return &new_shadow_plane_state->base; +} + +static void appletbdrm_primary_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state); + + kfree(appletbdrm_state->request); + kfree(appletbdrm_state->response); + + __drm_gem_destroy_shadow_plane_state(&appletbdrm_state->base); + + kfree(appletbdrm_state); +} + +static const struct drm_plane_helper_funcs appletbdrm_primary_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = appletbdrm_primary_plane_helper_atomic_check, + .atomic_update = appletbdrm_primary_plane_helper_atomic_update, + .atomic_disable = appletbdrm_primary_plane_helper_atomic_disable, +}; + +static const struct drm_plane_funcs appletbdrm_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = appletbdrm_primary_plane_reset, + .atomic_duplicate_state = appletbdrm_primary_plane_duplicate_state, + .atomic_destroy_state = appletbdrm_primary_plane_destroy_state, + .destroy = drm_plane_cleanup, +}; + +static enum drm_mode_status appletbdrm_crtc_helper_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct appletbdrm_device *adev = drm_to_adev(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &adev->mode); +} + +static const struct drm_mode_config_funcs appletbdrm_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const struct drm_connector_funcs appletbdrm_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .destroy = drm_connector_cleanup, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, +}; + +static const struct drm_connector_helper_funcs appletbdrm_connector_helper_funcs = { + .get_modes = appletbdrm_connector_helper_get_modes, +}; + +static const struct drm_crtc_helper_funcs appletbdrm_crtc_helper_funcs = { + .mode_valid = appletbdrm_crtc_helper_mode_valid, +}; + +static const struct drm_crtc_funcs appletbdrm_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = 
drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_encoder_funcs appletbdrm_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct appletbdrm_device *adev = drm_to_adev(dev); + + if (!adev->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev); +} + +DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops); + +static const struct drm_driver appletbdrm_drm_driver = { + DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = appletbdrm_driver_gem_prime_import, + .name = "appletbdrm", + .desc = "Apple Touch Bar DRM Driver", + .major = 1, + .minor = 0, + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + .fops = &appletbdrm_drm_fops, +}; + +static int appletbdrm_setup_mode_config(struct appletbdrm_device *adev) +{ + struct drm_connector *connector = &adev->connector; + struct drm_plane *primary_plane; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + struct drm_device *drm = &adev->drm; + int ret; + + ret = drmm_mode_config_init(drm); + if (ret) { + drm_err(drm, "Failed to initialize mode configuration\n"); + return ret; + } + + primary_plane = &adev->primary_plane; + ret = drm_universal_plane_init(drm, primary_plane, 0, + &appletbdrm_primary_plane_funcs, + appletbdrm_primary_plane_formats, + ARRAY_SIZE(appletbdrm_primary_plane_formats), + NULL, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + drm_err(drm, "Failed to initialize universal plane object\n"); + return ret; + } + + drm_plane_helper_add(primary_plane, &appletbdrm_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); + + crtc = &adev->crtc; + ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, + &appletbdrm_crtc_funcs, NULL); + if (ret) { + drm_err(drm, "Failed to initialize CRTC object\n"); + return ret; + } + + drm_crtc_helper_add(crtc, &appletbdrm_crtc_helper_funcs); + + encoder = &adev->encoder; + ret = drm_encoder_init(drm, encoder, &appletbdrm_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) { + drm_err(drm, "Failed to initialize encoder\n"); + return ret; + } + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* + * The coordinate system used by the device is different from the + * coordinate system of the framebuffer in that the x and y axes are + * swapped, and that the y axis is inverted; so what the device reports + * as the height is actually the width of the framebuffer and vice + * versa. 
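+	 *
+	 * For example, appletbdrm_flush_damage() sends a framebuffer damage
+	 * rectangle (x1, y1)-(x2, y2) to the device as begin_x = y1 and
+	 * begin_y = adev->height - x2, with the rectangle's width and height
+	 * swapped.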
+ */ + drm->mode_config.max_width = max(adev->height, DRM_SHADOW_PLANE_MAX_WIDTH); + drm->mode_config.max_height = max(adev->width, DRM_SHADOW_PLANE_MAX_HEIGHT); + drm->mode_config.preferred_depth = APPLETBDRM_BITS_PER_PIXEL; + drm->mode_config.funcs = &appletbdrm_mode_config_funcs; + + adev->mode = (struct drm_display_mode) { + DRM_MODE_INIT(60, adev->height, adev->width, + DRM_MODE_RES_MM(adev->height, 218), + DRM_MODE_RES_MM(adev->width, 218)) + }; + + ret = drm_connector_init(drm, connector, + &appletbdrm_connector_funcs, DRM_MODE_CONNECTOR_USB); + if (ret) { + drm_err(drm, "Failed to initialize connector\n"); + return ret; + } + + drm_connector_helper_add(connector, &appletbdrm_connector_helper_funcs); + + ret = drm_connector_set_panel_orientation(connector, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP); + if (ret) { + drm_err(drm, "Failed to set panel orientation\n"); + return ret; + } + + connector->display_info.non_desktop = true; + ret = drm_object_property_set_value(&connector->base, + drm->mode_config.non_desktop_property, true); + if (ret) { + drm_err(drm, "Failed to set non-desktop property\n"); + return ret; + } + + ret = drm_connector_attach_encoder(connector, encoder); + + if (ret) { + drm_err(drm, "Failed to initialize simple display pipe\n"); + return ret; + } + + drm_mode_config_reset(drm); + + return 0; +} + +static int appletbdrm_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct device *dev = &intf->dev; + struct appletbdrm_device *adev; + struct drm_device *drm = NULL; + int ret; + + ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); + if (ret) { + drm_err(drm, "appletbdrm: Failed to find bulk endpoints\n"); + return ret; + } + + adev = devm_drm_dev_alloc(dev, &appletbdrm_drm_driver, struct appletbdrm_device, drm); + if (IS_ERR(adev)) + return PTR_ERR(adev); + + adev->in_ep = bulk_in->bEndpointAddress; + adev->out_ep = bulk_out->bEndpointAddress; + adev->dmadev = dev; + + drm = &adev->drm; + + usb_set_intfdata(intf, adev); + + ret = appletbdrm_get_information(adev); + if (ret) { + drm_err(drm, "Failed to get display information\n"); + return ret; + } + + ret = appletbdrm_signal_readiness(adev); + if (ret) { + drm_err(drm, "Failed to signal readiness\n"); + return ret; + } + + ret = appletbdrm_setup_mode_config(adev); + if (ret) { + drm_err(drm, "Failed to setup mode config\n"); + return ret; + } + + ret = drm_dev_register(drm, 0); + if (ret) { + drm_err(drm, "Failed to register DRM device\n"); + return ret; + } + + ret = appletbdrm_clear_display(adev); + if (ret) { + drm_err(drm, "Failed to clear display\n"); + return ret; + } + + return 0; +} + +static void appletbdrm_disconnect(struct usb_interface *intf) +{ + struct appletbdrm_device *adev = usb_get_intfdata(intf); + struct drm_device *drm = &adev->drm; + + put_device(adev->dmadev); + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); +} + +static void appletbdrm_shutdown(struct usb_interface *intf) +{ + struct appletbdrm_device *adev = usb_get_intfdata(intf); + + /* + * The framebuffer needs to be cleared on shutdown since its content + * persists across boots + */ + drm_atomic_helper_shutdown(&adev->drm); +} + +static const struct usb_device_id appletbdrm_usb_id_table[] = { + { USB_DEVICE_INTERFACE_CLASS(0x05ac, 0x8302, USB_CLASS_AUDIO_VIDEO) }, + {} +}; +MODULE_DEVICE_TABLE(usb, appletbdrm_usb_id_table); + +static struct usb_driver appletbdrm_usb_driver = { + .name = "appletbdrm", + .probe = 
appletbdrm_probe, + .disconnect = appletbdrm_disconnect, + .shutdown = appletbdrm_shutdown, + .id_table = appletbdrm_usb_id_table, +}; +module_usb_driver(appletbdrm_usb_driver); + +MODULE_AUTHOR("Kerem Karabay "); +MODULE_DESCRIPTION("Apple Touch Bar DRM Driver"); +MODULE_LICENSE("GPL"); From 7a108b930a84e71be71c3370eef6dd96fbb8f618 Mon Sep 17 00:00:00 2001 From: Sasha Finkelstein Date: Mon, 24 Feb 2025 12:02:16 +0100 Subject: [PATCH 32/77] dt-bindings: display: Add Apple pre-DCP display controller Add bindings for a secondary display controller present on certain Apple laptops. Reviewed-by: Krzysztof Kozlowski Reviewed-by: Neal Gompa Signed-off-by: Sasha Finkelstein Link: https://patchwork.freedesktop.org/patch/msgid/20250224-adpdrm-v8-1-cccf96710f0f@gmail.com Signed-off-by: Alyssa Rosenzweig --- .../display/apple,h7-display-pipe-mipi.yaml | 83 +++++++++++++++++ .../display/apple,h7-display-pipe.yaml | 88 +++++++++++++++++++ .../bindings/display/panel/apple,summit.yaml | 58 ++++++++++++ 3 files changed, 229 insertions(+) create mode 100644 Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml create mode 100644 Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml create mode 100644 Documentation/devicetree/bindings/display/panel/apple,summit.yaml diff --git a/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml b/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml new file mode 100644 index 000000000000..5e6da66499a5 --- /dev/null +++ b/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/apple,h7-display-pipe-mipi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Apple pre-DCP display controller MIPI interface + +maintainers: + - Sasha Finkelstein + +description: + The MIPI controller part of the pre-DCP Apple display controller + +allOf: + - $ref: dsi-controller.yaml# + +properties: + compatible: + items: + - enum: + - apple,t8112-display-pipe-mipi + - apple,t8103-display-pipe-mipi + - const: apple,h7-display-pipe-mipi + + reg: + maxItems: 1 + + power-domains: + maxItems: 1 + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: Input port. Always connected to the primary controller + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: Output MIPI DSI port to the panel + + required: + - port@0 + - port@1 + +required: + - compatible + - reg + - ports + +unevaluatedProperties: false + +examples: + - | + dsi@28200000 { + compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi"; + reg = <0x28200000 0xc000>; + power-domains = <&ps_dispdfr_mipi>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + dfr_adp_out_mipi: endpoint { + remote-endpoint = <&dfr_adp_out_mipi>; + }; + }; + + port@1 { + reg = <1>; + + dfr_panel_in: endpoint { + remote-endpoint = <&dfr_mipi_out_panel>; + }; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml b/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml new file mode 100644 index 000000000000..102fb1804c0c --- /dev/null +++ b/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml @@ -0,0 +1,88 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/apple,h7-display-pipe.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Apple pre-DCP display controller + +maintainers: + - Sasha Finkelstein + +description: + A secondary display controller used to drive the "touchbar" on + certain Apple laptops. + +properties: + compatible: + items: + - enum: + - apple,t8112-display-pipe + - apple,t8103-display-pipe + - const: apple,h7-display-pipe + + reg: + items: + - description: Primary register block, controls planes and blending + - description: + Contains other configuration registers like interrupt + and FIFO control + + reg-names: + items: + - const: be + - const: fe + + power-domains: + description: + Phandles to pmgr entries that are needed for this controller to turn on. + Aside from that, their specific functions are unknown + maxItems: 2 + + interrupts: + items: + - description: Unknown function + - description: Primary interrupt. Vsync events are reported via it + + interrupt-names: + items: + - const: be + - const: fe + + iommus: + maxItems: 1 + + port: + $ref: /schemas/graph.yaml#/properties/port + description: Output port. Always connected to apple,h7-display-pipe-mipi + +required: + - compatible + - reg + - interrupts + - port + +additionalProperties: false + +examples: + - | + #include + display-pipe@28200000 { + compatible = "apple,t8103-display-pipe", "apple,h7-display-pipe"; + reg = <0x28200000 0xc000>, + <0x28400000 0x4000>; + reg-names = "be", "fe"; + power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>; + interrupt-parent = <&aic>; + interrupts = , + ; + interrupt-names = "be", "fe"; + iommus = <&displaydfr_dart 0>; + + port { + dfr_adp_out_mipi: endpoint { + remote-endpoint = <&dfr_mipi_in_adp>; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/panel/apple,summit.yaml b/Documentation/devicetree/bindings/display/panel/apple,summit.yaml new file mode 100644 index 000000000000..f081755325e9 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/apple,summit.yaml @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/apple,summit.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Apple "Summit" display panel + +maintainers: + - Sasha Finkelstein + +description: + An OLED panel used as a touchbar on certain Apple laptops. + Contains a backlight device, which controls brightness of the panel itself. + The backlight common properties are included for this reason + +allOf: + - $ref: panel-common.yaml# + - $ref: /schemas/leds/backlight/common.yaml# + +properties: + compatible: + items: + - enum: + - apple,j293-summit + - apple,j493-summit + - const: apple,summit + + reg: + maxItems: 1 + +required: + - compatible + - reg + - max-brightness + - port + +unevaluatedProperties: false + +examples: + - | + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "apple,j293-summit", "apple,summit"; + reg = <0>; + max-brightness = <255>; + + port { + endpoint { + remote-endpoint = <&dfr_bridge_out>; + }; + }; + }; + }; +... 
From 332122eba628d537a1b7b96b976079753fd03039 Mon Sep 17 00:00:00 2001 From: Sasha Finkelstein Date: Mon, 24 Feb 2025 12:02:17 +0100 Subject: [PATCH 33/77] drm: adp: Add Apple Display Pipe driver This display controller is present on M-series chips and is used to drive the touchbar display. Co-developed-by: Janne Grunau Signed-off-by: Janne Grunau Reviewed-by: Dmitry Baryshkov Reviewed-by: Neal Gompa Signed-off-by: Sasha Finkelstein Link: https://patchwork.freedesktop.org/patch/msgid/20250224-adpdrm-v8-2-cccf96710f0f@gmail.com Signed-off-by: Alyssa Rosenzweig --- drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/adp/Kconfig | 17 + drivers/gpu/drm/adp/Makefile | 5 + drivers/gpu/drm/adp/adp-mipi.c | 276 +++++++++++++++ drivers/gpu/drm/adp/adp_drv.c | 616 +++++++++++++++++++++++++++++++++ 6 files changed, 917 insertions(+) create mode 100644 drivers/gpu/drm/adp/Kconfig create mode 100644 drivers/gpu/drm/adp/Makefile create mode 100644 drivers/gpu/drm/adp/adp-mipi.c create mode 100644 drivers/gpu/drm/adp/adp_drv.c diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index d9986fd52194..e5b59de28216 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -439,6 +439,8 @@ source "drivers/gpu/drm/mcde/Kconfig" source "drivers/gpu/drm/tidss/Kconfig" +source "drivers/gpu/drm/adp/Kconfig" + source "drivers/gpu/drm/xlnx/Kconfig" source "drivers/gpu/drm/gud/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 50604b49d1ac..4cd054188faf 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -206,6 +206,7 @@ obj-y += mxsfb/ obj-y += tiny/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ +obj-$(CONFIG_DRM_ADP) += adp/ obj-$(CONFIG_DRM_XEN) += xen/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ obj-$(CONFIG_DRM_LIMA) += lima/ diff --git a/drivers/gpu/drm/adp/Kconfig b/drivers/gpu/drm/adp/Kconfig new file mode 100644 index 000000000000..9fcc27eb200d --- /dev/null +++ b/drivers/gpu/drm/adp/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only OR MIT +config DRM_ADP + tristate "DRM Support for pre-DCP Apple display controllers" + depends on DRM && OF && ARM64 + depends on ARCH_APPLE || COMPILE_TEST + select DRM_KMS_HELPER + select DRM_BRIDGE_CONNECTOR + select DRM_DISPLAY_HELPER + select DRM_KMS_DMA_HELPER + select DRM_GEM_DMA_HELPER + select DRM_PANEL_BRIDGE + select VIDEOMODE_HELPERS + select DRM_MIPI_DSI + help + Chose this option if you have an Apple Arm laptop with a touchbar. + + If M is selected, this module will be called adpdrm. 
diff --git a/drivers/gpu/drm/adp/Makefile b/drivers/gpu/drm/adp/Makefile new file mode 100644 index 000000000000..8e7b618edd35 --- /dev/null +++ b/drivers/gpu/drm/adp/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only OR MIT + +adpdrm-y := adp_drv.o +adpdrm-mipi-y := adp-mipi.o +obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c new file mode 100644 index 000000000000..ad80542b60ed --- /dev/null +++ b/drivers/gpu/drm/adp/adp-mipi.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include +#include + +#define DSI_GEN_HDR 0x6c +#define DSI_GEN_PLD_DATA 0x70 + +#define DSI_CMD_PKT_STATUS 0x74 + +#define GEN_PLD_R_EMPTY BIT(4) +#define GEN_PLD_W_FULL BIT(3) +#define GEN_PLD_W_EMPTY BIT(2) +#define GEN_CMD_FULL BIT(1) +#define GEN_CMD_EMPTY BIT(0) +#define GEN_RD_CMD_BUSY BIT(6) +#define CMD_PKT_STATUS_TIMEOUT_US 20000 + +struct adp_mipi_drv_private { + struct mipi_dsi_host dsi; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + void __iomem *mipi; +}; + +#define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi) + +static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val) +{ + int ret; + u32 val, mask; + + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, + val, !(val & GEN_CMD_FULL), 1000, + CMD_PKT_STATUS_TIMEOUT_US); + if (ret) { + dev_err(adp->dsi.dev, "failed to get available command FIFO\n"); + return ret; + } + + writel(hdr_val, adp->mipi + DSI_GEN_HDR); + + mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY; + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, + val, (val & mask) == mask, + 1000, CMD_PKT_STATUS_TIMEOUT_US); + if (ret) { + dev_err(adp->dsi.dev, "failed to write command FIFO\n"); + return ret; + } + + return 0; +} + +static int adp_dsi_write(struct adp_mipi_drv_private *adp, + const struct mipi_dsi_packet *packet) +{ + const u8 *tx_buf = packet->payload; + int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret; + __le32 word; + u32 val; + + while (len) { + if (len < pld_data_bytes) { + word = 0; + memcpy(&word, tx_buf, len); + writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA); + len = 0; + } else { + memcpy(&word, tx_buf, pld_data_bytes); + writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA); + tx_buf += pld_data_bytes; + len -= pld_data_bytes; + } + + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, + val, !(val & GEN_PLD_W_FULL), 1000, + CMD_PKT_STATUS_TIMEOUT_US); + if (ret) { + dev_err(adp->dsi.dev, + "failed to get available write payload FIFO\n"); + return ret; + } + } + + word = 0; + memcpy(&word, packet->header, sizeof(packet->header)); + return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word)); +} + +static int adp_dsi_read(struct adp_mipi_drv_private *adp, + const struct mipi_dsi_msg *msg) +{ + int i, j, ret, len = msg->rx_len; + u8 *buf = msg->rx_buf; + u32 val; + + /* Wait end of the read operation */ + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, + val, !(val & GEN_RD_CMD_BUSY), + 1000, CMD_PKT_STATUS_TIMEOUT_US); + if (ret) { + dev_err(adp->dsi.dev, "Timeout during read operation\n"); + return ret; + } + + for (i = 0; i < len; i += 4) { + /* Read fifo must not be empty before all bytes are read */ + ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS, + val, !(val & GEN_PLD_R_EMPTY), + 1000, CMD_PKT_STATUS_TIMEOUT_US); + if (ret) { + dev_err(adp->dsi.dev, "Read payload FIFO is empty\n"); + return ret; + } + + val = 
readl(adp->mipi + DSI_GEN_PLD_DATA); + for (j = 0; j < 4 && j + i < len; j++) + buf[i + j] = val >> (8 * j); + } + + return ret; +} + +static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct adp_mipi_drv_private *adp = mipi_to_adp(host); + struct mipi_dsi_packet packet; + int ret, nb_bytes; + + ret = mipi_dsi_create_packet(&packet, msg); + if (ret) { + dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret); + return ret; + } + + ret = adp_dsi_write(adp, &packet); + if (ret) + return ret; + + if (msg->rx_buf && msg->rx_len) { + ret = adp_dsi_read(adp, msg); + if (ret) + return ret; + nb_bytes = msg->rx_len; + } else { + nb_bytes = packet.size; + } + + return nb_bytes; +} + +static int adp_dsi_bind(struct device *dev, struct device *master, void *data) +{ + return 0; +} + +static void adp_dsi_unbind(struct device *dev, struct device *master, void *data) +{ +} + +static const struct component_ops adp_dsi_component_ops = { + .bind = adp_dsi_bind, + .unbind = adp_dsi_unbind, +}; + +static int adp_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dev) +{ + struct adp_mipi_drv_private *adp = mipi_to_adp(host); + struct drm_bridge *next; + int ret; + + next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0); + if (IS_ERR(next)) + return PTR_ERR(next); + + adp->next_bridge = next; + + drm_bridge_add(&adp->bridge); + + ret = component_add(host->dev, &adp_dsi_component_ops); + if (ret) { + pr_err("failed to add dsi_host component: %d\n", ret); + drm_bridge_remove(&adp->bridge); + return ret; + } + + return 0; +} + +static int adp_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dev) +{ + struct adp_mipi_drv_private *adp = mipi_to_adp(host); + + component_del(host->dev, &adp_dsi_component_ops); + drm_bridge_remove(&adp->bridge); + return 0; +} + +static const struct mipi_dsi_host_ops adp_dsi_host_ops = { + .transfer = adp_dsi_host_transfer, + .attach = adp_dsi_host_attach, + .detach = adp_dsi_host_detach, +}; + +static int adp_dsi_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct adp_mipi_drv_private *adp = + container_of(bridge, struct adp_mipi_drv_private, bridge); + + return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags); +} + +static const struct drm_bridge_funcs adp_dsi_bridge_funcs = { + .attach = adp_dsi_bridge_attach, +}; + +static int adp_mipi_probe(struct platform_device *pdev) +{ + struct adp_mipi_drv_private *adp; + + adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL); + if (!adp) + return -ENOMEM; + + adp->mipi = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(adp->mipi)) { + dev_err(&pdev->dev, "failed to map mipi mmio"); + return PTR_ERR(adp->mipi); + } + + adp->dsi.dev = &pdev->dev; + adp->dsi.ops = &adp_dsi_host_ops; + adp->bridge.funcs = &adp_dsi_bridge_funcs; + adp->bridge.of_node = pdev->dev.of_node; + adp->bridge.type = DRM_MODE_CONNECTOR_DSI; + dev_set_drvdata(&pdev->dev, adp); + return mipi_dsi_host_register(&adp->dsi); +} + +static void adp_mipi_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct adp_mipi_drv_private *adp = dev_get_drvdata(dev); + + mipi_dsi_host_unregister(&adp->dsi); +} + +static const struct of_device_id adp_mipi_of_match[] = { + { .compatible = "apple,h7-display-pipe-mipi", }, + { }, +}; +MODULE_DEVICE_TABLE(of, adp_mipi_of_match); + +static struct platform_driver adp_mipi_platform_driver = { + .driver = { + .name = "adp-mipi", + 
.of_match_table = adp_mipi_of_match, + }, + .probe = adp_mipi_probe, + .remove = adp_mipi_remove, +}; + +module_platform_driver(adp_mipi_platform_driver); + +MODULE_DESCRIPTION("Apple Display Pipe MIPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c new file mode 100644 index 000000000000..0a39abdc9238 --- /dev/null +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -0,0 +1,616 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADP_INT_STATUS 0x34 +#define ADP_INT_STATUS_INT_MASK 0x7 +#define ADP_INT_STATUS_VBLANK 0x1 +#define ADP_CTRL 0x100 +#define ADP_CTRL_VBLANK_ON 0x12 +#define ADP_CTRL_FIFO_ON 0x601 +#define ADP_SCREEN_SIZE 0x0c +#define ADP_SCREEN_HSIZE GENMASK(15, 0) +#define ADP_SCREEN_VSIZE GENMASK(31, 16) + +#define ADBE_FIFO 0x10c0 +#define ADBE_FIFO_SYNC 0xc0000000 + +#define ADBE_BLEND_BYPASS 0x2020 +#define ADBE_BLEND_EN1 0x2028 +#define ADBE_BLEND_EN2 0x2074 +#define ADBE_BLEND_EN3 0x202c +#define ADBE_BLEND_EN4 0x2034 +#define ADBE_MASK_BUF 0x2200 + +#define ADBE_SRC_START 0x4040 +#define ADBE_SRC_SIZE 0x4048 +#define ADBE_DST_START 0x4050 +#define ADBE_DST_SIZE 0x4054 +#define ADBE_STRIDE 0x4038 +#define ADBE_FB_BASE 0x4030 + +#define ADBE_LAYER_EN1 0x4020 +#define ADBE_LAYER_EN2 0x4068 +#define ADBE_LAYER_EN3 0x40b4 +#define ADBE_LAYER_EN4 0x40f4 +#define ADBE_SCALE_CTL 0x40ac +#define ADBE_SCALE_CTL_BYPASS 0x100000 + +#define ADBE_LAYER_CTL 0x1038 +#define ADBE_LAYER_CTL_ENABLE 0x10000 + +#define ADBE_PIX_FMT 0x402c +#define ADBE_PIX_FMT_XRGB32 0x53e4001 + +static int adp_open(struct inode *inode, struct file *filp) +{ + /* + * The modesetting driver does not check the non-desktop connector + * property and keeps the device open and locked. If the touchbar daemon + * opens the device first, modesetting breaks the whole X session. + * Simply refuse to open the device for X11 server processes as + * workaround. 
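+	 * (The check below treats any process whose comm name starts with
+	 * 'X', such as "Xorg", as an X server.)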
+ */ + if (current->comm[0] == 'X') + return -EBUSY; + + return drm_open(inode, filp); +} + +static const struct file_operations adp_fops = { + .owner = THIS_MODULE, + .open = adp_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, + .mmap = drm_gem_mmap, + .fop_flags = FOP_UNSIGNED_OFFSET, + DRM_GEM_DMA_UNMAPPED_AREA_FOPS +}; + +static int adp_drm_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args) +{ + args->height = ALIGN(args->height, 64); + args->size = args->pitch * args->height; + + return drm_gem_dma_dumb_create_internal(file_priv, drm, args); +} + +static const struct drm_driver adp_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &adp_fops, + DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create), + .name = "adp", + .desc = "Apple Display Pipe DRM Driver", + .major = 0, + .minor = 1, +}; + +struct adp_drv_private { + struct drm_device drm; + struct drm_crtc crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct drm_bridge *next_bridge; + void __iomem *be; + void __iomem *fe; + u32 *mask_buf; + u64 mask_buf_size; + dma_addr_t mask_iova; + int be_irq; + int fe_irq; + spinlock_t irq_lock; + struct drm_pending_vblank_event *event; +}; + +#define to_adp(x) container_of(x, struct adp_drv_private, drm) +#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc) + +static int adp_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state; + struct drm_crtc_state *crtc_state; + + new_plane_state = drm_atomic_get_new_plane_state(state, plane); + + if (!new_plane_state->crtc) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true, true); +} + +static void adp_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct adp_drv_private *adp; + struct drm_rect src_rect; + struct drm_gem_dma_object *obj; + struct drm_framebuffer *fb; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + u32 src_pos, src_size, dst_pos, dst_size; + + if (!plane || !new_state) + return; + + fb = new_state->fb; + if (!fb) + return; + adp = to_adp(plane->dev); + + drm_rect_fp_to_int(&src_rect, &new_state->src); + src_pos = src_rect.x1 << 16 | src_rect.y1; + dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1; + src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect); + dst_size = drm_rect_width(&new_state->dst) << 16 | + drm_rect_height(&new_state->dst); + writel(src_pos, adp->be + ADBE_SRC_START); + writel(src_size, adp->be + ADBE_SRC_SIZE); + writel(dst_pos, adp->be + ADBE_DST_START); + writel(dst_size, adp->be + ADBE_DST_SIZE); + writel(fb->pitches[0], adp->be + ADBE_STRIDE); + obj = drm_fb_dma_get_gem_obj(fb, 0); + if (obj) + writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE); + + writel(BIT(0), adp->be + ADBE_LAYER_EN1); + writel(BIT(0), adp->be + ADBE_LAYER_EN2); + writel(BIT(0), adp->be + ADBE_LAYER_EN3); + writel(BIT(0), adp->be + ADBE_LAYER_EN4); + writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL); + writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL); + 
writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT); +} + +static void adp_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct adp_drv_private *adp = to_adp(plane->dev); + + writel(0x0, adp->be + ADBE_LAYER_EN1); + writel(0x0, adp->be + ADBE_LAYER_EN2); + writel(0x0, adp->be + ADBE_LAYER_EN3); + writel(0x0, adp->be + ADBE_LAYER_EN4); + writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL); +} + +static const struct drm_plane_helper_funcs adp_plane_helper_funcs = { + .atomic_check = adp_plane_atomic_check, + .atomic_update = adp_plane_atomic_update, + .atomic_disable = adp_plane_atomic_disable, + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS +}; + +static const struct drm_plane_funcs adp_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + DRM_GEM_SHADOW_PLANE_FUNCS +}; + +static const u32 plane_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +#define ALL_CRTCS 1 + +static struct drm_plane *adp_plane_new(struct adp_drv_private *adp) +{ + struct drm_device *drm = &adp->drm; + struct drm_plane *plane; + + plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0, + ALL_CRTCS, &adp_plane_funcs, + plane_formats, ARRAY_SIZE(plane_formats), + NULL, DRM_PLANE_TYPE_PRIMARY, "plane"); + if (!plane) { + drm_err(drm, "failed to allocate plane"); + return ERR_PTR(-ENOMEM); + } + + drm_plane_helper_add(plane, &adp_plane_helper_funcs); + return plane; +} + +static void adp_enable_vblank(struct adp_drv_private *adp) +{ + u32 cur_ctrl; + + writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS); + + cur_ctrl = readl(adp->fe + ADP_CTRL); + writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); +} + +static int adp_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct adp_drv_private *adp = to_adp(dev); + + adp_enable_vblank(adp); + + return 0; +} + +static void adp_disable_vblank(struct adp_drv_private *adp) +{ + u32 cur_ctrl; + + cur_ctrl = readl(adp->fe + ADP_CTRL); + writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); + writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS); +} + +static void adp_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct adp_drv_private *adp = to_adp(dev); + + adp_disable_vblank(adp); +} + +static void adp_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct adp_drv_private *adp = crtc_to_adp(crtc); + + writel(BIT(0), adp->be + ADBE_BLEND_EN2); + writel(BIT(4), adp->be + ADBE_BLEND_EN1); + writel(BIT(0), adp->be + ADBE_BLEND_EN3); + writel(BIT(0), adp->be + ADBE_BLEND_BYPASS); + writel(BIT(0), adp->be + ADBE_BLEND_EN4); +} + +static void adp_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct adp_drv_private *adp = crtc_to_adp(crtc); + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); + + drm_atomic_helper_disable_planes_on_crtc(old_state, false); + + writel(0x0, adp->be + ADBE_BLEND_EN2); + writel(0x0, adp->be + ADBE_BLEND_EN1); + writel(0x0, adp->be + ADBE_BLEND_EN3); + writel(0x0, adp->be + ADBE_BLEND_BYPASS); + writel(0x0, adp->be + ADBE_BLEND_EN4); + drm_crtc_vblank_off(crtc); +} + +static void adp_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + u32 frame_num = 1; + struct adp_drv_private *adp = crtc_to_adp(crtc); + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); + u64 new_size = ALIGN(new_state->mode.hdisplay * + 
new_state->mode.vdisplay * 4, PAGE_SIZE); + + if (new_size != adp->mask_buf_size) { + if (adp->mask_buf) + dma_free_coherent(crtc->dev->dev, adp->mask_buf_size, + adp->mask_buf, adp->mask_iova); + adp->mask_buf = NULL; + if (new_size != 0) { + adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size, + &adp->mask_iova, GFP_KERNEL); + memset(adp->mask_buf, 0xFF, new_size); + writel(adp->mask_iova, adp->be + ADBE_MASK_BUF); + } + adp->mask_buf_size = new_size; + } + writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO); + //FIXME: use adbe flush interrupt + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_vblank_get(crtc); + adp->event = crtc->state->event; + } + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); +} + +static const struct drm_crtc_funcs adp_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = adp_crtc_enable_vblank, + .disable_vblank = adp_crtc_disable_vblank, +}; + + +static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = { + .atomic_enable = adp_crtc_atomic_enable, + .atomic_disable = adp_crtc_atomic_disable, + .atomic_flush = adp_crtc_atomic_flush, +}; + +static int adp_setup_crtc(struct adp_drv_private *adp) +{ + struct drm_device *drm = &adp->drm; + struct drm_plane *primary; + int ret; + + primary = adp_plane_new(adp); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary, + NULL, &adp_crtc_funcs, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs); + return 0; +} + +static const struct drm_mode_config_funcs adp_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int adp_setup_mode_config(struct adp_drv_private *adp) +{ + struct drm_device *drm = &adp->drm; + int ret; + u32 size; + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + + /* + * Query screen size restrict the frame buffer size to the screen size + * aligned to the next multiple of 64. This is not necessary but can be + * used as simple check for non-desktop devices. + * Xorg's modesetting driver does not care about the connector + * "non-desktop" property. The max frame buffer width or height can be + * easily checked and a device can be reject if the max width/height is + * smaller than 120 for example. + * Any touchbar daemon is not limited by this small framebuffer size. 
+ */ + size = readl(adp->fe + ADP_SCREEN_SIZE); + + drm->mode_config.min_width = 32; + drm->mode_config.min_height = 32; + drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64); + drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64); + drm->mode_config.preferred_depth = 24; + drm->mode_config.prefer_shadow = 0; + drm->mode_config.funcs = &adp_mode_config_funcs; + + ret = adp_setup_crtc(adp); + if (ret) { + drm_err(drm, "failed to create crtc"); + return ret; + } + + adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL); + if (IS_ERR(adp->encoder)) { + drm_err(drm, "failed to init encoder"); + return PTR_ERR(adp->encoder); + } + adp->encoder->possible_crtcs = ALL_CRTCS; + + ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + drm_err(drm, "failed to init bridge chain"); + return ret; + } + + adp->connector = drm_bridge_connector_init(drm, adp->encoder); + if (IS_ERR(adp->connector)) + return PTR_ERR(adp->connector); + + drm_connector_attach_encoder(adp->connector, adp->encoder); + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + drm_err(drm, "failed to initialize vblank"); + return ret; + } + + drm_mode_config_reset(drm); + + return 0; +} + +static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp) +{ + struct device *dev = &pdev->dev; + + adp->be = devm_platform_ioremap_resource_byname(pdev, "be"); + if (IS_ERR(adp->be)) { + dev_err(dev, "failed to map display backend mmio"); + return PTR_ERR(adp->be); + } + + adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe"); + if (IS_ERR(adp->fe)) { + dev_err(dev, "failed to map display pipe mmio"); + return PTR_ERR(adp->fe); + } + + adp->be_irq = platform_get_irq_byname(pdev, "be"); + if (adp->be_irq < 0) { + dev_err(dev, "failed to find be irq"); + return adp->be_irq; + } + + adp->fe_irq = platform_get_irq_byname(pdev, "fe"); + if (adp->fe_irq < 0) { + dev_err(dev, "failed to find fe irq"); + return adp->fe_irq; + } + + return 0; +} + +static irqreturn_t adp_fe_irq(int irq, void *arg) +{ + struct adp_drv_private *adp = (struct adp_drv_private *)arg; + u32 int_status; + u32 int_ctl; + + spin_lock(&adp->irq_lock); + + int_status = readl(adp->fe + ADP_INT_STATUS); + if (int_status & ADP_INT_STATUS_VBLANK) { + drm_crtc_handle_vblank(&adp->crtc); + spin_lock(&adp->crtc.dev->event_lock); + if (adp->event) { + int_ctl = readl(adp->fe + ADP_CTRL); + if ((int_ctl & 0xF00) == 0x600) { + drm_crtc_send_vblank_event(&adp->crtc, adp->event); + adp->event = NULL; + drm_crtc_vblank_put(&adp->crtc); + } + } + spin_unlock(&adp->crtc.dev->event_lock); + } + + writel(int_status, adp->fe + ADP_INT_STATUS); + + spin_unlock(&adp->irq_lock); + + return IRQ_HANDLED; +} + +static int adp_drm_bind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct adp_drv_private *adp = to_adp(drm); + int err; + + adp_disable_vblank(adp); + writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); + + adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0); + if (IS_ERR(adp->next_bridge)) { + dev_err(dev, "failed to find next bridge"); + return PTR_ERR(adp->next_bridge); + } + + err = adp_setup_mode_config(adp); + if (err < 0) + return err; + + err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp); + if (err) + return err; + + err = drm_dev_register(&adp->drm, 0); + if (err) + return err; + + return 0; +} + +static void adp_drm_unbind(struct device 
*dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct adp_drv_private *adp = to_adp(drm); + + drm_dev_unregister(drm); + drm_atomic_helper_shutdown(drm); + free_irq(adp->fe_irq, adp); +} + +static const struct component_master_ops adp_master_ops = { + .bind = adp_drm_bind, + .unbind = adp_drm_unbind, +}; + +static int compare_dev(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int adp_probe(struct platform_device *pdev) +{ + struct device_node *port; + struct component_match *match = NULL; + struct adp_drv_private *adp; + int err; + + adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm); + if (IS_ERR(adp)) + return PTR_ERR(adp); + + spin_lock_init(&adp->irq_lock); + + dev_set_drvdata(&pdev->dev, &adp->drm); + + err = adp_parse_of(pdev, adp); + if (err < 0) + return err; + + port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0); + if (!port) + return -ENODEV; + + drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); + of_node_put(port); + + return component_master_add_with_match(&pdev->dev, &adp_master_ops, match); +} + +static void adp_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &adp_master_ops); + dev_set_drvdata(&pdev->dev, NULL); +} + +static const struct of_device_id adp_of_match[] = { + { .compatible = "apple,h7-display-pipe", }, + { }, +}; +MODULE_DEVICE_TABLE(of, adp_of_match); + +static struct platform_driver adp_platform_driver = { + .driver = { + .name = "adp", + .of_match_table = adp_of_match, + }, + .probe = adp_probe, + .remove = adp_remove, +}; + +module_platform_driver(adp_platform_driver); + +MODULE_DESCRIPTION("Apple Display Pipe DRM driver"); +MODULE_LICENSE("GPL"); From 4d2a877cc0efefa815648f1ed5f5b2b796f55bab Mon Sep 17 00:00:00 2001 From: Sasha Finkelstein Date: Mon, 24 Feb 2025 12:02:20 +0100 Subject: [PATCH 34/77] MAINTAINERS: Add entries for touchbar display driver Add the MAINTAINERS entries for the driver Acked-by: Sven Peter Reviewed-by: Neil Armstrong Reviewed-by: Neal Gompa Signed-off-by: Sasha Finkelstein Link: https://patchwork.freedesktop.org/patch/msgid/20250224-adpdrm-v8-5-cccf96710f0f@gmail.com Signed-off-by: Alyssa Rosenzweig --- MAINTAINERS | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 2b20daaf7077..d1ac30eae9fa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7835,6 +7835,22 @@ F: drivers/gpu/host1x/ F: include/linux/host1x.h F: include/uapi/drm/tegra_drm.h +DRM DRIVERS FOR PRE-DCP APPLE DISPLAY OUTPUT +M: Sasha Finkelstein +R: Janne Grunau +L: dri-devel@lists.freedesktop.org +L: asahi@lists.linux.dev +S: Maintained +W: https://asahilinux.org +B: https://github.com/AsahiLinux/linux/issues +C: irc://irc.oftc.net/asahi-dev +T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml +F: Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml +F: Documentation/devicetree/bindings/display/panel/apple,summit.yaml +F: drivers/gpu/drm/adp/ +F: drivers/gpu/drm/panel/panel-summit.c + DRM DRIVERS FOR RENESAS R-CAR M: Laurent Pinchart M: Tomi Valkeinen From 6fd4f8a26a21dd2075cfcc7eae3b9d440d886571 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:15 +0800 Subject: [PATCH 35/77] drm/rockchip: vop2: Register the primary plane and overlay plane separately In the upcoming VOP of rk3576, a Window cannot attach to all Video Ports, so make sure all VP find it's suitable primary plane, then register 
the remain windows as overlay plane will make code easier. Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-2-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 99 ++++++++++++-------- 1 file changed, 60 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index bebe5bd70b90..0af5059ff7d8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2264,22 +2264,29 @@ static int vop2_plane_init(struct vop2 *vop2, struct vop2_win *win, return 0; } -static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2) +/* + * On RK3566 these windows don't have an independent + * framebuffer. They can only share/mirror the framebuffer + * with smart0, esmart0 and cluster0 respectively. + * And RK3566 share the same vop version with Rk3568, so we + * need to use soc_id for identification here. + */ +static bool vop2_is_mirror_win(struct vop2_win *win) { - int i; - - for (i = 0; i < vop2->data->nr_vps; i++) { - struct vop2_video_port *vp = &vop2->vps[i]; - - if (!vp->crtc.port) - continue; - if (vp->primary_plane) - continue; + struct vop2 *vop2 = win->vop2; - return vp; + if (vop2->data->soc_id == 3566) { + switch (win->data->phys_id) { + case ROCKCHIP_VOP2_SMART1: + case ROCKCHIP_VOP2_ESMART1: + case ROCKCHIP_VOP2_CLUSTER1: + return true; + default: + return false; + } + } else { + return false; } - - return NULL; } static int vop2_create_crtcs(struct vop2 *vop2) @@ -2290,7 +2297,9 @@ static int vop2_create_crtcs(struct vop2 *vop2) struct drm_plane *plane; struct device_node *port; struct vop2_video_port *vp; - int i, nvp, nvps = 0; + struct vop2_win *win; + u32 possible_crtcs; + int i, j, nvp, nvps = 0; int ret; for (i = 0; i < vop2_data->nr_vps; i++) { @@ -2326,42 +2335,54 @@ static int vop2_create_crtcs(struct vop2 *vop2) } nvp = 0; - for (i = 0; i < vop2->registered_num_wins; i++) { - struct vop2_win *win = &vop2->win[i]; - u32 possible_crtcs = 0; - - if (vop2->data->soc_id == 3566) { - /* - * On RK3566 these windows don't have an independent - * framebuffer. They share the framebuffer with smart0, - * esmart0 and cluster0 respectively. 
- */ - switch (win->data->phys_id) { - case ROCKCHIP_VOP2_SMART1: - case ROCKCHIP_VOP2_ESMART1: - case ROCKCHIP_VOP2_CLUSTER1: + /* Register a primary plane for every crtc */ + for (i = 0; i < vop2_data->nr_vps; i++) { + vp = &vop2->vps[i]; + + if (!vp->crtc.port) + continue; + + for (j = 0; j < vop2->registered_num_wins; j++) { + win = &vop2->win[j]; + + /* Aready registered as primary plane */ + if (win->base.type == DRM_PLANE_TYPE_PRIMARY) + continue; + + if (vop2_is_mirror_win(win)) continue; - } - } - if (win->type == DRM_PLANE_TYPE_PRIMARY) { - vp = find_vp_without_primary(vop2); - if (vp) { + if (win->type == DRM_PLANE_TYPE_PRIMARY) { possible_crtcs = BIT(nvp); vp->primary_plane = win; + ret = vop2_plane_init(vop2, win, possible_crtcs); + if (ret) + return dev_err_probe(drm->dev, ret, + "failed to init primary plane %s\n", + win->data->name); nvp++; - } else { - /* change the unused primary window to overlay window */ - win->type = DRM_PLANE_TYPE_OVERLAY; + break; } } + } + + /* Register all unused window as overlay plane */ + for (i = 0; i < vop2->registered_num_wins; i++) { + win = &vop2->win[i]; + + /* Aready registered as primary plane */ + if (win->base.type == DRM_PLANE_TYPE_PRIMARY) + continue; + + if (vop2_is_mirror_win(win)) + continue; - if (win->type == DRM_PLANE_TYPE_OVERLAY) - possible_crtcs = (1 << nvps) - 1; + win->type = DRM_PLANE_TYPE_OVERLAY; + possible_crtcs = (1 << nvps) - 1; ret = vop2_plane_init(vop2, win, possible_crtcs); if (ret) - return dev_err_probe(drm->dev, ret, "failed to init plane %s\n", + return dev_err_probe(drm->dev, ret, "failed to init overlay plane %s\n", win->data->name); } From b90fa71a11600276c993e620abea8ca9f2045401 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:16 +0800 Subject: [PATCH 36/77] drm/rockchip: vop2: Set plane possible crtcs by possible vp mask In the upcoming VOP of rk3576, a window cannot attach to all Video Ports, we introduce a possible_vp_mask for every window to indicate which Video Ports this window can attach to. 
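An aside on the mechanism (illustration, not part of the patch): the per-window
possible_vp_mask is turned into a DRM possible_crtcs bitmask by walking the
registered video ports, as the hunk further below does. A rough standalone
sketch of that mapping, with hypothetical helper and parameter names, assuming
bit n of each mask refers to video port n and CRTCs are numbered in
registration order:

#include <stdint.h>

/*
 * vp_registered is a hypothetical mask of video-port ids that actually own a
 * CRTC (i.e. have a connected OF port).  Returns the DRM possible_crtcs value
 * for a window whose possible_vp_mask is given.
 */
static uint32_t vp_mask_to_possible_crtcs(uint32_t possible_vp_mask,
					  uint32_t vp_registered,
					  unsigned int nr_vps)
{
	uint32_t possible_crtcs = 0;
	unsigned int vp_id, crtc_idx = 0;

	for (vp_id = 0; vp_id < nr_vps; vp_id++) {
		if (!(vp_registered & (1u << vp_id)))
			continue;		/* VP without a CRTC: skip it */
		if (possible_vp_mask & (1u << vp_id))
			possible_crtcs |= 1u << crtc_idx;
		crtc_idx++;			/* count only registered VPs */
	}

	return possible_crtcs;
}

For example, a window with possible_vp_mask = BIT(0) | BIT(1) on a board where
only VP0 and VP2 expose CRTCs ends up with possible_crtcs = BIT(0).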
Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-3-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 18 +++++++++++++++++- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 1 + drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 14 ++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 0af5059ff7d8..1280c5f07557 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2349,6 +2349,10 @@ static int vop2_create_crtcs(struct vop2 *vop2) if (win->base.type == DRM_PLANE_TYPE_PRIMARY) continue; + /* If this win can not attached to this VP */ + if (!(win->data->possible_vp_mask & BIT(vp->id))) + continue; + if (vop2_is_mirror_win(win)) continue; @@ -2379,7 +2383,19 @@ static int vop2_create_crtcs(struct vop2 *vop2) win->type = DRM_PLANE_TYPE_OVERLAY; - possible_crtcs = (1 << nvps) - 1; + possible_crtcs = 0; + nvp = 0; + for (j = 0; j < vop2_data->nr_vps; j++) { + vp = &vop2->vps[j]; + + if (!vp->crtc.port) + continue; + + if (win->data->possible_vp_mask & BIT(vp->id)) + possible_crtcs |= BIT(nvp); + nvp++; + } + ret = vop2_plane_init(vop2, win, possible_crtcs); if (ret) return dev_err_probe(drm->dev, ret, "failed to init overlay plane %s\n", diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index a309042aa8e6..46d37c61279e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -167,6 +167,7 @@ struct vop2_win_data { unsigned int phys_id; u32 base; + u32 possible_vp_mask; enum drm_plane_type type; u32 nformats; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 0afef24db144..97df9d479f11 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -347,6 +347,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .name = "Smart0-win0", .phys_id = ROCKCHIP_VOP2_SMART0, .base = 0x1c00, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), .formats = formats_smart, .nformats = ARRAY_SIZE(formats_smart), .format_modifiers = format_modifiers, @@ -360,6 +361,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { }, { .name = "Smart1-win0", .phys_id = ROCKCHIP_VOP2_SMART1, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), .formats = formats_smart, .nformats = ARRAY_SIZE(formats_smart), .format_modifiers = format_modifiers, @@ -373,6 +375,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { }, { .name = "Esmart1-win0", .phys_id = ROCKCHIP_VOP2_ESMART1, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), .formats = formats_rk356x_esmart, .nformats = ARRAY_SIZE(formats_rk356x_esmart), .format_modifiers = format_modifiers, @@ -386,6 +389,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { }, { .name = "Esmart0-win0", .phys_id = ROCKCHIP_VOP2_ESMART0, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), .formats = formats_rk356x_esmart, .nformats = ARRAY_SIZE(formats_rk356x_esmart), .format_modifiers = format_modifiers, @@ -400,6 +404,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .name = "Cluster0-win0", .phys_id = ROCKCHIP_VOP2_CLUSTER0, .base = 0x1000, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), .formats = 
formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, @@ -415,6 +420,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .name = "Cluster1-win0", .phys_id = ROCKCHIP_VOP2_CLUSTER1, .base = 0x1200, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, @@ -580,6 +586,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .name = "Cluster0-win0", .phys_id = ROCKCHIP_VOP2_CLUSTER0, .base = 0x1000, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, @@ -598,6 +605,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .name = "Cluster1-win0", .phys_id = ROCKCHIP_VOP2_CLUSTER1, .base = 0x1200, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, @@ -616,6 +624,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .name = "Cluster2-win0", .phys_id = ROCKCHIP_VOP2_CLUSTER2, .base = 0x1400, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, @@ -634,6 +643,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .name = "Cluster3-win0", .phys_id = ROCKCHIP_VOP2_CLUSTER3, .base = 0x1600, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_cluster, .nformats = ARRAY_SIZE(formats_cluster), .format_modifiers = format_modifiers_afbc, @@ -651,6 +661,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { }, { .name = "Esmart0-win0", .phys_id = ROCKCHIP_VOP2_ESMART0, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_esmart, .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, @@ -667,6 +678,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { }, { .name = "Esmart1-win0", .phys_id = ROCKCHIP_VOP2_ESMART1, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_esmart, .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, @@ -684,6 +696,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { .name = "Esmart2-win0", .phys_id = ROCKCHIP_VOP2_ESMART2, .base = 0x1c00, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_esmart, .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, @@ -699,6 +712,7 @@ static const struct vop2_win_data rk3588_vop_win_data[] = { }, { .name = "Esmart3-win0", .phys_id = ROCKCHIP_VOP2_ESMART3, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), .formats = formats_esmart, .nformats = ARRAY_SIZE(formats_esmart), .format_modifiers = format_modifiers, From e7aae9f6d762139f8d2b86db03793ae0ab3dd802 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:17 +0800 Subject: [PATCH 37/77] drm/rockchip: vop2: Add uv swap for cluster window The Cluster windows of upcoming VOP on rk3576 also support linear YUV support, we need to set uv swap bit for it. As the VOP2_WIN_UV_SWA register defined on rk3568/rk3588 is 0xffffffff, so this register will not be touched on these two platforms. 
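A note on why the unconditional write is safe (a minimal sketch, not the
driver's actual vop2_win_write): the per-SoC window register tables use
0xffffffff as a "field not present" sentinel, so a write helper can simply
skip such fields. Hypothetical names throughout:

#include <stdint.h>
#include <stdbool.h>

struct field_desc {
	uint32_t reg;		/* register offset; 0xffffffff = not present */
	uint32_t lsb, msb;	/* bit range of the field within the register */
};

static bool field_present(const struct field_desc *f)
{
	return f->reg != 0xffffffff;
}

static void field_write(volatile uint32_t *regs, const struct field_desc *f,
			uint32_t val)
{
	uint32_t mask;

	if (!field_present(f))
		return;			/* SoC has no such field: no-op */

	mask = (~0u >> (31 - f->msb)) & (~0u << f->lsb);
	regs[f->reg / 4] = (regs[f->reg / 4] & ~mask) | ((val << f->lsb) & mask);
}

Since the UV swap field is defined as 0xffffffff on rk3568/rk3588, as the
commit message notes, the now-unconditional write in the hunk below reduces to
a no-op on those two platforms.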
Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-4-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 1280c5f07557..338cfb69e1cf 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1377,10 +1377,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, rb_swap = vop2_win_rb_swap(fb->format->format); vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap); - if (!vop2_cluster_window(win)) { - uv_swap = vop2_win_uv_swap(fb->format->format); - vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); - } + uv_swap = vop2_win_uv_swap(fb->format->format); + vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); if (fb->format->is_yuv) { vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4)); From 1803bfb59656d41c4f9f7e5f23188248b649e1a4 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:18 +0800 Subject: [PATCH 38/77] dt-bindings: display: vop2: describe constraint SoC by SoC As more SoCs variants are introduced, each SoC brings its own unique set of constraints, describe this constraints SoC by SoC will make things easier. Signed-off-by: Andy Yan Reviewed-by: Krzysztof Kozlowski Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-5-andyshrk@163.com --- .../display/rockchip/rockchip-vop2.yaml | 40 ++++++++++++------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml index 46d956e63338..a5771edd83b5 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml @@ -14,6 +14,7 @@ description: maintainers: - Sandy Huang - Heiko Stuebner + - Andy Yan properties: compatible: @@ -124,43 +125,54 @@ allOf: properties: compatible: contains: - const: rockchip,rk3588-vop + enum: + - rockchip,rk3566-vop + - rockchip,rk3568-vop then: properties: clocks: - minItems: 7 + maxItems: 5 + clock-names: - minItems: 7 + maxItems: 5 ports: required: - port@0 - port@1 - port@2 - - port@3 - required: - - rockchip,grf - - rockchip,vo1-grf - - rockchip,vop-grf - - rockchip,pmu - - else: - properties: rockchip,vo1-grf: false rockchip,vop-grf: false rockchip,pmu: false + - if: + properties: + compatible: + contains: + const: rockchip,rk3588-vop + then: + properties: clocks: - maxItems: 5 + minItems: 7 + maxItems: 9 + clock-names: - maxItems: 5 + minItems: 7 + maxItems: 9 ports: required: - port@0 - port@1 - port@2 + - port@3 + + required: + - rockchip,grf + - rockchip,vo1-grf + - rockchip,vop-grf + - rockchip,pmu additionalProperties: false From 47d31e6598e4fda1433e747ef031cb972c57c5ae Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:19 +0800 Subject: [PATCH 39/77] dt-bindings: display: vop2: Add missing rockchip,grf property for rk3566/8 The clock polarity of RGB signal output is controlled by GRF, this property is already being used in the current device tree, but forgot to describe it as a required property in the binding file. 
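For context (a sketch of the general pattern, not the vop2 driver's exact GRF
handling): a required syscon phandle like this is typically resolved once and
then poked through the shared regmap, e.g. to flip the RGB output clock
polarity bit. The register offset and bit below are placeholders; only the
"rockchip,grf" property name comes from this binding:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define EXAMPLE_GRF_IO_CON	0x0	/* placeholder GRF register offset */
#define EXAMPLE_DCLK_INV	BIT(0)	/* placeholder DCLK polarity bit */

static int example_set_rgb_dclk_polarity(struct device_node *np, bool invert)
{
	struct regmap *grf;

	/* Fails if the node lacks the required rockchip,grf phandle. */
	grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(grf))
		return PTR_ERR(grf);

	return regmap_update_bits(grf, EXAMPLE_GRF_IO_CON, EXAMPLE_DCLK_INV,
				  invert ? EXAMPLE_DCLK_INV : 0);
}

Marking rockchip,grf as required in the schema simply documents that this kind
of access cannot work without the phandle being present in the device tree.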
Signed-off-by: Andy Yan Acked-by: Krzysztof Kozlowski Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-6-andyshrk@163.com --- .../devicetree/bindings/display/rockchip/rockchip-vop2.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml index a5771edd83b5..083eadcf0588 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml @@ -146,6 +146,9 @@ allOf: rockchip,vop-grf: false rockchip,pmu: false + required: + - rockchip,grf + - if: properties: compatible: @@ -200,6 +203,7 @@ examples: "dclk_vp1", "dclk_vp2"; power-domains = <&power RK3568_PD_VO>; + rockchip,grf = <&grf>; iommus = <&vop_mmu>; vop_out: ports { #address-cells = <1>; From c3b7c5a4d7c17afb158ba5a41222e95a32886ada Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:20 +0800 Subject: [PATCH 40/77] dt-bindings: display: vop2: Add rk3576 support Add vop found on rk3576, the main difference between rk3576 and the previous vop is that each VP has its own interrupt line. Signed-off-by: Andy Yan Reviewed-by: Krzysztof Kozlowski Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-7-andyshrk@163.com --- .../display/rockchip/rockchip-vop2.yaml | 61 ++++++++++++++++++- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml index 083eadcf0588..f546d481b7e5 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml @@ -21,6 +21,7 @@ properties: enum: - rockchip,rk3566-vop - rockchip,rk3568-vop + - rockchip,rk3576-vop - rockchip,rk3588-vop reg: @@ -38,10 +39,21 @@ properties: - const: gamma-lut interrupts: - maxItems: 1 + minItems: 1 + maxItems: 4 description: - The VOP interrupt is shared by several interrupt sources, such as - frame start (VSYNC), line flag and other status interrupts. + For VOP version under rk3576, the interrupt is shared by several interrupt + sources, such as frame start (VSYNC), line flag and other interrupt status. + For VOP version from rk3576 there is a system interrupt for bus error, and + every video port has it's independent interrupts for vsync and other video + port related error interrupts. + + interrupt-names: + items: + - const: sys + - const: vp0 + - const: vp1 + - const: vp2 # See compatible-specific constraints below. 
clocks: @@ -136,6 +148,11 @@ allOf: clock-names: maxItems: 5 + interrupts: + maxItems: 1 + + interrupt-names: false + ports: required: - port@0 @@ -149,6 +166,39 @@ allOf: required: - rockchip,grf + - if: + properties: + compatible: + contains: + enum: + - rockchip,rk3576-vop + then: + properties: + clocks: + maxItems: 5 + + clock-names: + maxItems: 5 + + interrupts: + minItems: 4 + + interrupt-names: + minItems: 4 + + ports: + required: + - port@0 + - port@1 + - port@2 + + rockchip,vo1-grf: false + rockchip,vop-grf: false + + required: + - rockchip,grf + - rockchip,pmu + - if: properties: compatible: @@ -164,6 +214,11 @@ allOf: minItems: 7 maxItems: 9 + interrupts: + maxItems: 1 + + interrupt-names: false + ports: required: - port@0 From 944757a4cba62b1b50fb51933d0608117599db71 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Mon, 3 Mar 2025 11:44:21 +0800 Subject: [PATCH 41/77] drm/rockchip: vop2: Add support for rk3576 VOP2 on rk3576: Three video ports: VP0 Max 4096x2160 VP1 Max 2560x1600 VP2 Max 1920x1080 2 4K Cluster windows with AFBC/RFBC, line RGB and YUV 4 Esmart windows with line RGB/YUV support: Esmart0/1: 4K Esmart2/3: 2k, or worked together as a single 4K plane at shared line buffer mode. Compared to the previous VOP, another difference is that each VP has its own independent vsync interrupt number. Signed-off-by: Andy Yan Tested-by: Michael Riesch # on RK3568 Tested-by: Detlev Casanova Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303034436.192400-8-andyshrk@163.com --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 138 ++- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 87 ++ drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 950 +++++++++++++++++-- 3 files changed, 1050 insertions(+), 125 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 338cfb69e1cf..d0f5fea15e21 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1280,6 +1280,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id); } + if (vop2->version >= VOP_VERSION_RK3576) + vop2_win_write(win, VOP2_WIN_VP_SEL, vp->id); + if (vop2_cluster_window(win)) vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en); @@ -1344,6 +1347,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, else vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0); + if (vop2->version >= VOP_VERSION_RK3576) { + vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET_EN, 1); + vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET, yrgb_mst); + } + transform_offset = vop2_afbc_transform_offset(pstate, half_block_en); vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst); vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info); @@ -2159,6 +2167,52 @@ static const struct drm_crtc_funcs vop2_crtc_funcs = { .late_register = vop2_crtc_late_register, }; +static irqreturn_t rk3576_vp_isr(int irq, void *data) +{ + struct vop2_video_port *vp = data; + struct vop2 *vop2 = vp->vop2; + struct drm_crtc *crtc = &vp->crtc; + uint32_t irqs; + int ret = IRQ_NONE; + + if (!pm_runtime_get_if_in_use(vop2->dev)) + return IRQ_NONE; + + irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id)); + vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs); + + if (irqs & VP_INT_DSP_HOLD_VALID) { + complete(&vp->dsp_hold_completion); + ret = IRQ_HANDLED; + } + + if (irqs & VP_INT_FS_FIELD) { + drm_crtc_handle_vblank(crtc); + 
spin_lock(&crtc->dev->event_lock); + if (vp->event) { + u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE); + + if (!(val & BIT(vp->id))) { + drm_crtc_send_vblank_event(crtc, vp->event); + vp->event = NULL; + drm_crtc_vblank_put(crtc); + } + } + spin_unlock(&crtc->dev->event_lock); + + ret = IRQ_HANDLED; + } + + if (irqs & VP_INT_POST_BUF_EMPTY) { + drm_err_ratelimited(vop2->drm, "POST_BUF_EMPTY irq err at vp%d\n", vp->id); + ret = IRQ_HANDLED; + } + + pm_runtime_put(vop2->dev); + + return ret; +} + static irqreturn_t vop2_isr(int irq, void *data) { struct vop2 *vop2 = data; @@ -2174,41 +2228,43 @@ static irqreturn_t vop2_isr(int irq, void *data) if (!pm_runtime_get_if_in_use(vop2->dev)) return IRQ_NONE; - for (i = 0; i < vop2_data->nr_vps; i++) { - struct vop2_video_port *vp = &vop2->vps[i]; - struct drm_crtc *crtc = &vp->crtc; - u32 irqs; + if (vop2->version < VOP_VERSION_RK3576) { + for (i = 0; i < vop2_data->nr_vps; i++) { + struct vop2_video_port *vp = &vop2->vps[i]; + struct drm_crtc *crtc = &vp->crtc; + u32 irqs; - irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id)); - vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs); + irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id)); + vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs); - if (irqs & VP_INT_DSP_HOLD_VALID) { - complete(&vp->dsp_hold_completion); - ret = IRQ_HANDLED; - } - - if (irqs & VP_INT_FS_FIELD) { - drm_crtc_handle_vblank(crtc); - spin_lock(&crtc->dev->event_lock); - if (vp->event) { - u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE); + if (irqs & VP_INT_DSP_HOLD_VALID) { + complete(&vp->dsp_hold_completion); + ret = IRQ_HANDLED; + } - if (!(val & BIT(vp->id))) { - drm_crtc_send_vblank_event(crtc, vp->event); - vp->event = NULL; - drm_crtc_vblank_put(crtc); + if (irqs & VP_INT_FS_FIELD) { + drm_crtc_handle_vblank(crtc); + spin_lock(&crtc->dev->event_lock); + if (vp->event) { + u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE); + + if (!(val & BIT(vp->id))) { + drm_crtc_send_vblank_event(crtc, vp->event); + vp->event = NULL; + drm_crtc_vblank_put(crtc); + } } - } - spin_unlock(&crtc->dev->event_lock); + spin_unlock(&crtc->dev->event_lock); - ret = IRQ_HANDLED; - } + ret = IRQ_HANDLED; + } - if (irqs & VP_INT_POST_BUF_EMPTY) { - drm_err_ratelimited(vop2->drm, - "POST_BUF_EMPTY irq err at vp%d\n", - vp->id); - ret = IRQ_HANDLED; + if (irqs & VP_INT_POST_BUF_EMPTY) { + drm_err_ratelimited(vop2->drm, + "POST_BUF_EMPTY irq err at vp%d\n", + vp->id); + ret = IRQ_HANDLED; + } } } @@ -2677,6 +2733,30 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; + if (vop2->version >= VOP_VERSION_RK3576) { + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, drm) { + struct vop2_video_port *vp = to_vop2_video_port(crtc); + int vp_irq; + const char *irq_name = devm_kasprintf(dev, GFP_KERNEL, "vp%d", vp->id); + + if (!irq_name) + return -ENOMEM; + + vp_irq = platform_get_irq_byname(pdev, irq_name); + if (vp_irq < 0) + return dev_err_probe(drm->dev, vp_irq, + "cannot find irq for vop2 vp%d\n", vp->id); + + ret = devm_request_irq(dev, vp_irq, rk3576_vp_isr, IRQF_SHARED, irq_name, + vp); + if (ret) + dev_err_probe(drm->dev, ret, + "request irq for vop2 vp%d failed\n", vp->id); + } + } + ret = vop2_find_rgb_encoder(vop2); if (ret >= 0) { vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index 46d37c61279e..680bedbb770e 100644 --- 
a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -44,6 +44,13 @@ enum win_dly_mode { VOP2_DLY_MODE_MAX, }; +enum vop2_dly_module { + VOP2_DLY_WIN, /** Win delay cycle for this VP */ + VOP2_DLY_LAYER_MIX, /** Layer Mix delay cycle for this VP */ + VOP2_DLY_HDR_MIX, /** HDR delay cycle for this VP */ + VOP2_DLY_MAX, +}; + enum vop2_scale_up_mode { VOP2_SCALE_UP_NRST_NBOR, VOP2_SCALE_UP_BIL, @@ -140,16 +147,22 @@ enum vop2_win_regs { VOP2_WIN_AFBC_UV_SWAP, VOP2_WIN_AFBC_AUTO_GATING_EN, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, + VOP2_WIN_AFBC_PLD_OFFSET_EN, VOP2_WIN_AFBC_PIC_VIR_WIDTH, VOP2_WIN_AFBC_TILE_NUM, VOP2_WIN_AFBC_PIC_OFFSET, VOP2_WIN_AFBC_PIC_SIZE, VOP2_WIN_AFBC_DSP_OFFSET, + VOP2_WIN_AFBC_PLD_OFFSET, VOP2_WIN_TRANSFORM_OFFSET, VOP2_WIN_AFBC_HDR_PTR, VOP2_WIN_AFBC_HALF_BLOCK_EN, VOP2_WIN_AFBC_ROTATE_270, VOP2_WIN_AFBC_ROTATE_90, + + VOP2_WIN_VP_SEL, + VOP2_WIN_DLY_NUM, + VOP2_WIN_MAX_REG, }; @@ -215,6 +228,10 @@ struct vop2_video_port_data { struct vop_rect max_output; const u8 pre_scan_max_dly[4]; unsigned int offset; + /** + * @pixel_rate: pixel per cycle + */ + u8 pixel_rate; }; struct vop2_video_port { @@ -375,10 +392,13 @@ enum dst_factor_mode { #define RK3568_REG_CFG_DONE 0x000 #define RK3568_VERSION_INFO 0x004 #define RK3568_SYS_AUTO_GATING_CTRL 0x008 +#define RK3576_SYS_MMU_CTRL_IMD 0x020 #define RK3568_SYS_AXI_LUT_CTRL 0x024 #define RK3568_DSP_IF_EN 0x028 +#define RK3576_SYS_PORT_CTRL_IMD 0x028 #define RK3568_DSP_IF_CTRL 0x02c #define RK3568_DSP_IF_POL 0x030 +#define RK3576_SYS_CLUSTER_PD_CTRL_IMD 0x030 #define RK3588_SYS_PD_CTRL 0x034 #define RK3568_WB_CTRL 0x40 #define RK3568_WB_XSCAL_FACTOR 0x44 @@ -398,6 +418,55 @@ enum dst_factor_mode { #define RK3568_VP_INT_CLR(vp) (0xA4 + (vp) * 0x10) #define RK3568_VP_INT_STATUS(vp) (0xA8 + (vp) * 0x10) #define RK3568_VP_INT_RAW_STATUS(vp) (0xAC + (vp) * 0x10) +#define RK3576_WB_CTRL 0x100 +#define RK3576_WB_XSCAL_FACTOR 0x104 +#define RK3576_WB_YRGB_MST 0x108 +#define RK3576_WB_CBR_MST 0x10C +#define RK3576_WB_VIR_STRIDE 0x110 +#define RK3576_WB_TIMEOUT_CTRL 0x114 +#define RK3576_MIPI0_IF_CTRL 0x180 +#define RK3576_HDMI0_IF_CTRL 0x184 +#define RK3576_EDP0_IF_CTRL 0x188 +#define RK3576_DP0_IF_CTRL 0x18C +#define RK3576_RGB_IF_CTRL 0x194 +#define RK3576_DP1_IF_CTRL 0x1A4 +#define RK3576_DP2_IF_CTRL 0x1B0 + +/* Extra OVL register definition */ +#define RK3576_SYS_EXTRA_ALPHA_CTRL 0x500 +#define RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL 0x530 +#define RK3576_CLUSTER0_MIX_DST_COLOR_CTRL 0x534 +#define RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL 0x538 +#define RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL 0x53c +#define RK3576_CLUSTER1_MIX_SRC_COLOR_CTRL 0x540 +#define RK3576_CLUSTER1_MIX_DST_COLOR_CTRL 0x544 +#define RK3576_CLUSTER1_MIX_SRC_ALPHA_CTRL 0x548 +#define RK3576_CLUSTER1_MIX_DST_ALPHA_CTRL 0x54c + +/* OVL registers for Video Port definition */ +#define RK3576_OVL_CTRL(vp) (0x600 + (vp) * 0x100) +#define RK3576_OVL_LAYER_SEL(vp) (0x604 + (vp) * 0x100) +#define RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp) (0x620 + (vp) * 0x100) +#define RK3576_OVL_MIX0_DST_COLOR_CTRL(vp) (0x624 + (vp) * 0x100) +#define RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp) (0x628 + (vp) * 0x100) +#define RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp) (0x62C + (vp) * 0x100) +#define RK3576_OVL_MIX1_SRC_COLOR_CTRL(vp) (0x630 + (vp) * 0x100) +#define RK3576_OVL_MIX1_DST_COLOR_CTRL(vp) (0x634 + (vp) * 0x100) +#define RK3576_OVL_MIX1_SRC_ALPHA_CTRL(vp) (0x638 + (vp) * 0x100) +#define RK3576_OVL_MIX1_DST_ALPHA_CTRL(vp) (0x63C + (vp) * 0x100) +#define 
RK3576_OVL_MIX2_SRC_COLOR_CTRL(vp) (0x640 + (vp) * 0x100) +#define RK3576_OVL_MIX2_DST_COLOR_CTRL(vp) (0x644 + (vp) * 0x100) +#define RK3576_OVL_MIX2_SRC_ALPHA_CTRL(vp) (0x648 + (vp) * 0x100) +#define RK3576_OVL_MIX2_DST_ALPHA_CTRL(vp) (0x64C + (vp) * 0x100) +#define RK3576_EXTRA_OVL_SRC_COLOR_CTRL(vp) (0x650 + (vp) * 0x100) +#define RK3576_EXTRA_OVL_DST_COLOR_CTRL(vp) (0x654 + (vp) * 0x100) +#define RK3576_EXTRA_OVL_SRC_ALPHA_CTRL(vp) (0x658 + (vp) * 0x100) +#define RK3576_EXTRA_OVL_DST_ALPHA_CTRL(vp) (0x65C + (vp) * 0x100) +#define RK3576_OVL_HDR_SRC_COLOR_CTRL(vp) (0x660 + (vp) * 0x100) +#define RK3576_OVL_HDR_DST_COLOR_CTRL(vp) (0x664 + (vp) * 0x100) +#define RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp) (0x668 + (vp) * 0x100) +#define RK3576_OVL_HDR_DST_ALPHA_CTRL(vp) (0x66C + (vp) * 0x100) +#define RK3576_OVL_BG_MIX_CTRL(vp) (0x670 + (vp) * 0x100) /* Video Port registers definition */ #define RK3568_VP0_CTRL_BASE 0x0C00 @@ -480,7 +549,11 @@ enum dst_factor_mode { #define RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET 0x68 #define RK3568_CLUSTER_WIN_AFBCD_CTRL 0x6C +#define RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET 0x78 + #define RK3568_CLUSTER_CTRL 0x100 +#define RK3576_CLUSTER_PORT_SEL_IMD 0x1F4 +#define RK3576_CLUSTER_DLY_NUM 0x1F8 /* (E)smart register definition, offset relative to window base */ #define RK3568_SMART_CTRL0 0x00 @@ -531,6 +604,9 @@ enum dst_factor_mode { #define RK3568_SMART_REGION3_SCL_FACTOR_CBR 0xC8 #define RK3568_SMART_REGION3_SCL_OFFSET 0xCC #define RK3568_SMART_COLOR_KEY_CTRL 0xD0 +#define RK3576_SMART_ALPHA_MAP 0xD8 +#define RK3576_SMART_PORT_SEL_IMD 0xF4 +#define RK3576_SMART_DLY_NUM 0xF8 /* HDR register definition */ #define RK3568_HDR_LUT_CTRL 0x2000 @@ -679,6 +755,17 @@ enum dst_factor_mode { #define POLFLAG_DCLK_INV BIT(3) +#define RK3576_OVL_CTRL__YUV_MODE BIT(0) +#define RK3576_OVL_BG_MIX_CTRL__BG_DLY GENMASK(31, 24) + +#define RK3576_DSP_IF_CFG_DONE_IMD BIT(31) +#define RK3576_DSP_IF_DCLK_SEL_OUT BIT(21) +#define RK3576_DSP_IF_PCLK_DIV BIT(20) +#define RK3576_DSP_IF_PIN_POL GENMASK(5, 4) +#define RK3576_DSP_IF_MUX GENMASK(3, 2) +#define RK3576_DSP_IF_CLK_OUT_EN BIT(1) +#define RK3576_DSP_IF_EN BIT(0) + enum vop2_layer_phy_id { ROCKCHIP_VOP2_CLUSTER0 = 0, ROCKCHIP_VOP2_CLUSTER1, diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 97df9d479f11..a1402622589a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -70,6 +70,37 @@ static const uint32_t formats_cluster[] = { DRM_FORMAT_Y210, /* yuv422_10bit non-Linear mode only */ }; +/* + * The cluster windows on rk3576 support: + * RGB: linear mode and afbc + * YUV: linear mode and rfbc + * rfbc is a rockchip defined non-linear mode, produced by + * Video decoder + */ +static const uint32_t formats_rk3576_cluster[] = { + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */ + DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */ + DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */ + DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */ + DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */ + DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */ + DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */ + 
DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */ + DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */ +}; + static const uint32_t formats_esmart[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, @@ -116,6 +147,41 @@ static const uint32_t formats_rk356x_esmart[] = { DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */ }; +/* + * Add XRGB2101010/ARGB2101010ARGB1555/XRGB1555 + */ +static const uint32_t formats_rk3576_esmart[] = { + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */ + DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */ + DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */ + DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */ + DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */ + DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */ + DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */ + DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */ + DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */ + DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */ + DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */ + DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode */ + DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode */ +}; + static const uint32_t formats_smart[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, @@ -169,6 +235,48 @@ static const uint64_t format_modifiers_afbc[] = { DRM_FORMAT_MOD_INVALID, }; +/* used from rk3576, afbc 32*8 half mode */ +static const uint64_t format_modifiers_rk3576_afbc[] = { + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_SPLIT), + + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_SPARSE | + AFBC_FORMAT_MOD_SPLIT), + + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_YTR | + AFBC_FORMAT_MOD_SPLIT), + + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_CBR | + AFBC_FORMAT_MOD_SPLIT), + + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_CBR | + AFBC_FORMAT_MOD_SPARSE | + AFBC_FORMAT_MOD_SPLIT), + + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_YTR | + AFBC_FORMAT_MOD_CBR | + AFBC_FORMAT_MOD_SPLIT), + + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_YTR | + AFBC_FORMAT_MOD_CBR | + AFBC_FORMAT_MOD_SPARSE | + AFBC_FORMAT_MOD_SPLIT), + + /* SPLIT mandates SPARSE, RGB modes mandates YTR */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_YTR | + AFBC_FORMAT_MOD_SPARSE | + AFBC_FORMAT_MOD_SPLIT), + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + static const struct reg_field rk3568_vop_cluster_regs[VOP2_WIN_MAX_REG] = { [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), @@ -301,6 +409,147 @@ static const struct reg_field rk3568_vop_smart_regs[VOP2_WIN_MAX_REG] = { [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, }; +static const struct reg_field rk3576_vop_cluster_regs[VOP2_WIN_MAX_REG] = { + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0), + [VOP2_WIN_FORMAT] = 
REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5), + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14), + [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 17, 17), + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18), + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31), + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31), + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31), + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31), + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31), + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19), + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15), + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31), + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8), + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9), + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11), + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 4), + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 9), + /* Read only bit on rk3576, writing on this bit have no effect.*/ + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13), + + [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_CLUSTER_PORT_SEL_IMD, 0, 1), + [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_CLUSTER_DLY_NUM, 0, 7), + + /* Scale */ + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15), + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31), + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3), + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15), + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 22, 23), + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28), + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29), + + /* cluster regs */ + [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1), + [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0), + [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7), + + /* afbc regs */ + [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6), + [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9), + [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10), + [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4), + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7), + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8), + [VOP2_WIN_AFBC_PLD_OFFSET_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 16, 16), + [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31), + [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31), + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15), + [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31), + [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31), + [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31), + [VOP2_WIN_AFBC_PLD_OFFSET] = REG_FIELD(RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET, 0, 31), + [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31), + [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0), + 
[VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1), + [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2), + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3), + [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff }, + [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, + [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff }, + [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff }, +}; + +static const struct reg_field rk3576_vop_smart_regs[VOP2_WIN_MAX_REG] = { + [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0), + [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5), + [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12), + [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14), + [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16), + [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31), + [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31), + [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28), + [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31), + [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31), + [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17), + [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15), + [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31), + [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0), + [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1), + [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3), + [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31), + [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29), + [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31), + [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_SMART_PORT_SEL_IMD, 0, 1), + [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_SMART_DLY_NUM, 0, 7), + [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8), + [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16), + [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1), + + /* Scale */ + [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15), + [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31), + [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1), + [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3), + [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5), + [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7), + [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17), + [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8), + [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9), + [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10), + [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11), + [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, + + /* CBCR share the same 
scale factor as YRGB */ + [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff }, + [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff}, + [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff}, + [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff}, + + [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff }, + [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff }, + [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff }, +}; + static const struct vop2_video_port_data rk3568_vop_video_ports[] = { { .id = 0, @@ -355,98 +604,385 @@ static const struct vop2_win_data rk3568_vop_win_data[] = { .layer_sel_id = { 3, 3, 3, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_PRIMARY, - .max_upscale_factor = 8, - .max_downscale_factor = 8, - .dly = { 20, 47, 41 }, + .max_upscale_factor = 8, + .max_downscale_factor = 8, + .dly = { 20, 47, 41 }, + }, { + .name = "Smart1-win0", + .phys_id = ROCKCHIP_VOP2_SMART1, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), + .formats = formats_smart, + .nformats = ARRAY_SIZE(formats_smart), + .format_modifiers = format_modifiers, + .base = 0x1e00, + .layer_sel_id = { 7, 7, 7, 0xf }, + .supported_rotations = DRM_MODE_REFLECT_Y, + .type = DRM_PLANE_TYPE_PRIMARY, + .max_upscale_factor = 8, + .max_downscale_factor = 8, + .dly = { 20, 47, 41 }, + }, { + .name = "Esmart1-win0", + .phys_id = ROCKCHIP_VOP2_ESMART1, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), + .formats = formats_rk356x_esmart, + .nformats = ARRAY_SIZE(formats_rk356x_esmart), + .format_modifiers = format_modifiers, + .base = 0x1a00, + .layer_sel_id = { 6, 6, 6, 0xf }, + .supported_rotations = DRM_MODE_REFLECT_Y, + .type = DRM_PLANE_TYPE_PRIMARY, + .max_upscale_factor = 8, + .max_downscale_factor = 8, + .dly = { 20, 47, 41 }, + }, { + .name = "Esmart0-win0", + .phys_id = ROCKCHIP_VOP2_ESMART0, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), + .formats = formats_rk356x_esmart, + .nformats = ARRAY_SIZE(formats_rk356x_esmart), + .format_modifiers = format_modifiers, + .base = 0x1800, + .layer_sel_id = { 2, 2, 2, 0xf }, + .supported_rotations = DRM_MODE_REFLECT_Y, + .type = DRM_PLANE_TYPE_PRIMARY, + .max_upscale_factor = 8, + .max_downscale_factor = 8, + .dly = { 20, 47, 41 }, + }, { + .name = "Cluster0-win0", + .phys_id = ROCKCHIP_VOP2_CLUSTER0, + .base = 0x1000, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), + .formats = formats_cluster, + .nformats = ARRAY_SIZE(formats_cluster), + .format_modifiers = format_modifiers_afbc, + .layer_sel_id = { 0, 0, 0, 0xf }, + .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | + DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, + .max_upscale_factor = 4, + 
.max_downscale_factor = 4, + .dly = { 0, 27, 21 }, + .type = DRM_PLANE_TYPE_OVERLAY, + .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, + }, { + .name = "Cluster1-win0", + .phys_id = ROCKCHIP_VOP2_CLUSTER1, + .base = 0x1200, + .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), + .formats = formats_cluster, + .nformats = ARRAY_SIZE(formats_cluster), + .format_modifiers = format_modifiers_afbc, + .layer_sel_id = { 1, 1, 1, 0xf }, + .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | + DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, + .type = DRM_PLANE_TYPE_OVERLAY, + .max_upscale_factor = 4, + .max_downscale_factor = 4, + .dly = { 0, 27, 21 }, + .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, + }, +}; + +static const struct vop2_regs_dump rk3568_regs_dump[] = { + { + .name = "SYS", + .base = RK3568_REG_CFG_DONE, + .size = 0x100, + .en_reg = 0, + .en_val = 0, + .en_mask = 0 + }, { + .name = "OVL", + .base = RK3568_OVL_CTRL, + .size = 0x100, + .en_reg = 0, + .en_val = 0, + .en_mask = 0, + }, { + .name = "VP0", + .base = RK3568_VP0_CTRL_BASE, + .size = 0x100, + .en_reg = RK3568_VP_DSP_CTRL, + .en_val = 0, + .en_mask = RK3568_VP_DSP_CTRL__STANDBY, + }, { + .name = "VP1", + .base = RK3568_VP1_CTRL_BASE, + .size = 0x100, + .en_reg = RK3568_VP_DSP_CTRL, + .en_val = 0, + .en_mask = RK3568_VP_DSP_CTRL__STANDBY, + }, { + .name = "VP2", + .base = RK3568_VP2_CTRL_BASE, + .size = 0x100, + .en_reg = RK3568_VP_DSP_CTRL, + .en_val = 0, + .en_mask = RK3568_VP_DSP_CTRL__STANDBY, + + }, { + .name = "Cluster0", + .base = RK3568_CLUSTER0_CTRL_BASE, + .size = 0x110, + .en_reg = RK3568_CLUSTER_WIN_CTRL0, + .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, + .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, + }, { + .name = "Cluster1", + .base = RK3568_CLUSTER1_CTRL_BASE, + .size = 0x110, + .en_reg = RK3568_CLUSTER_WIN_CTRL0, + .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, + .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, + }, { + .name = "Esmart0", + .base = RK3568_ESMART0_CTRL_BASE, + .size = 0xf0, + .en_reg = RK3568_SMART_REGION0_CTRL, + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, + }, { + .name = "Esmart1", + .base = RK3568_ESMART1_CTRL_BASE, + .size = 0xf0, + .en_reg = RK3568_SMART_REGION0_CTRL, + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, + }, { + .name = "Smart0", + .base = RK3568_SMART0_CTRL_BASE, + .size = 0xf0, + .en_reg = RK3568_SMART_REGION0_CTRL, + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, + }, { + .name = "Smart1", + .base = RK3568_SMART1_CTRL_BASE, + .size = 0xf0, + .en_reg = RK3568_SMART_REGION0_CTRL, + .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, + .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, + }, +}; + +static const struct vop2_video_port_data rk3576_vop_video_ports[] = { + { + .id = 0, + .feature = VOP2_VP_FEATURE_OUTPUT_10BIT, + .gamma_lut_len = 1024, + .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */ + .max_output = { 4096, 2304 }, + /* win layer_mix hdr */ + .pre_scan_max_dly = { 10, 8, 2, 0 }, + .offset = 0xc00, + .pixel_rate = 2, + }, { + .id = 1, + .feature = VOP2_VP_FEATURE_OUTPUT_10BIT, + .gamma_lut_len = 1024, + .cubic_lut_len = 729, /* 9x9x9 */ + .max_output = { 2560, 1600 }, + /* win layer_mix hdr */ + .pre_scan_max_dly = { 10, 6, 0, 0 }, + .offset = 0xd00, + .pixel_rate = 1, + }, { + .id = 2, + .gamma_lut_len = 1024, + .max_output = { 1920, 1080 }, + /* win layer_mix hdr */ + .pre_scan_max_dly = { 10, 6, 0, 0 }, + .offset = 0xe00, + 
.pixel_rate = 1, + }, +}; + +/* + * rk3576 vop with 2 cluster, 4 esmart win. + * Every cluster can work as 4K win or split into two 2K win. + * All win in cluster support AFBCD. + * + * Every esmart win support 4 Multi-region. + * + * VP0 can use Cluster0/1 and Esmart0/2 + * VP1 can use Cluster0/1 and Esmart1/3 + * VP2 can use Esmart0/1/2/3 + * + * Scale filter mode: + * + * * Cluster: + * * Support prescale down: + * * H/V: gt2/avg2 or gt4/avg4 + * * After prescale down: + * * nearest-neighbor/bilinear/multi-phase filter for scale up + * * nearest-neighbor/bilinear/multi-phase filter for scale down + * + * * Esmart: + * * Support prescale down: + * * H: gt2/avg2 or gt4/avg4 + * * V: gt2 or gt4 + * * After prescale down: + * * nearest-neighbor/bilinear/bicubic for scale up + * * nearest-neighbor/bilinear for scale down + * + * AXI config:: + * + * * Cluster0 win0: 0xa, 0xb [AXI0] + * * Cluster0 win1: 0xc, 0xd [AXI0] + * * Cluster1 win0: 0x6, 0x7 [AXI0] + * * Cluster1 win1: 0x8, 0x9 [AXI0] + * * Esmart0: 0x10, 0x11 [AXI0] + * * Esmart1: 0x12, 0x13 [AXI0] + * * Esmart2: 0xa, 0xb [AXI1] + * * Esmart3: 0xc, 0xd [AXI1] + * * Lut dma rid: 0x1, 0x2, 0x3 [AXI0] + * * DCI dma rid: 0x4 [AXI0] + * * Metadata rid: 0x5 [AXI0] + * + * * Limit: + * * (1) Cluster0/1 are fixed on AXI0 by IC design + * * (2) 0x0 and 0xf can't be used; + * * (3) 5 Bits ID for eache axi bus + * * (3) cluster and lut/dci/metadata rid must smaller than 0xf, + * * if Cluster rid is bigger than 0xf, VOP will dead at the + * * system bandwidth very terrible scene. + */ +static const struct vop2_win_data rk3576_vop_win_data[] = { + { + .name = "Cluster0-win0", + .phys_id = ROCKCHIP_VOP2_CLUSTER0, + .base = 0x1000, + .possible_vp_mask = BIT(0) | BIT(1), + .formats = formats_rk3576_cluster, + .nformats = ARRAY_SIZE(formats_rk3576_cluster), + .format_modifiers = format_modifiers_rk3576_afbc, + .layer_sel_id = { 0, 0, 0xf, 0xf }, + .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, + .type = DRM_PLANE_TYPE_PRIMARY, + .axi_bus_id = 0, + .axi_yrgb_r_id = 0xa, + .axi_uv_r_id = 0xb, + .max_upscale_factor = 4, + .max_downscale_factor = 4, + .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, }, { - .name = "Smart1-win0", - .phys_id = ROCKCHIP_VOP2_SMART1, - .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), - .formats = formats_smart, - .nformats = ARRAY_SIZE(formats_smart), + .name = "Cluster1-win0", + .phys_id = ROCKCHIP_VOP2_CLUSTER1, + .base = 0x1200, + .possible_vp_mask = BIT(0) | BIT(1), + .formats = formats_rk3576_cluster, + .nformats = ARRAY_SIZE(formats_rk3576_cluster), + .format_modifiers = format_modifiers_rk3576_afbc, + .layer_sel_id = { 1, 1, 0xf, 0xf }, + .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, + .type = DRM_PLANE_TYPE_PRIMARY, + .axi_bus_id = 0, + .axi_yrgb_r_id = 6, + .axi_uv_r_id = 7, + .max_upscale_factor = 4, + .max_downscale_factor = 4, + .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, + }, { + .name = "Esmart0-win0", + .phys_id = ROCKCHIP_VOP2_ESMART0, + .base = 0x1800, + .possible_vp_mask = BIT(0) | BIT(2), + .formats = formats_rk3576_esmart, + .nformats = ARRAY_SIZE(formats_rk3576_esmart), .format_modifiers = format_modifiers, - .base = 0x1e00, - .layer_sel_id = { 7, 7, 7, 0xf }, + .layer_sel_id = { 2, 0xf, 0, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, - .type = DRM_PLANE_TYPE_PRIMARY, + .type = DRM_PLANE_TYPE_OVERLAY, + .axi_bus_id = 0, + .axi_yrgb_r_id = 0x10, + .axi_uv_r_id = 0x11, .max_upscale_factor = 8, .max_downscale_factor = 8, - .dly = { 20, 47, 41 }, }, { .name = 
"Esmart1-win0", .phys_id = ROCKCHIP_VOP2_ESMART1, - .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), - .formats = formats_rk356x_esmart, - .nformats = ARRAY_SIZE(formats_rk356x_esmart), - .format_modifiers = format_modifiers, .base = 0x1a00, - .layer_sel_id = { 6, 6, 6, 0xf }, + .possible_vp_mask = BIT(1) | BIT(2), + .formats = formats_rk3576_esmart, + .nformats = ARRAY_SIZE(formats_rk3576_esmart), + .format_modifiers = format_modifiers, + .layer_sel_id = { 0xf, 2, 1, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, - .type = DRM_PLANE_TYPE_PRIMARY, + .type = DRM_PLANE_TYPE_OVERLAY, + .axi_bus_id = 0, + .axi_yrgb_r_id = 0x12, + .axi_uv_r_id = 0x13, .max_upscale_factor = 8, .max_downscale_factor = 8, - .dly = { 20, 47, 41 }, }, { - .name = "Esmart0-win0", - .phys_id = ROCKCHIP_VOP2_ESMART0, - .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), - .formats = formats_rk356x_esmart, - .nformats = ARRAY_SIZE(formats_rk356x_esmart), + .name = "Esmart2-win0", + .phys_id = ROCKCHIP_VOP2_ESMART2, + .base = 0x1c00, + .possible_vp_mask = BIT(0) | BIT(2), + .formats = formats_rk3576_esmart, + .nformats = ARRAY_SIZE(formats_rk3576_esmart), .format_modifiers = format_modifiers, - .base = 0x1800, - .layer_sel_id = { 2, 2, 2, 0xf }, + .layer_sel_id = { 3, 0xf, 2, 0xf }, .supported_rotations = DRM_MODE_REFLECT_Y, - .type = DRM_PLANE_TYPE_PRIMARY, + .type = DRM_PLANE_TYPE_OVERLAY, + .axi_bus_id = 1, + .axi_yrgb_r_id = 0x0a, + .axi_uv_r_id = 0x0b, .max_upscale_factor = 8, .max_downscale_factor = 8, - .dly = { 20, 47, 41 }, }, { - .name = "Cluster0-win0", - .phys_id = ROCKCHIP_VOP2_CLUSTER0, - .base = 0x1000, - .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), - .formats = formats_cluster, - .nformats = ARRAY_SIZE(formats_cluster), - .format_modifiers = format_modifiers_afbc, - .layer_sel_id = { 0, 0, 0, 0xf }, - .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | - DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, - .max_upscale_factor = 4, - .max_downscale_factor = 4, - .dly = { 0, 27, 21 }, - .type = DRM_PLANE_TYPE_OVERLAY, - .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, - }, { - .name = "Cluster1-win0", - .phys_id = ROCKCHIP_VOP2_CLUSTER1, - .base = 0x1200, - .possible_vp_mask = BIT(0) | BIT(1) | BIT(2), - .formats = formats_cluster, - .nformats = ARRAY_SIZE(formats_cluster), - .format_modifiers = format_modifiers_afbc, - .layer_sel_id = { 1, 1, 1, 0xf }, - .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 | - DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y, + .name = "Esmart3-win0", + .phys_id = ROCKCHIP_VOP2_ESMART3, + .base = 0x1e00, + .possible_vp_mask = BIT(1) | BIT(2), + .formats = formats_rk3576_esmart, + .nformats = ARRAY_SIZE(formats_rk3576_esmart), + .format_modifiers = format_modifiers, + .layer_sel_id = { 0xf, 3, 3, 0xf }, + .supported_rotations = DRM_MODE_REFLECT_Y, .type = DRM_PLANE_TYPE_OVERLAY, - .max_upscale_factor = 4, - .max_downscale_factor = 4, - .dly = { 0, 27, 21 }, - .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER, + .axi_bus_id = 1, + .axi_yrgb_r_id = 0x0c, + .axi_uv_r_id = 0x0d, + .max_upscale_factor = 8, + .max_downscale_factor = 8, }, }; -static const struct vop2_regs_dump rk3568_regs_dump[] = { +static const struct vop2_regs_dump rk3576_regs_dump[] = { { .name = "SYS", .base = RK3568_REG_CFG_DONE, - .size = 0x100, + .size = 0x200, .en_reg = 0, .en_val = 0, .en_mask = 0 }, { - .name = "OVL", - .base = RK3568_OVL_CTRL, - .size = 0x100, + .name = "OVL_SYS", + .base = RK3576_SYS_EXTRA_ALPHA_CTRL, + .size = 0x50, + .en_reg = 0, + .en_val = 0, + .en_mask = 0, + }, { + .name = 
"OVL_VP0", + .base = RK3576_OVL_CTRL(0), + .size = 0x80, + .en_reg = 0, + .en_val = 0, + .en_mask = 0, + }, { + .name = "OVL_VP1", + .base = RK3576_OVL_CTRL(1), + .size = 0x80, + .en_reg = 0, + .en_val = 0, + .en_mask = 0, + }, { + .name = "OVL_VP2", + .base = RK3576_OVL_CTRL(2), + .size = 0x80, .en_reg = 0, .en_val = 0, .en_mask = 0, @@ -471,18 +1007,17 @@ static const struct vop2_regs_dump rk3568_regs_dump[] = { .en_reg = RK3568_VP_DSP_CTRL, .en_val = 0, .en_mask = RK3568_VP_DSP_CTRL__STANDBY, - }, { .name = "Cluster0", .base = RK3568_CLUSTER0_CTRL_BASE, - .size = 0x110, + .size = 0x200, .en_reg = RK3568_CLUSTER_WIN_CTRL0, .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, }, { .name = "Cluster1", .base = RK3568_CLUSTER1_CTRL_BASE, - .size = 0x110, + .size = 0x200, .en_reg = RK3568_CLUSTER_WIN_CTRL0, .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN, @@ -501,15 +1036,15 @@ static const struct vop2_regs_dump rk3568_regs_dump[] = { .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, }, { - .name = "Smart0", - .base = RK3568_SMART0_CTRL_BASE, + .name = "Esmart2", + .base = RK3588_ESMART2_CTRL_BASE, .size = 0xf0, .en_reg = RK3568_SMART_REGION0_CTRL, .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN, }, { - .name = "Smart1", - .base = RK3568_SMART1_CTRL_BASE, + .name = "Esmart3", + .base = RK3588_ESMART3_CTRL_BASE, .size = 0xf0, .en_reg = RK3568_SMART_REGION0_CTRL, .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN, @@ -908,6 +1443,84 @@ static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 return crtc->state->adjusted_mode.crtc_clock * 1000LL; } +static unsigned long rk3576_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_crtc *crtc = &vp->crtc; + struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); + u8 port_pix_rate = vp->data->pixel_rate; + int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_sel; + u32 ctrl, vp_clk_div, reg, dclk_div; + unsigned long dclk_in_rate, dclk_core_rate; + + if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420 || adjusted_mode->crtc_clock > 600000) + dclk_div = 2; + else + dclk_div = 1; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + dclk_core_rate = adjusted_mode->crtc_clock / 2; + else + dclk_core_rate = adjusted_mode->crtc_clock / port_pix_rate; + + dclk_in_rate = adjusted_mode->crtc_clock / dclk_div; + + dclk_core_div = dclk_in_rate > dclk_core_rate ? 1 : 0; + + if (vop2_output_if_is_edp(id)) + if_pixclk_div = port_pix_rate == 2 ? RK3576_DSP_IF_PCLK_DIV : 0; + else + if_pixclk_div = port_pix_rate == 1 ? 
RK3576_DSP_IF_PCLK_DIV : 0; + + if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420) { + if_dclk_sel = RK3576_DSP_IF_DCLK_SEL_OUT; + dclk_out_div = 1; + } else { + if_dclk_sel = 0; + dclk_out_div = 0; + } + + switch (id) { + case ROCKCHIP_VOP2_EP_HDMI0: + reg = RK3576_HDMI0_IF_CTRL; + break; + case ROCKCHIP_VOP2_EP_EDP0: + reg = RK3576_EDP0_IF_CTRL; + break; + case ROCKCHIP_VOP2_EP_MIPI0: + reg = RK3576_MIPI0_IF_CTRL; + break; + case ROCKCHIP_VOP2_EP_DP0: + reg = RK3576_DP0_IF_CTRL; + break; + case ROCKCHIP_VOP2_EP_DP1: + reg = RK3576_DP1_IF_CTRL; + break; + default: + drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id); + return 0; + } + + ctrl = vop2_readl(vop2, reg); + ctrl &= ~RK3576_DSP_IF_DCLK_SEL_OUT; + ctrl &= ~RK3576_DSP_IF_PCLK_DIV; + ctrl &= ~RK3576_DSP_IF_MUX; + ctrl |= RK3576_DSP_IF_CFG_DONE_IMD; + ctrl |= if_dclk_sel | if_pixclk_div; + ctrl |= RK3576_DSP_IF_CLK_OUT_EN | RK3576_DSP_IF_EN; + ctrl |= FIELD_PREP(RK3576_DSP_IF_MUX, vp->id); + ctrl |= FIELD_PREP(RK3576_DSP_IF_PIN_POL, polflags); + vop2_writel(vop2, reg, ctrl); + + vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div); + vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div); + + vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div); + + return dclk_in_rate * 1000LL; +} + /* * calc the dclk on rk3588 * the available div of dclk is 1, 2, 4 @@ -1246,6 +1859,7 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi struct drm_plane_state *bottom_win_pstate; bool src_pixel_alpha_en = false; u16 src_glb_alpha_val, dst_glb_alpha_val; + u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg; u32 offset = 0; bool premulti_en = false; bool swap = false; @@ -1283,14 +1897,22 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi break; } - vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset, - alpha.src_color_ctrl.val); - vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset, - alpha.dst_color_ctrl.val); - vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset, - alpha.src_alpha_ctrl.val); - vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset, - alpha.dst_alpha_ctrl.val); + if (vop2->version <= VOP_VERSION_RK3588) { + src_color_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL; + dst_color_ctrl_reg = RK3568_CLUSTER0_MIX_DST_COLOR_CTRL; + src_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL; + dst_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL; + } else { + src_color_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL; + dst_color_ctrl_reg = RK3576_CLUSTER0_MIX_DST_COLOR_CTRL; + src_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL; + dst_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL; + } + + vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val); + vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val); + vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val); + vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val); } static void vop2_setup_alpha(struct vop2_video_port *vp) @@ -1303,11 +1925,16 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) int pixel_alpha_en; int premulti_en, gpremulti_en = 0; int mixer_id; + u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg; u32 offset; bool bottom_layer_alpha_en = false; u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; - mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); + if (vop2->version <= 
VOP_VERSION_RK3588) + mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); + else + mixer_id = 0; + alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { @@ -1326,6 +1953,18 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) } } + if (vop2->version <= VOP_VERSION_RK3588) { + src_color_ctrl_reg = RK3568_MIX0_SRC_COLOR_CTRL; + dst_color_ctrl_reg = RK3568_MIX0_DST_COLOR_CTRL; + src_alpha_ctrl_reg = RK3568_MIX0_SRC_ALPHA_CTRL; + dst_alpha_ctrl_reg = RK3568_MIX0_DST_ALPHA_CTRL; + } else { + src_color_ctrl_reg = RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp->id); + dst_color_ctrl_reg = RK3576_OVL_MIX0_DST_COLOR_CTRL(vp->id); + src_alpha_ctrl_reg = RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp->id); + dst_alpha_ctrl_reg = RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp->id); + } + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { struct vop2_win *win = to_vop2_win(plane); int zpos = plane->state->normalized_zpos; @@ -1372,17 +2011,26 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) vop2_parse_alpha(&alpha_config, &alpha); offset = (mixer_id + zpos - 1) * 0x10; - vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset, - alpha.src_color_ctrl.val); - vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset, - alpha.dst_color_ctrl.val); - vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset, - alpha.src_alpha_ctrl.val); - vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset, - alpha.dst_alpha_ctrl.val); + + vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val); + vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val); + vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val); + vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val); } if (vp->id == 0) { + if (vop2->version <= VOP_VERSION_RK3588) { + src_color_ctrl_reg = RK3568_HDR0_SRC_COLOR_CTRL; + dst_color_ctrl_reg = RK3568_HDR0_DST_COLOR_CTRL; + src_alpha_ctrl_reg = RK3568_HDR0_SRC_ALPHA_CTRL; + dst_alpha_ctrl_reg = RK3568_HDR0_DST_ALPHA_CTRL; + } else { + src_color_ctrl_reg = RK3576_OVL_HDR_SRC_COLOR_CTRL(vp->id); + dst_color_ctrl_reg = RK3576_OVL_HDR_DST_COLOR_CTRL(vp->id); + src_alpha_ctrl_reg = RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp->id); + dst_alpha_ctrl_reg = RK3576_OVL_HDR_DST_ALPHA_CTRL(vp->id); + } + if (bottom_layer_alpha_en) { /* Transfer pixel alpha to hdr mix */ alpha_config.src_premulti_en = gpremulti_en; @@ -1390,18 +2038,15 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) alpha_config.src_pixel_alpha_en = true; alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE; + vop2_parse_alpha(&alpha_config, &alpha); - vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, - alpha.src_color_ctrl.val); - vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL, - alpha.dst_color_ctrl.val); - vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL, - alpha.src_alpha_ctrl.val); - vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL, - alpha.dst_alpha_ctrl.val); + vop2_writel(vop2, src_color_ctrl_reg, alpha.src_color_ctrl.val); + vop2_writel(vop2, dst_color_ctrl_reg, alpha.dst_color_ctrl.val); + vop2_writel(vop2, src_alpha_ctrl_reg, alpha.src_alpha_ctrl.val); + vop2_writel(vop2, dst_alpha_ctrl_reg, alpha.dst_alpha_ctrl.val); } else { - vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0); + vop2_writel(vop2, src_color_ctrl_reg, 0); } } } @@ -1611,6 +2256,71 @@ static void rk3568_vop2_setup_overlay(struct vop2_video_port *vp) rk3568_vop2_setup_dly_for_windows(vp); } +static void 
rk3576_vop2_setup_layer_mixer(struct vop2_video_port *vp) +{ + struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); + struct vop2 *vop2 = vp->vop2; + struct drm_plane *plane; + u32 layer_sel = 0xffff; /* 0xf means this layer is disabled */ + u32 ovl_ctrl; + + ovl_ctrl = vop2_readl(vop2, RK3576_OVL_CTRL(vp->id)); + if (vcstate->yuv_overlay) + ovl_ctrl |= RK3576_OVL_CTRL__YUV_MODE; + else + ovl_ctrl &= ~RK3576_OVL_CTRL__YUV_MODE; + + vop2_writel(vop2, RK3576_OVL_CTRL(vp->id), ovl_ctrl); + + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { + struct vop2_win *win = to_vop2_win(plane); + + layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos, + 0xf); + layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos, + win->data->layer_sel_id[vp->id]); + } + + vop2_writel(vop2, RK3576_OVL_LAYER_SEL(vp->id), layer_sel); +} + +static void rk3576_vop2_setup_dly_for_windows(struct vop2_video_port *vp) +{ + struct drm_plane *plane; + struct vop2_win *win; + + drm_atomic_crtc_for_each_plane(plane, &vp->crtc) { + win = to_vop2_win(plane); + vop2_win_write(win, VOP2_WIN_DLY_NUM, 0); + } +} + +static void rk3576_vop2_setup_overlay(struct vop2_video_port *vp) +{ + struct vop2 *vop2 = vp->vop2; + struct drm_crtc *crtc = &vp->crtc; + struct drm_plane *plane; + + vp->win_mask = 0; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + struct vop2_win *win = to_vop2_win(plane); + + win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT]; + vp->win_mask |= BIT(win->data->phys_id); + + if (vop2_cluster_window(win)) + vop2_setup_cluster_alpha(vop2, win); + } + + if (!vp->win_mask) + return; + + rk3576_vop2_setup_layer_mixer(vp); + vop2_setup_alpha(vp); + rk3576_vop2_setup_dly_for_windows(vp); +} + static void rk3568_vop2_setup_bg_dly(struct vop2_video_port *vp) { struct drm_crtc *crtc = &vp->crtc; @@ -1628,12 +2338,38 @@ static void rk3568_vop2_setup_bg_dly(struct vop2_video_port *vp) vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); } +static void rk3576_vop2_setup_bg_dly(struct vop2_video_port *vp) +{ + struct drm_crtc *crtc = &vp->crtc; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + u16 hdisplay = mode->crtc_hdisplay; + u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; + u32 bg_dly; + u32 pre_scan_dly; + + bg_dly = vp->data->pre_scan_max_dly[VOP2_DLY_WIN] + + vp->data->pre_scan_max_dly[VOP2_DLY_LAYER_MIX] + + vp->data->pre_scan_max_dly[VOP2_DLY_HDR_MIX]; + + vop2_writel(vp->vop2, RK3576_OVL_BG_MIX_CTRL(vp->id), + FIELD_PREP(RK3576_OVL_BG_MIX_CTRL__BG_DLY, bg_dly)); + + pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len; + vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly); +} + static const struct vop2_ops rk3568_vop_ops = { .setup_intf_mux = rk3568_set_intf_mux, .setup_bg_dly = rk3568_vop2_setup_bg_dly, .setup_overlay = rk3568_vop2_setup_overlay, }; +static const struct vop2_ops rk3576_vop_ops = { + .setup_intf_mux = rk3576_set_intf_mux, + .setup_bg_dly = rk3576_vop2_setup_bg_dly, + .setup_overlay = rk3576_vop2_setup_overlay, +}; + static const struct vop2_ops rk3588_vop_ops = { .setup_intf_mux = rk3588_set_intf_mux, .setup_bg_dly = rk3568_vop2_setup_bg_dly, @@ -1678,6 +2414,25 @@ static const struct vop2_data rk3568_vop = { .soc_id = 3568, }; +static const struct vop2_data rk3576_vop = { + .version = VOP_VERSION_RK3576, + .feature = VOP2_FEATURE_HAS_SYS_PMU, + .nr_vps = 3, + .max_input = { 4096, 4320 }, + .max_output = { 4096, 4320 }, + .vp = rk3576_vop_video_ports, + .win = rk3576_vop_win_data, + 
.win_size = ARRAY_SIZE(rk3576_vop_win_data), + .cluster_reg = rk3576_vop_cluster_regs, + .nr_cluster_regs = ARRAY_SIZE(rk3576_vop_cluster_regs), + .smart_reg = rk3576_vop_smart_regs, + .nr_smart_regs = ARRAY_SIZE(rk3576_vop_smart_regs), + .regs_dump = rk3576_regs_dump, + .regs_dump_size = ARRAY_SIZE(rk3576_regs_dump), + .ops = &rk3576_vop_ops, + .soc_id = 3576, +}; + static const struct vop2_data rk3588_vop = { .version = VOP_VERSION_RK3588, .feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF | @@ -1705,6 +2460,9 @@ static const struct of_device_id vop2_dt_match[] = { }, { .compatible = "rockchip,rk3568-vop", .data = &rk3568_vop, + }, { + .compatible = "rockchip,rk3576-vop", + .data = &rk3576_vop, }, { .compatible = "rockchip,rk3588-vop", .data = &rk3588_vop From 95a5c9d197bb22a506913acb330a926d4e51aa95 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Mon, 3 Mar 2025 19:22:56 +0100 Subject: [PATCH 42/77] drm/rockchip: vop2: add missing bitfield.h include Commit 328e6885996c ("drm/rockchip: vop2: Add platform specific callback") moved per soc configuration code to the other per-soc data into rockchip_vop2_reg.c, but forgot to also include bitfield.h for the used FIELD_PREP macro. Add this missing include. Fixes: 328e6885996c ("drm/rockchip: vop2: Add platform specific callback") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202503040135.fgoyWdLB-lkp@intel.com/ Reviewed-by: Sebastian Reichel Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250303182256.1727178-1-heiko@sntech.de --- drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index a1402622589a..14958d6b3d2e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -4,6 +4,7 @@ * Author: Andy Yan */ +#include #include #include #include From c449f506ef38435b2c7e9ceb661e48662ac86b10 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 4 Mar 2025 14:19:21 +0100 Subject: [PATCH 43/77] drm/appletbdrm: Fix format specifier for size_t variables When building for a 32-bit platform, there are some warnings (or errors with CONFIG_WERROR=y) due to an incorrect specifier for 'size_t' variables, which is typedef'd as 'unsigned int' for these architectures: drivers/gpu/drm/tiny/appletbdrm.c:171:17: error: format specifies type 'unsigned long' but the argument has type 'size_t' (aka 'unsigned int') [-Werror,-Wformat] 170 | drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", | ~~~ | %zu 171 | actual_size, size); | ^~~~ ... drivers/gpu/drm/tiny/appletbdrm.c:212:17: error: format specifies type 'unsigned long' but the argument has type 'size_t' (aka 'unsigned int') [-Werror,-Wformat] 211 | drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", | ~~~ | %zu 212 | actual_size, size); | ^~~~ Use '%zu' as suggested, clearing up the warnings. 
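For reference, a minimal sketch of the portable formatting rule this patch applies; the helper name and argument list below are made up for illustration and are not taken from the driver:

	/* size_t must be printed with %zu so the format is correct on both
	 * 32-bit and 64-bit builds; actual_size is a plain int here. */
	static void report_size_mismatch(struct drm_device *drm, int actual_size, size_t size)
	{
		drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n",
			actual_size, size);
	}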
Fixes: 0670c2f56e45 ("drm/tiny: add driver for Apple Touch Bars in x86 Macs") Signed-off-by: Nathan Chancellor Reviewed-by: Jani Nikula Acked-by: Aditya Garg Link: https://patchwork.freedesktop.org/patch/msgid/20250304-appletbdrm-fix-size_t-specifier-v1-1-94fe1d2c91f8@kernel.org Signed-off-by: Jani Nikula --- drivers/gpu/drm/tiny/appletbdrm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c index f5d177e234e4..394c8f9bd41a 100644 --- a/drivers/gpu/drm/tiny/appletbdrm.c +++ b/drivers/gpu/drm/tiny/appletbdrm.c @@ -167,7 +167,7 @@ static int appletbdrm_send_request(struct appletbdrm_device *adev, } if (actual_size != size) { - drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", + drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n", actual_size, size); return -EIO; } @@ -208,7 +208,7 @@ static int appletbdrm_read_response(struct appletbdrm_device *adev, } if (actual_size != size) { - drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", + drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n", actual_size, size); return -EBADMSG; } From 491626f705fd22191e8d620255effb0ba878a657 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 23 Jan 2025 17:09:08 +0200 Subject: [PATCH 44/77] drm/mipi-dsi: stop passing non struct drm_device to drm_err() and friends The expectation is that the struct drm_device based logging helpers get passed an actual struct drm_device pointer rather than some random struct pointer where you can dereference the ->dev member. Convert drm_err(host, ...) to dev_err(host->dev, ...). This matches current usage, as struct drm_device is not available, but drops "[drm] *ERROR*" from logs. Reviewed-by: Simona Vetter Reviewed-by: Luca Ceresoli Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/842f97ade87d6f0c4b1de12e8ed5610a1b07fd8c.1737644530.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_mipi_dsi.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 2e148753ea97..dfa595556320 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -162,13 +162,13 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node) u32 reg; if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) { - drm_err(host, "modalias failure on %pOF\n", node); + dev_err(host->dev, "modalias failure on %pOF\n", node); return ERR_PTR(-EINVAL); } ret = of_property_read_u32(node, "reg", ®); if (ret) { - drm_err(host, "device node %pOF has no valid reg property: %d\n", + dev_err(host->dev, "device node %pOF has no valid reg property: %d\n", node, ret); return ERR_PTR(-EINVAL); } @@ -206,18 +206,18 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host, int ret; if (!info) { - drm_err(host, "invalid mipi_dsi_device_info pointer\n"); + dev_err(host->dev, "invalid mipi_dsi_device_info pointer\n"); return ERR_PTR(-EINVAL); } if (info->channel > 3) { - drm_err(host, "invalid virtual channel: %u\n", info->channel); + dev_err(host->dev, "invalid virtual channel: %u\n", info->channel); return ERR_PTR(-EINVAL); } dsi = mipi_dsi_device_alloc(host); if (IS_ERR(dsi)) { - drm_err(host, "failed to allocate DSI device %ld\n", + dev_err(host->dev, "failed to allocate DSI device %ld\n", PTR_ERR(dsi)); return dsi; } @@ -228,7 +228,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host, ret = 
mipi_dsi_device_add(dsi); if (ret) { - drm_err(host, "failed to add DSI device %d\n", ret); + dev_err(host->dev, "failed to add DSI device %d\n", ret); kfree(dsi); return ERR_PTR(ret); } From abeef1f9eaf9301cc98a6841dab5f72de5c95360 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 23 Jan 2025 17:09:09 +0200 Subject: [PATCH 45/77] drm/rockchip: stop passing non struct drm_device to drm_err() and friends The expectation is that the struct drm_device based logging helpers get passed an actual struct drm_device pointer rather than some random struct pointer where you can dereference the ->dev member. Convert drm_err(hdmi, ...) to dev_err(hdmi->dev, ...). This matches current usage, but drops "[drm] *ERROR*" prefix from logging. Reviewed-by: Simona Vetter Reviewed-by: Louis Chauvet Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/f42da4c9943a2f2a9de4272b7849e72236d4c3f9.1737644530.git.jani.nikula@intel.com --- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 16 ++++++++-------- drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index e7a6669c46b0..f737e7d46e66 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -203,7 +203,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); if (IS_ERR(hdmi->regmap)) { - drm_err(hdmi, "Unable to get rockchip,grf\n"); + dev_err(hdmi->dev, "Unable to get rockchip,grf\n"); return PTR_ERR(hdmi->regmap); } @@ -214,7 +214,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) if (IS_ERR(hdmi->ref_clk)) { ret = PTR_ERR(hdmi->ref_clk); if (ret != -EPROBE_DEFER) - drm_err(hdmi, "failed to get reference clock\n"); + dev_err(hdmi->dev, "failed to get reference clock\n"); return ret; } @@ -222,7 +222,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) if (IS_ERR(hdmi->grf_clk)) { ret = PTR_ERR(hdmi->grf_clk); if (ret != -EPROBE_DEFER) - drm_err(hdmi, "failed to get grf clock\n"); + dev_err(hdmi->dev, "failed to get grf clock\n"); return ret; } @@ -302,16 +302,16 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder) ret = clk_prepare_enable(hdmi->grf_clk); if (ret < 0) { - drm_err(hdmi, "failed to enable grfclk %d\n", ret); + dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret); return; } ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val); if (ret != 0) - drm_err(hdmi, "Could not write to GRF: %d\n", ret); + dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret); clk_disable_unprepare(hdmi->grf_clk); - drm_dbg(hdmi, "vop %s output to hdmi\n", ret ? "LIT" : "BIG"); + dev_dbg(hdmi->dev, "vop %s output to hdmi\n", ret ? 
"LIT" : "BIG"); } static int @@ -574,7 +574,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, ret = rockchip_hdmi_parse_dt(hdmi); if (ret) { if (ret != -EPROBE_DEFER) - drm_err(hdmi, "Unable to parse OF data\n"); + dev_err(hdmi->dev, "Unable to parse OF data\n"); return ret; } @@ -582,7 +582,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); if (ret != -EPROBE_DEFER) - drm_err(hdmi, "failed to get phy\n"); + dev_err(hdmi->dev, "failed to get phy\n"); return ret; } diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c index f41151d49fca..3d1dddb34603 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -242,7 +242,7 @@ static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work) if (drm) { changed = drm_helper_hpd_irq_event(drm); if (changed) - drm_dbg(hdmi, "connector status changed\n"); + dev_dbg(hdmi->dev, "connector status changed\n"); } } @@ -472,7 +472,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, } } if (hdmi->port_id < 0) { - drm_err(hdmi, "Failed to match HDMI port ID\n"); + dev_err(hdmi->dev, "Failed to match HDMI port ID\n"); return hdmi->port_id; } @@ -496,20 +496,20 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); if (IS_ERR(hdmi->regmap)) { - drm_err(hdmi, "Unable to get rockchip,grf\n"); + dev_err(hdmi->dev, "Unable to get rockchip,grf\n"); return PTR_ERR(hdmi->regmap); } hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo-grf"); if (IS_ERR(hdmi->vo_regmap)) { - drm_err(hdmi, "Unable to get rockchip,vo-grf\n"); + dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n"); return PTR_ERR(hdmi->vo_regmap); } ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks); if (ret < 0) { - drm_err(hdmi, "Failed to get clocks: %d\n", ret); + dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret); return ret; } @@ -517,7 +517,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, GPIOD_OUT_HIGH); if (IS_ERR(hdmi->enable_gpio)) { ret = PTR_ERR(hdmi->enable_gpio); - drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret); + dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret); return ret; } @@ -525,7 +525,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); if (ret != -EPROBE_DEFER) - drm_err(hdmi, "failed to get phy: %d\n", ret); + dev_err(hdmi->dev, "failed to get phy: %d\n", ret); return ret; } @@ -564,7 +564,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, connector = drm_bridge_connector_init(drm, encoder); if (IS_ERR(connector)) { ret = PTR_ERR(connector); - drm_err(hdmi, "failed to init bridge connector: %d\n", ret); + dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret); return ret; } From e5f3081291eb958b46775edfd29d1f5367078474 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 23 Jan 2025 17:09:10 +0200 Subject: [PATCH 46/77] drm/sched: stop passing non struct drm_device to drm_err() and friends The expectation is that the struct drm_device based logging helpers get passed an actual struct drm_device pointer rather than some random struct pointer where you can dereference the ->dev member. Convert drm_err(sched, ...) 
to dev_err(sched->dev, ...) and similar. This matches current usage, as struct drm_device is not available, but drops "[drm]" or "[drm] *ERROR*" prefix from logging. Unfortunately, there's no dev_WARN_ON(), so the conversion is not exactly the same. Reviewed-by: Simona Vetter Acked-by: Philipp Stanner Reviewed-by: Louis Chauvet Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/fe441dd1469d2b03e6b2ff247078bdde2011c6e3.1737644530.git.jani.nikula@intel.com --- drivers/gpu/drm/scheduler/sched_entity.c | 2 +- drivers/gpu/drm/scheduler/sched_main.c | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 87f88259ddf6..a6d2a4722d82 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -91,7 +91,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, * the lowest priority available. */ if (entity->priority >= sched_list[0]->num_rqs) { - drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n", + dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n", entity->priority, sched_list[0]->num_rqs); entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, (s32) DRM_SCHED_PRIORITY_KERNEL); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index c634993f1346..c0b9822d6274 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -102,9 +102,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched) { u32 credits; - drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit, - atomic_read(&sched->credit_count), - &credits)); + WARN_ON(check_sub_overflow(sched->credit_limit, + atomic_read(&sched->credit_count), + &credits)); return credits; } @@ -129,9 +129,11 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched, /* If a job exceeds the credit limit, truncate it to the credit limit * itself to guarantee forward progress. */ - if (drm_WARN(sched, s_job->credits > sched->credit_limit, - "Jobs may not exceed the credit limit, truncate.\n")) + if (s_job->credits > sched->credit_limit) { + dev_WARN(sched->dev, + "Jobs may not exceed the credit limit, truncate.\n"); s_job->credits = sched->credit_limit; + } return drm_sched_available_credits(sched) >= s_job->credits; } @@ -789,7 +791,7 @@ int drm_sched_job_init(struct drm_sched_job *job, * or worse--a blank screen--leave a trail in the * logs, so this can be debugged easier. */ - drm_err(job->sched, "%s: entity has no rq!\n", __func__); + dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__); return -ENOENT; } @@ -1263,7 +1265,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_ if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) { /* This is a gross violation--tell drivers what the problem is. */ - drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", + dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", __func__); return -EINVAL; } else if (sched->sched_rq) { @@ -1271,7 +1273,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_ * fine-tune their DRM calling order, and return all * is good. 
*/ - drm_warn(sched, "%s: scheduler already initialized!\n", __func__); + dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__); return 0; } @@ -1326,7 +1328,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_ Out_check_own: if (sched->own_submit_wq) destroy_workqueue(sched->submit_wq); - drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); + dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); return -ENOMEM; } EXPORT_SYMBOL(drm_sched_init); From d05386a3fdf373a19ab1918846668f096e6f966a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 23 Jan 2025 17:09:12 +0200 Subject: [PATCH 47/77] drm/print: require struct drm_device for drm_err() and friends The expectation is that the struct drm_device based logging helpers get passed an actual struct drm_device pointer rather than some random struct pointer where you can dereference the ->dev member. Add a static inline helper to convert struct drm_device to struct device, with the main benefit being the type checking of the macro argument. As a side effect, this also reduces macro argument double references. Reviewed-by: Simona Vetter Reviewed-by: Louis Chauvet Reviewed-by: Luca Ceresoli Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/dfe6e774883e6ef93cfaa2b6fe92b804061ab9d9.1737644530.git.jani.nikula@intel.com --- include/drm/drm_print.h | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 9732f514566d..f31eba1c7cab 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -584,9 +584,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, * Prefer drm_device based logging over device or prink based logging. */ +/* Helper to enforce struct drm_device type */ +static inline struct device *__drm_to_dev(const struct drm_device *drm) +{ + return drm ? drm->dev : NULL; +} + /* Helper for struct drm_device based logging. */ #define __drm_printk(drm, level, type, fmt, ...) \ - dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__) + dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__) #define drm_info(drm, fmt, ...) \ @@ -620,25 +626,25 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, #define drm_dbg_core(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) -#define drm_dbg_driver(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__) +#define drm_dbg_driver(drm, fmt, ...) \ + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define drm_dbg_kms(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__) #define drm_dbg_prime(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__) #define drm_dbg_atomic(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) #define drm_dbg_vbl(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__) #define drm_dbg_state(drm, fmt, ...) 
\ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__) #define drm_dbg_lease(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__) #define drm_dbg_dp(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__) #define drm_dbg_drmres(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__) #define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) @@ -727,10 +733,9 @@ void __drm_err(const char *format, ...); #define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ - const struct drm_device *drm_ = (drm); \ \ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ - drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ + drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \ }) #define drm_dbg_ratelimited(drm, fmt, ...) \ @@ -752,13 +757,13 @@ void __drm_err(const char *format, ...); /* Helper for struct drm_device based WARNs */ #define drm_WARN(drm, condition, format, arg...) \ WARN(condition, "%s %s: [drm] " format, \ - dev_driver_string((drm)->dev), \ - dev_name((drm)->dev), ## arg) + dev_driver_string(__drm_to_dev(drm)), \ + dev_name(__drm_to_dev(drm)), ## arg) #define drm_WARN_ONCE(drm, condition, format, arg...) \ WARN_ONCE(condition, "%s %s: [drm] " format, \ - dev_driver_string((drm)->dev), \ - dev_name((drm)->dev), ## arg) + dev_driver_string(__drm_to_dev(drm)), \ + dev_name(__drm_to_dev(drm)), ## arg) #define drm_WARN_ON(drm, x) \ drm_WARN((drm), (x), "%s", \ From d4f5efb9139cad34823f265053c57baf6af3c70c Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Tue, 4 Mar 2025 13:44:16 +0100 Subject: [PATCH 48/77] drm/rockchip: lvds: move pclk preparation in with clk_get The LVDS block needs a separate pclk only on some socs, so currently requests and prepares it in the soc-specific probe function, but common code is required to unprepare it in the error path or on driver remove. While this works because clk_unprepare just does nothing if clk is NULL, this mismatch of who is responsible still is not very nice. The clock-framework already has a helper for clk-get-and-prepare even with devres support in devm_clk_get_prepared(). This will get and prepare the clock and also unprepare it on driver removal, saving the driver from having to handle it "manually". 
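To illustrate the devres pattern being adopted, a minimal probe sketch follows; the function name and the "pclk_example" clock id are hypothetical, and error handling is reduced to the essentials:

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *pclk;

		/* get + prepare in one call; the clock is unprepared
		 * automatically when the driver is unbound, so no manual
		 * clk_unprepare() is needed in remove() or the error path */
		pclk = devm_clk_get_prepared(&pdev->dev, "pclk_example");
		if (IS_ERR(pclk))
			return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
					     "could not get or prepare pclk\n");

		return 0;
	}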
Reviewed-by: Quentin Schulz Reviewed-by: Andy Yan Signed-off-by: Heiko Stuebner Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250304124418.111061-2-heiko@sntech.de --- drivers/gpu/drm/rockchip/rockchip_lvds.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 385cf6881504..ecfae8d5da89 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -448,15 +448,13 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = { static int rk3288_lvds_probe(struct platform_device *pdev, struct rockchip_lvds *lvds) { - int ret; - lvds->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(lvds->regs)) return PTR_ERR(lvds->regs); - lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds"); + lvds->pclk = devm_clk_get_prepared(lvds->dev, "pclk_lvds"); if (IS_ERR(lvds->pclk)) { - DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n"); + DRM_DEV_ERROR(lvds->dev, "could not get or prepare pclk_lvds\n"); return PTR_ERR(lvds->pclk); } @@ -480,12 +478,6 @@ static int rk3288_lvds_probe(struct platform_device *pdev, } } - ret = clk_prepare(lvds->pclk); - if (ret < 0) { - DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n"); - return ret; - } - return 0; } @@ -728,20 +720,15 @@ static int rockchip_lvds_probe(struct platform_device *pdev) dev_set_drvdata(dev, lvds); ret = component_add(&pdev->dev, &rockchip_lvds_component_ops); - if (ret < 0) { + if (ret < 0) DRM_DEV_ERROR(dev, "failed to add component\n"); - clk_unprepare(lvds->pclk); - } return ret; } static void rockchip_lvds_remove(struct platform_device *pdev) { - struct rockchip_lvds *lvds = platform_get_drvdata(pdev); - component_del(&pdev->dev, &rockchip_lvds_component_ops); - clk_unprepare(lvds->pclk); } struct platform_driver rockchip_lvds_driver = { From 37c18639504aacbd31371f562fabafdb890bcd2e Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Tue, 4 Mar 2025 13:44:17 +0100 Subject: [PATCH 49/77] drm/rockchip: lvds: Hide scary error messages on probe deferral Commit 52d11c863ac9 ("drm/rockchip: lvds: do not print scary message when probing defer") already started hiding scary messages that are not relevant if the requested supply just returned EPROBE_DEFER, but there are more possible sources - like the phy. So modernize the whole logging in the probe path by replacing the remaining deprecated DRM_DEV_ERROR with appropriate dev_err(_probe) and drm_err calls. The distinction here is that all messages talking about mishaps of the lvds element use dev_err(_probe) while messages caused by interaction with the main Rockchip drm-device use drm_err. 
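As a minimal sketch of that split (a fragment only, with hypothetical variable and phy names), probe-path resources log through the device while DRM-side failures log through the drm_device:

	/* resource local to the LVDS device: dev_err_probe() stays quiet
	 * on -EPROBE_DEFER and records the deferral reason */
	phy = devm_phy_get(dev, "dphy");
	if (IS_ERR(phy))
		return dev_err_probe(dev, PTR_ERR(phy), "failed to get phy\n");

	/* failure while wiring things into the DRM device: use drm_err() */
	ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
	if (ret < 0) {
		drm_err(drm_dev, "failed to initialize encoder: %d\n", ret);
		return ret;
	}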
Reviewed-by: Andy Yan Signed-off-by: Heiko Stuebner Reviewed-by: Quentin Schulz Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250304124418.111061-3-heiko@sntech.de --- drivers/gpu/drm/rockchip/rockchip_lvds.c | 63 ++++++++++-------------- 1 file changed, 27 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index ecfae8d5da89..bfebe42a0331 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -453,10 +453,9 @@ static int rk3288_lvds_probe(struct platform_device *pdev, return PTR_ERR(lvds->regs); lvds->pclk = devm_clk_get_prepared(lvds->dev, "pclk_lvds"); - if (IS_ERR(lvds->pclk)) { - DRM_DEV_ERROR(lvds->dev, "could not get or prepare pclk_lvds\n"); - return PTR_ERR(lvds->pclk); - } + if (IS_ERR(lvds->pclk)) + return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk), + "could not get or prepare pclk_lvds\n"); lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins), GFP_KERNEL); @@ -465,14 +464,14 @@ static int rk3288_lvds_probe(struct platform_device *pdev, lvds->pins->p = devm_pinctrl_get(lvds->dev); if (IS_ERR(lvds->pins->p)) { - DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n"); + dev_err(lvds->dev, "no pinctrl handle\n"); devm_kfree(lvds->dev, lvds->pins); lvds->pins = NULL; } else { lvds->pins->default_state = pinctrl_lookup_state(lvds->pins->p, "lcdc"); if (IS_ERR(lvds->pins->default_state)) { - DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n"); + dev_err(lvds->dev, "no default pinctrl state\n"); devm_kfree(lvds->dev, lvds->pins); lvds->pins = NULL; } @@ -547,11 +546,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, lvds->drm_dev = drm_dev; port = of_graph_get_port_by_id(dev->of_node, 1); - if (!port) { - DRM_DEV_ERROR(dev, - "can't found port point, please init lvds panel port!\n"); - return -EINVAL; - } + if (!port) + return dev_err_probe(dev, -EINVAL, + "can't found port point, please init lvds panel port!\n"); + for_each_child_of_node(port, endpoint) { child_count++; of_property_read_u32(endpoint, "reg", &endpoint_id); @@ -563,8 +561,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, } } if (!child_count) { - DRM_DEV_ERROR(dev, "lvds port does not have any children\n"); - ret = -EINVAL; + ret = dev_err_probe(dev, -EINVAL, "lvds port does not have any children\n"); goto err_put_port; } else if (ret) { dev_err_probe(dev, ret, "failed to find panel and bridge node\n"); @@ -581,8 +578,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, lvds->output = rockchip_lvds_name_to_output(name); if (lvds->output < 0) { - DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name); - ret = lvds->output; + ret = dev_err_probe(dev, lvds->output, "invalid output type [%s]\n", name); goto err_put_remote; } @@ -593,8 +589,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, lvds->format = rockchip_lvds_name_to_format(name); if (lvds->format < 0) { - DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name); - ret = lvds->format; + ret = dev_err_probe(dev, lvds->format, + "invalid data-mapping format [%s]\n", name); goto err_put_remote; } @@ -604,8 +600,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS); if (ret < 0) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to initialize encoder: %d\n", ret); + drm_err(drm_dev, + "failed to initialize 
encoder: %d\n", ret); goto err_put_remote; } @@ -618,8 +614,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, &rockchip_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); if (ret < 0) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to initialize connector: %d\n", ret); + drm_err(drm_dev, + "failed to initialize connector: %d\n", ret); goto err_free_encoder; } @@ -633,9 +629,9 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, connector = drm_bridge_connector_init(lvds->drm_dev, encoder); if (IS_ERR(connector)) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to initialize bridge connector: %pe\n", - connector); + drm_err(drm_dev, + "failed to initialize bridge connector: %pe\n", + connector); ret = PTR_ERR(connector); goto err_free_encoder; } @@ -643,8 +639,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to attach encoder: %d\n", ret); + drm_err(drm_dev, "failed to attach encoder: %d\n", ret); goto err_free_connector; } @@ -706,24 +701,20 @@ static int rockchip_lvds_probe(struct platform_device *pdev) lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - if (IS_ERR(lvds->grf)) { - DRM_DEV_ERROR(dev, "missing rockchip,grf property\n"); - return PTR_ERR(lvds->grf); - } + if (IS_ERR(lvds->grf)) + return dev_err_probe(dev, PTR_ERR(lvds->grf), "missing rockchip,grf property\n"); ret = lvds->soc_data->probe(pdev, lvds); - if (ret) { - DRM_DEV_ERROR(dev, "Platform initialization failed\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "Platform initialization failed\n"); dev_set_drvdata(dev, lvds); ret = component_add(&pdev->dev, &rockchip_lvds_component_ops); if (ret < 0) - DRM_DEV_ERROR(dev, "failed to add component\n"); + return dev_err_probe(dev, ret, "failed to add component\n"); - return ret; + return 0; } static void rockchip_lvds_remove(struct platform_device *pdev) From 4006be2f77cd26d065133b338dc51f59857d20f0 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Tue, 4 Mar 2025 13:44:18 +0100 Subject: [PATCH 50/77] drm/rockchip: lvds: lower log severity for missing pinctrl settings While missing lvds pinctrl is unexpected and is reported, we nevertheless don't fail setting up the device and instead continue without explicit pinctrl handling. So lower the log-level from error to warning to reflect that. 
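A short sketch of the resulting pattern (simplified from the driver, variable names invented): log a warning but keep probing without the optional pinctrl state:

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p)) {
		/* not fatal: note it and continue without explicit pinctrl handling */
		dev_warn(dev, "no pinctrl handle\n");
		p = NULL;
	}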
Suggested-by: Quentin Schulz Signed-off-by: Heiko Stuebner Reviewed-by: Quentin Schulz Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20250304124418.111061-4-heiko@sntech.de --- drivers/gpu/drm/rockchip/rockchip_lvds.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index bfebe42a0331..a673779de3d2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -464,14 +464,14 @@ static int rk3288_lvds_probe(struct platform_device *pdev, lvds->pins->p = devm_pinctrl_get(lvds->dev); if (IS_ERR(lvds->pins->p)) { - dev_err(lvds->dev, "no pinctrl handle\n"); + dev_warn(lvds->dev, "no pinctrl handle\n"); devm_kfree(lvds->dev, lvds->pins); lvds->pins = NULL; } else { lvds->pins->default_state = pinctrl_lookup_state(lvds->pins->p, "lcdc"); if (IS_ERR(lvds->pins->default_state)) { - dev_err(lvds->dev, "no default pinctrl state\n"); + dev_warn(lvds->dev, "no default pinctrl state\n"); devm_kfree(lvds->dev, lvds->pins); lvds->pins = NULL; } From c25b716e493994cfc4f1ff2694d976097e2aa694 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 21 Feb 2025 17:31:29 +0000 Subject: [PATCH 51/77] fbtft: Remove access to page->index There is no need to print out page->index as part of the debug message. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Lorenzo Stoakes Signed-off-by: Simona Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20250221173131.3470667-1-willy@infradead.org --- drivers/staging/fbtft/fbtft-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 4cfa494243b9..da9c64152a60 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -337,9 +337,7 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis list_for_each_entry(pageref, pagereflist, list) { y_low = pageref->offset / info->fix.line_length; y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length; - dev_dbg(info->device, - "page->index=%lu y_low=%d y_high=%d\n", - pageref->page->index, y_low, y_high); + dev_dbg(info->device, "y_low=%d y_high=%d\n", y_low, y_high); if (y_high > info->var.yres - 1) y_high = info->var.yres - 1; if (y_low < dirty_lines_start) From c82734fbdc50dc9e568e8686622eaa4498acb81e Mon Sep 17 00:00:00 2001 From: Ashley Smith Date: Mon, 3 Mar 2025 18:04:32 +0000 Subject: [PATCH 52/77] drm/panthor: Update CS_STATUS_ defines to correct values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Values for SC_STATUS_BLOCKED_REASON_ are documented in the G610 "Odin" GPU specification (CS_STATUS_BLOCKED_REASON register). This change updates the defines to the correct values. 
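For reference, a consumer of these defines is expected to mask the register value with CS_STATUS_BLOCKED_REASON_MASK before comparing. A hypothetical debug helper (a sketch only, not part of this patch) might decode the field like this:

/* Illustrative only: map the masked blocked-reason field to a name. */
static const char *cs_blocked_reason_str(u32 status_blocked_reason)
{
	switch (status_blocked_reason & CS_STATUS_BLOCKED_REASON_MASK) {
	case CS_STATUS_BLOCKED_REASON_SB_WAIT:
		return "SB_WAIT";
	case CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT:
		return "PROGRESS_WAIT";
	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
		return "SYNC_WAIT";
	case CS_STATUS_BLOCKED_REASON_DEFERRED:
		return "DEFERRED";
	case CS_STATUS_BLOCKED_REASON_RESOURCE:
		return "RESOURCE";
	case CS_STATUS_BLOCKED_REASON_FLUSH:
		return "FLUSH";
	default:
		return "unknown";
	}
}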
Fixes: 2718d91816ee ("drm/panthor: Add the FW logical block") Signed-off-by: Ashley Smith Reviewed-by: Liviu Dudau Reviewed-by: Adrián Larumbe Reviewed-by: Boris Brezillon Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20250303180444.3768993-1-ashley.smith@collabora.com --- drivers/gpu/drm/panthor/panthor_fw.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h index 22448abde992..6598d96c6d2a 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.h +++ b/drivers/gpu/drm/panthor/panthor_fw.h @@ -102,9 +102,9 @@ struct panthor_fw_cs_output_iface { #define CS_STATUS_BLOCKED_REASON_SB_WAIT 1 #define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2 #define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3 -#define CS_STATUS_BLOCKED_REASON_DEFERRED 5 -#define CS_STATUS_BLOCKED_REASON_RES 6 -#define CS_STATUS_BLOCKED_REASON_FLUSH 7 +#define CS_STATUS_BLOCKED_REASON_DEFERRED 4 +#define CS_STATUS_BLOCKED_REASON_RESOURCE 5 +#define CS_STATUS_BLOCKED_REASON_FLUSH 6 #define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0) u32 status_blocked_reason; u32 status_wait_sync_value_hi; From 44d2f310f008613c1dbe5e234c2cf2be90cbbfab Mon Sep 17 00:00:00 2001 From: Philipp Stanner Date: Tue, 4 Mar 2025 15:13:47 +0100 Subject: [PATCH 53/77] drm/sched: drm_sched_job_cleanup(): correct false doc drm_sched_job_cleanup()'s documentation claims that calling drm_sched_job_arm() is a "point of no return", implying that afterwards a job cannot be cancelled anymore. This is not correct, as proven by the function's code itself, which takes a previous call to drm_sched_job_arm() into account. In truth, the decisive factors are whether fences have been shared (e.g., with other processes) and if the job has been submitted to an entity already. Correct the wrong docstring. Reviewed-by: Tvrtko Ursulin Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250304141346.102683-2-phasta@kernel.org --- drivers/gpu/drm/scheduler/sched_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index c0b9822d6274..bfea608a7106 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1015,11 +1015,13 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency); * Cleans up the resources allocated with drm_sched_job_init(). * * Drivers should call this from their error unwind code if @job is aborted - * before drm_sched_job_arm() is called. + * before it was submitted to an entity with drm_sched_entity_push_job(). * - * After that point of no return @job is committed to be executed by the - * scheduler, and this function should be called from the - * &drm_sched_backend_ops.free_job callback. + * Since calling drm_sched_job_arm() causes the job's fences to be initialized, + * it is up to the driver to ensure that fences that were exposed to external + * parties get signaled. drm_sched_job_cleanup() does not ensure this. 
+ * + * This function must also be called in &struct drm_sched_backend_ops.free_job */ void drm_sched_job_cleanup(struct drm_sched_job *job) { @@ -1030,7 +1032,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job) /* drm_sched_job_arm() has been called */ dma_fence_put(&job->s_fence->finished); } else { - /* aborted job before committing to run it */ + /* aborted job before arming */ drm_sched_fence_free(job->s_fence); } From e4c0fd3f965533cd2b38200ca73625afd602d39b Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Wed, 5 Mar 2025 10:05:46 +0800 Subject: [PATCH 54/77] drm: adp: Remove unnecessary print function dev_err() The print function dev_err() is redundant because platform_get_irq_byname() already prints an error. ./drivers/gpu/drm/adp/adp_drv.c:470:2-9: line 470 is redundant because platform_get_irq() already prints an error. ./drivers/gpu/drm/adp/adp_drv.c:476:2-9: line 476 is redundant because platform_get_irq() already prints an error. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=19211 Signed-off-by: Jiapeng Chong Acked-by: Sasha Finkelstein Link: https://patchwork.freedesktop.org/patch/msgid/20250305020546.96564-1-jiapeng.chong@linux.alibaba.com Signed-off-by: Alyssa Rosenzweig --- drivers/gpu/drm/adp/adp_drv.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c index 0a39abdc9238..0eeb9e5fab26 100644 --- a/drivers/gpu/drm/adp/adp_drv.c +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -466,16 +466,12 @@ static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *ad } adp->be_irq = platform_get_irq_byname(pdev, "be"); - if (adp->be_irq < 0) { - dev_err(dev, "failed to find be irq"); + if (adp->be_irq < 0) return adp->be_irq; - } adp->fe_irq = platform_get_irq_byname(pdev, "fe"); - if (adp->fe_irq < 0) { - dev_err(dev, "failed to find fe irq"); + if (adp->fe_irq < 0) return adp->fe_irq; - } return 0; } From e379856b428acafb8ed689f31d65814da6447b2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20Larumbe?= Date: Mon, 3 Mar 2025 19:08:45 +0000 Subject: [PATCH 55/77] drm/panthor: Replace sleep locks with spinlocks in fdinfo path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 0590c94c3596 ("drm/panthor: Fix race condition when gathering fdinfo group samples") introduced an xarray lock to deal with potential use-after-free errors when accessing groups fdinfo figures. However, this toggles the kernel's atomic context status, so the next nested mutex lock will raise a warning when the kernel is compiled with mutex debug options: CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_MUTEXES=y Replace Panthor's group fdinfo data mutex with a guarded spinlock. 
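To illustrate the constraint (a generic sketch, not code from the driver; the names are hypothetical): xa_lock() takes a spinlock and therefore puts the caller in atomic context, so only non-sleeping locks may be nested inside it. With the cleanup.h guards, the spinlock is released automatically at the end of each loop iteration:

/* Illustrative only: spinlock nested under xa_lock() via guard(). */
#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct sample {
	spinlock_t lock;
	u64 cycles;
};

static void gather_samples(struct xarray *xa, u64 *total)
{
	struct sample *s;
	unsigned long i;

	xa_lock(xa);		/* spinlock: atomic context from here on */
	xa_for_each(xa, i, s) {
		/*
		 * A mutex_lock() here may sleep and warns with
		 * CONFIG_DEBUG_MUTEXES; a spinlock never sleeps.
		 * The guard is dropped at the end of each iteration.
		 */
		guard(spinlock)(&s->lock);
		*total += s->cycles;
	}
	xa_unlock(xa);
}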
Signed-off-by: Adrián Larumbe Fixes: 0590c94c3596 ("drm/panthor: Fix race condition when gathering fdinfo group samples") Reviewed-by: Liviu Dudau Reviewed-by: Boris Brezillon Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20250303190923.1639985-1-adrian.larumbe@collabora.com --- drivers/gpu/drm/panthor/panthor_sched.c | 26 ++++++++++++------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 1a276db095ff..4d31d1967716 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -631,10 +632,10 @@ struct panthor_group { struct panthor_gpu_usage data; /** - * @lock: Mutex to govern concurrent access from drm file's fdinfo callback - * and job post-completion processing function + * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo + * callback and job post-completion processing function */ - struct mutex lock; + spinlock_t lock; /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */ size_t kbo_sizes; @@ -910,8 +911,6 @@ static void group_release_work(struct work_struct *work) release_work); u32 i; - mutex_destroy(&group->fdinfo.lock); - for (i = 0; i < group->queue_count; i++) group_free_queue(group, group->queues[i]); @@ -2861,12 +2860,12 @@ static void update_fdinfo_stats(struct panthor_job *job) struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; struct panthor_job_profiling_data *data = &slots[job->profiling.slot]; - mutex_lock(&group->fdinfo.lock); - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) - fdinfo->cycles += data->cycles.after - data->cycles.before; - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) - fdinfo->time += data->time.after - data->time.before; - mutex_unlock(&group->fdinfo.lock); + scoped_guard(spinlock, &group->fdinfo.lock) { + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) + fdinfo->cycles += data->cycles.after - data->cycles.before; + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) + fdinfo->time += data->time.after - data->time.before; + } } void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) @@ -2880,12 +2879,11 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) xa_lock(&gpool->xa); xa_for_each(&gpool->xa, i, group) { - mutex_lock(&group->fdinfo.lock); + guard(spinlock)(&group->fdinfo.lock); pfile->stats.cycles += group->fdinfo.data.cycles; pfile->stats.time += group->fdinfo.data.time; group->fdinfo.data.cycles = 0; group->fdinfo.data.time = 0; - mutex_unlock(&group->fdinfo.lock); } xa_unlock(&gpool->xa); } @@ -3537,7 +3535,7 @@ int panthor_group_create(struct panthor_file *pfile, mutex_unlock(&sched->reset.lock); add_group_kbo_sizes(group->ptdev, group); - mutex_init(&group->fdinfo.lock); + spin_lock_init(&group->fdinfo.lock); return gid; From c63c3bfdde2656a3ead50ac3ce4a51a634e22dab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20Larumbe?= Date: Mon, 3 Mar 2025 19:08:46 +0000 Subject: [PATCH 56/77] drm/panthor: Avoid sleep locking in the internal BO size path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 434e5ca5b5d7 ("drm/panthor: Expose size of driver internal BO's over fdinfo") locks the VMS xarray, to avoid UAF errors when the same VM is being concurrently destroyed 
by another thread. However, that puts the current thread in atomic context, which means taking the VMS' heap locks will trigger a warning as the thread is no longer allowed to sleep. Because in this case replacing the heap mutex with a spinlock isn't feasible, the fdinfo handler no longer traverses the list of heaps for every single VM associated with an open DRM file. Instead, when a new heap chunk is allocated, its size is accumulated into a pool-wide tally, which also makes the atomic context code path somewhat faster. Signed-off-by: Adrián Larumbe Fixes: 434e5ca5b5d7 ("drm/panthor: Expose size of driver internal BO's over fdinfo") Reviewed-by: Boris Brezillon Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20250303190923.1639985-2-adrian.larumbe@collabora.com --- drivers/gpu/drm/panthor/panthor_heap.c | 62 +++++++++++++------------- drivers/gpu/drm/panthor/panthor_mmu.c | 8 +--- 2 files changed, 31 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c index db0285ce5812..3bdf61c14264 100644 --- a/drivers/gpu/drm/panthor/panthor_heap.c +++ b/drivers/gpu/drm/panthor/panthor_heap.c @@ -97,6 +97,9 @@ struct panthor_heap_pool { /** @gpu_contexts: Buffer object containing the GPU heap contexts. */ struct panthor_kernel_bo *gpu_contexts; + + /** @size: Size of all chunks across all heaps in the pool. */ + atomic_t size; }; static int panthor_heap_ctx_stride(struct panthor_device *ptdev) @@ -118,7 +121,7 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id) panthor_get_heap_ctx_offset(pool, id); } -static void panthor_free_heap_chunk(struct panthor_vm *vm, +static void panthor_free_heap_chunk(struct panthor_heap_pool *pool, struct panthor_heap *heap, struct panthor_heap_chunk *chunk) { @@ -127,12 +130,13 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm, heap->chunk_count--; mutex_unlock(&heap->lock); + atomic_sub(heap->chunk_size, &pool->size); + panthor_kernel_bo_destroy(chunk->bo); kfree(chunk); } -static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, - struct panthor_vm *vm, +static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool, struct panthor_heap *heap, bool initial_chunk) { @@ -144,7 +148,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, if (!chunk) return -ENOMEM; - chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size, + chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC, PANTHOR_VM_KERNEL_AUTO_VA); @@ -180,6 +184,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, heap->chunk_count++; mutex_unlock(&heap->lock); + atomic_add(heap->chunk_size, &pool->size); + return 0; err_destroy_bo: @@ -191,17 +197,16 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, return ret; } -static void panthor_free_heap_chunks(struct panthor_vm *vm, +static void panthor_free_heap_chunks(struct panthor_heap_pool *pool, struct panthor_heap *heap) { struct panthor_heap_chunk *chunk, *tmp; list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) - panthor_free_heap_chunk(vm, heap, chunk); + panthor_free_heap_chunk(pool, heap, chunk); } -static int panthor_alloc_heap_chunks(struct panthor_device *ptdev, - struct panthor_vm *vm, +static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool, struct panthor_heap *heap, u32 chunk_count) { @@ -209,7 +214,7 @@ static int 
panthor_alloc_heap_chunks(struct panthor_device *ptdev, u32 i; for (i = 0; i < chunk_count; i++) { - ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true); + ret = panthor_alloc_heap_chunk(pool, heap, true); if (ret) return ret; } @@ -226,7 +231,7 @@ panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle) if (!heap) return -EINVAL; - panthor_free_heap_chunks(pool->vm, heap); + panthor_free_heap_chunks(pool, heap); mutex_destroy(&heap->lock); kfree(heap); return 0; @@ -308,8 +313,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool, heap->max_chunks = max_chunks; heap->target_in_flight = target_in_flight; - ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap, - initial_chunk_count); + ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count); if (ret) goto err_free_heap; @@ -342,7 +346,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool, return id; err_free_heap: - panthor_free_heap_chunks(pool->vm, heap); + panthor_free_heap_chunks(pool, heap); mutex_destroy(&heap->lock); kfree(heap); @@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool, removed = chunk; list_del(&chunk->node); heap->chunk_count--; + atomic_sub(heap->chunk_size, &pool->size); break; } } @@ -466,7 +471,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool, * further jobs in this queue fail immediately instead of having to * wait for the job timeout. */ - ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false); + ret = panthor_alloc_heap_chunk(pool, heap, false); if (ret) goto out_unlock; @@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm) if (ret) goto err_destroy_pool; + atomic_add(pool->gpu_contexts->obj->size, &pool->size); + return pool; err_destroy_pool: @@ -594,8 +601,10 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool) xa_for_each(&pool->xa, i, heap) drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i)); - if (!IS_ERR_OR_NULL(pool->gpu_contexts)) + if (!IS_ERR_OR_NULL(pool->gpu_contexts)) { + atomic_sub(pool->gpu_contexts->obj->size, &pool->size); panthor_kernel_bo_destroy(pool->gpu_contexts); + } /* Reflects the fact the pool has been destroyed. */ pool->vm = NULL; @@ -605,27 +614,16 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool) } /** - * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool - * @pool: Pool whose total chunk size to calculate. + * panthor_heap_pool_size() - Get a heap pool's total size + * @pool: Pool whose total chunks size to return * - * This function adds the size of all heap chunks across all heaps in the - * argument pool. It also adds the size of the gpu contexts kernel bo. - * It is meant to be used by fdinfo for displaying the size of internal - * driver BO's that aren't exposed to userspace through a GEM handle. 
+ * Returns the aggregated size of all chunks for all heaps in the pool * */ size_t panthor_heap_pool_size(struct panthor_heap_pool *pool) { - struct panthor_heap *heap; - unsigned long i; - size_t size = 0; - - down_read(&pool->lock); - xa_for_each(&pool->xa, i, heap) - size += heap->chunk_size * heap->chunk_count; - up_read(&pool->lock); - - size += pool->gpu_contexts->obj->size; + if (!pool) + return 0; - return size; + return atomic_read(&pool->size); } diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 8c6fc587ddc3..12a02e28f50f 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -1963,13 +1963,7 @@ void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats xa_lock(&pfile->vms->xa); xa_for_each(&pfile->vms->xa, i, vm) { - size_t size = 0; - - mutex_lock(&vm->heaps.lock); - if (vm->heaps.pool) - size = panthor_heap_pool_size(vm->heaps.pool); - mutex_unlock(&vm->heaps.lock); - + size_t size = panthor_heap_pool_size(vm->heaps.pool); stats->resident += size; if (vm->as.id >= 0) stats->active += size; From 3b87886bfb038de2c62e627079472ba612e89410 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Thu, 13 Feb 2025 16:12:48 +0000 Subject: [PATCH 57/77] drm/panthor: Clean up FW version information display Assigning a string to an array which is too small to include the NUL byte at the end causes a warning on some compilers. But this function also has some other oddities like the 'header' array which is only ever used within sizeof(). Tidy up the function by removing the 'header' array, allow the NUL byte to be present in git_sha_header, and calculate the length directly from git_sha_header. Reported-by: Will Deacon Closes: https://lore.kernel.org/all/20250213154237.GA11897@willie-the-truck/ Fixes: 9d443deb0441 ("drm/panthor: Display FW version information") Signed-off-by: Steven Price Acked-by: Will Deacon Reviewed-by: Boris Brezillon Link: https://patchwork.freedesktop.org/patch/msgid/20250213161248.1642392-1-steven.price@arm.com --- drivers/gpu/drm/panthor/panthor_fw.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 4a9c4afa9ad7..0f52766a3120 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -636,8 +636,8 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev, u32 ehdr) { struct panthor_fw_build_info_hdr hdr; - char header[9]; - const char git_sha_header[sizeof(header)] = "git_sha: "; + static const char git_sha_header[] = "git_sha: "; + const int header_len = sizeof(git_sha_header) - 1; int ret; ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr)); @@ -651,8 +651,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev, return 0; } - if (memcmp(git_sha_header, fw->data + hdr.meta_start, - sizeof(git_sha_header))) { + if (memcmp(git_sha_header, fw->data + hdr.meta_start, header_len)) { /* Not the expected header, this isn't metadata we understand */ return 0; } @@ -665,7 +664,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev, } drm_info(&ptdev->base, "Firmware git sha: %s\n", - fw->data + hdr.meta_start + sizeof(git_sha_header)); + fw->data + hdr.meta_start + header_len); return 0; } From e7b5d23e5d4705ae93ef6af891b7b7bcccbe1257 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:14 +0100 Subject: [PATCH 58/77] drm/ttm: 
Provide a shmem backup implementation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide a standalone shmem backup implementation. Given the ttm_backup interface, this could later on be extended to providing other backup implementation than shmem, with one use-case being GPU swapout to a user-provided fd. v5: - Fix a UAF. (kernel test robot, Dan Carptenter) v6: - Rename ttm_backup_shmem_copy_page() function argument (Matthew Brost) - Add some missing documentation v8: - Use folio_file_page to get to the page we want to writeback instead of using the first page of the folio. v13: - Remove the base class abstraction (Christian König) - Include ttm_backup_bytes_avail(). v14: - Fix kerneldoc for ttm_backup_bytes_avail() (0-day) - Work around casting of __randomize_layout struct pointer (0-day) v15: - Return negative error code from ttm_backup_backup_page() (Christian König) - Doc fixes. (Christian König). Cc: Christian König Cc: Somalapuram Amaranath Cc: Matthew Brost Cc: Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Reviewed-by: Christian König Link: https://lore.kernel.org/intel-xe/20250305092220.123405-2-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/ttm/Makefile | 2 +- drivers/gpu/drm/ttm/ttm_backup.c | 207 +++++++++++++++++++++++++++++++ include/drm/ttm/ttm_backup.h | 74 +++++++++++ 3 files changed, 282 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/ttm/ttm_backup.c create mode 100644 include/drm/ttm/ttm_backup.h diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index dad298127226..40d07a35293a 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,7 +4,7 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \ - ttm_device.o ttm_sys_manager.o + ttm_device.o ttm_sys_manager.o ttm_backup.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c new file mode 100644 index 000000000000..93c007f18855 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_backup.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include +#include +#include + +/* + * Casting from randomized struct file * to struct ttm_backup * is fine since + * struct ttm_backup is never defined nor dereferenced. + */ +static struct file *ttm_backup_to_file(struct ttm_backup *backup) +{ + return (void *)backup; +} + +static struct ttm_backup *ttm_file_to_backup(struct file *file) +{ + return (void *)file; +} + +/* + * Need to map shmem indices to handle since a handle value + * of 0 means error, following the swp_entry_t convention. + */ +static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx) +{ + return (unsigned long)idx + 1; +} + +static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle) +{ + return handle - 1; +} + +/** + * ttm_backup_drop() - release memory associated with a handle + * @backup: The struct backup pointer used to obtain the handle + * @handle: The handle obtained from the @backup_page function. 
+ */ +void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle) +{ + loff_t start = ttm_backup_handle_to_shmem_idx(handle); + + start <<= PAGE_SHIFT; + shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start, + start + PAGE_SIZE - 1); +} + +/** + * ttm_backup_copy_page() - Copy the contents of a previously backed + * up page + * @backup: The struct backup pointer used to back up the page. + * @dst: The struct page to copy into. + * @handle: The handle returned when the page was backed up. + * @intr: Try to perform waits interruptable or at least killable. + * + * Return: 0 on success, Negative error code on failure, notably + * -EINTR if @intr was set to true and a signal is pending. + */ +int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, + pgoff_t handle, bool intr) +{ + struct file *filp = ttm_backup_to_file(backup); + struct address_space *mapping = filp->f_mapping; + struct folio *from_folio; + pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle); + + from_folio = shmem_read_folio(mapping, idx); + if (IS_ERR(from_folio)) + return PTR_ERR(from_folio); + + copy_highpage(dst, folio_file_page(from_folio, idx)); + folio_put(from_folio); + + return 0; +} + +/** + * ttm_backup_backup_page() - Backup a page + * @backup: The struct backup pointer to use. + * @page: The page to back up. + * @writeback: Whether to perform immediate writeback of the page. + * This may have performance implications. + * @idx: A unique integer for each page and each struct backup. + * This allows the backup implementation to avoid managing + * its address space separately. + * @page_gfp: The gfp value used when the page was allocated. + * This is used for accounting purposes. + * @alloc_gfp: The gfp to be used when allocating memory. + * + * Context: If called from reclaim context, the caller needs to + * assert that the shrinker gfp has __GFP_FS set, to avoid + * deadlocking on lock_page(). If @writeback is set to true and + * called from reclaim context, the caller also needs to assert + * that the shrinker gfp has __GFP_IO set, since without it, + * we're not allowed to start backup IO. + * + * Return: A handle on success. Negative error code on failure. + * + * Note: This function could be extended to back up a folio and + * implementations would then split the folio internally if needed. + * Drawback is that the caller would then have to keep track of + * the folio size- and usage. + */ +s64 +ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, + bool writeback, pgoff_t idx, gfp_t page_gfp, + gfp_t alloc_gfp) +{ + struct file *filp = ttm_backup_to_file(backup); + struct address_space *mapping = filp->f_mapping; + unsigned long handle = 0; + struct folio *to_folio; + int ret; + + to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp); + if (IS_ERR(to_folio)) + return PTR_ERR(to_folio); + + folio_mark_accessed(to_folio); + folio_lock(to_folio); + folio_mark_dirty(to_folio); + copy_highpage(folio_file_page(to_folio, idx), page); + handle = ttm_backup_shmem_idx_to_handle(idx); + + if (writeback && !folio_mapped(to_folio) && + folio_clear_dirty_for_io(to_folio)) { + struct writeback_control wbc = { + .sync_mode = WB_SYNC_NONE, + .nr_to_write = SWAP_CLUSTER_MAX, + .range_start = 0, + .range_end = LLONG_MAX, + .for_reclaim = 1, + }; + folio_set_reclaim(to_folio); + ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc); + if (!folio_test_writeback(to_folio)) + folio_clear_reclaim(to_folio); + /* + * If writepage succeeds, it unlocks the folio. 
+ * writepage() errors are otherwise dropped, since writepage() + * is only best effort here. + */ + if (ret) + folio_unlock(to_folio); + } else { + folio_unlock(to_folio); + } + + folio_put(to_folio); + + return handle; +} + +/** + * ttm_backup_fini() - Free the struct backup resources after last use. + * @backup: Pointer to the struct backup whose resources to free. + * + * After a call to this function, it's illegal to use the @backup pointer. + */ +void ttm_backup_fini(struct ttm_backup *backup) +{ + fput(ttm_backup_to_file(backup)); +} + +/** + * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space + * left for backup. + * + * This function is intended also for driver use to indicate whether a + * backup attempt is meaningful. + * + * Return: An approximate size of backup space available. + */ +u64 ttm_backup_bytes_avail(void) +{ + /* + * The idea behind backing up to shmem is that shmem objects may + * eventually be swapped out. So no point swapping out if there + * is no or low swap-space available. But the accuracy of this + * number also depends on shmem actually swapping out backed-up + * shmem objects without too much buffering. + */ + return (u64)get_nr_swap_pages() << PAGE_SHIFT; +} +EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail); + +/** + * ttm_backup_shmem_create() - Create a shmem-based struct backup. + * @size: The maximum size (in bytes) to back up. + * + * Create a backup utilizing shmem objects. + * + * Return: A pointer to a struct ttm_backup on success, + * an error pointer on error. + */ +struct ttm_backup *ttm_backup_shmem_create(loff_t size) +{ + struct file *filp; + + filp = shmem_file_setup("ttm shmem backup", size, 0); + + return ttm_file_to_backup(filp); +} diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h new file mode 100644 index 000000000000..24ad120b8827 --- /dev/null +++ b/include/drm/ttm/ttm_backup.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _TTM_BACKUP_H_ +#define _TTM_BACKUP_H_ + +#include +#include + +struct ttm_backup; + +/** + * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer + * @handle: The handle to convert. + * + * Converts an opaque handle received from the + * struct ttm_backoup_ops::backup_page() function to an (invalid) + * struct page pointer suitable for a struct page array. + * + * Return: An (invalid) struct page pointer. + */ +static inline struct page * +ttm_backup_handle_to_page_ptr(unsigned long handle) +{ + return (struct page *)(handle << 1 | 1); +} + +/** + * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle + * @page: The struct page pointer to check. + * + * Return: true if the struct page pointer is a handld returned from + * ttm_backup_handle_to_page_ptr(). False otherwise. + */ +static inline bool ttm_backup_page_ptr_is_handle(const struct page *page) +{ + return (unsigned long)page & 1; +} + +/** + * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle + * @page: The struct page pointer to convert + * + * Return: The handle that was previously used in + * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable + * for use as argument in the struct ttm_backup_ops drop() or + * copy_backed_up_page() functions. 
+ */ +static inline unsigned long +ttm_backup_page_ptr_to_handle(const struct page *page) +{ + WARN_ON(!ttm_backup_page_ptr_is_handle(page)); + return (unsigned long)page >> 1; +} + +void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle); + +int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, + pgoff_t handle, bool intr); + +s64 +ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, + bool writeback, pgoff_t idx, gfp_t page_gfp, + gfp_t alloc_gfp); + +void ttm_backup_fini(struct ttm_backup *backup); + +u64 ttm_backup_bytes_avail(void); + +struct ttm_backup *ttm_backup_shmem_create(loff_t size); + +#endif From b63d715b8090aed48bdef5930625946fa4c0d324 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:15 +0100 Subject: [PATCH 59/77] drm/ttm/pool, drm/ttm/tt: Provide a helper to shrink pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide a helper to shrink ttm_tt page-vectors on a per-page basis. A ttm_backup backend could then in theory get away with allocating a single temporary page for each struct ttm_tt. This is accomplished by splitting larger pages before trying to back them up. In the future we could allow ttm_backup to handle backing up large pages as well, but currently there's no benefit in doing that, since the shmem backup backend would have to split those anyway to avoid allocating too much temporary memory, and if the backend instead inserts pages into the swap-cache, those are split on reclaim by the core. Due to potential backup- and recover errors, allow partially swapped out struct ttm_tt's, although mark them as swapped out stopping them from being swapped out a second time. More details in the ttm_pool.c DOC section. v2: - A couple of cleanups and error fixes in ttm_pool_back_up_tt. - s/back_up/backup/ - Add a writeback parameter to the exported interface. v8: - Use a struct for flags for readability (Matt Brost) - Address misc other review comments (Matt Brost) v9: - Update the kerneldoc for the ttm_tt::backup field. v10: - Rebase. v13: - Rebase on ttm_backup interface change. Update kerneldoc. - Rebase and adjust ttm_tt_is_swapped(). v15: - Rebase on ttm_backup return value change. - Rebase on previous restructuring of ttm_pool_alloc() - Rework the ttm_pool backup interface (Christian König) - Remove cond_resched() (Christian König) - Get rid of the need to allocate an intermediate page array when restoring a multi-order page (Christian König) - Update documentation. Cc: Christian König Cc: Somalapuram Amaranath Cc: Matthew Brost Cc: Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Acked-by: Christian Koenig Link: https://lore.kernel.org/intel-xe/20250305092220.123405-3-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/ttm/ttm_pool.c | 554 +++++++++++++++++++++++++++++---- drivers/gpu/drm/ttm/ttm_tt.c | 54 ++++ include/drm/ttm/ttm_pool.h | 8 + include/drm/ttm/ttm_tt.h | 67 +++- 4 files changed, 629 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index c9eba76d5143..ffb7abf52bab 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -41,6 +41,7 @@ #include #endif +#include #include #include #include @@ -75,6 +76,35 @@ struct ttm_pool_alloc_state { enum ttm_caching tt_caching; }; +/** + * struct ttm_pool_tt_restore - State representing restore from backup + * @pool: The pool used for page allocation while restoring. 
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state. + * @alloced_page: Pointer to the page most recently allocated from a pool or system. + * @first_dma: The dma address corresponding to @alloced_page if dma_mapping + * is requested. + * @alloced_pages: The number of allocated pages present in the struct ttm_tt + * page vector from this restore session. + * @restored_pages: The number of 4K pages restored for @alloced_page (which + * is typically a multi-order page). + * @page_caching: The struct ttm_tt requested caching + * @order: The order of @alloced_page. + * + * Recovery from backup might fail when we've recovered less than the + * full ttm_tt. In order not to loose any data (yet), keep information + * around that allows us to restart a failed ttm backup recovery. + */ +struct ttm_pool_tt_restore { + struct ttm_pool *pool; + struct ttm_pool_alloc_state snapshot_alloc; + struct page *alloced_page; + dma_addr_t first_dma; + pgoff_t alloced_pages; + pgoff_t restored_pages; + enum ttm_caching page_caching; + unsigned int order; +}; + static unsigned long page_pool_size; MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool"); @@ -199,12 +229,11 @@ static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc) return 0; } -/* Map pages of 1 << order size and fill the DMA address array */ +/* DMA Map pages of 1 << order size and return the resulting dma_address. */ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, - struct page *p, dma_addr_t **dma_addr) + struct page *p, dma_addr_t *dma_addr) { dma_addr_t addr; - unsigned int i; if (pool->use_dma_alloc) { struct ttm_pool_dma *dma = (void *)p->private; @@ -218,10 +247,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, return -EFAULT; } - for (i = 1 << order; i ; --i) { - *(*dma_addr)++ = addr; - addr += PAGE_SIZE; - } + *dma_addr = addr; return 0; } @@ -371,6 +397,190 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) return p->private; } +/* + * Split larger pages so that we can free each PAGE_SIZE page as soon + * as it has been backed up, in order to avoid memory pressure during + * reclaim. + */ +static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p) +{ + unsigned int order = ttm_pool_page_order(pool, p); + pgoff_t nr; + + if (!order) + return; + + split_page(p, order); + nr = 1UL << order; + while (nr--) + (p++)->private = 0; +} + +/** + * DOC: Partial backup and restoration of a struct ttm_tt. + * + * Swapout using ttm_backup_backup_page() and swapin using + * ttm_backup_copy_page() may fail. + * The former most likely due to lack of swap-space or memory, the latter due + * to lack of memory or because of signal interruption during waits. + * + * Backup failure is easily handled by using a ttm_tt pages vector that holds + * both backup handles and page pointers. This has to be taken into account when + * restoring such a ttm_tt from backup, and when freeing it while backed up. + * When restoring, for simplicity, new pages are actually allocated from the + * pool and the contents of any old pages are copied in and then the old pages + * are released. + * + * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state + * to be able to resume an interrupted restore, and that structure is freed once + * the restoration is complete. If the struct ttm_tt is destroyed while there + * is a valid struct ttm_pool_tt_restore attached, that is also properly taken + * care of. 
+ */ + +/* Is restore ongoing for the currently allocated page? */ +static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore) +{ + return restore && restore->restored_pages < (1 << restore->order); +} + +/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */ +static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page, + const dma_addr_t *dma_addr, enum ttm_caching caching) +{ + struct ttm_pool_type *pt = NULL; + unsigned int order; + pgoff_t nr; + + if (pool) { + order = ttm_pool_page_order(pool, page); + nr = (1UL << order); + if (dma_addr) + ttm_pool_unmap(pool, *dma_addr, nr); + + pt = ttm_pool_select_type(pool, caching, order); + } else { + order = page->private; + nr = (1UL << order); + } + + if (pt) + ttm_pool_type_give(pt, page); + else + ttm_pool_free_page(pool, caching, order, page); + + return nr; +} + +/* Populate the page-array using the most recent allocated multi-order page. */ +static void ttm_pool_allocated_page_commit(struct page *allocated, + dma_addr_t first_dma, + struct ttm_pool_alloc_state *alloc, + pgoff_t nr) +{ + pgoff_t i; + + for (i = 0; i < nr; ++i) + *alloc->pages++ = allocated++; + + alloc->remaining_pages -= nr; + + if (!alloc->dma_addr) + return; + + for (i = 0; i < nr; ++i) { + *alloc->dma_addr++ = first_dma; + first_dma += PAGE_SIZE; + } +} + +/* + * When restoring, restore backed-up content to the newly allocated page and + * if successful, populate the page-table and dma-address arrays. + */ +static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore, + struct ttm_backup *backup, + const struct ttm_operation_ctx *ctx, + struct ttm_pool_alloc_state *alloc) + +{ + pgoff_t i, nr = 1UL << restore->order; + struct page **first_page = alloc->pages; + struct page *p; + int ret = 0; + + for (i = restore->restored_pages; i < nr; ++i) { + p = first_page[i]; + if (ttm_backup_page_ptr_is_handle(p)) { + unsigned long handle = ttm_backup_page_ptr_to_handle(p); + + if (handle == 0) { + restore->restored_pages++; + continue; + } + + ret = ttm_backup_copy_page(backup, restore->alloced_page + i, + handle, ctx->interruptible); + if (ret) + break; + + ttm_backup_drop(backup, handle); + } else if (p) { + /* + * We could probably avoid splitting the old page + * using clever logic, but ATM we don't care, as + * we prioritize releasing memory ASAP. Note that + * here, the old retained page is always write-back + * cached. + */ + ttm_pool_split_for_swap(restore->pool, p); + copy_highpage(restore->alloced_page + i, p); + __free_pages(p, 0); + } + + restore->restored_pages++; + first_page[i] = ttm_backup_handle_to_page_ptr(0); + } + + if (ret) { + if (!restore->restored_pages) { + dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL; + + ttm_pool_unmap_and_free(restore->pool, restore->alloced_page, + dma_addr, restore->page_caching); + restore->restored_pages = nr; + } + return ret; + } + + ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma, + alloc, nr); + if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page)) + alloc->caching_divide = alloc->pages; + restore->snapshot_alloc = *alloc; + restore->alloced_pages += nr; + + return 0; +} + +/* If restoring, save information needed for ttm_pool_restore_commit(). 
*/ +static void +ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order, + struct page *p, + enum ttm_caching page_caching, + dma_addr_t first_dma, + struct ttm_pool_tt_restore *restore, + const struct ttm_pool_alloc_state *alloc) +{ + restore->pool = pool; + restore->order = order; + restore->restored_pages = 0; + restore->page_caching = page_caching; + restore->first_dma = first_dma; + restore->alloced_page = p; + restore->snapshot_alloc = *alloc; +} + /* * Called when we got a page, either from a pool or newly allocated. * if needed, dma map the page and populate the dma address array. @@ -380,10 +590,11 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) */ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, struct page *p, enum ttm_caching page_caching, - struct ttm_pool_alloc_state *alloc) + struct ttm_pool_alloc_state *alloc, + struct ttm_pool_tt_restore *restore) { - pgoff_t i, nr = 1UL << order; bool caching_consistent; + dma_addr_t first_dma; int r = 0; caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p); @@ -395,17 +606,20 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, } if (alloc->dma_addr) { - r = ttm_pool_map(pool, order, p, &alloc->dma_addr); + r = ttm_pool_map(pool, order, p, &first_dma); if (r) return r; } - alloc->remaining_pages -= nr; - for (i = 0; i < nr; ++i) - *alloc->pages++ = p++; + if (restore) { + ttm_pool_page_allocated_restore(pool, order, p, page_caching, + first_dma, restore, alloc); + } else { + ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order); - if (caching_consistent) - alloc->caching_divide = alloc->pages; + if (caching_consistent) + alloc->caching_divide = alloc->pages; + } return 0; } @@ -428,22 +642,24 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt, pgoff_t start_page, pgoff_t end_page) { struct page **pages = &tt->pages[start_page]; - unsigned int order; + struct ttm_backup *backup = tt->backup; pgoff_t i, nr; for (i = start_page; i < end_page; i += nr, pages += nr) { - struct ttm_pool_type *pt = NULL; + struct page *p = *pages; - order = ttm_pool_page_order(pool, *pages); - nr = (1UL << order); - if (tt->dma_address) - ttm_pool_unmap(pool, tt->dma_address[i], nr); + nr = 1; + if (ttm_backup_page_ptr_is_handle(p)) { + unsigned long handle = ttm_backup_page_ptr_to_handle(p); - pt = ttm_pool_select_type(pool, caching, order); - if (pt) - ttm_pool_type_give(pt, *pages); - else - ttm_pool_free_page(pool, caching, order, *pages); + if (handle != 0) + ttm_backup_drop(backup, handle); + } else if (p) { + dma_addr_t *dma_addr = tt->dma_address ? + tt->dma_address + i : NULL; + + nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching); + } } } @@ -467,22 +683,11 @@ static unsigned int ttm_pool_alloc_find_order(unsigned int highest, return min_t(unsigned int, highest, __fls(alloc->remaining_pages)); } -/** - * ttm_pool_alloc - Fill a ttm_tt object - * - * @pool: ttm_pool to use - * @tt: ttm_tt object to fill - * @ctx: operation context - * - * Fill the ttm_tt object with pages and also make sure to DMA map them when - * necessary. - * - * Returns: 0 on successe, negative error code otherwise. 
- */ -int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, - struct ttm_operation_ctx *ctx) +static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx, + struct ttm_pool_alloc_state *alloc, + struct ttm_pool_tt_restore *restore) { - struct ttm_pool_alloc_state alloc; enum ttm_caching page_caching; gfp_t gfp_flags = GFP_USER; pgoff_t caching_divide; @@ -491,10 +696,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, struct page *p; int r; - ttm_pool_alloc_state_init(tt, &alloc); - - WARN_ON(!alloc.remaining_pages || ttm_tt_is_populated(tt)); - WARN_ON(alloc.dma_addr && !pool->dev); + WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt)); + WARN_ON(alloc->dma_addr && !pool->dev); if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC) gfp_flags |= __GFP_ZERO; @@ -509,9 +712,9 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, page_caching = tt->caching; allow_pools = true; - for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, &alloc); - alloc.remaining_pages; - order = ttm_pool_alloc_find_order(order, &alloc)) { + for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc); + alloc->remaining_pages; + order = ttm_pool_alloc_find_order(order, alloc)) { struct ttm_pool_type *pt; /* First, try to allocate a page from a pool if one exists. */ @@ -541,30 +744,120 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, r = -ENOMEM; goto error_free_all; } - r = ttm_pool_page_allocated(pool, order, p, page_caching, &alloc); + r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc, + restore); if (r) goto error_free_page; + + if (ttm_pool_restore_valid(restore)) { + r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc); + if (r) + goto error_free_all; + } } - r = ttm_pool_apply_caching(&alloc); + r = ttm_pool_apply_caching(alloc); if (r) goto error_free_all; + kfree(tt->restore); + tt->restore = NULL; + return 0; error_free_page: ttm_pool_free_page(pool, page_caching, order, p); error_free_all: - caching_divide = alloc.caching_divide - tt->pages; + if (tt->restore) + return r; + + caching_divide = alloc->caching_divide - tt->pages; ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide); ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, - tt->num_pages - alloc.remaining_pages); + tt->num_pages - alloc->remaining_pages); return r; } + +/** + * ttm_pool_alloc - Fill a ttm_tt object + * + * @pool: ttm_pool to use + * @tt: ttm_tt object to fill + * @ctx: operation context + * + * Fill the ttm_tt object with pages and also make sure to DMA map them when + * necessary. + * + * Returns: 0 on successe, negative error code otherwise. + */ +int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + struct ttm_operation_ctx *ctx) +{ + struct ttm_pool_alloc_state alloc; + + if (WARN_ON(ttm_tt_is_backed_up(tt))) + return -EINVAL; + + ttm_pool_alloc_state_init(tt, &alloc); + + return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL); +} EXPORT_SYMBOL(ttm_pool_alloc); +/** + * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up + * content. + * + * @pool: ttm_pool to use + * @tt: ttm_tt object to fill + * @ctx: operation context + * + * Fill the ttm_tt object with pages and also make sure to DMA map them when + * necessary. Read in backed-up content. + * + * Returns: 0 on successe, negative error code otherwise. 
+ */ +int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx) +{ + struct ttm_pool_alloc_state alloc; + + if (WARN_ON(!ttm_tt_is_backed_up(tt))) + return -EINVAL; + + if (!tt->restore) { + gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; + + ttm_pool_alloc_state_init(tt, &alloc); + if (ctx->gfp_retry_mayfail) + gfp |= __GFP_RETRY_MAYFAIL; + + tt->restore = kzalloc(sizeof(*tt->restore), gfp); + if (!tt->restore) + return -ENOMEM; + + tt->restore->snapshot_alloc = alloc; + tt->restore->pool = pool; + tt->restore->restored_pages = 1; + } else { + struct ttm_pool_tt_restore *restore = tt->restore; + int ret; + + alloc = restore->snapshot_alloc; + if (ttm_pool_restore_valid(tt->restore)) { + ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc); + if (ret) + return ret; + } + if (!alloc.remaining_pages) + return 0; + } + + return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore); +} + /** * ttm_pool_free - Free the backing pages from a ttm_tt object * @@ -582,6 +875,163 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt) } EXPORT_SYMBOL(ttm_pool_free); +/** + * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt + * @tt: The struct ttm_tt. + * + * Release handles with associated content or any remaining pages of + * a backed-up struct ttm_tt. + */ +void ttm_pool_drop_backed_up(struct ttm_tt *tt) +{ + struct ttm_pool_tt_restore *restore; + pgoff_t start_page = 0; + + WARN_ON(!ttm_tt_is_backed_up(tt)); + + restore = tt->restore; + + /* + * Unmap and free any uncommitted restore page. + * any tt page-array backup entries already read back has + * been cleared already + */ + if (ttm_pool_restore_valid(restore)) { + dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL; + + ttm_pool_unmap_and_free(restore->pool, restore->alloced_page, + dma_addr, restore->page_caching); + restore->restored_pages = 1UL << restore->order; + } + + /* + * If a restore is ongoing, part of the tt pages may have a + * caching different than writeback. + */ + if (restore) { + pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages; + + start_page = restore->alloced_pages; + WARN_ON(mid > start_page); + /* Pages that might be dma-mapped and non-cached */ + ttm_pool_free_range(restore->pool, tt, tt->caching, + 0, mid); + /* Pages that might be dma-mapped but cached */ + ttm_pool_free_range(restore->pool, tt, ttm_cached, + mid, restore->alloced_pages); + kfree(restore); + tt->restore = NULL; + } + + ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages); +} + +/** + * ttm_pool_backup() - Back up or purge a struct ttm_tt + * @pool: The pool used when allocating the struct ttm_tt. + * @tt: The struct ttm_tt. + * @flags: Flags to govern the backup behaviour. + * + * Back up or purge a struct ttm_tt. If @purge is true, then + * all pages will be freed directly to the system rather than to the pool + * they were allocated from, making the function behave similarly to + * ttm_pool_free(). If @purge is false the pages will be backed up instead, + * exchanged for handles. + * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and + * a subsequent call to ttm_pool_drop_backed_up() will drop it. + * If backup of a page fails for whatever reason, @ttm will still be + * partially backed up, retaining those pages for which backup fails. + * In that case, this function can be retried, possibly after freeing up + * memory resources. 
+ * + * Return: Number of pages actually backed up or freed, or negative + * error code on error. + */ +long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, + const struct ttm_backup_flags *flags) +{ + struct ttm_backup *backup = tt->backup; + struct page *page; + unsigned long handle; + gfp_t alloc_gfp; + gfp_t gfp; + int ret = 0; + pgoff_t shrunken = 0; + pgoff_t i, num_pages; + + if (WARN_ON(ttm_tt_is_backed_up(tt))) + return -EINVAL; + + if ((!ttm_backup_bytes_avail() && !flags->purge) || + pool->use_dma_alloc || ttm_tt_is_backed_up(tt)) + return -EBUSY; + +#ifdef CONFIG_X86 + /* Anything returned to the system needs to be cached. */ + if (tt->caching != ttm_cached) + set_pages_array_wb(tt->pages, tt->num_pages); +#endif + + if (tt->dma_address || flags->purge) { + for (i = 0; i < tt->num_pages; i += num_pages) { + unsigned int order; + + page = tt->pages[i]; + if (unlikely(!page)) { + num_pages = 1; + continue; + } + + order = ttm_pool_page_order(pool, page); + num_pages = 1UL << order; + if (tt->dma_address) + ttm_pool_unmap(pool, tt->dma_address[i], + num_pages); + if (flags->purge) { + shrunken += num_pages; + page->private = 0; + __free_pages(page, order); + memset(tt->pages + i, 0, + num_pages * sizeof(*tt->pages)); + } + } + } + + if (flags->purge) + return shrunken; + + if (pool->use_dma32) + gfp = GFP_DMA32; + else + gfp = GFP_HIGHUSER; + + alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL; + + for (i = 0; i < tt->num_pages; ++i) { + s64 shandle; + + page = tt->pages[i]; + if (unlikely(!page)) + continue; + + ttm_pool_split_for_swap(pool, page); + + shandle = ttm_backup_backup_page(backup, page, flags->writeback, i, + gfp, alloc_gfp); + if (shandle < 0) { + /* We allow partially shrunken tts */ + ret = shandle; + break; + } + handle = shandle; + tt->pages[i] = ttm_backup_handle_to_page_ptr(handle); + put_page(page); + shrunken++; + } + + return shrunken ? shrunken : ret; +} + /** * ttm_pool_init - Initialize a pool * diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 3baf215eca23..00b7c28f2329 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -158,6 +159,8 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm, ttm->swap_storage = NULL; ttm->sg = bo->sg; ttm->caching = caching; + ttm->restore = NULL; + ttm->backup = NULL; } int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, @@ -182,6 +185,13 @@ void ttm_tt_fini(struct ttm_tt *ttm) fput(ttm->swap_storage); ttm->swap_storage = NULL; + if (ttm_tt_is_backed_up(ttm)) + ttm_pool_drop_backed_up(ttm); + if (ttm->backup) { + ttm_backup_fini(ttm->backup); + ttm->backup = NULL; + } + if (ttm->pages) kvfree(ttm->pages); else @@ -253,6 +263,49 @@ int ttm_tt_swapin(struct ttm_tt *ttm) } EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin); +/** + * ttm_tt_backup() - Helper to back up a struct ttm_tt. + * @bdev: The TTM device. + * @tt: The struct ttm_tt. + * @flags: Flags that govern the backup behaviour. + * + * Update the page accounting and call ttm_pool_shrink_tt to free pages + * or back them up. + * + * Return: Number of pages freed or swapped out, or negative error code on + * error. 
+ */ +long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, + const struct ttm_backup_flags flags) +{ + long ret; + + if (WARN_ON(IS_ERR_OR_NULL(tt->backup))) + return 0; + + ret = ttm_pool_backup(&bdev->pool, tt, &flags); + if (ret > 0) { + tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED; + tt->page_flags |= TTM_TT_FLAG_BACKED_UP; + } + + return ret; +} + +int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx) +{ + int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx); + + if (ret) + return ret; + + tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP; + + return 0; +} +EXPORT_SYMBOL(ttm_tt_restore); + /** * ttm_tt_swapout - swap out tt object * @@ -348,6 +401,7 @@ int ttm_tt_populate(struct ttm_device *bdev, goto error; ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED; + ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP; if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) { ret = ttm_tt_swapin(ttm); if (unlikely(ret != 0)) { diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index 160d954a261e..54cd34a6e4c0 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -33,6 +33,7 @@ struct device; struct seq_file; +struct ttm_backup_flags; struct ttm_operation_ctx; struct ttm_pool; struct ttm_tt; @@ -89,6 +90,13 @@ void ttm_pool_fini(struct ttm_pool *pool); int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m); +void ttm_pool_drop_backed_up(struct ttm_tt *tt); + +long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm, + const struct ttm_backup_flags *flags); +int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx); + int ttm_pool_mgr_init(unsigned long num_pages); void ttm_pool_mgr_fini(void); diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 991edafdb2dd..c736c01ac2ca 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -32,11 +32,13 @@ #include #include +struct ttm_backup; struct ttm_device; struct ttm_tt; struct ttm_resource; struct ttm_buffer_object; struct ttm_operation_ctx; +struct ttm_pool_tt_restore; /** * struct ttm_tt - This is a structure holding the pages, caching- and aperture @@ -85,17 +87,22 @@ struct ttm_tt { * fault handling abuses the DMA api a bit and dma_map_attrs can't be * used to assure pgprot always matches. * + * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the + * struct ttm_tt has been (possibly partially) backed up. + * * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is * set by TTM after ttm_tt_populate() has successfully returned, and is * then unset when TTM calls ttm_tt_unpopulate(). + * */ #define TTM_TT_FLAG_SWAPPED BIT(0) #define TTM_TT_FLAG_ZERO_ALLOC BIT(1) #define TTM_TT_FLAG_EXTERNAL BIT(2) #define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3) #define TTM_TT_FLAG_DECRYPTED BIT(4) +#define TTM_TT_FLAG_BACKED_UP BIT(5) -#define TTM_TT_FLAG_PRIV_POPULATED BIT(5) +#define TTM_TT_FLAG_PRIV_POPULATED BIT(6) uint32_t page_flags; /** @num_pages: Number of pages in the page array. */ uint32_t num_pages; @@ -105,11 +112,20 @@ struct ttm_tt { dma_addr_t *dma_address; /** @swap_storage: Pointer to shmem struct file for swap storage. */ struct file *swap_storage; + /** + * @backup: Pointer to backup struct for backed up tts. + * Could be unified with @swap_storage. Meanwhile, the driver's + * ttm_tt_create() callback is responsible for assigning + * this field. 
+ */ + struct ttm_backup *backup; /** * @caching: The current caching state of the pages, see enum * ttm_caching. */ enum ttm_caching caching; + /** @restore: Partial restoration from backup state. TTM private */ + struct ttm_pool_tt_restore *restore; }; /** @@ -129,9 +145,38 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt) return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED; } +/** + * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up + * @tt: The struct ttm_tt. + * + * Return: true if swapped or backed up, false otherwise. + */ static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt) { - return tt->page_flags & TTM_TT_FLAG_SWAPPED; + return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP); +} + +/** + * ttm_tt_is_backed_up() - Whether the ttm_tt backed up + * @tt: The struct ttm_tt. + * + * Return: true if swapped or backed up, false otherwise. + */ +static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt) +{ + return tt->page_flags & TTM_TT_FLAG_BACKED_UP; +} + +/** + * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status + * @tt: The struct ttm_tt. + * + * Drivers can use this functionto clear the backed-up status, + * for example before destroying or re-validating a purged tt. + */ +static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt) +{ + tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP; } /** @@ -235,6 +280,24 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages); struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, struct ttm_tt *tt); unsigned long ttm_tt_pages_limit(void); + +/** + * struct ttm_backup_flags - Flags to govern backup behaviour. + * @purge: Free pages without backing up. Bypass pools. + * @writeback: Attempt to copy contents directly to swap space, even + * if that means blocking on writes to external memory. + */ +struct ttm_backup_flags { + u32 purge : 1; + u32 writeback : 1; +}; + +long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, + const struct ttm_backup_flags flags); + +int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx); + #if IS_ENABLED(CONFIG_AGP) #include From 8ae875f641188be338126cc76c76c82d256364dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:16 +0100 Subject: [PATCH 60/77] drm/ttm: Use fault-injection to test error paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use fault-injection to test partial TTM swapout and interrupted swapin. Return -EINTR for swapin to test the callers ability to handle and restart the swapin, and on swapout perform a partial swapout to test that the swapin and release_shrunken functionality. v8: - Use the core fault-injection system. 
v9: - Fix compliation failure for !CONFIG_FAULT_INJECTION Cc: Christian König Cc: Somalapuram Amaranath Cc: Matthew Brost Cc: Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Reviewed-by: Christian König Link: https://lore.kernel.org/intel-xe/20250305092220.123405-4-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/ttm/ttm_pool.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index ffb7abf52bab..83b10706ba89 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -48,6 +48,13 @@ #include "ttm_module.h" +#ifdef CONFIG_FAULT_INJECTION +#include +static DECLARE_FAULT_ATTR(backup_fault_inject); +#else +#define should_fail(...) false +#endif + /** * struct ttm_pool_dma - Helper object for coherent DMA mappings * @@ -514,6 +521,12 @@ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore, if (ttm_backup_page_ptr_is_handle(p)) { unsigned long handle = ttm_backup_page_ptr_to_handle(p); + if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible && + should_fail(&backup_fault_inject, 1)) { + ret = -EINTR; + break; + } + if (handle == 0) { restore->restored_pages++; continue; @@ -1007,7 +1020,13 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL; - for (i = 0; i < tt->num_pages; ++i) { + num_pages = tt->num_pages; + + /* Pretend doing fault injection by shrinking only half of the pages. */ + if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1)) + num_pages = DIV_ROUND_UP(num_pages, 2); + + for (i = 0; i < num_pages; ++i) { s64 shandle; page = tt->pages[i]; @@ -1293,6 +1312,10 @@ int ttm_pool_mgr_init(unsigned long num_pages) &ttm_pool_debugfs_globals_fops); debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL, &ttm_pool_debugfs_shrink_fops); +#ifdef CONFIG_FAULT_INJECTION + fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root, + &backup_fault_inject); +#endif #endif mm_shrinker = shrinker_alloc(0, "drm-ttm_pool"); From f3bcfd04a52fb1b1702349bed2bccc1126b97f89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:17 +0100 Subject: [PATCH 61/77] drm/ttm: Add a macro to perform LRU iteration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Following the design direction communicated here: https://lore.kernel.org/linux-mm/b7491378-defd-4f1c-31e2-29e4c77e2d67@amd.com/T/#ma918844aa8a6efe8768fdcda0c6590d5c93850c9 Export a LRU walker for driver shrinker use. The walker initially supports only trylocking, since that's the method used by shrinkes. The walker makes use of scoped_guard() to allow exiting from the LRU walk loop without performing any explicit unlocking or cleanup. v8: - Split out from another patch. - Use a struct for bool arguments to increase readability (Matt Brost). - Unmap user-space cpu-mappings before shrinking pages. - Explain non-fatal error codes (Matt Brost) v10: - Instead of using the existing helper, Wrap the interface inside out and provide a loop to de-midlayer things the LRU iteration (Christian König). - Removing the R-B by Matt Brost since the patch was significantly changed. v11: - Split the patch up to include just the LRU walk helper. 
v12: - Indent after scoped_guard() (Matt Brost) v15: - Adapt to new definition of scoped_guard() Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Acked-by: Dave Airlie Acked-by: Christian König Link: https://lore.kernel.org/intel-xe/20250305092220.123405-5-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/ttm/ttm_bo_util.c | 140 +++++++++++++++++++++++++++++- include/drm/ttm/ttm_bo.h | 72 +++++++++++++++ 2 files changed, 208 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 917096bd5f68..0cac02a9764c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -769,12 +769,10 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) return ret; } -static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk, +static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, bool *needs_unlock) { - struct ttm_operation_ctx *ctx = walk->ctx; - *needs_unlock = false; if (dma_resv_trylock(bo->base.resv)) { @@ -877,7 +875,7 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, * since if we do it the other way around, and the trylock fails, * we need to drop the lru lock to put the bo. */ - if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock)) + if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock)) bo_locked = true; else if (!walk->ticket || walk->ctx->no_wait_gpu || walk->trylock_only) @@ -920,3 +918,137 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, return progress; } +EXPORT_SYMBOL(ttm_lru_walk_for_evict); + +static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs) +{ + struct ttm_buffer_object *bo = curs->bo; + + if (bo) { + if (curs->needs_unlock) + dma_resv_unlock(bo->base.resv); + ttm_bo_put(bo); + curs->bo = NULL; + } +} + +/** + * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor + * and clean up any iteration it was used for. + * @curs: The cursor. + */ +void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs) +{ + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; + + ttm_bo_lru_cursor_cleanup_bo(curs); + spin_lock(lru_lock); + ttm_resource_cursor_fini(&curs->res_curs); + spin_unlock(lru_lock); +} +EXPORT_SYMBOL(ttm_bo_lru_cursor_fini); + +/** + * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor + * @curs: The ttm_bo_lru_cursor to initialize. + * @man: The ttm resource_manager whose LRU lists to iterate over. + * @ctx: The ttm_operation_ctx to govern the locking. + * + * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking + * or prelocked buffer objects are available as detailed by + * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not + * supported. + * + * Return: Pointer to @curs. The function does not fail. 
+ */ +struct ttm_bo_lru_cursor * +ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, + struct ttm_resource_manager *man, + struct ttm_operation_ctx *ctx) +{ + memset(curs, 0, sizeof(*curs)); + ttm_resource_cursor_init(&curs->res_curs, man); + curs->ctx = ctx; + + return curs; +} +EXPORT_SYMBOL(ttm_bo_lru_cursor_init); + +static struct ttm_buffer_object * +ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs) +{ + struct ttm_buffer_object *bo = res->bo; + + if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock)) + return NULL; + + if (!ttm_bo_get_unless_zero(bo)) { + if (curs->needs_unlock) + dma_resv_unlock(bo->base.resv); + return NULL; + } + + curs->bo = bo; + return bo; +} + +/** + * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists + * to find and lock buffer object. + * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and + * ttm_bo_lru_cursor_first(). + * + * Return: A pointer to a locked and reference-counted buffer object, + * or NULL if none could be found and looping should be terminated. + */ +struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs) +{ + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; + struct ttm_resource *res = NULL; + struct ttm_buffer_object *bo; + + ttm_bo_lru_cursor_cleanup_bo(curs); + + spin_lock(lru_lock); + for (;;) { + res = ttm_resource_manager_next(&curs->res_curs); + if (!res) + break; + + bo = ttm_bo_from_res_reserved(res, curs); + if (bo) + break; + } + + spin_unlock(lru_lock); + return res ? bo : NULL; +} +EXPORT_SYMBOL(ttm_bo_lru_cursor_next); + +/** + * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists + * to find and lock buffer object. + * @curs: The cursor initialized using ttm_bo_lru_cursor_init(). + * + * Return: A pointer to a locked and reference-counted buffer object, + * or NULL if none could be found and looping should be terminated. + */ +struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs) +{ + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; + struct ttm_buffer_object *bo; + struct ttm_resource *res; + + spin_lock(lru_lock); + res = ttm_resource_manager_first(&curs->res_curs); + if (!res) { + spin_unlock(lru_lock); + return NULL; + } + + bo = ttm_bo_from_res_reserved(res, curs); + spin_unlock(lru_lock); + + return bo ? bo : ttm_bo_lru_cursor_next(curs); +} +EXPORT_SYMBOL(ttm_bo_lru_cursor_first); diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 8ea11cd8df39..1509268849c0 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -467,4 +467,76 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); int ttm_bo_populate(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx); +/* Driver LRU walk helpers initially targeted for shrinking. */ + +/** + * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping + */ +struct ttm_bo_lru_cursor { + /** @res_curs: Embedded struct ttm_resource_cursor. */ + struct ttm_resource_cursor res_curs; + /** + * @ctx: The struct ttm_operation_ctx used while looping. + * governs the locking mode. + */ + struct ttm_operation_ctx *ctx; + /** + * @bo: Buffer object pointer if a buffer object is refcounted, + * NULL otherwise. + */ + struct ttm_buffer_object *bo; + /** + * @needs_unlock: Valid iff @bo != NULL. The bo resv needs + * unlock before the next iteration or after loop exit. 
+ */ + bool needs_unlock; +}; + +void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs); + +struct ttm_bo_lru_cursor * +ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, + struct ttm_resource_manager *man, + struct ttm_operation_ctx *ctx); + +struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs); + +struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs); + +/* + * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor. + */ +DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *, + if (_T) {ttm_bo_lru_cursor_fini(_T); }, + ttm_bo_lru_cursor_init(curs, man, ctx), + struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man, + struct ttm_operation_ctx *ctx); +static inline void * +class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T) +{ return *_T; } +#define class_ttm_bo_lru_cursor_is_conditional false + +/** + * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning + * resources on LRU lists. + * @_cursor: struct ttm_bo_lru_cursor to use for the iteration. + * @_man: The resource manager whose LRU lists to iterate over. + * @_ctx: The struct ttm_operation_context to govern the @_bo locking. + * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object + * for the current iteration. + * + * Iterate over all resources of @_man and for each resource, attempt to + * reference and lock (using the locking mode detailed in @_ctx) the buffer + * object it points to. If successful, assign @_bo to the address of the + * buffer object and update @_cursor. The iteration is guarded in the + * sense that @_cursor will be initialized before looping start and cleaned + * up at looping termination, even if terminated prematurely by, for + * example a return or break statement. Exiting the loop will also unlock + * (if needed) and unreference @_bo. + */ +#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \ + scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \ + for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \ + (_bo) = ttm_bo_lru_cursor_next(_cursor)) + #endif From 70d645deac98303d1bf9ab08a4e68da52bf8c1e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:18 +0100 Subject: [PATCH 62/77] drm/ttm: Add helpers for shrinking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a number of helpers for shrinking that access core TTM and core MM functionality in a way that make them unsuitable for driver open-coding. v11: - New patch (split off from previous) and additional helpers. v13: - Adapt to ttm_backup interface change. - Take resource off LRU when backed up. 
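
As an illustration only (not part of this patch), a driver shrinker scan path
could combine these helpers with the LRU cursor from the previous patch roughly
as below. The function name and the to_scan/scanned bookkeeping are made up for
the sketch, and it assumes the bo's ttm_tt was given a backup store with
ttm_tt_setup_backup() at creation time:

	static s64 demo_shrink_one_manager(struct ttm_resource_manager *man,
					   unsigned long to_scan,
					   unsigned long *scanned)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
		};
		struct ttm_bo_lru_cursor curs;
		struct ttm_buffer_object *bo;
		s64 freed = 0;

		ttm_bo_lru_for_each_reserved_guarded(&curs, man, &ctx, bo) {
			long lret;

			if (!ttm_bo_shrink_suitable(bo, &ctx))
				continue;

			*scanned += bo->ttm->num_pages;
			/* Back up pages; purge/writeback policy is driver specific. */
			lret = ttm_bo_shrink(&ctx, bo, (struct ttm_bo_shrink_flags)
					     {.purge = false,
					      .writeback = false,
					      .allow_move = true});
			if (lret < 0)
				break;

			freed += lret;
			if (*scanned >= to_scan)
				break;
		}

		return freed;
	}

The xe shrinker added later in this series follows essentially this pattern.
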
Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Acked-by: Dave Airlie Acked-by: Christian König Link: https://lore.kernel.org/intel-xe/20250305092220.123405-6-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/ttm/ttm_bo_util.c | 107 +++++++++++++++++++++++++++++- drivers/gpu/drm/ttm/ttm_tt.c | 29 ++++++++ include/drm/ttm/ttm_bo.h | 21 ++++++ include/drm/ttm/ttm_tt.h | 2 + 4 files changed, 158 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 0cac02a9764c..15cab9bda17f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -28,7 +28,7 @@ /* * Authors: Thomas Hellstrom */ - +#include #include #include @@ -1052,3 +1052,108 @@ struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs return bo ? bo : ttm_bo_lru_cursor_next(curs); } EXPORT_SYMBOL(ttm_bo_lru_cursor_first); + +/** + * ttm_bo_shrink() - Helper to shrink a ttm buffer object. + * @ctx: The struct ttm_operation_ctx used for the shrinking operation. + * @bo: The buffer object. + * @flags: Flags governing the shrinking behaviour. + * + * The function uses the ttm_tt_back_up functionality to back up or + * purge a struct ttm_tt. If the bo is not in system, it's first + * moved there. + * + * Return: The number of pages shrunken or purged, or + * negative error code on failure. + */ +long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, + const struct ttm_bo_shrink_flags flags) +{ + static const struct ttm_place sys_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .mem_type = TTM_PL_SYSTEM, + .flags = 0, + }; + static struct ttm_placement sys_placement = { + .num_placement = 1, + .placement = &sys_placement_flags, + }; + struct ttm_tt *tt = bo->ttm; + long lret; + + dma_resv_assert_held(bo->base.resv); + + if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) { + int ret = ttm_bo_validate(bo, &sys_placement, ctx); + + /* Consider -ENOMEM and -ENOSPC non-fatal. */ + if (ret) { + if (ret == -ENOMEM || ret == -ENOSPC) + ret = -EBUSY; + return ret; + } + } + + ttm_bo_unmap_virtual(bo); + lret = ttm_bo_wait_ctx(bo, ctx); + if (lret < 0) + return lret; + + if (bo->bulk_move) { + spin_lock(&bo->bdev->lru_lock); + ttm_resource_del_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); + } + + lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags) + {.purge = flags.purge, + .writeback = flags.writeback}); + + if (lret <= 0 && bo->bulk_move) { + spin_lock(&bo->bdev->lru_lock); + ttm_resource_add_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); + } + + if (lret < 0 && lret != -EINTR) + return -EBUSY; + + return lret; +} +EXPORT_SYMBOL(ttm_bo_shrink); + +/** + * ttm_bo_shrink_suitable() - Whether a bo is suitable for shinking + * @ctx: The struct ttm_operation_ctx governing the shrinking. + * @bo: The candidate for shrinking. + * + * Check whether the object, given the information available to TTM, + * is suitable for shinking, This function can and should be used + * before attempting to shrink an object. + * + * Return: true if suitable. false if not. 
+ */ +bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) +{ + return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count && + (!ctx->no_wait_gpu || + dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)); +} +EXPORT_SYMBOL(ttm_bo_shrink_suitable); + +/** + * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU + * during shrinking + * + * In some situations, like direct reclaim, waiting (in particular gpu waiting) + * should be avoided since it may stall a system that could otherwise make progress + * shrinking something else less time consuming. + * + * Return: true if gpu waiting should be avoided, false if not. + */ +bool ttm_bo_shrink_avoid_wait(void) +{ + return !current_is_kswapd(); +} +EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 00b7c28f2329..df0aa6c4b8b8 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -531,3 +531,32 @@ unsigned long ttm_tt_pages_limit(void) return ttm_pages_limit; } EXPORT_SYMBOL(ttm_tt_pages_limit); + +/** + * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt + * @tt: The ttm_tt for wich to allocate and assign a backup structure. + * + * Assign a backup structure to be used for tt backup. This should + * typically be done at bo creation, to avoid allocations at shrinking + * time. + * + * Return: 0 on success, negative error code on failure. + */ +int ttm_tt_setup_backup(struct ttm_tt *tt) +{ + struct ttm_backup *backup = + ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT); + + if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))) + return -EINVAL; + + if (IS_ERR(backup)) + return PTR_ERR(backup); + + if (tt->backup) + ttm_backup_fini(tt->backup); + + tt->backup = backup; + return 0; +} +EXPORT_SYMBOL(ttm_tt_setup_backup); diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 1509268849c0..903cd1030110 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -225,6 +225,27 @@ struct ttm_lru_walk { s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, struct ttm_resource_manager *man, s64 target); +/** + * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour + * @purge: Purge the content rather than backing it up. + * @writeback: Attempt to immediately write content to swap space. + * @allow_move: Allow moving to system before shrinking. 
This is typically + * not desired for zombie- or ghost objects (with zombie object meaning + * objects with a zero gem object refcount) + */ +struct ttm_bo_shrink_flags { + u32 purge : 1; + u32 writeback : 1; + u32 allow_move : 1; +}; + +long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, + const struct ttm_bo_shrink_flags flags); + +bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx); + +bool ttm_bo_shrink_avoid_wait(void); + /** * ttm_bo_get - reference a struct ttm_buffer_object * diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index c736c01ac2ca..13cf47f3322f 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -298,6 +298,8 @@ long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, const struct ttm_operation_ctx *ctx); +int ttm_tt_setup_backup(struct ttm_tt *tt); + #if IS_ENABLED(CONFIG_AGP) #include From 00c8efc3180f0cf919b53980e969430657e01685 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:19 +0100 Subject: [PATCH 63/77] drm/xe: Add a shrinker for xe bos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than relying on the TTM watermark accounting add a shrinker for xe_bos in TT or system memory. Leverage the newly added TTM per-page shrinking and shmem backup support. Although xe doesn't fully support WONTNEED (purgeable) bos yet, introduce and add shrinker support for purgeable ttm_tts. v2: - Cleanups bugfixes and a KUNIT shrinker test. - Add writeback support, and activate if kswapd. v3: - Move the try_shrink() helper to core TTM. - Minor cleanups. v4: - Add runtime pm for the shrinker. Shrinking may require an active device for CCS metadata copying. v5: - Separately purge ghost- and zombie objects in the shrinker. - Fix a format specifier - type inconsistency. (Kernel test robot). v7: - s/long/s64/ (Christian König) - s/sofar/progress/ (Matt Brost) v8: - Rebase on Xe KUNIT update. - Add content verifying to the shrinker kunit test. - Split out TTM changes to a separate patch. - Get rid of multiple bool arguments for clarity (Matt Brost) - Avoid an error pointer dereference (Matt Brost) - Avoid an integer overflow (Matt Auld) - Address misc review comments by Matt Brost. v9: - Fix a compliation error. - Rebase. v10: - Update to new LRU walk interface. - Rework ghost-, zombie and purged object shrinking. - Rebase. v11: - Use additional TTM helpers. - Honor __GFP_FS and __GFP_IO - Rebase. v13: - Use ttm_tt_setup_backup(). v14: - Don't set up backup on imported bos. v15: - Rebase on backup interface changes. 
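
For reference (illustrative, condensed from the KUNIT test update in this
patch), a buffer object is switched over to purgeable accounting roughly like
this, where xe_tt is the bo's struct xe_ttm_tt:

	xe_ttm_tt_account_subtract(&xe_tt->ttm);	/* drop from shrinkable count */
	xe_tt->purgeable = true;
	xe_ttm_tt_account_add(&xe_tt->ttm);		/* re-add as purgeable */
	bo->ttm.priority = 0;
	spin_lock(&bo->ttm.bdev->lru_lock);
	ttm_bo_move_to_lru_tail(&bo->ttm);		/* refresh its LRU position */
	spin_unlock(&bo->ttm.bdev->lru_lock);
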
Cc: Christian König Cc: Somalapuram Amaranath Cc: Matthew Brost Cc: Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Acked-by: Christian König Link: https://lore.kernel.org/intel-xe/20250305092220.123405-7-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/xe/Makefile | 1 + drivers/gpu/drm/xe/tests/xe_bo.c | 6 +- drivers/gpu/drm/xe/xe_bo.c | 202 +++++++++++++++++++-- drivers/gpu/drm/xe/xe_bo.h | 36 ++++ drivers/gpu/drm/xe/xe_device.c | 8 + drivers/gpu/drm/xe/xe_device_types.h | 2 + drivers/gpu/drm/xe/xe_shrinker.c | 258 +++++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_shrinker.h | 18 ++ 8 files changed, 513 insertions(+), 18 deletions(-) create mode 100644 drivers/gpu/drm/xe/xe_shrinker.c create mode 100644 drivers/gpu/drm/xe/xe_shrinker.h diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 5c97ad6ed738..45cdaab71126 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -94,6 +94,7 @@ xe-y += xe_bb.o \ xe_ring_ops.o \ xe_sa.o \ xe_sched_job.o \ + xe_shrinker.o \ xe_step.o \ xe_sync.o \ xe_tile.o \ diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 6795d1d916e4..9fde67ca989f 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -514,8 +514,13 @@ static int shrink_test_run_device(struct xe_device *xe) * other way around, they may not be subject to swapping... */ if (alloced < purgeable) { + xe_ttm_tt_account_subtract(&xe_tt->ttm); xe_tt->purgeable = true; + xe_ttm_tt_account_add(&xe_tt->ttm); bo->ttm.priority = 0; + spin_lock(&bo->ttm.bdev->lru_lock); + ttm_bo_move_to_lru_tail(&bo->ttm); + spin_unlock(&bo->ttm.bdev->lru_lock); } else { int ret = shrink_test_fill_random(bo, &prng, link); @@ -570,7 +575,6 @@ static int shrink_test_run_device(struct xe_device *xe) if (ret == -EINTR) intr = true; } while (ret == -EINTR && !signal_pending(current)); - if (!ret && !purgeable) failed = shrink_test_verify(test, bo, count, &prng, link); diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 3f5391d416d4..2827cb4618e6 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,7 @@ #include "xe_pm.h" #include "xe_preempt_fence.h" #include "xe_res_cursor.h" +#include "xe_shrinker.h" #include "xe_trace_bo.h" #include "xe_ttm_stolen_mgr.h" #include "xe_vm.h" @@ -281,9 +283,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo, } } +/* struct xe_ttm_tt - Subclassed ttm_tt for xe */ struct xe_ttm_tt { struct ttm_tt ttm; - struct device *dev; + /** @xe - The xe device */ + struct xe_device *xe; struct sg_table sgt; struct sg_table *sg; /** @purgeable: Whether the content of the pages of @ttm is purgeable. 
*/ @@ -296,7 +300,8 @@ static int xe_tt_map_sg(struct ttm_tt *tt) unsigned long num_pages = tt->num_pages; int ret; - XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); + XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && + !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)); if (xe_tt->sg) return 0; @@ -304,13 +309,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt) ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, num_pages, 0, (u64)num_pages << PAGE_SHIFT, - xe_sg_segment_size(xe_tt->dev), + xe_sg_segment_size(xe_tt->xe->drm.dev), GFP_KERNEL); if (ret) return ret; xe_tt->sg = &xe_tt->sgt; - ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, + ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); if (ret) { sg_free_table(xe_tt->sg); @@ -326,7 +331,7 @@ static void xe_tt_unmap_sg(struct ttm_tt *tt) struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); if (xe_tt->sg) { - dma_unmap_sgtable(xe_tt->dev, xe_tt->sg, + dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, 0); sg_free_table(xe_tt->sg); xe_tt->sg = NULL; @@ -341,21 +346,47 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo) return xe_tt->sg; } +/* + * Account ttm pages against the device shrinker's shrinkable and + * purgeable counts. + */ +static void xe_ttm_tt_account_add(struct ttm_tt *tt) +{ + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); + + if (xe_tt->purgeable) + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages); + else + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0); +} + +static void xe_ttm_tt_account_subtract(struct ttm_tt *tt) +{ + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); + + if (xe_tt->purgeable) + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages); + else + xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0); +} + static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, u32 page_flags) { struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); struct xe_device *xe = xe_bo_device(bo); - struct xe_ttm_tt *tt; + struct xe_ttm_tt *xe_tt; + struct ttm_tt *tt; unsigned long extra_pages; enum ttm_caching caching = ttm_cached; int err; - tt = kzalloc(sizeof(*tt), GFP_KERNEL); - if (!tt) + xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL); + if (!xe_tt) return NULL; - tt->dev = xe->drm.dev; + tt = &xe_tt->ttm; + xe_tt->xe = xe; extra_pages = 0; if (xe_bo_needs_ccs_pages(bo)) @@ -401,42 +432,66 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, caching = ttm_uncached; } - err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); + if (ttm_bo->type != ttm_bo_type_sg) + page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE; + + err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages); if (err) { - kfree(tt); + kfree(xe_tt); return NULL; } - return &tt->ttm; + if (ttm_bo->type != ttm_bo_type_sg) { + err = ttm_tt_setup_backup(tt); + if (err) { + ttm_tt_fini(tt); + kfree(xe_tt); + return NULL; + } + } + + return tt; } static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, struct ttm_operation_ctx *ctx) { + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); int err; /* * dma-bufs are not populated with pages, and the dma- * addresses are set up when moved to XE_PL_TT. 
*/ - if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) + if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && + !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) return 0; - err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); + if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) { + err = ttm_tt_restore(ttm_dev, tt, ctx); + } else { + ttm_tt_clear_backed_up(tt); + err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); + } if (err) return err; - return err; + xe_tt->purgeable = false; + xe_ttm_tt_account_add(tt); + + return 0; } static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) { - if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) + if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && + !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) return; xe_tt_unmap_sg(tt); - return ttm_pool_free(&ttm_dev->pool, tt); + ttm_pool_free(&ttm_dev->pool, tt); + xe_ttm_tt_account_subtract(tt); } static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) @@ -871,6 +926,111 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, return ret; } +static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx, + struct ttm_buffer_object *bo, + unsigned long *scanned) +{ + long lret; + + /* Fake move to system, without copying data. */ + if (bo->resource->mem_type != XE_PL_SYSTEM) { + struct ttm_resource *new_resource; + + lret = ttm_bo_wait_ctx(bo, ctx); + if (lret) + return lret; + + lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx); + if (lret) + return lret; + + xe_tt_unmap_sg(bo->ttm); + ttm_bo_move_null(bo, new_resource); + } + + *scanned += bo->ttm->num_pages; + lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) + {.purge = true, + .writeback = false, + .allow_move = false}); + + if (lret > 0) + xe_ttm_tt_account_subtract(bo->ttm); + + return lret; +} + +/** + * xe_bo_shrink() - Try to shrink an xe bo. + * @ctx: The struct ttm_operation_ctx used for shrinking. + * @bo: The TTM buffer object whose pages to shrink. + * @flags: Flags governing the shrink behaviour. + * @scanned: Pointer to a counter of the number of pages + * attempted to shrink. + * + * Try to shrink- or purge a bo, and if it succeeds, unmap dma. + * Note that we need to be able to handle also non xe bos + * (ghost bos), but only if the struct ttm_tt is embedded in + * a struct xe_ttm_tt. When the function attempts to shrink + * the pages of a buffer object, The value pointed to by @scanned + * is updated. + * + * Return: The number of pages shrunken or purged, or negative error + * code on failure. 
+ */ +long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, + const struct xe_bo_shrink_flags flags, + unsigned long *scanned) +{ + struct ttm_tt *tt = bo->ttm; + struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); + struct ttm_place place = {.mem_type = bo->resource->mem_type}; + struct xe_bo *xe_bo = ttm_to_xe_bo(bo); + struct xe_device *xe = xe_tt->xe; + bool needs_rpm; + long lret = 0L; + + if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) || + (flags.purge && !xe_tt->purgeable)) + return -EBUSY; + + if (!ttm_bo_eviction_valuable(bo, &place)) + return -EBUSY; + + if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo)) + return xe_bo_shrink_purge(ctx, bo, scanned); + + if (xe_tt->purgeable) { + if (bo->resource->mem_type != XE_PL_SYSTEM) + lret = xe_bo_move_notify(xe_bo, ctx); + if (!lret) + lret = xe_bo_shrink_purge(ctx, bo, scanned); + goto out_unref; + } + + /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */ + needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && + xe_bo_needs_ccs_pages(xe_bo)); + if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) + goto out_unref; + + *scanned += tt->num_pages; + lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) + {.purge = false, + .writeback = flags.writeback, + .allow_move = true}); + if (needs_rpm) + xe_pm_runtime_put(xe); + + if (lret > 0) + xe_ttm_tt_account_subtract(tt); + +out_unref: + xe_bo_put(xe_bo); + + return lret; +} + /** * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory * @bo: The buffer object to move. @@ -1885,6 +2045,8 @@ int xe_bo_pin_external(struct xe_bo *bo) } ttm_bo_pin(&bo->ttm); + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) + xe_ttm_tt_account_subtract(bo->ttm.ttm); /* * FIXME: If we always use the reserve / unreserve functions for locking @@ -1944,6 +2106,8 @@ int xe_bo_pin(struct xe_bo *bo) } ttm_bo_pin(&bo->ttm); + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) + xe_ttm_tt_account_subtract(bo->ttm.ttm); /* * FIXME: If we always use the reserve / unreserve functions for locking @@ -1978,6 +2142,8 @@ void xe_bo_unpin_external(struct xe_bo *bo) spin_unlock(&xe->pinned.lock); ttm_bo_unpin(&bo->ttm); + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) + xe_ttm_tt_account_add(bo->ttm.ttm); /* * FIXME: If we always use the reserve / unreserve functions for locking @@ -2001,6 +2167,8 @@ void xe_bo_unpin(struct xe_bo *bo) spin_unlock(&xe->pinned.lock); } ttm_bo_unpin(&bo->ttm); + if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) + xe_ttm_tt_account_add(bo->ttm.ttm); } /** diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index d9386ab03140..f39a218a73d2 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -146,6 +146,28 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo) void xe_bo_put(struct xe_bo *bo); +/* + * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an + * xe bo + * @bo: The bo for which we want to obtain a refcount. + * + * There is a short window between where the bo's GEM object refcount reaches + * zero and where we put the final ttm_bo reference. Code in the eviction- and + * shrinking path should therefore attempt to grab a gem object reference before + * trying to use members outside of the base class ttm object. This function is + * intended for that purpose. On successful return, this function must be paired + * with an xe_bo_put(). + * + * Return: @bo on success, NULL on failure. 
+ */ +static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo) +{ + if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount)) + return NULL; + + return bo; +} + static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo) { if (bo) @@ -341,6 +363,20 @@ static inline unsigned int xe_sg_segment_size(struct device *dev) return round_down(max / 2, PAGE_SIZE); } +/** + * struct xe_bo_shrink_flags - flags governing the shrink behaviour. + * @purge: Only purging allowed. Don't shrink if bo not purgeable. + * @writeback: Attempt to immediately move content to swap. + */ +struct xe_bo_shrink_flags { + u32 purge : 1; + u32 writeback : 1; +}; + +long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, + const struct xe_bo_shrink_flags flags, + unsigned long *scanned); + #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) /** * xe_bo_is_mem_type - Whether the bo currently resides in the given diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index fc4a49f25c09..41ed8ed55015 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -50,6 +50,7 @@ #include "xe_pcode.h" #include "xe_pm.h" #include "xe_query.h" +#include "xe_shrinker.h" #include "xe_sriov.h" #include "xe_tile.h" #include "xe_ttm_stolen_mgr.h" @@ -289,6 +290,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy) if (xe->unordered_wq) destroy_workqueue(xe->unordered_wq); + if (!IS_ERR_OR_NULL(xe->mem.shrinker)) + xe_shrinker_destroy(xe->mem.shrinker); + if (xe->destroy_wq) destroy_workqueue(xe->destroy_wq); @@ -321,6 +325,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, if (err) goto err; + xe->mem.shrinker = xe_shrinker_create(xe); + if (IS_ERR(xe->mem.shrinker)) + return ERR_CAST(xe->mem.shrinker); + xe->info.devid = pdev->device; xe->info.revid = pdev->revision; xe->info.force_execlist = xe_modparam.force_execlist; diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 8a7b15972413..20161344709d 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -375,6 +375,8 @@ struct xe_device { struct xe_mem_region vram; /** @mem.sys_mgr: system TTM manager */ struct ttm_resource_manager sys_mgr; + /** @mem.sys_mgr: system memory shrinker. */ + struct xe_shrinker *shrinker; } mem; /** @sriov: device level virtualization data */ diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c new file mode 100644 index 000000000000..8184390f9c7b --- /dev/null +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include + +#include +#include +#include + +#include "xe_bo.h" +#include "xe_pm.h" +#include "xe_shrinker.h" + +/** + * struct xe_shrinker - per-device shrinker + * @xe: Back pointer to the device. + * @lock: Lock protecting accounting. + * @shrinkable_pages: Number of pages that are currently shrinkable. + * @purgeable_pages: Number of pages that are currently purgeable. + * @shrink: Pointer to the mm shrinker. + * @pm_worker: Worker to wake up the device if required. 
+ */ +struct xe_shrinker { + struct xe_device *xe; + rwlock_t lock; + long shrinkable_pages; + long purgeable_pages; + struct shrinker *shrink; + struct work_struct pm_worker; +}; + +static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink) +{ + return shrink->private_data; +} + +/** + * xe_shrinker_mod_pages() - Modify shrinker page accounting + * @shrinker: Pointer to the struct xe_shrinker. + * @shrinkable: Shrinkable pages delta. May be negative. + * @purgeable: Purgeable page delta. May be negative. + * + * Modifies the shrinkable and purgeable pages accounting. + */ +void +xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable) +{ + write_lock(&shrinker->lock); + shrinker->shrinkable_pages += shrinkable; + shrinker->purgeable_pages += purgeable; + write_unlock(&shrinker->lock); +} + +static s64 xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) +{ + unsigned int mem_type; + s64 freed = 0, lret; + + for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) { + struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); + struct ttm_bo_lru_cursor curs; + struct ttm_buffer_object *ttm_bo; + + if (!man || !man->use_tt) + continue; + + ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) { + if (!ttm_bo_shrink_suitable(ttm_bo, ctx)) + continue; + + lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned); + if (lret < 0) + return lret; + + freed += lret; + if (*scanned >= to_scan) + break; + } + } + + return freed; +} + +static unsigned long +xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) +{ + struct xe_shrinker *shrinker = to_xe_shrinker(shrink); + unsigned long num_pages; + bool can_backup = !!(sc->gfp_mask & __GFP_FS); + + num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT; + read_lock(&shrinker->lock); + + if (can_backup) + num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages); + else + num_pages = 0; + + num_pages += shrinker->purgeable_pages; + read_unlock(&shrinker->lock); + + return num_pages ? num_pages : SHRINK_EMPTY; +} + +/* + * Check if we need runtime pm, and if so try to grab a reference if + * already active. If grabbing a reference fails, queue a worker that + * does it for us outside of reclaim, but don't wait for it to complete. + * If bo shrinking needs an rpm reference and we don't have it (yet), + * that bo will be skipped anyway. 
+ */ +static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force, + unsigned long nr_to_scan, bool can_backup) +{ + struct xe_device *xe = shrinker->xe; + + if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) || + !ttm_backup_bytes_avail()) + return false; + + if (!force) { + read_lock(&shrinker->lock); + force = (nr_to_scan > shrinker->purgeable_pages && can_backup); + read_unlock(&shrinker->lock); + if (!force) + return false; + } + + if (!xe_pm_runtime_get_if_active(xe)) { + if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) { + xe_pm_runtime_get(xe); + return true; + } + queue_work(xe->unordered_wq, &shrinker->pm_worker); + return false; + } + + return true; +} + +static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm) +{ + if (runtime_pm) + xe_pm_runtime_put(shrinker->xe); +} + +static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + struct xe_shrinker *shrinker = to_xe_shrinker(shrink); + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = ttm_bo_shrink_avoid_wait(), + }; + unsigned long nr_to_scan, nr_scanned = 0, freed = 0; + struct xe_bo_shrink_flags shrink_flags = { + .purge = true, + /* Don't request writeback without __GFP_IO. */ + .writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO), + }; + bool runtime_pm; + bool purgeable; + bool can_backup = !!(sc->gfp_mask & __GFP_FS); + s64 lret; + + nr_to_scan = sc->nr_to_scan; + + read_lock(&shrinker->lock); + purgeable = !!shrinker->purgeable_pages; + read_unlock(&shrinker->lock); + + /* Might need runtime PM. Try to wake early if it looks like it. */ + runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup); + + if (purgeable && nr_scanned < nr_to_scan) { + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, + nr_to_scan, &nr_scanned); + if (lret >= 0) + freed += lret; + } + + sc->nr_scanned = nr_scanned; + if (nr_scanned >= nr_to_scan || !can_backup) + goto out; + + /* If we didn't wake before, try to do it now if needed. */ + if (!runtime_pm) + runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup); + + shrink_flags.purge = false; + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, + nr_to_scan, &nr_scanned); + if (lret >= 0) + freed += lret; + + sc->nr_scanned = nr_scanned; +out: + xe_shrinker_runtime_pm_put(shrinker, runtime_pm); + return nr_scanned ? freed : SHRINK_STOP; +} + +/* Wake up the device for shrinking. */ +static void xe_shrinker_pm(struct work_struct *work) +{ + struct xe_shrinker *shrinker = + container_of(work, typeof(*shrinker), pm_worker); + + xe_pm_runtime_get(shrinker->xe); + xe_pm_runtime_put(shrinker->xe); +} + +/** + * xe_shrinker_create() - Create an xe per-device shrinker + * @xe: Pointer to the xe device. + * + * Returns: A pointer to the created shrinker on success, + * Negative error code on failure. 
+ */ +struct xe_shrinker *xe_shrinker_create(struct xe_device *xe) +{ + struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL); + + if (!shrinker) + return ERR_PTR(-ENOMEM); + + shrinker->shrink = shrinker_alloc(0, "xe system shrinker"); + if (!shrinker->shrink) { + kfree(shrinker); + return ERR_PTR(-ENOMEM); + } + + INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm); + shrinker->xe = xe; + rwlock_init(&shrinker->lock); + shrinker->shrink->count_objects = xe_shrinker_count; + shrinker->shrink->scan_objects = xe_shrinker_scan; + shrinker->shrink->private_data = shrinker; + shrinker_register(shrinker->shrink); + + return shrinker; +} + +/** + * xe_shrinker_destroy() - Destroy an xe per-device shrinker + * @shrinker: Pointer to the shrinker to destroy. + */ +void xe_shrinker_destroy(struct xe_shrinker *shrinker) +{ + xe_assert(shrinker->xe, !shrinker->shrinkable_pages); + xe_assert(shrinker->xe, !shrinker->purgeable_pages); + shrinker_free(shrinker->shrink); + flush_work(&shrinker->pm_worker); + kfree(shrinker); +} diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h new file mode 100644 index 000000000000..28a038f4fcbf --- /dev/null +++ b/drivers/gpu/drm/xe/xe_shrinker.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_SHRINKER_H_ +#define _XE_SHRINKER_H_ + +struct xe_shrinker; +struct xe_device; + +void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable); + +struct xe_shrinker *xe_shrinker_create(struct xe_device *xe); + +void xe_shrinker_destroy(struct xe_shrinker *shrinker); + +#endif From d2d5f6d578848f13b1d01abd4e9a2452e5602586 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Wed, 5 Mar 2025 10:22:20 +0100 Subject: [PATCH 64/77] drm/xe: Increase the XE_PL_TT watermark MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XE_PL_TT watermark was set to 50% of system memory. The idea behind that was unclear since the net effect is that TT memory will be evicted to TTM_PL_SYSTEM memory if that watermark is exceeded, requiring PPGTT rebinds and dma remapping. But there is no similar watermark for TTM_PL_1SYSTEM memory. The TTM functionality that tries to swap out system memory to shmem objects if a 50% limit of total system memory is reached is orthogonal to this, and with the shrinker added, it's no longer in effect. Replace the 50% TTM_PL_TT limit with a 100% limit, in effect allowing all graphics memory to be bound to the device unless it has been swapped out by the shrinker. Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Link: https://lore.kernel.org/intel-xe/20250305092220.123405-8-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/xe/xe_ttm_sys_mgr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c index 9844a8edbfe1..d38b91872da3 100644 --- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c @@ -108,9 +108,8 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe) u64 gtt_size; si_meminfo(&si); + /* Potentially restrict amount of TT memory here. 
*/ gtt_size = (u64)si.totalram * si.mem_unit; - /* TTM limits allocation of all TTM devices by 50% of system memory */ - gtt_size /= 2; man->use_tt = true; man->func = &xe_ttm_sys_mgr_func; From ced7486468ac3b38d59a69fca5d97998499c936b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 4 Mar 2025 15:29:02 +0100 Subject: [PATCH 65/77] drm/panel: fix Visionox RM692E5 dependencies The newly added driver uses the DSC helpers, so the corresponding Kconfig option must be enabled: ERROR: modpost: "drm_dsc_pps_payload_pack" [drivers/gpu/drm/panel/panel-visionox-rm692e5.ko] undefined! Fixes: 7cb3274341bf ("drm/panel: Add Visionox RM692E5 panel driver") Signed-off-by: Arnd Bergmann Reviewed-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20250304142907.732196-1-arnd@kernel.org Signed-off-by: Neil Armstrong --- drivers/gpu/drm/panel/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 5927806cb4a9..e059b06e0239 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -1020,6 +1020,8 @@ config DRM_PANEL_VISIONOX_RM692E5 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE + select DRM_DISPLAY_DSC_HELPER + select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Visionox RM692E5 amoled display panels, such as the one found in the Nothing Phone (1) From b57aa47d39e94dc47403a745e2024664e544078c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:04 +0100 Subject: [PATCH 66/77] drm/gem: Test for imported GEM buffers with helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add drm_gem_is_imported() that tests if a GEM object's buffer has been imported. Update the GEM code accordingly. GEM code usually tests for imports if import_attach has been set in struct drm_gem_object. But attaching a dma-buf on import requires a DMA-capable importer device, which is not the case for many serial busses like USB or I2C. The new helper tests if a GEM object's dma-buf has been created from the GEM object. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem.c | 4 ++-- include/drm/drm_gem.h | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ee811764c3df..c6240bab3fa5 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -348,7 +348,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, return -ENOENT; /* Don't allow imported objects to be mapped */ - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { ret = -EINVAL; goto out; } @@ -1178,7 +1178,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, drm_vma_node_start(&obj->vma_node)); drm_printf_indent(p, indent, "size=%zu\n", obj->size); drm_printf_indent(p, indent, "imported=%s\n", - str_yes_no(obj->import_attach)); + str_yes_no(drm_gem_is_imported(obj))); if (obj->funcs->print_info) obj->funcs->print_info(p, indent, obj); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index fdae947682cd..2bf893eabb4b 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,6 +35,7 @@ */ #include +#include #include #include #include @@ -575,6 +576,19 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje return (obj->handle_count > 1) || obj->dma_buf; } +/** + * drm_gem_is_imported() - Tests if GEM object's buffer has been imported + * @obj: the GEM object + * + * Returns: + * True if the GEM object's buffer has been imported, false otherwise + */ +static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) +{ + /* The dma-buf's priv field points to the original GEM object. */ + return obj->dma_buf && (obj->dma_buf->priv != obj); +} + #ifdef CONFIG_LOCKDEP /** * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. From 3f0e02609692086a07354099dae11edba6d9d859 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:05 +0100 Subject: [PATCH 67/77] drm/gem-dma: Test for imported buffers with drm_gem_is_imported() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of testing import_attach for imported GEM buffers, invoke drm_gem_is_imported() to do the test. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-3-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_dma_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 16988d316a6d..4f0320df858f 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -228,7 +228,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj) struct drm_gem_object *gem_obj = &dma_obj->base; struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr); - if (gem_obj->import_attach) { + if (drm_gem_is_imported(gem_obj)) { if (dma_obj->vaddr) dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, dma_obj->sgt); From e8afa1557f4f963c9a511bd2c6074a941c308685 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:06 +0100 Subject: [PATCH 68/77] drm/gem-dma: Use dma_buf from GEM object instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid dereferencing struct drm_gem_object.import_attach for the imported dma-buf. The dma_buf field in the GEM object instance refers to the same buffer. Prepares to make import_attach optional. Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-4-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_dma_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 4f0320df858f..b7f033d4352a 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj) if (drm_gem_is_imported(gem_obj)) { if (dma_obj->vaddr) - dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); + dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map); drm_prime_gem_destroy(gem_obj, dma_obj->sgt); } else if (dma_obj->vaddr) { if (dma_obj->map_noncoherent) From dbdd636e51eb0429f10f47e5562119407b86f09a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:07 +0100 Subject: [PATCH 69/77] drm/gem-shmem: Test for imported buffers with drm_gem_is_imported() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of testing import_attach for imported GEM buffers, invoke drm_gem_is_imported() to do the test. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-5-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_shmem_helper.c | 24 ++++++++++++------------ include/drm/drm_gem_shmem_helper.h | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 5ab351409312..7722cd720248 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -160,7 +160,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { drm_prime_gem_destroy(obj, shmem->sgt); } else { dma_resv_lock(shmem->base.resv, NULL); @@ -255,7 +255,7 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) dma_resv_assert_held(shmem->base.resv); - drm_WARN_ON(shmem->base.dev, shmem->base.import_attach); + drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base)); ret = drm_gem_shmem_get_pages(shmem); @@ -286,7 +286,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) struct drm_gem_object *obj = &shmem->base; int ret; - drm_WARN_ON(obj->dev, obj->import_attach); + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); if (ret) @@ -309,7 +309,7 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - drm_WARN_ON(obj->dev, obj->import_attach); + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); dma_resv_lock(shmem->base.resv, NULL); drm_gem_shmem_unpin_locked(shmem); @@ -338,7 +338,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct drm_gem_object *obj = &shmem->base; int ret = 0; - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { ret = dma_buf_vmap(obj->import_attach->dmabuf, map); if (!ret) { if (drm_WARN_ON(obj->dev, map->is_iomem)) { @@ -378,7 +378,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, return 0; err_put_pages: - if (!obj->import_attach) + if (!drm_gem_is_imported(obj)) drm_gem_shmem_put_pages(shmem); err_zero_use: shmem->vmap_use_count = 0; @@ -404,7 +404,7 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, { struct drm_gem_object *obj = &shmem->base; - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { dma_resv_assert_held(shmem->base.resv); @@ -566,7 +566,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_WARN_ON(obj->dev, obj->import_attach); + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); dma_resv_lock(shmem->base.resv, NULL); @@ -618,7 +618,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct struct drm_gem_object *obj = &shmem->base; int ret; - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { /* Reset both vm_ops and vm_private_data, so we don't end up with * vm_ops pointing to our implementation if the dma-buf backend * doesn't set those fields. 
@@ -663,7 +663,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, struct drm_printer *p, unsigned int indent) { - if (shmem->base.import_attach) + if (drm_gem_is_imported(&shmem->base)) return; drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); @@ -690,7 +690,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - drm_WARN_ON(obj->dev, obj->import_attach); + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); } @@ -705,7 +705,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ if (shmem->sgt) return shmem->sgt; - drm_WARN_ON(obj->dev, obj->import_attach); + drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); ret = drm_gem_shmem_get_pages(shmem); if (ret) diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index d22e3fb53631..cef5a6b5a4d6 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -120,7 +120,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem { return (shmem->madv > 0) && !shmem->vmap_use_count && shmem->sgt && - !shmem->base.dma_buf && !shmem->base.import_attach; + !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base); } void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); From 1a148af06000e545e714fe3210af3d77ff903c11 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:08 +0100 Subject: [PATCH 70/77] drm/gem-shmem: Use dma_buf from GEM object instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid dereferencing struct drm_gem_object.import_attach for the imported dma-buf. The dma_buf field in the GEM object instance refers to the same buffer. Prepares to make import_attach optional. 
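As the shmem diff below shows, vmap on an imported object now goes through obj->dma_buf directly. A driver implementing the same imported-vs-native split might look roughly like this; my_gem_vmap() and the -EOPNOTSUPP fallback are hypothetical, while the dma-buf calls and the is_iomem guard mirror the helper:

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/iosys-map.h>
#include <drm/drm_gem.h>

static int my_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->dma_buf, map);
		if (!ret && map->is_iomem) {
			/*
			 * The exporter handed back I/O memory, which the
			 * shmem-style helpers cannot use; undo the mapping.
			 */
			dma_buf_vunmap(obj->dma_buf, map);
			return -EIO;
		}
		return ret;
	}

	/* Native objects would map their own backing storage here. */
	return -EOPNOTSUPP;
}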
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-6-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_shmem_helper.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 7722cd720248..d99dee67353a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -339,10 +339,10 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, int ret = 0; if (drm_gem_is_imported(obj)) { - ret = dma_buf_vmap(obj->import_attach->dmabuf, map); + ret = dma_buf_vmap(obj->dma_buf, map); if (!ret) { if (drm_WARN_ON(obj->dev, map->is_iomem)) { - dma_buf_vunmap(obj->import_attach->dmabuf, map); + dma_buf_vunmap(obj->dma_buf, map); return -EIO; } } @@ -405,7 +405,7 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct drm_gem_object *obj = &shmem->base; if (drm_gem_is_imported(obj)) { - dma_buf_vunmap(obj->import_attach->dmabuf, map); + dma_buf_vunmap(obj->dma_buf, map); } else { dma_resv_assert_held(shmem->base.resv); From 3d672f483e5dc99053b9c94bd0e5f504c2e3f758 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:09 +0100 Subject: [PATCH 71/77] drm/gem-framebuffer: Test for imported buffers with drm_gem_is_imported() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of testing import_attach for imported GEM buffers, invoke drm_gem_is_imported() to do the test. Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-7-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 185534f56bab..2bf606ba24cd 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -429,7 +429,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat if (!obj) continue; import_attach = obj->import_attach; - if (!import_attach) + if (!drm_gem_is_imported(obj)) continue; ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); if (ret) @@ -466,7 +466,7 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct goto err___drm_gem_fb_end_cpu_access; } import_attach = obj->import_attach; - if (!import_attach) + if (!drm_gem_is_imported(obj)) continue; ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); if (ret) From cce16fcd7446dcff7480cd9d2b6417075ed81065 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:10 +0100 Subject: [PATCH 72/77] drm/gem-framebuffer: Use dma_buf from GEM object instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid dereferencing struct drm_gem_object.import_attach for the imported dma-buf. The dma_buf field in the GEM object instance refers to the same buffer. Prepares to make import_attach optional. 
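The framebuffer CPU-access helpers apply the same test to every plane of a framebuffer. A simplified sketch of the begin-access loop after this change; the unwinding of already-begun planes that the real helper performs is omitted here, and only the DRM and dma-buf calls are the real API:

#include <linux/dma-buf.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>

static int my_fb_begin_cpu_access(struct drm_framebuffer *fb,
				  enum dma_data_direction dir)
{
	unsigned int i;
	int ret;

	for (i = 0; i < fb->format->num_planes; i++) {
		struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, i);

		/* Only imported planes need the exporter's cache handling. */
		if (!obj || !drm_gem_is_imported(obj))
			continue;
		ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
		if (ret)
			return ret;
	}

	return 0;
}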
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-8-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 2bf606ba24cd..0fbeb686e561 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -419,7 +419,6 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap); static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir, unsigned int num_planes) { - struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; int ret; @@ -428,10 +427,9 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat obj = drm_gem_fb_get_obj(fb, num_planes); if (!obj) continue; - import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); + ret = dma_buf_end_cpu_access(obj->dma_buf, dir); if (ret) drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n", ret, num_planes, dir); @@ -454,7 +452,6 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat */ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) { - struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; unsigned int i; int ret; @@ -465,10 +462,9 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct ret = -EINVAL; goto err___drm_gem_fb_end_cpu_access; } - import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); + ret = dma_buf_begin_cpu_access(obj->dma_buf, dir); if (ret) goto err___drm_gem_fb_end_cpu_access; } From 4972532ccda34b930004847c7aa40a078640c393 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:11 +0100 Subject: [PATCH 73/77] drm/fb-dma-helper: Test for imported buffers with drm_gem_is_imported() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of testing import_attach for imported GEM buffers, invoke drm_gem_is_imported() to do the test. 
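The conversion below sits in the scanout-buffer path used by the panic handler, where the buffer must be writable from the CPU, so imported buffers are rejected. A condensed sketch of that check; the surrounding drm_panic plumbing is left out, while drm_fb_dma_get_gem_obj() and the fields used are the real ones:

#include <linux/errno.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>

static int my_check_scanout_buffer(struct drm_framebuffer *fb)
{
	struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);

	/*
	 * The panic handler writes to the buffer from the CPU, so an
	 * imported dma-buf that may not be CPU-mappable is not usable.
	 */
	if (!dma_obj || drm_gem_is_imported(&dma_obj->base))
		return -ENODEV;

	/* The buffer must already have a kernel mapping. */
	if (!dma_obj->vaddr)
		return -ENODEV;

	return 0;
}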
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-9-tzimmermann@suse.de --- drivers/gpu/drm/drm_fb_dma_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c index e1d61a65210b..2c4dc7ebc0c3 100644 --- a/drivers/gpu/drm/drm_fb_dma_helper.c +++ b/drivers/gpu/drm/drm_fb_dma_helper.c @@ -178,7 +178,7 @@ int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane, dma_obj = drm_fb_dma_get_gem_obj(fb, 0); /* Buffer should be accessible from the CPU */ - if (dma_obj->base.import_attach) + if (drm_gem_is_imported(&dma_obj->base)) return -ENODEV; /* Buffer should be already mapped to CPU */ From 0695d8fc113a03facfee55930f5906ab4461fd4d Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:12 +0100 Subject: [PATCH 74/77] drm/mipi-dbi: Test for imported buffers with drm_gem_is_imported() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of testing import_attach for imported GEM buffers, invoke drm_gem_is_imported() to do the test. Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-10-tzimmermann@suse.de --- drivers/gpu/drm/drm_mipi_dbi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 34bca7567576..89e05a5bed1d 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -218,7 +218,7 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer * switch (fb->format->format) { case DRM_FORMAT_RGB565: if (swap) - drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach, + drm_fb_swab(&dst_map, NULL, src, fb, clip, !drm_gem_is_imported(gem), fmtcnv_state); else drm_fb_memcpy(&dst_map, NULL, src, fb, clip); From f83a9b8c7fd0557b0c50784bfdc1bbe9140c9bf8 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 26 Feb 2025 18:03:13 +0100 Subject: [PATCH 75/77] drm/prime: Use dma_buf from GEM object instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid dereferencing struct drm_gem_object.import_attach for the imported dma-buf. The dma_buf field in the GEM object instance refers to the same buffer. Prepares to make import_attach optional. 
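With both imported and self-exported objects keeping their dma-buf in obj->dma_buf, the re-export path in the diff below no longer needs a special case for import_attach. The core of that path reduces to roughly the following sketch; the locking on dev->object_name_lock and the handle bookkeeping of the real function are left out:

#include <linux/dma-buf.h>
#include <drm/drm_gem.h>

static struct dma_buf *my_reexport_dmabuf(struct drm_gem_object *obj)
{
	/*
	 * Covers both previously imported and previously exported objects:
	 * either way the dma-buf is cached in the GEM object instance.
	 */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		return obj->dma_buf;
	}

	/* Otherwise the caller creates a fresh export for the object. */
	return NULL;
}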
Signed-off-by: Thomas Zimmermann Reviewed-by: Anusha Srivatsa Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20250226172457.217725-11-tzimmermann@suse.de --- drivers/gpu/drm/drm_prime.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 32a8781cfd67..bdb51c8f262e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -453,13 +453,7 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, } mutex_lock(&dev->object_name_lock); - /* re-export the original imported object */ - if (obj->import_attach) { - dmabuf = obj->import_attach->dmabuf; - get_dma_buf(dmabuf); - goto out_have_obj; - } - + /* re-export the original imported/exported object */ if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; From ce43cf347759936bd7b16d66b5b5072757756e4a Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 25 Feb 2025 21:39:32 +0100 Subject: [PATCH 76/77] drm/gma500: Replace deprecated strncpy() with strscpy() strncpy() is deprecated for NUL-terminated destination buffers. Use strscpy() instead and remove the manual NUL-termination. Compile-tested only. Link: https://github.com/KSPP/linux/issues/90 Cc: linux-hardening@vger.kernel.org Signed-off-by: Thorsten Blum Reviewed-by: Kees Cook Signed-off-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250225203932.334123-1-thorsten.blum@linux.dev --- drivers/gpu/drm/gma500/cdv_intel_dp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 53990d27c39f..c85143792019 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -855,8 +855,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector, memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); intel_dp->adapter.owner = THIS_MODULE; - strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); - intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; + strscpy(intel_dp->adapter.name, name); intel_dp->adapter.algo_data = &intel_dp->algo; intel_dp->adapter.dev.parent = connector->base.kdev; From 4423e607ff50157aaf088854b145936cbab4d560 Mon Sep 17 00:00:00 2001 From: Charles Han Date: Wed, 5 Mar 2025 16:49:11 +0800 Subject: [PATCH 77/77] drm/gma500: fix inconsistent indenting warning Fix below inconsistent indenting smatch warning. smatch warnings: drivers/gpu/drm/gma500/cdv_device.c:218 cdv_errata() warn: inconsistent indenting Signed-off-by: Charles Han Signed-off-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250305084911.6394-1-hanchunchao@inspur.com --- drivers/gpu/drm/gma500/cdv_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 3e83299113e3..718d45891fc7 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -215,7 +215,7 @@ static void cdv_errata(struct drm_device *dev) * Bonus Launch to work around the issue, by degrading * performance. */ - CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108); + CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108); } /**
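For reference, the strscpy() conversion above relies on the destination being a fixed-size character array: strscpy() always NUL-terminates, and its two-argument form derives the buffer size from the array type, which is why the manual '\0' assignment can be dropped. A small sketch of the same pattern; struct my_adapter is a hypothetical stand-in for the real i2c_adapter, and only strscpy() itself is the kernel API:

#include <linux/string.h>

struct my_adapter {
	char name[48];	/* fixed-size array, like i2c_adapter.name */
};

static void my_set_adapter_name(struct my_adapter *adap, const char *name)
{
	/*
	 * Two-argument form: the size is taken from sizeof(adap->name),
	 * the result is always NUL-terminated, and truncation is reported
	 * through the return value (ignored here).
	 */
	strscpy(adap->name, name);
}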