Merge branch 'net-ipa-start-adding-ipa-v4-5-support'
Alex Elder says:

====================
net: ipa: start adding IPA v4.5 support

This series starts updating the IPA code to support IPA hardware
version 4.5.

The first patch fixes a problem found while preparing these updates.
Testing shows the code works with or without the change, and with
the fix the code matches "downstream" Qualcomm code.

The second patch updates the definitions for IPA register offsets
and field masks to reflect the changes that come with IPA v4.5.  A
few register updates have been deferred until later, because making
use of them involves some nontrivial code updates.

One type of change that IPA v4.5 brings is expanding the range of
certain configuration values.  High-order bits are added in a few
cases, and the third patch implements the code changes necessary to
use those newly available bits.

The fourth patch implements several fairly minor changes to the code
required for IPA v4.5 support.

The last two patches implement changes to the GSI registers used for
IPA.  Almost none of the registers change, but the range of memory
in which most of the GSI registers are located is shifted by a fixed
amount.  The fifth patch updates the GSI register definitions, and
the last patch implements the memory shift for IPA v4.5.
====================

Link: https://lore.kernel.org/r/20201125204522.5884-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Nov 28, 2020
2 parents 3567e23 + cdeee49 commit e71d2b9
Showing 6 changed files with 220 additions and 46 deletions.
35 changes: 30 additions & 5 deletions drivers/net/ipa/gsi.c
@@ -195,6 +195,8 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
u32 adjust;

/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);

@@ -203,8 +205,12 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

/* Reverse the offset adjustment for inter-EE register offsets */
adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

@@ -781,9 +787,17 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
val |= USE_DB_ENG_FMASK;

/* Starting with IPA v4.0 the command channel uses the escape buffer */
if (gsi->version != IPA_VERSION_3_5_1 && channel->command)
val |= USE_ESCAPE_BUF_ONLY_FMASK;
/* v4.0 introduces an escape buffer for prefetch. We use it
* on all but the AP command channel.
*/
if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
/* If not otherwise set, prefetch buffers are used */
if (gsi->version < IPA_VERSION_4_5)
val |= USE_ESCAPE_BUF_ONLY_FMASK;
else
val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
PREFETCH_MODE_FMASK);
}

iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

@@ -2081,6 +2095,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
u32 adjust;
int ret;

gsi_validate_build();
@@ -2107,11 +2122,21 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
return -EINVAL;
}

/* Make sure we can make our pointer adjustment if necessary */
adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
if (res->start < adjust) {
dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
adjust);
return -EINVAL;
}

gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
return -ENOMEM;
}
/* Adjust register range pointer downward for newer IPA versions */
gsi->virt -= adjust;

init_completion(&gsi->completion);

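A minimal user-space sketch of the addressing scheme the gsi_irq_setup() and gsi_init() hunks above implement, using the GSI_EE_REG_ADJUST value defined in gsi_reg.h below; the two register offsets here are made-up placeholders, not values from the driver:

#include <stdint.h>
#include <stdio.h>

#define GSI_EE_REG_ADJUST	0x0000d000	/* IPA v4.5+, from gsi_reg.h */

/* Hypothetical offsets, for illustration only */
#define SHIFTED_REG_OFFSET	0x0001f080	/* member of the shifted EE register group */
#define INTER_EE_REG_OFFSET	0x0000c01c	/* inter-EE register, not shifted */

int main(void)
{
	uintptr_t mapped = 0x10000000;		/* stand-in for the ioremap() result */
	int ipa_v4_5_plus = 1;
	uint32_t adjust = ipa_v4_5_plus ? GSI_EE_REG_ADJUST : 0;

	/* gsi_init(): bias the base pointer downward once */
	uintptr_t virt = mapped - adjust;

	/* Ordinary EE registers: pre-v4.5 offsets land on the shifted bank */
	printf("EE register at %#lx\n", (unsigned long)(virt + SHIFTED_REG_OFFSET));

	/* gsi_irq_setup(): inter-EE registers add the adjustment back */
	printf("inter-EE register at %#lx\n",
	       (unsigned long)(virt + adjust + INTER_EE_REG_OFFSET));

	return 0;
}
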
24 changes: 24 additions & 0 deletions drivers/net/ipa/gsi_reg.h
@@ -38,6 +38,17 @@
* (though the actual limit is hardware-dependent).
*/

/* GSI EE registers as a group are shifted downward by a fixed
* constant amount for IPA versions 4.5 and beyond. This applies
* to all GSI registers we use *except* the ones that disable
* inter-EE interrupts for channels and event channels.
*
* We handle this by adjusting the pointer to the mapped GSI memory
* region downward. Then in the one place we use them (gsi_irq_setup())
* we undo that adjustment for the inter-EE interrupt registers.
*/
#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */

#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
@@ -105,6 +116,16 @@ enum gsi_channel_type {
#define USE_DB_ENG_FMASK GENMASK(9, 9)
/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
/* The next two fields are present for IPA v4.5 and above */
#define PREFETCH_MODE_FMASK GENMASK(13, 10)
#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16)
/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
enum gsi_prefetch_mode {
GSI_USE_PREFETCH_BUFS = 0x0,
GSI_ESCAPE_BUF_ONLY = 0x1,
GSI_SMART_PREFETCH = 0x2,
GSI_FREE_PREFETCH = 0x3,
};

#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
@@ -287,6 +308,9 @@ enum gsi_iram_size {
/* The next two values are available for IPA v4.0 and above */
IRAM_SIZE_TWO_N_HALF_KB = 0x2,
IRAM_SIZE_THREE_KB = 0x3,
/* The next two values are available for IPA v4.5 and above */
IRAM_SIZE_THREE_N_HALF_KB = 0x4,
IRAM_SIZE_FOUR_KB = 0x5,
};

/* IRQ condition for each type is cleared by writing type-specific register */
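
A quick check of why the two branches in gsi_channel_program() above write the same register bit for the escape-buffer case: encoding GSI_ESCAPE_BUF_ONLY (0x1) into the new four-bit PREFETCH_MODE field sets the same bit 10 the old single-bit flag occupied. A standalone sketch with simplified stand-ins for the kernel's GENMASK() and u32_encode_bits() (assumes a GCC/Clang toolchain for __builtin_ctz):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel helpers, illustration only */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define USE_ESCAPE_BUF_ONLY_FMASK	GENMASK(10, 10)	/* IPA v4.0..v4.2 */
#define PREFETCH_MODE_FMASK		GENMASK(13, 10)	/* IPA v4.5+ */
#define GSI_ESCAPE_BUF_ONLY		0x1

static uint32_t u32_encode_bits(uint32_t v, uint32_t mask)
{
	return (v << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t val = u32_encode_bits(GSI_ESCAPE_BUF_ONLY, PREFETCH_MODE_FMASK);

	/* Same bit pattern as the old single-bit flag, so the register
	 * value is unchanged for the escape-buffer-only case.
	 */
	assert(val == USE_ESCAPE_BUF_ONLY_FMASK);
	printf("both encodings set %#x\n", val);	/* 0x400 */

	return 0;
}
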
49 changes: 36 additions & 13 deletions drivers/net/ipa/ipa_endpoint.c
@@ -485,45 +485,52 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
u32 val = 0;

if (endpoint->data->qmap) {
size_t header_size = sizeof(struct rmnet_map_header);
enum ipa_version version = ipa->version;

/* We might supply a checksum header after the QMAP header */
if (endpoint->toward_ipa && endpoint->data->checksum)
header_size += sizeof(struct rmnet_map_ul_csum_header);
val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
val |= ipa_header_size_encoded(version, header_size);

/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
u32 off; /* Field offset within header */
u32 offset; /* Field offset within header */

/* Where IPA will write the metadata value */
off = offsetof(struct rmnet_map_header, mux_id);
val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
offset = offsetof(struct rmnet_map_header, mux_id);
val |= ipa_metadata_offset_encoded(version, offset);

/* Where IPA will write the length */
off = offsetof(struct rmnet_map_header, pkt_len);
offset = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version == IPA_VERSION_4_5)
offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
val |= HDR_OFST_METADATA_VALID_FMASK;

/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
/* HDR_LEN_INC_DEAGG_HDR is 0 */
/* HDR_METADATA_REG_VALID is 0 (TX only) */
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}

iowrite32(val, endpoint->ipa->reg_virt + offset);
iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->data->rx.pad_align;
struct ipa *ipa = endpoint->ipa;
u32 val = 0;

val |= HDR_ENDIANNESS_FMASK; /* big endian */
@@ -545,10 +552,24 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

iowrite32(val, endpoint->ipa->reg_virt + offset);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
*/
if (ipa->version == IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->data->qmap && !endpoint->toward_ipa) {
u32 offset;

offset = offsetof(struct rmnet_map_header, pkt_len);
offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
val |= u32_encode_bits(offset,
HDR_OFST_PKT_SIZE_MSB_FMASK);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
iowrite32(val, ipa->reg_virt + offset);
}


static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
@@ -634,6 +655,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
/* other fields ignored */
@@ -844,9 +866,10 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(status_endpoint_id,
STATUS_ENDP_FMASK);
}
/* STATUS_LOCATION is 0 (status element precedes packet) */
/* The next field is present for IPA v4.0 and above */
/* STATUS_PKT_SUPPRESS_FMASK is 0 */
/* STATUS_LOCATION is 0, meaning status element precedes
* packet (not present for IPA v4.5)
*/
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
}

iowrite32(val, ipa->reg_virt + offset);
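
The LSB/MSB split performed by the ipa_endpoint_init_hdr() and ipa_endpoint_init_hdr_ext() hunks above (field_mask() keeps the low bits for the HDR register, an hweight32()-sized shift yields the high bits written to HDR_EXT) can be sketched in isolation as follows; the field positions and the offset value are illustrative placeholders, not values copied from ipa_reg.h, and GCC/Clang builtins stand in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* Placeholder positions: a 6-bit LSB field in HDR, a 2-bit MSB field in HDR_EXT */
#define HDR_OFST_PKT_SIZE_FMASK		GENMASK(25, 20)
#define HDR_OFST_PKT_SIZE_MSB_FMASK	GENMASK(19, 18)

static uint32_t field_mask(uint32_t fmask)
{
	return fmask >> __builtin_ctz(fmask);	/* e.g. 0x3f for a 6-bit field */
}

int main(void)
{
	/* offsetof(struct rmnet_map_header, pkt_len) is small, so use a
	 * larger made-up offset to show a nonzero MSB part as well.
	 */
	uint32_t offset = 0x45;

	uint32_t lsb = offset & field_mask(HDR_OFST_PKT_SIZE_FMASK);
	uint32_t msb = offset >> __builtin_popcount(HDR_OFST_PKT_SIZE_FMASK);

	printf("HDR gets %#x, HDR_EXT gets %#x (within mask %#x)\n",
	       lsb, msb, HDR_OFST_PKT_SIZE_MSB_FMASK);	/* 0x5 and 0x1 */

	return 0;
}
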
76 changes: 52 additions & 24 deletions drivers/net/ipa/ipa_main.c
@@ -230,8 +230,10 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
} else {
} else if (ipa->version < IPA_VERSION_4_5) {
val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
} else {
/* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
}

val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
@@ -243,25 +245,47 @@
/* Configure DDR and PCIe max read/write QSB values */
static void ipa_hardware_config_qsb(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
u32 max0;
u32 max1;
u32 val;

/* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
/* QMB_0 represents DDR; QMB_1 represents PCIe */
val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
if (ipa->version == IPA_VERSION_4_2)
val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
else
val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
switch (version) {
case IPA_VERSION_4_2:
max1 = 0; /* PCIe not present */
break;
case IPA_VERSION_4_5:
max1 = 8;
break;
default:
max1 = 4;
break;
}
val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK);
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);

if (ipa->version == IPA_VERSION_3_5_1) {
val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
} else {
val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
if (ipa->version == IPA_VERSION_4_2)
val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
else
val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
max1 = 12;
switch (version) {
case IPA_VERSION_3_5_1:
max0 = 8;
break;
case IPA_VERSION_4_0:
case IPA_VERSION_4_1:
max0 = 12;
break;
case IPA_VERSION_4_2:
max0 = 12;
max1 = 0; /* PCIe not present */
break;
case IPA_VERSION_4_5:
max0 = 16;
break;
}
val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK);
val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK);
if (version != IPA_VERSION_3_5_1) {
/* GEN_QMB_0_MAX_READS_BEATS is 0 */
/* GEN_QMB_1_MAX_READS_BEATS is 0 */
}
@@ -294,7 +318,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
*/
static void ipa_hardware_dcd_config(struct ipa *ipa)
{
/* Recommended values for IPA 3.5 according to IPA HPG */
/* Recommended values for IPA 3.5 and later according to IPA HPG */
ipa_idle_indication_cfg(ipa, 256, false);
}

@@ -310,20 +334,24 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
u32 granularity;
u32 val;

/* Fill in backward-compatibility register, based on version */
val = ipa_reg_bcr_val(ipa->version);
iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
/* IPA v4.5 has no backward compatibility register */
if (version < IPA_VERSION_4_5) {
val = ipa_reg_bcr_val(version);
iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
}

if (ipa->version != IPA_VERSION_3_5_1) {
/* Enable open global clocks (hardware workaround) */
/* Implement some hardware workarounds */
if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) {
/* Enable open global clocks (not needed for IPA v4.5) */
val = GLOBAL_FMASK;
val |= GLOBAL_2X_CLK_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);

/* Disable PA mask to allow HOLB drop (hardware workaround) */
/* Disable PA mask to allow HOLB drop */
val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
val &= ~PA_MASK_EN_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
@@ -340,8 +368,8 @@ static void ipa_hardware_config(struct ipa *ipa)
iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);

/* IPA v4.2 does not support hashed tables, so disable them */
if (ipa->version == IPA_VERSION_4_2) {
u32 offset = ipa_reg_filt_rout_hash_en_offset(ipa->version);
if (version == IPA_VERSION_4_2) {
u32 offset = ipa_reg_filt_rout_hash_en_offset(version);

iowrite32(0, ipa->reg_virt + offset);
}
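
For reference, a standalone summary of the QSB limits the rewritten ipa_hardware_config_qsb() above programs for each version, derived from the default-then-override switch statements in the hunk (illustration only, not driver code):

#include <stdio.h>

struct qsb_vals {
	const char *version;
	unsigned int wr0, wr1;	/* GEN_QMB_0/1_MAX_WRITES */
	unsigned int rd0, rd1;	/* GEN_QMB_0/1_MAX_READS */
};

static const struct qsb_vals table[] = {
	{ "3.5.1",     8, 4,  8, 12 },
	{ "4.0 / 4.1", 8, 4, 12, 12 },
	{ "4.2",       8, 0, 12,  0 },	/* PCIe (QMB_1) not present */
	{ "4.5",       8, 8, 16, 12 },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("IPA v%-9s  max writes %u/%u  max reads %u/%u\n",
		       table[i].version, table[i].wr0, table[i].wr1,
		       table[i].rd0, table[i].rd1);

	return 0;
}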