Merge branch 'cxgb4-next'
Hariprasad Shenai says:

====================
add meminfo, bist status and misc. fixes

This patch series adds the following:
- Add support to dump the memory address ranges of the various HW modules
- Add support to dump EDC BIST status during ECC errors
- Read the correct bits of the PL Who Am I register for T6 adapters
  and update the T6 register ranges

This patch series has been created against the net-next tree and includes
patches for the cxgb4 and cxgb4vf drivers.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any review comments.

V2: PATCH 3/4 ("cxgb4/cxgb4vf: read the correct bits of PL Who Am I
    register") Fixed the switch statement in get_chip_type() and made some more
    style fixes based on review comments from Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Aug 4, 2015
2 parents a6affd2 + f109ff1 commit d1b22e4
Showing 5 changed files with 506 additions and 20 deletions.
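As a quick orientation before the diffs: the new node added by the first patch surfaces the adapter memory map through debugfs, so it can be read like any other text file. The sketch below is a minimal, hypothetical userspace reader and is not part of the patch; the PCI slot name in the path and the sample output line in the comment are illustrative assumptions (the output format follows mem_region_show() in the diff).

/* Hypothetical userspace reader for the new debugfs node (not part of the
 * patch).  Assumes debugfs is mounted and the adapter sits at PCI slot
 * 0000:01:00.4; substitute your own device directory.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/cxgb4/0000:01:00.4/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "EDC0:          0x0-0x3fffff [4.00 MiB]" */
	fclose(f);
	return 0;
}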
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c (285 additions, 0 deletions)
@@ -2275,6 +2275,290 @@ static const struct file_operations blocked_fl_fops = {
	.llseek  = generic_file_llseek,
};

struct mem_desc {
	unsigned int base;
	unsigned int limit;
	unsigned int idx;
};

static int mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct mem_desc *)a)->base -
	       ((const struct mem_desc *)b)->base;
}

static void mem_region_show(struct seq_file *seq, const char *name,
			    unsigned int from, unsigned int to)
{
	char buf[40];

	string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
			sizeof(buf));
	seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
}

static int meminfo_show(struct seq_file *seq, void *v)
{
	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
					       "MC0:", "MC1:"};
	static const char * const region[] = {
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:",
		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
		"On-chip queues:"
	};

	int i, n;
	u32 lo, hi, used, alloc;
	struct mem_desc avail[4];
	struct mem_desc mem[ARRAY_SIZE(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;
	struct adapter *adap = seq->private;

	for (i = 0; i < ARRAY_SIZE(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
		avail[i].base = EDRAM0_BASE_G(hi) << 20;
		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
		avail[i].base = EDRAM1_BASE_G(hi) << 20;
		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
		avail[i].idx = 1;
		i++;
	}

	if (is_t5(adap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
			avail[i].limit =
				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
			avail[i].idx = 3;
			i++;
		}
		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
			avail[i].limit =
				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
			avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
			avail[i].limit =
				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
			avail[i].idx = 2;
			i++;
		}
	}
	if (!i)				/* no memory available */
		return 0;
	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);

	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(adap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A); \
	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(region);
	if (!is_t4(adap->params.chip)) {
		u32 size = 0;
		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);

		if (is_t5(adap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(adap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = adap->vres.ocq.start;
	if (adap->vres.ocq.size)
		md->limit = md->base + adap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(region);	/* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (avail[n].limit < avail[n + 1].base)
			(md++)->base = avail[n].limit;
	if (avail[n].limit)
		(md++)->base = avail[n].limit;

	n = md - mem;
	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);

	for (lo = 0; lo < i; lo++)
		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
				avail[lo].limit - 1);

	seq_putc(seq, '\n');
	for (i = 0; i < n; i++) {
		if (mem[i].idx >= ARRAY_SIZE(region))
			continue;		/* skip holes */
		if (!mem[i].limit)
			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
		mem_region_show(seq, region[mem[i].idx], mem[i].base,
				mem[i].limit);
	}

	seq_putc(seq, '\n');
	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	mem_region_show(seq, "uP RAM:", lo, hi);

	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	mem_region_show(seq, "uP Extmem2:", lo, hi);

	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
		   PMRXMAXPAGE_G(lo),
		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
		   (lo & PMRXNUMCHN_F) ? 2 : 1);

	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
		   PMTXMAXPAGE_G(lo),
		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
	seq_printf(seq, "%u p-structs\n\n",
		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(adap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		/* For T6 these are MAC buffer groups */
		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
			   i, used, alloc);
	}
	for (i = 0; i < adap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(adap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(adap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		/* For T6 these are MAC buffer groups */
		seq_printf(seq,
			   "Loopback %d using %u pages out of %u allocated\n",
			   i, used, alloc);
	}
	return 0;
}

static int meminfo_open(struct inode *inode, struct file *file)
{
	return single_open(file, meminfo_show, inode->i_private);
}

static const struct file_operations meminfo_fops = {
	.owner   = THIS_MODULE,
	.open    = meminfo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2342,6 +2626,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
};

/* Debug FS nodes common to all T5 and later adapters.
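The heart of meminfo_show() above is a small bookkeeping pattern: collect every region's base address, sort by base, let any entry whose limit is still zero run up to the next entry's base, and append hole entries wherever consecutive available memories (EDC/MC) do not touch. The standalone sketch below reproduces that pattern in plain userspace C with invented addresses and qsort() in place of the kernel's sort(); its comparator also uses an explicit comparison rather than the driver's subtraction, a deliberate deviation to avoid wrap-around on large unsigned bases.

/* Standalone sketch of the sort-and-fill pattern used by meminfo_show();
 * region names and addresses are invented, qsort() stands in for sort().
 */
#include <stdio.h>
#include <stdlib.h>

struct mem_desc {
	unsigned int base;
	unsigned int limit;	/* 0 = "ends where the next region starts" */
	const char *name;
};

static int mem_desc_cmp(const void *a, const void *b)
{
	unsigned int ba = ((const struct mem_desc *)a)->base;
	unsigned int bb = ((const struct mem_desc *)b)->base;

	return (ba > bb) - (ba < bb);	/* avoids overflow of a plain subtraction */
}

int main(void)
{
	struct mem_desc mem[] = {
		{ 0x300000, 0,        "TCBs:" },
		{ 0x000000, 0,        "DBQ contexts:" },
		{ 0x100000, 0x2fffff, "Tx payload:" },
	};
	int n = sizeof(mem) / sizeof(mem[0]);

	qsort(mem, n, sizeof(mem[0]), mem_desc_cmp);
	for (int i = 0; i < n; i++) {
		unsigned int limit = mem[i].limit;

		if (!limit)	/* open-ended: runs up to the next region */
			limit = i < n - 1 ? mem[i + 1].base - 1 : ~0U;
		printf("%-15s %#x-%#x\n", mem[i].name, mem[i].base, limit);
	}
	return 0;
}

In the driver the same trick is applied twice: once to the avail[] array of physical memories and once to the mem[] array of functional regions, with the hole entries hidden from the region table by giving them an out-of-range idx.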
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c (33 additions, 1 deletion)
@@ -4551,13 +4551,41 @@ static void free_some_resources(struct adapter *adapter)
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	int ver, chip = 0;
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	switch (ver) {
	case CHELSIO_T4:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	case CHELSIO_T6:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		break;
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}
	return chip;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

@@ -4586,7 +4614,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);

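The cxgb4_main.c change matters because T6 parts report the source PF in a different field of the PL Who Am I register, hence the SOURCEPF_G()/T6_SOURCEPF_G() selection in init_one(), and the chip generation that drives that choice comes from the top nibble of the PCI device ID, which is what get_chip_type() switches on. Below is a tiny standalone sketch of that nibble check; the device IDs only illustrate the 0x4xxx/0x5xxx/0x6xxx numbering pattern and are not an authoritative list.

/* Sketch (not driver code): derive the chip generation from the top nibble
 * of the PCI device ID, as get_chip_type() does.
 */
#include <stdio.h>

static unsigned int chip_version(unsigned short device_id)
{
	return device_id >> 12;	/* 4 = T4, 5 = T5, 6 = T6 */
}

int main(void)
{
	const unsigned short ids[] = { 0x4401, 0x5401, 0x6401 };

	for (int i = 0; i < 3; i++)
		printf("device 0x%04x -> T%u\n", ids[i], chip_version(ids[i]));
	return 0;
}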