From ebf78aa61934216fa6a7e5df9c78600d516b6489 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 28 Dec 2010 09:16:54 +0000
Subject: [PATCH]

--- yaml ---
r: 225782
b: refs/heads/master
c: e8a7ba86ff993311f8712e5b3bb2e3892e82df5f
h: refs/heads/master
v: v3
---
 [refs]                            |   2 +-
 trunk/drivers/mmc/host/mmci.c     | 207 +++++-------------------------
 trunk/drivers/mmc/host/mmci.h     |   9 --
 trunk/drivers/serial/amba-pl011.c |   6 +-
 4 files changed, 36 insertions(+), 188 deletions(-)

diff --git a/[refs] b/[refs]
index 45ec426bd131..01e7c1645765 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8c11a94d86eb5489dc665bc566bf624e329d89fa
+refs/heads/master: e8a7ba86ff993311f8712e5b3bb2e3892e82df5f
diff --git a/trunk/drivers/mmc/host/mmci.c b/trunk/drivers/mmc/host/mmci.c
index 563022825667..87b4fc6c98c2 100644
--- a/trunk/drivers/mmc/host/mmci.c
+++ b/trunk/drivers/mmc/host/mmci.c
@@ -19,7 +19,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -46,12 +45,6 @@ static unsigned int fmax = 515633;
  *	      is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  *		  is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- *		and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- *		using DMA.
- * @sdio: variant supports SDIO
- * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
 struct variant_data {
 	unsigned int		clkreg;
@@ -59,10 +52,6 @@ struct variant_data {
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
-	bool			broken_blockend;
-	bool			broken_blockend_dma;
-	bool			sdio;
-	bool			st_clkdiv;
 };
 
 static struct variant_data variant_arm = {
@@ -76,8 +65,6 @@ static struct variant_data variant_u300 = {
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
-	.broken_blockend_dma	= true,
-	.sdio			= true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -86,11 +73,7 @@
 	.fifosize		= 30 * 4,
 	.fifohalfsize		= 8 * 4,
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
-	.broken_blockend	= true,
-	.sdio			= true,
-	.st_clkdiv		= true,
 };
-
 /*
  * This must be called with host->lock held
  */
@@ -103,22 +86,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	if (desired >= host->mclk) {
 		clk = MCI_CLK_BYPASS;
 		host->cclk = host->mclk;
-	} else if (variant->st_clkdiv) {
-		/*
-		 * DB8500 TRM says f = mclk / (clkdiv + 2)
-		 * => clkdiv = (mclk / f) - 2
-		 * Round the divider up so we don't exceed the max
-		 * frequency
-		 */
-		clk = DIV_ROUND_UP(host->mclk, desired) - 2;
-		if (clk >= 256)
-			clk = 255;
-		host->cclk = host->mclk / (clk + 2);
 	} else {
-		/*
-		 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
-		 * => clkdiv = mclk / (2 * f) - 1
-		 */
 		clk = host->mclk / (2 * desired) - 1;
 		if (clk >= 256)
 			clk = 255;
@@ -161,26 +129,10 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	spin_lock(&host->lock);
 }
 
-static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
-{
-	void __iomem *base = host->base;
-
-	if (host->singleirq) {
-		unsigned int mask0 = readl(base + MMCIMASK0);
-
-		mask0 &= ~MCI_IRQ1MASK;
-		mask0 |= mask;
-
-		writel(mask0, base + MMCIMASK0);
-	}
-
-	writel(mask, base + MMCIMASK1);
-}
-
 static void mmci_stop_data(struct mmci_host *host)
 {
 	writel(0, host->base + MMCIDATACTRL);
-	mmci_set_mask1(host, 0);
+	writel(0, host->base + MMCIMASK1);
 
 	host->data = NULL;
 }
@@ -210,8 +162,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	host->data = data;
 	host->size = data->blksz * data->blocks;
 	host->data_xfered = 0;
-	host->blockend = false;
-	host->dataend = false;
 
 	mmci_init_sg(host, data);
 
@@ -246,14 +196,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 		irqmask = MCI_TXFIFOHALFEMPTYMASK;
 	}
 
-	/* The ST Micro variants has a special bit to enable SDIO */
-	if (variant->sdio && host->mmc->card)
-		if (mmc_card_sdio(host->mmc->card))
-			datactrl |= MCI_ST_DPSM_SDIOEN;
-
 	writel(datactrl, base + MMCIDATACTRL);
 	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
-	mmci_set_mask1(host, irqmask);
+	writel(irqmask, base + MMCIMASK1);
 }
 
 static void
@@ -288,9 +233,20 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	struct variant_data *variant = host->variant;
-
-	/* First check for errors */
+	if (status & MCI_DATABLOCKEND) {
+		host->data_xfered += data->blksz;
+#ifdef CONFIG_ARCH_U300
+		/*
+		 * On the U300 some signal or other is
+		 * badly routed so that a data write does
+		 * not properly terminate with a MCI_DATAEND
+		 * status flag. This quirk will make writes
+		 * work again.
+		 */
+		if (data->flags & MMC_DATA_WRITE)
+			status |= MCI_DATAEND;
+#endif
+	}
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
 		if (status & MCI_DATACRCFAIL)
@@ -299,10 +255,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 			data->error = -ETIMEDOUT;
 		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
 			data->error = -EIO;
-
-		/* Force-complete the transaction */
-		host->blockend = true;
-		host->dataend = true;
+		status |= MCI_DATAEND;
 
 		/*
 		 * We hit an error condition. Ensure that any data
@@ -320,64 +273,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 			local_irq_restore(flags);
 		}
 	}
-
-	/*
-	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
-	 * is always sent first, and we increase the
-	 * transfered number of bytes for that IRQ. Then
-	 * MCI_DATAEND follows and we conclude the transaction.
-	 *
-	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-	 * doesn't seem to immediately clear from the status,
-	 * so we can't use it keep count when only one irq is
-	 * used because the irq will hit for other reasons, and
-	 * then the flag is still up. So we use the MCI_DATAEND
-	 * IRQ at the end of the entire transfer because
-	 * MCI_DATABLOCKEND is broken.
-	 *
-	 * In the U300, the IRQs can arrive out-of-order,
-	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-	 * so for this case we use the flags "blockend" and
-	 * "dataend" to make sure both IRQs have arrived before
-	 * concluding the transaction. (This does not apply
-	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-	 * at all.) In DMA mode it suffers from the same problem
-	 * as the Ux500.
-	 */
-	if (status & MCI_DATABLOCKEND) {
-		/*
-		 * Just being a little over-cautious, we do not
-		 * use this progressive update if the hardware blockend
-		 * flag is unreliable: since it can stay high between
-		 * IRQs it will corrupt the transfer counter.
-		 */
-		if (!variant->broken_blockend)
-			host->data_xfered += data->blksz;
-		host->blockend = true;
-	}
-
-	if (status & MCI_DATAEND)
-		host->dataend = true;
-
-	/*
-	 * On variants with broken blockend we shall only wait for dataend,
-	 * on others we must sync with the blockend signal since they can
-	 * appear out-of-order.
-	 */
-	if (host->dataend && (host->blockend || variant->broken_blockend)) {
+	if (status & MCI_DATAEND) {
 		mmci_stop_data(host);
 
-		/* Reset these flags */
-		host->blockend = false;
-		host->dataend = false;
-
-		/*
-		 * Variants with broken blockend flags need to handle the
-		 * end of the entire transfer here.
-		 */
-		if (variant->broken_blockend && !data->error)
-			host->data_xfered += data->blksz * data->blocks;
-
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
 		} else {
@@ -458,32 +356,7 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
 			variant->fifosize : variant->fifohalfsize;
 		count = min(remain, maxcnt);
 
-		/*
-		 * The ST Micro variant for SDIO transfer sizes
-		 * less then 8 bytes should have clock H/W flow
-		 * control disabled.
-		 */
-		if (variant->sdio &&
-		    mmc_card_sdio(host->mmc->card)) {
-			if (count < 8)
-				writel(readl(host->base + MMCICLOCK) &
-					~variant->clkreg_enable,
-					host->base + MMCICLOCK);
-			else
-				writel(readl(host->base + MMCICLOCK) |
-					variant->clkreg_enable,
-					host->base + MMCICLOCK);
-		}
-
-		/*
-		 * SDIO especially may want to send something that is
-		 * not divisible by 4 (as opposed to card sectors
-		 * etc), and the FIFO only accept full 32-bit writes.
-		 * So compensate by adding +3 on the count, a single
-		 * byte become a 32bit write, 7 bytes will be two
-		 * 32bit writes etc.
-		 */
-		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
+		writesl(base + MMCIFIFO, ptr, count >> 2);
 
 		ptr += count;
 		remain -= count;
@@ -564,7 +437,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	 * "any data available" mode.
 	 */
 	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
-		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
+		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
 
 	/*
 	 * If we run out of data, disable the data IRQs; this
@@ -573,7 +446,7 @@
 	 * stops us racing with our data end IRQ.
 	 */
 	if (host->size == 0) {
-		mmci_set_mask1(host, 0);
+		writel(0, base + MMCIMASK1);
 		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
 	}
 
@@ -596,14 +469,6 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 		struct mmc_data *data;
 
 		status = readl(host->base + MMCISTATUS);
-
-		if (host->singleirq) {
-			if (status & readl(host->base + MMCIMASK1))
-				mmci_pio_irq(irq, dev_id);
-
-			status &= ~MCI_IRQ1MASK;
-		}
-
 		status &= readl(host->base + MMCIMASK0);
 		writel(status, host->base + MMCICLEAR);
 
@@ -770,7 +635,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
-	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -942,30 +806,20 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	if (ret)
 		goto unmap;
 
-	if (dev->irq[1] == NO_IRQ)
-		host->singleirq = true;
-	else {
-		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
-				  DRIVER_NAME " (pio)", host);
-		if (ret)
-			goto irq0_free;
-	}
-
-	mask = MCI_IRQENABLE;
-	/* Don't use the datablockend flag if it's broken */
-	if (variant->broken_blockend)
-		mask &= ~MCI_DATABLOCKEND;
+	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
+	if (ret)
+		goto irq0_free;
 
-	writel(mask, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
 	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
-		 mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
-		 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
-
 	mmc_add_host(mmc);
 
+	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
+		 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
+		 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+
 	return 0;
 
 irq0_free:
@@ -1010,8 +864,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCIDATACTRL);
 
 		free_irq(dev->irq[0], host);
-		if (!host->singleirq)
-			free_irq(dev->irq[1], host);
+		free_irq(dev->irq[1], host);
 
 		if (host->gpio_wp != -ENOSYS)
 			gpio_free(host->gpio_wp);
diff --git a/trunk/drivers/mmc/host/mmci.h b/trunk/drivers/mmc/host/mmci.h
index df06f01aac89..4ae887fc0189 100644
--- a/trunk/drivers/mmc/host/mmci.h
+++ b/trunk/drivers/mmc/host/mmci.h
@@ -139,11 +139,6 @@
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
 	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
 
-/* These interrupts are directed to IRQ1 when two IRQ lines are available */
-#define MCI_IRQ1MASK \
-	(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
-	 MCI_TXFIFOHALFEMPTYMASK)
-
 #define NR_SG		16
 
 struct clk;
@@ -159,7 +154,6 @@ struct mmci_host {
 	int			gpio_cd;
 	int			gpio_wp;
 	int			gpio_cd_irq;
-	bool			singleirq;
 
 	unsigned int		data_xfered;
 
@@ -177,9 +171,6 @@ struct mmci_host {
 	struct timer_list	timer;
 	unsigned int		oldstat;
 
-	bool			blockend;
-	bool			dataend;
-
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
diff --git a/trunk/drivers/serial/amba-pl011.c b/trunk/drivers/serial/amba-pl011.c
index 6ca7a44f29c2..2c07939be02c 100644
--- a/trunk/drivers/serial/amba-pl011.c
+++ b/trunk/drivers/serial/amba-pl011.c
@@ -76,6 +76,7 @@ struct uart_amba_port {
 	unsigned int		lcrh_rx;	/* vendor-specific */
 	bool			oversampling;	/* vendor-specific */
 	bool			autorts;
+	char			type[12];
 };
 
 /* There is by now at least one vendor with differing details, so handle it */
@@ -622,7 +623,8 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 
 static const char *pl011_type(struct uart_port *port)
 {
-	return port->type == PORT_AMBA ? "AMBA/PL011" : NULL;
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
+	return uap->port.type == PORT_AMBA ? uap->type : NULL;
 }
 
 /*
@@ -872,6 +874,8 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 	uap->port.flags = UPF_BOOT_AUTOCONF;
 	uap->port.line = i;
 
+	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
+
 	amba_ports[i] = uap;
 
 	amba_set_drvdata(dev, uap);