From dee5af7ab8903620d4dbdc69f4ae47327db464ef Mon Sep 17 00:00:00 2001 From: Mark Lord Date: Mon, 10 Oct 2005 17:53:58 -0400 Subject: [PATCH] --- yaml --- r: 9710 b: refs/heads/master c: edea3ab58f8edd5f72d31f891ab4f34382e97e3a h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/drivers/scsi/Kconfig | 14 +- trunk/drivers/scsi/Makefile | 1 + trunk/drivers/scsi/ahci.c | 30 +- trunk/drivers/scsi/libata-core.c | 123 +----- trunk/drivers/scsi/libata-scsi.c | 14 +- trunk/drivers/scsi/pdc_adma.c | 737 +++++++++++++++++++++++++++++++ trunk/drivers/scsi/sata_qstor.c | 8 +- trunk/drivers/scsi/sata_sx4.c | 13 +- trunk/include/linux/libata.h | 27 +- 10 files changed, 791 insertions(+), 178 deletions(-) create mode 100644 trunk/drivers/scsi/pdc_adma.c diff --git a/[refs] b/[refs] index 4ed297d73d7f..d43f83ed2f08 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: c4052da6f0c01a0b059d125d72bb934d0980b798 +refs/heads/master: edea3ab58f8edd5f72d31f891ab4f34382e97e3a diff --git a/trunk/drivers/scsi/Kconfig b/trunk/drivers/scsi/Kconfig index be96cb78e3b5..e2845b442776 100644 --- a/trunk/drivers/scsi/Kconfig +++ b/trunk/drivers/scsi/Kconfig @@ -489,11 +489,11 @@ config SCSI_SATA_NV If unsure, say N. -config SCSI_SATA_PROMISE - tristate "Promise SATA TX2/TX4 support" +config SCSI_PDC_ADMA + tristate "Pacific Digital ADMA support" depends on SCSI_SATA && PCI help - This option enables support for Promise Serial ATA TX2/TX4. + This option enables support for Pacific Digital ADMA controllers If unsure, say N. @@ -505,6 +505,14 @@ config SCSI_SATA_QSTOR If unsure, say N. +config SCSI_SATA_PROMISE + tristate "Promise SATA TX2/TX4 support" + depends on SCSI_SATA && PCI + help + This option enables support for Promise Serial ATA TX2/TX4. + + If unsure, say N. + config SCSI_SATA_SX4 tristate "Promise SATA SX4 support" depends on SCSI_SATA && PCI && EXPERIMENTAL diff --git a/trunk/drivers/scsi/Makefile b/trunk/drivers/scsi/Makefile index e2e3d8671930..2d4439826c08 100644 --- a/trunk/drivers/scsi/Makefile +++ b/trunk/drivers/scsi/Makefile @@ -138,6 +138,7 @@ obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o +obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o obj-$(CONFIG_ARM) += arm/ diff --git a/trunk/drivers/scsi/ahci.c b/trunk/drivers/scsi/ahci.c index f0d8f89b5d40..5ec866b00479 100644 --- a/trunk/drivers/scsi/ahci.c +++ b/trunk/drivers/scsi/ahci.c @@ -314,15 +314,8 @@ static int ahci_port_start(struct ata_port *ap) return -ENOMEM; memset(pp, 0, sizeof(*pp)); - ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL); - if (!ap->pad) { - kfree(pp); - return -ENOMEM; - } - mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); if (!mem) { - dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); kfree(pp); return -ENOMEM; } @@ -398,7 +391,6 @@ static void ahci_port_stop(struct ata_port *ap) ap->private_data = NULL; dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, pp->cmd_slot, pp->cmd_slot_dma); - dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); kfree(pp); } @@ -484,23 +476,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf) static void ahci_fill_sg(struct ata_queued_cmd *qc) { struct ahci_port_priv *pp = qc->ap->private_data; - struct scatterlist *sg; - struct ahci_sg *ahci_sg; + unsigned int i; VPRINTK("ENTER\n"); /* * Next, the S/G list. 
*/ - ahci_sg = pp->cmd_tbl_sg; - ata_for_each_sg(sg, qc) { - dma_addr_t addr = sg_dma_address(sg); - u32 sg_len = sg_dma_len(sg); - - ahci_sg->addr = cpu_to_le32(addr & 0xffffffff); - ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); - ahci_sg->flags_size = cpu_to_le32(sg_len - 1); - ahci_sg++; + for (i = 0; i < qc->n_elem; i++) { + u32 sg_len; + dma_addr_t addr; + + addr = sg_dma_address(&qc->sg[i]); + sg_len = sg_dma_len(&qc->sg[i]); + + pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff); + pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); + pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1); } } diff --git a/trunk/drivers/scsi/libata-core.c b/trunk/drivers/scsi/libata-core.c index 9aa93087d495..d568914c4344 100644 --- a/trunk/drivers/scsi/libata-core.c +++ b/trunk/drivers/scsi/libata-core.c @@ -2242,9 +2242,8 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) static void ata_sg_clean(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg = qc->__sg; + struct scatterlist *sg = qc->sg; int dir = qc->dma_dir; - void *pad_buf = NULL; assert(qc->flags & ATA_QCFLAG_DMAMAP); assert(sg != NULL); @@ -2254,35 +2253,14 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) DPRINTK("unmapping %u sg elements\n", qc->n_elem); - /* if we padded the buffer out to 32-bit bound, and data - * xfer direction is from-device, we must copy from the - * pad buffer back into the supplied buffer - */ - if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) - pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); - - if (qc->flags & ATA_QCFLAG_SG) { + if (qc->flags & ATA_QCFLAG_SG) dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); - /* restore last sg */ - sg[qc->orig_n_elem - 1].length += qc->pad_len; - if (pad_buf) { - struct scatterlist *psg = &qc->pad_sgent; - void *addr = kmap_atomic(psg->page, KM_IRQ0); - memcpy(addr + psg->offset, pad_buf, qc->pad_len); - kunmap_atomic(psg->page, KM_IRQ0); - } - } else { + else dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), dir); - /* restore sg */ - sg->length += qc->pad_len; - if (pad_buf) - memcpy(qc->buf_virt + sg->length - qc->pad_len, - pad_buf, qc->pad_len); - } qc->flags &= ~ATA_QCFLAG_DMAMAP; - qc->__sg = NULL; + qc->sg = NULL; } /** @@ -2298,15 +2276,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) */ static void ata_fill_sg(struct ata_queued_cmd *qc) { + struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int idx; + unsigned int idx, nelem; - assert(qc->__sg != NULL); + assert(sg != NULL); assert(qc->n_elem > 0); idx = 0; - ata_for_each_sg(sg, qc) { + for (nelem = qc->n_elem; nelem; nelem--,sg++) { u32 addr, offset; u32 sg_len, len; @@ -2397,12 +2375,11 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) qc->flags |= ATA_QCFLAG_SINGLE; memset(&qc->sgent, 0, sizeof(qc->sgent)); - qc->__sg = &qc->sgent; + qc->sg = &qc->sgent; qc->n_elem = 1; - qc->orig_n_elem = 1; qc->buf_virt = buf; - sg = qc->__sg; + sg = qc->sg; sg->page = virt_to_page(buf); sg->offset = (unsigned long) buf & ~PAGE_MASK; sg->length = buflen; @@ -2426,9 +2403,8 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem) { qc->flags |= ATA_QCFLAG_SG; - qc->__sg = sg; + qc->sg = sg; qc->n_elem = n_elem; - qc->orig_n_elem = n_elem; } /** @@ -2448,32 +2424,9 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; int dir = qc->dma_dir; - 
struct scatterlist *sg = qc->__sg; + struct scatterlist *sg = qc->sg; dma_addr_t dma_address; - /* we must lengthen transfers to end on a 32-bit boundary */ - qc->pad_len = sg->length & 3; - if (qc->pad_len) { - void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); - struct scatterlist *psg = &qc->pad_sgent; - - assert(qc->dev->class == ATA_DEV_ATAPI); - - memset(pad_buf, 0, ATA_DMA_PAD_SZ); - - if (qc->tf.flags & ATA_TFLAG_WRITE) - memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, - qc->pad_len); - - sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); - sg_dma_len(psg) = ATA_DMA_PAD_SZ; - /* trim sg */ - sg->length -= qc->pad_len; - - DPRINTK("padding done, sg->length=%u pad_len=%u\n", - sg->length, qc->pad_len); - } - dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, sg->length, dir); if (dma_mapping_error(dma_address)) @@ -2505,47 +2458,12 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) static int ata_sg_setup(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg = qc->__sg; - struct scatterlist *lsg = &sg[qc->n_elem - 1]; + struct scatterlist *sg = qc->sg; int n_elem, dir; VPRINTK("ENTER, ata%u\n", ap->id); assert(qc->flags & ATA_QCFLAG_SG); - /* we must lengthen transfers to end on a 32-bit boundary */ - qc->pad_len = lsg->length & 3; - if (qc->pad_len) { - void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); - struct scatterlist *psg = &qc->pad_sgent; - unsigned int offset; - - assert(qc->dev->class == ATA_DEV_ATAPI); - - memset(pad_buf, 0, ATA_DMA_PAD_SZ); - - /* - * psg->page/offset are used to copy to-be-written - * data in this function or read data in ata_sg_clean. - */ - offset = lsg->offset + lsg->length - qc->pad_len; - psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); - psg->offset = offset_in_page(offset); - - if (qc->tf.flags & ATA_TFLAG_WRITE) { - void *addr = kmap_atomic(psg->page, KM_IRQ0); - memcpy(pad_buf, addr + psg->offset, qc->pad_len); - kunmap_atomic(psg->page, KM_IRQ0); - } - - sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); - sg_dma_len(psg) = ATA_DMA_PAD_SZ; - /* trim last sg */ - lsg->length -= qc->pad_len; - - DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", - qc->n_elem - 1, lsg->length, qc->pad_len); - } - dir = qc->dma_dir; n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); if (n_elem < 1) @@ -2823,7 +2741,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, static void ata_pio_sector(struct ata_queued_cmd *qc) { int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - struct scatterlist *sg = qc->__sg; + struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; struct page *page; unsigned int offset; @@ -2873,7 +2791,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) { int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - struct scatterlist *sg = qc->__sg; + struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; struct page *page; unsigned char *buf; @@ -2906,7 +2824,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) return; } - sg = &qc->__sg[qc->cursg]; + sg = &qc->sg[qc->cursg]; page = sg->page; offset = sg->offset + qc->cursg_ofs; @@ -3267,7 +3185,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, qc = ata_qc_new(ap); if (qc) { - qc->__sg = NULL; + qc->sg = NULL; qc->flags = 0; qc->scsicmd = NULL; qc->ap = ap; @@ -3963,12 +3881,6 @@ int ata_port_start (struct ata_port *ap) if (!ap->prd) return 
-ENOMEM; - ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL); - if (!ap->pad) { - dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); - return -ENOMEM; - } - DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); return 0; @@ -3991,7 +3903,6 @@ void ata_port_stop (struct ata_port *ap) struct device *dev = ap->host_set->dev; dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); - dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); } void ata_host_stop (struct ata_host_set *host_set) diff --git a/trunk/drivers/scsi/libata-scsi.c b/trunk/drivers/scsi/libata-scsi.c index 4cf43de4060e..c64169ca7ff0 100644 --- a/trunk/drivers/scsi/libata-scsi.c +++ b/trunk/drivers/scsi/libata-scsi.c @@ -158,10 +158,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, qc->scsidone = done; if (cmd->use_sg) { - qc->__sg = (struct scatterlist *) cmd->request_buffer; + qc->sg = (struct scatterlist *) cmd->request_buffer; qc->n_elem = cmd->use_sg; } else { - qc->__sg = &qc->sgent; + qc->sg = &qc->sgent; qc->n_elem = 1; } } else { @@ -362,16 +362,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev) */ blk_queue_max_sectors(sdev->request_queue, 2048); } - - /* - * SATA DMA transfers must be multiples of 4 byte, so - * we need to pad ATAPI transfers using an extra sg. - * Decrement max hw segments accordingly. - */ - if (dev->class == ATA_DEV_ATAPI) { - request_queue_t *q = sdev->request_queue; - blk_queue_max_hw_segments(q, q->max_hw_segments - 1); - } } return 0; /* scsi layer doesn't check return value, sigh */ diff --git a/trunk/drivers/scsi/pdc_adma.c b/trunk/drivers/scsi/pdc_adma.c new file mode 100644 index 000000000000..ce006a5b4533 --- /dev/null +++ b/trunk/drivers/scsi/pdc_adma.c @@ -0,0 +1,737 @@ +/* + * pdc_adma.c - Pacific Digital Corporation ADMA + * + * Maintained by: Mark Lord + * + * Copyright 2005 Mark Lord + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + * Supports ATA disks in single-packet ADMA mode. + * Uses PIO for everything else. + * + * TODO: Use ADMA transfers for ATAPI devices, when possible. + * This requires careful attention to a number of quirks of the chip. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include "scsi.h" +#include <scsi/scsi_host.h> +#include <asm/io.h> +#include <linux/libata.h> + +#define DRV_NAME "pdc_adma" +#define DRV_VERSION "0.01" + +/* macro to calculate base address for ATA regs */ +#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) + +/* macro to calculate base address for ADMA regs */ +#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20)) + +enum { + ADMA_PORTS = 2, + ADMA_CPB_BYTES = 40, + ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16, + ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES, + + ADMA_DMA_BOUNDARY = 0xffffffff, + + /* global register offsets */ + ADMA_MODE_LOCK = 0x00c7, + + /* per-channel register offsets */ + ADMA_CONTROL = 0x0000, /* ADMA control */ + ADMA_STATUS = 0x0002, /* ADMA status */ + ADMA_CPB_COUNT = 0x0004, /* CPB count */ + ADMA_CPB_CURRENT = 0x000c, /* current CPB address */ + ADMA_CPB_NEXT = 0x000c, /* next CPB address */ + ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */ + ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */ + ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */ + + /* ADMA_CONTROL register bits */ + aNIEN = (1 << 8), /* irq mask: 1==masked */ + aGO = (1 << 7), /* packet trigger ("Go!") */ + aRSTADM = (1 << 5), /* ADMA logic reset */ + aAUTEN = (1 << 3), /* packet trigger ("Go!") */ + aRSTA = (1 << 2), /* ATA hard reset */ + aPIOMD4 = 0x0003, /* PIO mode 4 */ + + /* ADMA_STATUS register bits */ + aPSD = (1 << 6), + aUIRQ = (1 << 4), + aPERR = (1 << 0), + + /* CPB bits */ + cDONE = (1 << 0), + cVLD = (1 << 0), + cDAT = (1 << 2), + cIEN = (1 << 3), + + /* PRD bits */ + pORD = (1 << 4), + pDIRO = (1 << 5), + pEND = (1 << 7), + + /* ATA register flags */ + rIGN = (1 << 5), + rEND = (1 << 7), + + /* ATA register addresses */ + ADMA_REGS_CONTROL = 0x0e, + ADMA_REGS_SECTOR_COUNT = 0x12, + ADMA_REGS_LBA_LOW = 0x13, + ADMA_REGS_LBA_MID = 0x14, + ADMA_REGS_LBA_HIGH = 0x15, + ADMA_REGS_DEVICE = 0x16, + ADMA_REGS_COMMAND = 0x17, + + /* PCI device IDs */ + board_1841_idx = 0, /* ADMA 2-port controller */ +}; + +typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t; + +struct adma_port_priv { + u8 *pkt; + dma_addr_t pkt_dma; + adma_state_t state; +}; + +static int adma_ata_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent); +static irqreturn_t adma_intr (int irq, void *dev_instance, + struct pt_regs *regs); +static int adma_port_start(struct ata_port *ap); +static void adma_host_stop(struct ata_host_set *host_set); +static void adma_port_stop(struct ata_port *ap); +static void adma_phy_reset(struct ata_port *ap); +static void adma_qc_prep(struct ata_queued_cmd *qc); +static int adma_qc_issue(struct ata_queued_cmd *qc); +static int adma_check_atapi_dma(struct ata_queued_cmd *qc); +static void adma_bmdma_stop(struct ata_queued_cmd *qc); +static u8 adma_bmdma_status(struct ata_port *ap); +static void adma_irq_clear(struct ata_port *ap); +static void adma_eng_timeout(struct ata_port *ap); + +static Scsi_Host_Template adma_ata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .ioctl = ata_scsi_ioctl, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ENABLE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ADMA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, + .bios_param =
ata_std_bios_param, +}; + +static struct ata_port_operations adma_ata_ops = { + .port_disable = ata_port_disable, + .tf_load = ata_tf_load, + .tf_read = ata_tf_read, + .check_status = ata_check_status, + .check_atapi_dma = adma_check_atapi_dma, + .exec_command = ata_exec_command, + .dev_select = ata_std_dev_select, + .phy_reset = adma_phy_reset, + .qc_prep = adma_qc_prep, + .qc_issue = adma_qc_issue, + .eng_timeout = adma_eng_timeout, + .irq_handler = adma_intr, + .irq_clear = adma_irq_clear, + .port_start = adma_port_start, + .port_stop = adma_port_stop, + .host_stop = adma_host_stop, + .bmdma_stop = adma_bmdma_stop, + .bmdma_status = adma_bmdma_status, +}; + +static struct ata_port_info adma_port_info[] = { + /* board_1841_idx */ + { + .sht = &adma_ata_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | + ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, + .pio_mask = 0x10, /* pio4 */ + .udma_mask = 0x1f, /* udma0-4 */ + .port_ops = &adma_ata_ops, + }, +}; + +static struct pci_device_id adma_ata_pci_tbl[] = { + { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_1841_idx }, + + { } /* terminate list */ +}; + +static struct pci_driver adma_ata_pci_driver = { + .name = DRV_NAME, + .id_table = adma_ata_pci_tbl, + .probe = adma_ata_init_one, + .remove = ata_pci_remove_one, +}; + +static int adma_check_atapi_dma(struct ata_queued_cmd *qc) +{ + return 1; /* ATAPI DMA not yet supported */ +} + +static void adma_bmdma_stop(struct ata_queued_cmd *qc) +{ + /* nothing */ +} + +static u8 adma_bmdma_status(struct ata_port *ap) +{ + return 0; +} + +static void adma_irq_clear(struct ata_port *ap) +{ + /* nothing */ +} + +static void adma_reset_engine(void __iomem *chan) +{ + /* reset ADMA to idle state */ + writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); + udelay(2); + writew(aPIOMD4, chan + ADMA_CONTROL); + udelay(2); +} + +static void adma_reinit_engine(struct ata_port *ap) +{ + struct adma_port_priv *pp = ap->private_data; + void __iomem *mmio_base = ap->host_set->mmio_base; + void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no); + + /* mask/clear ATA interrupts */ + writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr); + ata_check_status(ap); + + /* reset the ADMA engine */ + adma_reset_engine(chan); + + /* set in-FIFO threshold to 0x100 */ + writew(0x100, chan + ADMA_FIFO_IN); + + /* set CPB pointer */ + writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT); + + /* set out-FIFO threshold to 0x100 */ + writew(0x100, chan + ADMA_FIFO_OUT); + + /* set CPB count */ + writew(1, chan + ADMA_CPB_COUNT); + + /* read/discard ADMA status */ + readb(chan + ADMA_STATUS); +} + +static inline void adma_enter_reg_mode(struct ata_port *ap) +{ + void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); + + writew(aPIOMD4, chan + ADMA_CONTROL); + readb(chan + ADMA_STATUS); /* flush */ +} + +static void adma_phy_reset(struct ata_port *ap) +{ + struct adma_port_priv *pp = ap->private_data; + + pp->state = adma_state_idle; + adma_reinit_engine(ap); + ata_port_probe(ap); + ata_bus_reset(ap); +} + +static void adma_eng_timeout(struct ata_port *ap) +{ + struct adma_port_priv *pp = ap->private_data; + + if (pp->state != adma_state_idle) /* healthy paranoia */ + pp->state = adma_state_mmio; + adma_reinit_engine(ap); + ata_eng_timeout(ap); +} + +static int adma_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + struct adma_port_priv *pp = ap->private_data; + u8 *buf = pp->pkt; + int nelem, i = (2 + buf[3]) * 8; + u8 pFLAGS = pORD | 
((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0); + + for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) { + u32 addr; + u32 len; + + addr = (u32)sg_dma_address(sg); + *(__le32 *)(buf + i) = cpu_to_le32(addr); + i += 4; + + len = sg_dma_len(sg) >> 3; + *(__le32 *)(buf + i) = cpu_to_le32(len); + i += 4; + + if ((nelem + 1) == qc->n_elem) + pFLAGS |= pEND; + buf[i++] = pFLAGS; + buf[i++] = qc->dev->dma_mode & 0xf; + buf[i++] = 0; /* pPKLW */ + buf[i++] = 0; /* reserved */ + + *(__le32 *)(buf + i) + = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); + i += 4; + + VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", nelem, + (unsigned long)addr, len); + } + return i; +} + +static void adma_qc_prep(struct ata_queued_cmd *qc) +{ + struct adma_port_priv *pp = qc->ap->private_data; + u8 *buf = pp->pkt; + u32 pkt_dma = (u32)pp->pkt_dma; + int i = 0; + + VPRINTK("ENTER\n"); + + adma_enter_reg_mode(qc->ap); + if (qc->tf.protocol != ATA_PROT_DMA) { + ata_qc_prep(qc); + return; + } + + buf[i++] = 0; /* Response flags */ + buf[i++] = 0; /* reserved */ + buf[i++] = cVLD | cDAT | cIEN; + i++; /* cLEN, gets filled in below */ + + *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */ + i += 4; /* cNCPB */ + i += 4; /* cPRD, gets filled in below */ + + buf[i++] = 0; /* reserved */ + buf[i++] = 0; /* reserved */ + buf[i++] = 0; /* reserved */ + buf[i++] = 0; /* reserved */ + + /* ATA registers; must be a multiple of 4 */ + buf[i++] = qc->tf.device; + buf[i++] = ADMA_REGS_DEVICE; + if ((qc->tf.flags & ATA_TFLAG_LBA48)) { + buf[i++] = qc->tf.hob_nsect; + buf[i++] = ADMA_REGS_SECTOR_COUNT; + buf[i++] = qc->tf.hob_lbal; + buf[i++] = ADMA_REGS_LBA_LOW; + buf[i++] = qc->tf.hob_lbam; + buf[i++] = ADMA_REGS_LBA_MID; + buf[i++] = qc->tf.hob_lbah; + buf[i++] = ADMA_REGS_LBA_HIGH; + } + buf[i++] = qc->tf.nsect; + buf[i++] = ADMA_REGS_SECTOR_COUNT; + buf[i++] = qc->tf.lbal; + buf[i++] = ADMA_REGS_LBA_LOW; + buf[i++] = qc->tf.lbam; + buf[i++] = ADMA_REGS_LBA_MID; + buf[i++] = qc->tf.lbah; + buf[i++] = ADMA_REGS_LBA_HIGH; + buf[i++] = 0; + buf[i++] = ADMA_REGS_CONTROL; + buf[i++] = rIGN; + buf[i++] = 0; + buf[i++] = qc->tf.command; + buf[i++] = ADMA_REGS_COMMAND | rEND; + + buf[3] = (i >> 3) - 2; /* cLEN */ + *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */ + + i = adma_fill_sg(qc); + wmb(); /* flush PRDs and pkt to memory */ +#if 0 + /* dump out CPB + PRDs for debug */ + { + int j, len = 0; + static char obuf[2048]; + for (j = 0; j < i; ++j) { + len += sprintf(obuf+len, "%02x ", buf[j]); + if ((j & 7) == 7) { + printk("%s\n", obuf); + len = 0; + } + } + if (len) + printk("%s\n", obuf); + } +#endif +} + +static inline void adma_packet_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); + + VPRINTK("ENTER, ap %p\n", ap); + + /* fire up the ADMA engine */ + writew(aPIOMD4 | aGO | aAUTEN, chan + ADMA_CONTROL); +} + +static int adma_qc_issue(struct ata_queued_cmd *qc) +{ + struct adma_port_priv *pp = qc->ap->private_data; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + pp->state = adma_state_pkt; + adma_packet_start(qc); + return 0; + + case ATA_PROT_ATAPI_DMA: + BUG(); + break; + + default: + break; + } + + pp->state = adma_state_mmio; + return ata_qc_issue_prot(qc); +} + +static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) +{ + unsigned int handled = 0, port_no; + u8 __iomem *mmio_base = host_set->mmio_base; + + for (port_no = 0; port_no < host_set->n_ports; ++port_no) { + struct ata_port *ap = 
host_set->ports[port_no]; + struct adma_port_priv *pp; + struct ata_queued_cmd *qc; + void __iomem *chan = ADMA_REGS(mmio_base, port_no); + u8 drv_stat, status = readb(chan + ADMA_STATUS); + + if (status == 0) + continue; + handled = 1; + adma_enter_reg_mode(ap); + if ((ap->flags & ATA_FLAG_PORT_DISABLED)) + continue; + pp = ap->private_data; + if (!pp || pp->state != adma_state_pkt) + continue; + qc = ata_qc_from_tag(ap, ap->active_tag); + drv_stat = 0; + if ((status & (aPERR | aPSD | aUIRQ))) + drv_stat = ATA_ERR; + else if (pp->pkt[0] != cDONE) + drv_stat = ATA_ERR; + ata_qc_complete(qc, drv_stat); + } + return handled; +} + +static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) +{ + unsigned int handled = 0, port_no; + + for (port_no = 0; port_no < host_set->n_ports; ++port_no) { + struct ata_port *ap; + ap = host_set->ports[port_no]; + if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { + struct ata_queued_cmd *qc; + struct adma_port_priv *pp = ap->private_data; + if (!pp || pp->state != adma_state_mmio) + continue; + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && (!(qc->tf.ctl & ATA_NIEN))) { + + /* check main status, clearing INTRQ */ + u8 status = ata_chk_status(ap); + if ((status & ATA_BUSY)) + continue; + DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", + ap->id, qc->tf.protocol, status); + + /* complete taskfile transaction */ + pp->state = adma_state_idle; + ata_qc_complete(qc, status); + handled = 1; + } + } + } + return handled; +} + +static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int handled = 0; + + VPRINTK("ENTER\n"); + + spin_lock(&host_set->lock); + handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set); + spin_unlock(&host_set->lock); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} + +static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = + port->data_addr = base + 0x000; + port->error_addr = + port->feature_addr = base + 0x004; + port->nsect_addr = base + 0x008; + port->lbal_addr = base + 0x00c; + port->lbam_addr = base + 0x010; + port->lbah_addr = base + 0x014; + port->device_addr = base + 0x018; + port->status_addr = + port->command_addr = base + 0x01c; + port->altstatus_addr = + port->ctl_addr = base + 0x038; +} + +static int adma_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + struct adma_port_priv *pp; + int rc; + + rc = ata_port_start(ap); + if (rc) + return rc; + adma_enter_reg_mode(ap); + rc = -ENOMEM; + pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); + if (!pp) + goto err_out; + pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, + GFP_KERNEL); + if (!pp->pkt) + goto err_out_kfree; + /* paranoia? 
*/ + if ((pp->pkt_dma & 7) != 0) { + printk("bad alignment for pp->pkt_dma: %08x\n", + (u32)pp->pkt_dma); + goto err_out_kfree2; + } + memset(pp->pkt, 0, ADMA_PKT_BYTES); + ap->private_data = pp; + adma_reinit_engine(ap); + return 0; + +err_out_kfree2: + kfree(pp); +err_out_kfree: + kfree(pp); +err_out: + ata_port_stop(ap); + return rc; +} + +static void adma_port_stop(struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + struct adma_port_priv *pp = ap->private_data; + + adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no)); + if (pp != NULL) { + ap->private_data = NULL; + if (pp->pkt != NULL) + dma_free_coherent(dev, ADMA_PKT_BYTES, + pp->pkt, pp->pkt_dma); + kfree(pp); + } + ata_port_stop(ap); +} + +static void adma_host_stop(struct ata_host_set *host_set) +{ + unsigned int port_no; + + for (port_no = 0; port_no < ADMA_PORTS; ++port_no) + adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no)); + + ata_pci_host_stop(host_set); +} + +static void adma_host_init(unsigned int chip_id, + struct ata_probe_ent *probe_ent) +{ + unsigned int port_no; + void __iomem *mmio_base = probe_ent->mmio_base; + + /* enable/lock aGO operation */ + writeb(7, mmio_base + ADMA_MODE_LOCK); + + /* reset the ADMA logic */ + for (port_no = 0; port_no < ADMA_PORTS; ++port_no) + adma_reset_engine(ADMA_REGS(mmio_base, port_no)); +} + +static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) +{ + int rc; + + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME + "(%s): 32-bit DMA enable failed\n", + pci_name(pdev)); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME + "(%s): 32-bit consistent DMA enable failed\n", + pci_name(pdev)); + return rc; + } + return 0; +} + +static int adma_ata_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + void __iomem *mmio_base; + unsigned int board_idx = (unsigned int) ent->driver_data; + int rc, port_no; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) { + rc = -ENODEV; + goto err_out_regions; + } + + mmio_base = pci_iomap(pdev, 4, 0); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + rc = adma_set_dma_masks(pdev, mmio_base); + if (rc) + goto err_out_iounmap; + + probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_iounmap; + } + + probe_ent->dev = pci_dev_to_dev(pdev); + INIT_LIST_HEAD(&probe_ent->node); + + probe_ent->sht = adma_port_info[board_idx].sht; + probe_ent->host_flags = adma_port_info[board_idx].host_flags; + probe_ent->pio_mask = adma_port_info[board_idx].pio_mask; + probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask; + probe_ent->udma_mask = adma_port_info[board_idx].udma_mask; + probe_ent->port_ops = adma_port_info[board_idx].port_ops; + + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = mmio_base; + probe_ent->n_ports = ADMA_PORTS; + + for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { + adma_ata_setup_port(&probe_ent->port[port_no], + ADMA_ATA_REGS((unsigned long)mmio_base, port_no)); + } + + pci_set_master(pdev); + + /* initialize adapter */ + 
adma_host_init(board_idx, probe_ent); + + rc = ata_device_add(probe_ent); + kfree(probe_ent); + if (rc != ADMA_PORTS) + goto err_out_iounmap; + return 0; + +err_out_iounmap: + pci_iounmap(pdev, mmio_base); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +static int __init adma_ata_init(void) +{ + return pci_module_init(&adma_ata_pci_driver); +} + +static void __exit adma_ata_exit(void) +{ + pci_unregister_driver(&adma_ata_pci_driver); +} + +MODULE_AUTHOR("Mark Lord"); +MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl); +MODULE_VERSION(DRV_VERSION); + +module_init(adma_ata_init); +module_exit(adma_ata_exit); diff --git a/trunk/drivers/scsi/sata_qstor.c b/trunk/drivers/scsi/sata_qstor.c index 69a9b1cf6f9c..ffcdeb68641c 100644 --- a/trunk/drivers/scsi/sata_qstor.c +++ b/trunk/drivers/scsi/sata_qstor.c @@ -268,17 +268,16 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) static void qs_fill_sg(struct ata_queued_cmd *qc) { - struct scatterlist *sg; + struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; struct qs_port_priv *pp = ap->private_data; unsigned int nelem; u8 *prd = pp->pkt + QS_CPB_BYTES; - assert(qc->__sg != NULL); + assert(sg != NULL); assert(qc->n_elem > 0); - nelem = 0; - ata_for_each_sg(sg, qc) { + for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) { u64 addr; u32 len; @@ -292,7 +291,6 @@ static void qs_fill_sg(struct ata_queued_cmd *qc) VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem, (unsigned long long)addr, len); - nelem++; } } diff --git a/trunk/drivers/scsi/sata_sx4.c b/trunk/drivers/scsi/sata_sx4.c index 79fdbbab513e..540a85191172 100644 --- a/trunk/drivers/scsi/sata_sx4.c +++ b/trunk/drivers/scsi/sata_sx4.c @@ -449,14 +449,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, static void pdc20621_dma_prep(struct ata_queued_cmd *qc) { - struct scatterlist *sg; + struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; void __iomem *mmio = ap->host_set->mmio_base; struct pdc_host_priv *hpriv = ap->host_set->private_data; void __iomem *dimm_mmio = hpriv->dimm_mmio; unsigned int portno = ap->port_no; - unsigned int i, idx, total_len = 0, sgt_len; + unsigned int i, last, idx, total_len = 0, sgt_len; u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; assert(qc->flags & ATA_QCFLAG_DMAMAP); @@ -469,11 +469,12 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) /* * Build S/G table */ + last = qc->n_elem; idx = 0; - ata_for_each_sg(sg, qc) { - buf[idx++] = cpu_to_le32(sg_dma_address(sg)); - buf[idx++] = cpu_to_le32(sg_dma_len(sg)); - total_len += sg_dma_len(sg); + for (i = 0; i < last; i++) { + buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i])); + buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i])); + total_len += sg_dma_len(&sg[i]); } buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); sgt_len = idx * 4; diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h index 0f89f4121fa3..7929cfc9318d 100644 --- a/trunk/include/linux/libata.h +++ b/trunk/include/linux/libata.h @@ -155,10 +155,6 @@ enum { ATA_SHIFT_UDMA = 0, ATA_SHIFT_MWDMA = 8, ATA_SHIFT_PIO = 11, - - /* size of buffer to pad xfers ending on unaligned boundaries */ - ATA_DMA_PAD_SZ = 4, - ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, /* Masks for port functions */ ATA_PORT_PRIMARY = (1 << 0), @@ -246,12 +242,9 @@ struct ata_queued_cmd { unsigned long flags; 
/* ATA_QCFLAG_xxx */ unsigned int tag; unsigned int n_elem; - unsigned int orig_n_elem; int dma_dir; - unsigned int pad_len; - unsigned int nsect; unsigned int cursect; @@ -262,11 +255,9 @@ struct ata_queued_cmd { unsigned int cursg_ofs; struct scatterlist sgent; - struct scatterlist pad_sgent; void *buf_virt; - /* DO NOT iterate over __sg manually, use ata_for_each_sg() */ - struct scatterlist *__sg; + struct scatterlist *sg; ata_qc_cb_t complete_fn; @@ -314,9 +305,6 @@ struct ata_port { struct ata_prd *prd; /* our SG list */ dma_addr_t prd_dma; /* and its DMA mapping */ - void *pad; /* array of DMA pad buffers */ - dma_addr_t pad_dma; - struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ u8 ctl; /* cache of ATA control register */ @@ -482,19 +470,6 @@ extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); #endif /* CONFIG_PCI */ -static inline struct scatterlist * -ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc) -{ - if (sg == &qc->pad_sgent) - return NULL; - if (++sg - qc->__sg < qc->n_elem) - return sg; - return qc->pad_len ? &qc->pad_sgent : NULL; -} - -#define ata_for_each_sg(sg, qc) \ - for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc)) - static inline unsigned int ata_tag_valid(unsigned int tag) { return (tag < ATA_MAX_QUEUE) ? 1 : 0;