From ce0fe507df2f960b9a523b045da241faf2e0fdd6 Mon Sep 17 00:00:00 2001
From: Thibaut VARENE
Date: Fri, 3 Feb 2006 18:06:30 -0700
Subject: [PATCH]

--- yaml ---
r: 24808
b: refs/heads/master
c: a81dd18eb974cc34634c53a6447b2799ec0c3158
h: refs/heads/master
v: v3
---
 [refs] | 2 +-
 trunk/Documentation/DocBook/libata.tmpl | 47 +-
 trunk/arch/i386/kernel/syscall_table.S | 1 -
 trunk/arch/ia64/kernel/entry.S | 1 -
 trunk/arch/ia64/kernel/gate.lds.S | 1 -
 trunk/arch/ia64/kernel/iosapic.c | 265 +++---
 trunk/arch/ia64/kernel/vmlinux.lds.S | 18 +-
 trunk/arch/ia64/mm/init.c | 8 +-
 trunk/arch/ia64/mm/ioremap.c | 6 +-
 trunk/arch/ia64/mm/tlb.c | 12 +-
 trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c | 8 +-
 trunk/arch/parisc/kernel/pdc_chassis.c | 5 +-
 trunk/drivers/char/drm/drmP.h | 16 +-
 trunk/drivers/char/drm/drm_bufs.c | 20 +-
 trunk/drivers/char/drm/drm_dma.c | 4 +-
 trunk/drivers/char/drm/drm_memory.c | 59 ++
 trunk/drivers/char/drm/drm_memory_debug.h | 70 ++
 trunk/drivers/char/drm/drm_pci.c | 29 +-
 trunk/drivers/char/drm/drm_pciids.h | 116 +--
 trunk/drivers/char/drm/i915_dma.c | 2 +
 trunk/drivers/char/drm/i915_irq.c | 2 -
 trunk/drivers/char/drm/r300_cmdbuf.c | 86 +--
 trunk/drivers/char/drm/r300_reg.h | 39 +-
 trunk/drivers/char/drm/radeon_cp.c | 151 ++--
 trunk/drivers/char/drm/radeon_drm.h | 5 -
 trunk/drivers/char/drm/radeon_drv.h | 25 +-
 trunk/drivers/char/drm/radeon_state.c | 105 +--
 trunk/drivers/char/drm/sis_mm.c | 2 +-
 trunk/drivers/infiniband/core/mad.c | 112 +--
 trunk/drivers/infiniband/core/mad_priv.h | 3 +-
 trunk/drivers/infiniband/core/mad_rmpp.c | 54 +-
 trunk/drivers/infiniband/core/user_mad.c | 30 +-
 trunk/drivers/infiniband/hw/mthca/mthca_av.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_cq.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_eq.c | 6 +-
 trunk/drivers/infiniband/hw/mthca/mthca_mad.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_mcg.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_mr.c | 4 +-
 trunk/drivers/infiniband/hw/mthca/mthca_pd.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_qp.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_srq.c | 4 +-
 .../drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +-
 trunk/drivers/infiniband/ulp/srp/ib_srp.c | 4 +-
 trunk/drivers/parisc/pdc_stable.c | 5 +-
 trunk/drivers/scsi/libata-bmdma.c | 26 +-
 trunk/drivers/scsi/libata-core.c | 168 +++--
 trunk/drivers/scsi/sata_mv.c | 42 +-
 trunk/fs/Makefile | 2 +-
 trunk/fs/ext2/file.c | 2 -
 trunk/fs/ext3/file.c | 2 -
 trunk/fs/pipe.c | 41 +-
 trunk/fs/reiserfs/file.c | 2 -
 trunk/fs/splice.c | 659 ------------------
 trunk/include/asm-i386/unistd.h | 3 +-
 trunk/include/asm-ia64/asmmacro.h | 4 -
 trunk/include/asm-ia64/unistd.h | 3 +-
 trunk/include/asm-parisc/pdc_chassis.h | 5 +-
 trunk/include/asm-powerpc/unistd.h | 3 +-
 trunk/include/asm-x86_64/unistd.h | 4 +-
 trunk/include/linux/fs.h | 4 -
 trunk/include/linux/libata.h | 10 +-
 trunk/include/linux/pipe_fs_i.h | 8 -
 trunk/include/linux/syscalls.h | 2 -
 trunk/include/rdma/ib_mad.h | 27 +-
 trunk/net/socket.c | 6 +-
 65 files changed, 678 insertions(+), 1686 deletions(-)
 delete mode 100644 trunk/fs/splice.c

diff --git a/[refs] b/[refs]
index 6d0d48f289ae..56a6f5bfa20d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 064c94f9da8845f12446ab37142aa10f3c6f66ac
+refs/heads/master: a81dd18eb974cc34634c53a6447b2799ec0c3158
diff --git a/trunk/Documentation/DocBook/libata.tmpl b/trunk/Documentation/DocBook/libata.tmpl
index 5bcbb6ee3bc0..d260d92089ad 100644
--- a/trunk/Documentation/DocBook/libata.tmpl
+++ b/trunk/Documentation/DocBook/libata.tmpl
@@ -120,27 
+120,14 @@ void (*dev_config) (struct ata_port *, struct ata_device *); void (*set_piomode) (struct ata_port *, struct ata_device *); void (*set_dmamode) (struct ata_port *, struct ata_device *); -void (*post_set_mode) (struct ata_port *); -unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned int); +void (*post_set_mode) (struct ata_port *ap); Hooks called prior to the issue of SET FEATURES - XFER MODE - command. The optional ->mode_filter() hook is called when libata - has built a mask of the possible modes. This is passed to the - ->mode_filter() function which should return a mask of valid modes - after filtering those unsuitable due to hardware limits. It is not - valid to use this interface to add modes. - - - dev->pio_mode and dev->dma_mode are guaranteed to be valid when - ->set_piomode() and when ->set_dmamode() is called. The timings for - any other drive sharing the cable will also be valid at this point. - That is the library records the decisions for the modes of each - drive on a channel before it attempts to set any of them. - - - ->post_set_mode() is + command. dev->pio_mode is guaranteed to be valid when + ->set_piomode() is called, and dev->dma_mode is guaranteed to be + valid when ->set_dmamode() is called. ->post_set_mode() is called unconditionally, after the SET FEATURES - XFER MODE command completes successfully. @@ -243,32 +230,6 @@ void (*dev_select)(struct ata_port *ap, unsigned int device); - Private tuning method - -void (*set_mode) (struct ata_port *ap); - - - - By default libata performs drive and controller tuning in - accordance with the ATA timing rules and also applies blacklists - and cable limits. Some controllers need special handling and have - custom tuning rules, typically raid controllers that use ATA - commands but do not actually do drive timing. - - - - - This hook should not be used to replace the standard controller - tuning logic when a controller has quirks. Replacing the default - tuning logic in that case would bypass handling for drive and - bridge quirks that may be important to data reliability. If a - controller needs to filter the mode selection it should use the - mode_filter hook instead. 
- - - - - Reset ATA bus void (*phy_reset) (struct ata_port *ap); diff --git a/trunk/arch/i386/kernel/syscall_table.S b/trunk/arch/i386/kernel/syscall_table.S index ce3ef4fa0551..326595f3fa4d 100644 --- a/trunk/arch/i386/kernel/syscall_table.S +++ b/trunk/arch/i386/kernel/syscall_table.S @@ -312,4 +312,3 @@ ENTRY(sys_call_table) .long sys_unshare /* 310 */ .long sys_set_robust_list .long sys_get_robust_list - .long sys_splice diff --git a/trunk/arch/ia64/kernel/entry.S b/trunk/arch/ia64/kernel/entry.S index 750e8e7fbdc3..0e3eda99e549 100644 --- a/trunk/arch/ia64/kernel/entry.S +++ b/trunk/arch/ia64/kernel/entry.S @@ -1605,6 +1605,5 @@ sys_call_table: data8 sys_ni_syscall // reserved for pselect data8 sys_ni_syscall // 1295 reserved for ppoll data8 sys_unshare - data8 sys_splice .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls diff --git a/trunk/arch/ia64/kernel/gate.lds.S b/trunk/arch/ia64/kernel/gate.lds.S index 7c99e6ec3daf..e1e4aba9ecd0 100644 --- a/trunk/arch/ia64/kernel/gate.lds.S +++ b/trunk/arch/ia64/kernel/gate.lds.S @@ -59,7 +59,6 @@ SECTIONS *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(__ex_table) - *(__mca_table) } } diff --git a/trunk/arch/ia64/kernel/iosapic.c b/trunk/arch/ia64/kernel/iosapic.c index 7956eb9058fc..8832c553230a 100644 --- a/trunk/arch/ia64/kernel/iosapic.c +++ b/trunk/arch/ia64/kernel/iosapic.c @@ -9,65 +9,54 @@ * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999,2000 Walt Drummond * - * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O - * APIC code. In particular, we now have separate - * handlers for edge and level triggered - * interrupts. - * 00/10/27 Asit Mallick, Goutham Rao IRQ vector - * allocation PCI to vector mapping, shared PCI - * interrupts. - * 00/10/27 D. Mosberger Document things a bit more to make them more - * understandable. Clean up much of the old - * IOSAPIC cruft. - * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts - * and fixes for ACPI S5(SoftOff) support. + * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O APIC code. + * In particular, we now have separate handlers for edge + * and level triggered interrupts. + * 00/10/27 Asit Mallick, Goutham Rao IRQ vector allocation + * PCI to vector mapping, shared PCI interrupts. + * 00/10/27 D. Mosberger Document things a bit more to make them more understandable. + * Clean up much of the old IOSAPIC cruft. + * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts and fixes for + * ACPI S5(SoftOff) support. * 02/01/23 J.I. Lee iosapic pgm fixes for PCI irq routing from _PRT - * 02/01/07 E. Focht Redirectable interrupt - * vectors in iosapic_set_affinity(), - * initializations for /proc/irq/#/smp_affinity + * 02/01/07 E. Focht Redirectable interrupt vectors in + * iosapic_set_affinity(), initializations for + * /proc/irq/#/smp_affinity * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing. * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq - * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to - * IOSAPIC mapping error + * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping + * error * 02/07/29 T. Kochi Allocate interrupt vectors dynamically - * 02/08/04 T. Kochi Cleaned up terminology (irq, global system - * interrupt, vector, etc.) - * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's - * pci_irq code. + * 02/08/04 T. Kochi Cleaned up terminology (irq, global system interrupt, vector, etc.) + * 02/09/20 D. 
Mosberger Simplified by taking advantage of ACPI's pci_irq code. * 03/02/19 B. Helgaas Make pcat_compat system-wide, not per-IOSAPIC. - * Remove iosapic_address & gsi_base from - * external interfaces. Rationalize - * __init/__devinit attributes. + * Remove iosapic_address & gsi_base from external interfaces. + * Rationalize __init/__devinit attributes. * 04/12/04 Ashok Raj Intel Corporation 2004 - * Updated to work with irq migration necessary - * for CPU Hotplug + * Updated to work with irq migration necessary for CPU Hotplug */ /* - * Here is what the interrupt logic between a PCI device and the kernel looks - * like: + * Here is what the interrupt logic between a PCI device and the kernel looks like: * - * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, - * INTD). The device is uniquely identified by its bus-, and slot-number - * (the function number does not matter here because all functions share - * the same interrupt lines). + * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD). The + * device is uniquely identified by its bus--, and slot-number (the function + * number does not matter here because all functions share the same interrupt + * lines). * - * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC - * controller. Multiple interrupt lines may have to share the same - * IOSAPIC pin (if they're level triggered and use the same polarity). - * Each interrupt line has a unique Global System Interrupt (GSI) number - * which can be calculated as the sum of the controller's base GSI number - * and the IOSAPIC pin number to which the line connects. + * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC controller. + * Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level + * triggered and use the same polarity). Each interrupt line has a unique Global + * System Interrupt (GSI) number which can be calculated as the sum of the controller's + * base GSI number and the IOSAPIC pin number to which the line connects. * - * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the - * IOSAPIC pin into the IA-64 interrupt vector. This interrupt vector is then - * sent to the CPU. + * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the IOSAPIC pin + * into the IA-64 interrupt vector. This interrupt vector is then sent to the CPU. * - * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is - * used as architecture-independent interrupt handling mechanism in Linux. - * As an IRQ is a number, we have to have - * IA-64 interrupt vector number <-> IRQ number mapping. On smaller - * systems, we use one-to-one mapping between IA-64 vector and IRQ. A - * platform can implement platform_irq_to_vector(irq) and + * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is used as + * architecture-independent interrupt handling mechanism in Linux. As an + * IRQ is a number, we have to have IA-64 interrupt vector number <-> IRQ number + * mapping. On smaller systems, we use one-to-one mapping between IA-64 vector and + * IRQ. A platform can implement platform_irq_to_vector(irq) and * platform_local_vector_to_irq(vector) APIs to differentiate the mapping. * Please see also include/asm-ia64/hw_irq.h for those APIs. * @@ -75,9 +64,9 @@ * * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ * - * Note: The term "IRQ" is loosely used everywhere in Linux kernel to - * describeinterrupts. 
Now we use "IRQ" only for Linux IRQ's. ISA IRQ - * (isa_irq) is the only exception in this source code. + * Note: The term "IRQ" is loosely used everywhere in Linux kernel to describe interrupts. + * Now we use "IRQ" only for Linux IRQ's. ISA IRQ (isa_irq) is the only exception in this + * source code. */ #include @@ -101,6 +90,7 @@ #include #include + #undef DEBUG_INTERRUPT_ROUTING #ifdef DEBUG_INTERRUPT_ROUTING @@ -109,46 +99,36 @@ #define DBG(fmt...) #endif -#define NR_PREALLOCATE_RTE_ENTRIES \ - (PAGE_SIZE / sizeof(struct iosapic_rte_info)) +#define NR_PREALLOCATE_RTE_ENTRIES (PAGE_SIZE / sizeof(struct iosapic_rte_info)) #define RTE_PREALLOCATED (1) static DEFINE_SPINLOCK(iosapic_lock); -/* - * These tables map IA-64 vectors to the IOSAPIC pin that generates this - * vector. - */ +/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */ struct iosapic_rte_info { - struct list_head rte_list; /* node in list of RTEs sharing the - * same vector */ + struct list_head rte_list; /* node in list of RTEs sharing the same vector */ char __iomem *addr; /* base address of IOSAPIC */ - unsigned int gsi_base; /* first GSI assigned to this - * IOSAPIC */ + unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */ char rte_index; /* IOSAPIC RTE index */ int refcnt; /* reference counter */ unsigned int flags; /* flags */ } ____cacheline_aligned; static struct iosapic_intr_info { - struct list_head rtes; /* RTEs using this vector (empty => - * not an IOSAPIC interrupt) */ + struct list_head rtes; /* RTEs using this vector (empty => not an IOSAPIC interrupt) */ int count; /* # of RTEs that shares this vector */ - u32 low32; /* current value of low word of - * Redirection table entry */ + u32 low32; /* current value of low word of Redirection table entry */ unsigned int dest; /* destination CPU physical ID */ unsigned char dmode : 3; /* delivery mode (see iosapic.h) */ - unsigned char polarity: 1; /* interrupt polarity - * (see iosapic.h) */ + unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */ unsigned char trigger : 1; /* trigger mode (see iosapic.h) */ } iosapic_intr_info[IA64_NUM_VECTORS]; static struct iosapic { char __iomem *addr; /* base address of IOSAPIC */ - unsigned int gsi_base; /* first GSI assigned to this - * IOSAPIC */ - unsigned short num_rte; /* # of RTEs on this IOSAPIC */ + unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */ + unsigned short num_rte; /* number of RTE in this IOSAPIC */ int rtes_inuse; /* # of RTEs in use on this IOSAPIC */ #ifdef CONFIG_NUMA unsigned short node; /* numa node association via pxm */ @@ -169,8 +149,7 @@ find_iosapic (unsigned int gsi) int i; for (i = 0; i < NR_IOSAPICS; i++) { - if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < - iosapic_lists[i].num_rte) + if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte) return i; } @@ -183,8 +162,7 @@ _gsi_to_vector (unsigned int gsi) struct iosapic_intr_info *info; struct iosapic_rte_info *rte; - for (info = iosapic_intr_info; info < - iosapic_intr_info + IA64_NUM_VECTORS; ++info) + for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info) list_for_each_entry(rte, &info->rtes, rte_list) if (rte->gsi_base + rte->rte_index == gsi) return info - iosapic_intr_info; @@ -207,8 +185,8 @@ gsi_to_irq (unsigned int gsi) unsigned long flags; int irq; /* - * XXX fix me: this assumes an identity mapping between IA-64 vector - * and Linux irq numbers... 
+ * XXX fix me: this assumes an identity mapping vetween IA-64 vector and Linux irq + * numbers... */ spin_lock_irqsave(&iosapic_lock, flags); { @@ -219,8 +197,7 @@ gsi_to_irq (unsigned int gsi) return irq; } -static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, - unsigned int vec) +static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec) { struct iosapic_rte_info *rte; @@ -260,9 +237,7 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) for (irq = 0; irq < NR_IRQS; ++irq) if (irq_to_vector(irq) == vector) { - set_irq_affinity_info(irq, - (int)(dest & 0xffff), - redir); + set_irq_affinity_info(irq, (int)(dest & 0xffff), redir); break; } } @@ -284,7 +259,7 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) } static void -nop (unsigned int irq) +nop (unsigned int vector) { /* do nothing... */ } @@ -306,8 +281,7 @@ mask_irq (unsigned int irq) { /* set only the mask bit */ low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK; - list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, - rte_list) { + list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) { addr = rte->addr; rte_index = rte->rte_index; iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); @@ -332,8 +306,7 @@ unmask_irq (unsigned int irq) spin_lock_irqsave(&iosapic_lock, flags); { low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK; - list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, - rte_list) { + list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) { addr = rte->addr; rte_index = rte->rte_index; iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); @@ -373,25 +346,21 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) spin_lock_irqsave(&iosapic_lock, flags); { - low32 = iosapic_intr_info[vec].low32 & - ~(7 << IOSAPIC_DELIVERY_SHIFT); + low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT); if (redir) /* change delivery mode to lowest priority */ - low32 |= (IOSAPIC_LOWEST_PRIORITY << - IOSAPIC_DELIVERY_SHIFT); + low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT); else /* change delivery mode to fixed */ low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); iosapic_intr_info[vec].low32 = low32; iosapic_intr_info[vec].dest = dest; - list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, - rte_list) { + list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) { addr = rte->addr; rte_index = rte->rte_index; - iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), - high32); + iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); } } @@ -464,8 +433,7 @@ iosapic_ack_edge_irq (unsigned int irq) * interrupt for real. This prevents IRQ storms from unhandled * devices. 
*/ - if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == - (IRQ_PENDING|IRQ_DISABLED)) + if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED)) mask_irq(irq); } @@ -499,8 +467,7 @@ iosapic_version (char __iomem *addr) return iosapic_read(addr, IOSAPIC_VERSION); } -static int iosapic_find_sharable_vector (unsigned long trigger, - unsigned long pol) +static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol) { int i, vector = -1, min_count = -1; struct iosapic_intr_info *info; @@ -515,8 +482,7 @@ static int iosapic_find_sharable_vector (unsigned long trigger, for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) { info = &iosapic_intr_info[i]; if (info->trigger == trigger && info->polarity == pol && - (info->dmode == IOSAPIC_FIXED || info->dmode == - IOSAPIC_LOWEST_PRIORITY)) { + (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) { if (min_count == -1 || info->count < min_count) { vector = i; min_count = info->count; @@ -540,15 +506,12 @@ iosapic_reassign_vector (int vector) new_vector = assign_irq_vector(AUTO_ASSIGN); if (new_vector < 0) panic("%s: out of interrupt vectors!\n", __FUNCTION__); - printk(KERN_INFO "Reassigning vector %d to %d\n", - vector, new_vector); + printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector); memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], sizeof(struct iosapic_intr_info)); INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes); - list_move(iosapic_intr_info[vector].rtes.next, - &iosapic_intr_info[new_vector].rtes); - memset(&iosapic_intr_info[vector], 0, - sizeof(struct iosapic_intr_info)); + list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes); + memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); iosapic_intr_info[vector].low32 = IOSAPIC_MASK; INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); } @@ -561,8 +524,7 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void) int preallocated = 0; if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) { - rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * - NR_PREALLOCATE_RTE_ENTRIES); + rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES); if (!rte) return NULL; for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++) @@ -570,8 +532,7 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void) } if (!list_empty(&free_rte_list)) { - rte = list_entry(free_rte_list.next, struct iosapic_rte_info, - rte_list); + rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list); list_del(&rte->rte_list); preallocated++; } else { @@ -614,8 +575,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, index = find_iosapic(gsi); if (index < 0) { - printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", - __FUNCTION__, gsi); + printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi); return -ENODEV; } @@ -626,8 +586,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, if (!rte) { rte = iosapic_alloc_rte(); if (!rte) { - printk(KERN_WARNING "%s: cannot allocate memory\n", - __FUNCTION__); + printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__); return -ENOMEM; } @@ -643,9 +602,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, else if (vector_is_shared(vector)) { struct iosapic_intr_info *info = &iosapic_intr_info[vector]; if (info->trigger != trigger || info->polarity != polarity) { - printk (KERN_WARNING - "%s: cannot override the 
interrupt\n", - __FUNCTION__); + printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__); return -EINVAL; } } @@ -662,10 +619,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, idesc = irq_descp(vector); if (idesc->handler != irq_type) { if (idesc->handler != &no_irq_type) - printk(KERN_WARNING - "%s: changing vector %d from %s to %s\n", - __FUNCTION__, vector, - idesc->handler->typename, irq_type->typename); + printk(KERN_WARNING "%s: changing vector %d from %s to %s\n", + __FUNCTION__, vector, idesc->handler->typename, irq_type->typename); idesc->handler = irq_type; } return 0; @@ -726,7 +681,7 @@ get_target_cpu (unsigned int gsi, int vector) if (!num_cpus) goto skip_numa_setup; - /* Use vector assignment to distribute across cpus in node */ + /* Use vector assigment to distribute across cpus in node */ cpu_index = vector % num_cpus; for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++) @@ -748,7 +703,7 @@ get_target_cpu (unsigned int gsi, int vector) } while (!cpu_online(cpu)); return cpu_physical_id(cpu); -#else /* CONFIG_SMP */ +#else return cpu_physical_id(smp_processor_id()); #endif } @@ -800,8 +755,7 @@ iosapic_register_intr (unsigned int gsi, if (list_empty(&iosapic_intr_info[vector].rtes)) free_irq_vector(vector); spin_unlock(&iosapic_lock); - spin_unlock_irqrestore(&irq_descp(vector)->lock, - flags); + spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); goto again; } @@ -810,8 +764,7 @@ iosapic_register_intr (unsigned int gsi, polarity, trigger); if (err < 0) { spin_unlock(&iosapic_lock); - spin_unlock_irqrestore(&irq_descp(vector)->lock, - flags); + spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); return err; } @@ -853,8 +806,7 @@ iosapic_unregister_intr (unsigned int gsi) */ irq = gsi_to_irq(gsi); if (irq < 0) { - printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", - gsi); + printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi); WARN_ON(1); return; } @@ -865,9 +817,7 @@ iosapic_unregister_intr (unsigned int gsi) spin_lock(&iosapic_lock); { if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) { - printk(KERN_ERR - "iosapic_unregister_intr(%u) unbalanced\n", - gsi); + printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi); WARN_ON(1); goto out; } @@ -877,8 +827,7 @@ iosapic_unregister_intr (unsigned int gsi) /* Mask the interrupt */ low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK; - iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), - low32); + iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32); /* Remove the rte entry from the list */ list_del(&rte->rte_list); @@ -891,9 +840,7 @@ iosapic_unregister_intr (unsigned int gsi) trigger = iosapic_intr_info[vector].trigger; polarity = iosapic_intr_info[vector].polarity; dest = iosapic_intr_info[vector].dest; - printk(KERN_INFO - "GSI %u (%s, %s) -> CPU %d (0x%04x)" - " vector %d unregistered\n", + printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n", gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), (polarity == IOSAPIC_POL_HIGH ? 
"high" : "low"), cpu_logical_id(dest), dest, vector); @@ -906,15 +853,12 @@ iosapic_unregister_intr (unsigned int gsi) idesc->handler = &no_irq_type; /* Clear the interrupt information */ - memset(&iosapic_intr_info[vector], 0, - sizeof(struct iosapic_intr_info)); + memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); iosapic_intr_info[vector].low32 |= IOSAPIC_MASK; INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); if (idesc->action) { - printk(KERN_ERR - "interrupt handlers still exist on" - "IRQ %u\n", irq); + printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq); WARN_ON(1); } @@ -929,6 +873,7 @@ iosapic_unregister_intr (unsigned int gsi) /* * ACPI calls this when it finds an entry for a platform interrupt. + * Note that the irq_base and IOSAPIC address must be set in iosapic_init(). */ int __init iosapic_register_platform_intr (u32 int_type, unsigned int gsi, @@ -962,16 +907,13 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, mask = 1; break; default: - printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__, - int_type); + printk(KERN_ERR "iosapic_register_platform_irq(): invalid int type 0x%x\n", int_type); return -1; } register_intr(gsi, vector, delivery, polarity, trigger); - printk(KERN_INFO - "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)" - " vector %d\n", + printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown", int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), @@ -981,8 +923,10 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, return vector; } + /* * ACPI calls this when it finds an entry for a legacy ISA IRQ override. + * Note that the gsi_base and IOSAPIC address must be set in iosapic_init(). */ void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, @@ -1011,19 +955,16 @@ iosapic_system_init (int system_pcat_compat) for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) { iosapic_intr_info[vector].low32 = IOSAPIC_MASK; - /* mark as unused */ - INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); + INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); /* mark as unused */ } pcat_compat = system_pcat_compat; if (pcat_compat) { /* - * Disable the compatibility mode interrupts (8259 style), - * needs IN/OUT support enabled. + * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support + * enabled. */ - printk(KERN_INFO - "%s: Disabling PC-AT compatible 8259 interrupts\n", - __FUNCTION__); + printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__); outb(0xff, 0xA1); outb(0xff, 0x21); } @@ -1063,7 +1004,10 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver) base = iosapic_lists[index].gsi_base; end = base + iosapic_lists[index].num_rte - 1; - if (gsi_end < base || end < gsi_base) + if (gsi_base < base && gsi_end < base) + continue;/* OK */ + + if (gsi_base > end && gsi_end > end) continue; /* OK */ return -EBUSY; @@ -1109,14 +1053,12 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base) if ((gsi_base == 0) && pcat_compat) { /* - * Map the legacy ISA devices into the IOSAPIC data. Some of - * these may get reprogrammed later on with data from the ACPI - * Interrupt Source Override table. + * Map the legacy ISA devices into the IOSAPIC data. Some of these may + * get reprogrammed later on with data from the ACPI Interrupt Source + * Override table. 
*/ for (isa_irq = 0; isa_irq < 16; ++isa_irq) - iosapic_override_isa_irq(isa_irq, isa_irq, - IOSAPIC_POL_HIGH, - IOSAPIC_EDGE); + iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE); } return 0; } @@ -1139,8 +1081,7 @@ iosapic_remove (unsigned int gsi_base) if (iosapic_lists[index].rtes_inuse) { err = -EBUSY; - printk(KERN_WARNING - "%s: IOSAPIC for GSI base %u is busy\n", + printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n", __FUNCTION__, gsi_base); goto out; } diff --git a/trunk/arch/ia64/kernel/vmlinux.lds.S b/trunk/arch/ia64/kernel/vmlinux.lds.S index 783600fe52b2..0b9e56dd7f05 100644 --- a/trunk/arch/ia64/kernel/vmlinux.lds.S +++ b/trunk/arch/ia64/kernel/vmlinux.lds.S @@ -70,15 +70,6 @@ SECTIONS __stop___ex_table = .; } - /* MCA table */ - . = ALIGN(16); - __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) - { - __start___mca_table = .; - *(__mca_table) - __stop___mca_table = .; - } - /* Global data */ _data = .; @@ -139,6 +130,15 @@ SECTIONS __initcall_end = .; } + /* MCA table */ + . = ALIGN(16); + __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) + { + __start___mca_table = .; + *(__mca_table) + __stop___mca_table = .; + } + .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) { __start___vtop_patchlist = .; diff --git a/trunk/arch/ia64/mm/init.c b/trunk/arch/ia64/mm/init.c index cafa8776a53d..2ef1151cde90 100644 --- a/trunk/arch/ia64/mm/init.c +++ b/trunk/arch/ia64/mm/init.c @@ -109,7 +109,6 @@ lazy_mmu_prot_update (pte_t pte) { unsigned long addr; struct page *page; - unsigned long order; if (!pte_exec(pte)) return; /* not an executable page... */ @@ -120,12 +119,7 @@ lazy_mmu_prot_update (pte_t pte) if (test_bit(PG_arch_1, &page->flags)) return; /* i-cache is already coherent with d-cache */ - if (PageCompound(page)) { - order = (unsigned long) (page[1].lru.prev); - flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT)); - } - else - flush_icache_range(addr, addr + PAGE_SIZE); + flush_icache_range(addr, addr + PAGE_SIZE); set_bit(PG_arch_1, &page->flags); /* mark page as clean */ } diff --git a/trunk/arch/ia64/mm/ioremap.c b/trunk/arch/ia64/mm/ioremap.c index 643ccc6960ce..62328621f99c 100644 --- a/trunk/arch/ia64/mm/ioremap.c +++ b/trunk/arch/ia64/mm/ioremap.c @@ -21,12 +21,12 @@ __ioremap (unsigned long offset, unsigned long size) void __iomem * ioremap (unsigned long offset, unsigned long size) { - if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB)) - return phys_to_virt(offset); - if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC)) return __ioremap(offset, size); + if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB)) + return phys_to_virt(offset); + /* * Someday this should check ACPI resources so we * can do the right thing for hot-plugged regions. 
diff --git a/trunk/arch/ia64/mm/tlb.c b/trunk/arch/ia64/mm/tlb.c index 4dbbca0b5e9c..6a4eec9113e8 100644 --- a/trunk/arch/ia64/mm/tlb.c +++ b/trunk/arch/ia64/mm/tlb.c @@ -156,19 +156,17 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, nbits = purge.max_bits; start &= ~((1UL << nbits) - 1); +# ifdef CONFIG_SMP + platform_global_tlb_purge(mm, start, end, nbits); +# else preempt_disable(); -#ifdef CONFIG_SMP - if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { - platform_global_tlb_purge(mm, start, end, nbits); - preempt_enable(); - return; - } -#endif do { ia64_ptcl(start, (nbits<<2)); start += (1UL << nbits); } while (start < end); preempt_enable(); +# endif + ia64_srlz_i(); /* srlz.i implies srlz.d */ } EXPORT_SYMBOL(flush_tlb_range); diff --git a/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c index d917afa30b27..70db21f3df21 100644 --- a/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -110,11 +110,7 @@ static int sn_hwperf_geoid_to_cnode(char *location) if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) return -1; - /* - * FIXME: replace with cleaner for_each_XXX macro which addresses - * both compute and IO nodes once ACPI3.0 is available. - */ - for (cnode = 0; cnode < num_cnodes; cnode++) { + for_each_node(cnode) { geoid = cnodeid_get_geoid(cnode); module_id = geo_module(geoid); this_rack = MODULE_GET_RACK(module_id); @@ -609,7 +605,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; if (cpu != SN_HWPERF_ARG_ANY_CPU) { - if (cpu >= NR_CPUS || !cpu_online(cpu)) { + if (cpu >= num_online_cpus() || !cpu_online(cpu)) { r = -EINVAL; goto out; } diff --git a/trunk/arch/parisc/kernel/pdc_chassis.c b/trunk/arch/parisc/kernel/pdc_chassis.c index 0cea6958f427..a45e2e2ffd9f 100644 --- a/trunk/arch/parisc/kernel/pdc_chassis.c +++ b/trunk/arch/parisc/kernel/pdc_chassis.c @@ -5,9 +5,8 @@ * Copyright (C) 2002-2004 Thibaut VARENE * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/trunk/drivers/char/drm/drmP.h b/trunk/drivers/char/drm/drmP.h index edc72a6348a7..107df9fdba4e 100644 --- a/trunk/drivers/char/drm/drmP.h +++ b/trunk/drivers/char/drm/drmP.h @@ -357,12 +357,6 @@ typedef struct drm_freelist { spinlock_t lock; } drm_freelist_t; -typedef struct drm_dma_handle { - dma_addr_t busaddr; - void *vaddr; - size_t size; -} drm_dma_handle_t; - /** * Buffer entry. There is one of this for each buffer size order. 
*/ @@ -372,7 +366,7 @@ typedef struct drm_buf_entry { drm_buf_t *buflist; /**< buffer list */ int seg_count; int page_order; - drm_dma_handle_t **seglist; + unsigned long *seglist; drm_freelist_t freelist; } drm_buf_entry_t; @@ -489,6 +483,12 @@ typedef struct drm_sigdata { drm_hw_lock_t *lock; } drm_sigdata_t; +typedef struct drm_dma_handle { + dma_addr_t busaddr; + void *vaddr; + size_t size; +} drm_dma_handle_t; + /** * Mappings list */ @@ -813,6 +813,8 @@ extern void drm_mem_init(void); extern int drm_mem_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); +extern unsigned long drm_alloc_pages(int order, int area); +extern void drm_free_pages(unsigned long address, int order, int area); extern void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev); extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size, diff --git a/trunk/drivers/char/drm/drm_bufs.c b/trunk/drivers/char/drm/drm_bufs.c index 8a9cf12e6183..e2637b4d51de 100644 --- a/trunk/drivers/char/drm/drm_bufs.c +++ b/trunk/drivers/char/drm/drm_bufs.c @@ -474,7 +474,8 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) if (entry->seg_count) { for (i = 0; i < entry->seg_count; i++) { if (entry->seglist[i]) { - drm_pci_free(dev, entry->seglist[i]); + drm_free_pages(entry->seglist[i], + entry->page_order, DRM_MEM_DMA); } } drm_free(entry->seglist, @@ -677,7 +678,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) int total; int page_order; drm_buf_entry_t *entry; - drm_dma_handle_t *dmah; + unsigned long page; drm_buf_t *buf; int alignment; unsigned long offset; @@ -780,10 +781,8 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) page_count = 0; while (entry->buf_count < count) { - - dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); - - if (!dmah) { + page = drm_alloc_pages(page_order, DRM_MEM_DMA); + if (!page) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; entry->seg_count = count; @@ -795,13 +794,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) atomic_dec(&dev->buf_alloc); return -ENOMEM; } - entry->seglist[entry->seg_count++] = dmah; + entry->seglist[entry->seg_count++] = page; for (i = 0; i < (1 << page_order); i++) { DRM_DEBUG("page %d @ 0x%08lx\n", dma->page_count + page_count, - (unsigned long)dmah->vaddr + PAGE_SIZE * i); + page + PAGE_SIZE * i); temp_pagelist[dma->page_count + page_count++] - = (unsigned long)dmah->vaddr + PAGE_SIZE * i; + = page + PAGE_SIZE * i; } for (offset = 0; offset + size <= total && entry->buf_count < count; @@ -812,8 +811,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + byte_count + offset); - buf->address = (void *)(dmah->vaddr + offset); - buf->bus_address = dmah->busaddr + offset; + buf->address = (void *)(page + offset); buf->next = NULL; buf->waiting = 0; buf->pending = 0; diff --git a/trunk/drivers/char/drm/drm_dma.c b/trunk/drivers/char/drm/drm_dma.c index 892db7096986..2afab95ca036 100644 --- a/trunk/drivers/char/drm/drm_dma.c +++ b/trunk/drivers/char/drm/drm_dma.c @@ -85,7 +85,9 @@ void drm_dma_takedown(drm_device_t * dev) dma->bufs[i].seg_count); for (j = 0; j < dma->bufs[i].seg_count; j++) { if (dma->bufs[i].seglist[j]) { - drm_pci_free(dev, dma->bufs[i].seglist[j]); + drm_free_pages(dma->bufs[i].seglist[j], + dma->bufs[i].page_order, + DRM_MEM_DMA); } } drm_free(dma->bufs[i].seglist, diff --git a/trunk/drivers/char/drm/drm_memory.c b/trunk/drivers/char/drm/drm_memory.c index dddf8de66143..8074771e348f 100644 --- a/trunk/drivers/char/drm/drm_memory.c +++ b/trunk/drivers/char/drm/drm_memory.c @@ -79,6 +79,65 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) return pt; } +/** + * Allocate pages. + * + * \param order size order. + * \param area memory area. (Not used.) + * \return page address on success, or zero on failure. + * + * Allocate and reserve free pages. + */ +unsigned long drm_alloc_pages(int order, int area) +{ + unsigned long address; + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order); + if (!address) + return 0; + + /* Zero */ + memset((void *)address, 0, bytes); + + /* Reserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + SetPageReserved(virt_to_page(addr)); + } + + return address; +} + +/** + * Free pages. + * + * \param address address of the pages to free. + * \param order size order. + * \param area memory area. (Not used.) + * + * Unreserve and free pages allocated by alloc_pages(). 
+ */ +void drm_free_pages(unsigned long address, int order, int area) +{ + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + if (!address) + return; + + /* Unreserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + } + + free_pages(address, order); +} + #if __OS_HAS_AGP /** Wrapper around agp_allocate_memory() */ DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type) diff --git a/trunk/drivers/char/drm/drm_memory_debug.h b/trunk/drivers/char/drm/drm_memory_debug.h index 7868341817da..e84605fc54af 100644 --- a/trunk/drivers/char/drm/drm_memory_debug.h +++ b/trunk/drivers/char/drm/drm_memory_debug.h @@ -206,6 +206,76 @@ void drm_free (void *pt, size_t size, int area) { } } +unsigned long drm_alloc_pages (int order, int area) { + unsigned long address; + unsigned long bytes = PAGE_SIZE << order; + unsigned long addr; + unsigned int sz; + + spin_lock(&drm_mem_lock); + if ((drm_ram_used >> PAGE_SHIFT) + > (DRM_RAM_PERCENT * drm_ram_available) / 100) { + spin_unlock(&drm_mem_lock); + return 0; + } + spin_unlock(&drm_mem_lock); + + address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order); + if (!address) { + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].fail_count; + spin_unlock(&drm_mem_lock); + return 0; + } + spin_lock(&drm_mem_lock); + ++drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_allocated += bytes; + drm_ram_used += bytes; + spin_unlock(&drm_mem_lock); + + /* Zero outside the lock */ + memset((void *)address, 0, bytes); + + /* Reserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + SetPageReserved(virt_to_page(addr)); + } + + return address; +} + +void drm_free_pages (unsigned long address, int order, int area) { + unsigned long bytes = PAGE_SIZE << order; + int alloc_count; + int free_count; + unsigned long addr; + unsigned int sz; + + if (!address) { + DRM_MEM_ERROR(area, "Attempt to free address 0\n"); + } else { + /* Unreserve */ + for (addr = address, sz = bytes; + sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + } + free_pages(address, order); + } + + spin_lock(&drm_mem_lock); + free_count = ++drm_mem_stats[area].free_count; + alloc_count = drm_mem_stats[area].succeed_count; + drm_mem_stats[area].bytes_freed += bytes; + drm_ram_used -= bytes; + spin_unlock(&drm_mem_lock); + if (free_count > alloc_count) { + DRM_MEM_ERROR(area, + "Excess frees: %d frees, %d allocs\n", + free_count, alloc_count); + } +} + void *drm_ioremap (unsigned long offset, unsigned long size, drm_device_t * dev) { void *pt; diff --git a/trunk/drivers/char/drm/drm_pci.c b/trunk/drivers/char/drm/drm_pci.c index b28ca9cea8a2..1fd7ff164817 100644 --- a/trunk/drivers/char/drm/drm_pci.c +++ b/trunk/drivers/char/drm/drm_pci.c @@ -50,10 +50,6 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, dma_addr_t maxaddr) { drm_dma_handle_t *dmah; -#if 1 - unsigned long addr; - size_t sz; -#endif #ifdef DRM_DEBUG_MEMORY int area = DRM_MEM_DMA; @@ -83,7 +79,7 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, return NULL; dmah->size = size; - dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); + dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr); #ifdef DRM_DEBUG_MEMORY if (dmah->vaddr == NULL) { @@ -108,29 +104,18 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, 
size_t align, memset(dmah->vaddr, 0, size); - /* XXX - Is virt_to_page() legal for consistent mem? */ - /* Reserve */ - for (addr = (unsigned long)dmah->vaddr, sz = size; - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - SetPageReserved(virt_to_page(addr)); - } - return dmah; } EXPORT_SYMBOL(drm_pci_alloc); /** - * \brief Free a PCI consistent memory block without freeing its descriptor. + * \brief Free a PCI consistent memory block with freeing its descriptor. * * This function is for internal use in the Linux-specific DRM core code. */ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah) { -#if 1 - unsigned long addr; - size_t sz; -#endif #ifdef DRM_DEBUG_MEMORY int area = DRM_MEM_DMA; int alloc_count; @@ -142,14 +127,8 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah) DRM_MEM_ERROR(area, "Attempt to free address 0\n"); #endif } else { - /* XXX - Is virt_to_page() legal for consistent mem? */ - /* Unreserve */ - for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - } - dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, - dmah->busaddr); + pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr, + dmah->busaddr); } #ifdef DRM_DEBUG_MEMORY diff --git a/trunk/drivers/char/drm/drm_pciids.h b/trunk/drivers/char/drm/drm_pciids.h index b1bb3c7b568d..2c17e88a8847 100644 --- a/trunk/drivers/char/drm/drm_pciids.h +++ b/trunk/drivers/char/drm/drm_pciids.h @@ -3,69 +3,49 @@ Please contact dri-devel@lists.sf.net to add new cards to this list */ #define radeon_PCI_IDS \ - {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ - {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350},\ {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP}, \ {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \ {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ - {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ - {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ - {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ - {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ - {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ - {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \ + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS250|CHIP_IS_IGP}, \ {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ - {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ - {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS250|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ + {0x1002, 0x4964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ + {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ + {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ + {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ + {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \ + {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \ {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ + {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ - {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ - {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ {0x1002, 0x4E51, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ - {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ @@ -73,66 +53,44 @@ {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x5149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ - {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ - {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ - {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ - {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ + {0x1002, 0x5168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x5169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_R350}, \ {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ + {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ + {0x1002, 0x5837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5963, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ - {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ - {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ + {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ + {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ - {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP}, \ - {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ + {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \ {0, 0, 0} #define r128_PCI_IDS \ diff --git a/trunk/drivers/char/drm/i915_dma.c b/trunk/drivers/char/drm/i915_dma.c index 9f4b8ce4c05e..1ff4c7ca0bff 100644 --- a/trunk/drivers/char/drm/i915_dma.c +++ b/trunk/drivers/char/drm/i915_dma.c @@ -495,6 +495,8 @@ 
static int i915_dispatch_batchbuffer(drm_device_t * dev, } } + dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; + i915_emit_breadcrumb(dev); return 0; diff --git a/trunk/drivers/char/drm/i915_irq.c b/trunk/drivers/char/drm/i915_irq.c index a752afd86ab8..d3879ac9970f 100644 --- a/trunk/drivers/char/drm/i915_irq.c +++ b/trunk/drivers/char/drm/i915_irq.c @@ -53,8 +53,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) I915_WRITE16(I915REG_INT_IDENTITY_R, temp); - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - if (temp & USER_INT_FLAG) DRM_WAKEUP(&dev_priv->irq_queue); diff --git a/trunk/drivers/char/drm/r300_cmdbuf.c b/trunk/drivers/char/drm/r300_cmdbuf.c index b108c7f913b2..c08fa5076f05 100644 --- a/trunk/drivers/char/drm/r300_cmdbuf.c +++ b/trunk/drivers/char/drm/r300_cmdbuf.c @@ -214,13 +214,13 @@ void r300_init_reg_flags(void) ADD_RANGE(0x4F54, 1); ADD_RANGE(R300_TX_FILTER_0, 16); - ADD_RANGE(R300_TX_FILTER1_0, 16); + ADD_RANGE(R300_TX_UNK1_0, 16); ADD_RANGE(R300_TX_SIZE_0, 16); ADD_RANGE(R300_TX_FORMAT_0, 16); ADD_RANGE(R300_TX_PITCH_0, 16); /* Texture offset is dangerous and needs more checking */ ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); - ADD_RANGE(R300_TX_CHROMA_KEY_0, 16); + ADD_RANGE(R300_TX_UNK4_0, 16); ADD_RANGE(R300_TX_BORDER_COLOR_0, 16); /* Sporadic registers used as primitives are emitted */ @@ -242,10 +242,8 @@ static __inline__ int r300_check_range(unsigned reg, int count) return 0; } -/* - * we expect offsets passed to the framebuffer to be either within video - * memory or within AGP space - */ + /* we expect offsets passed to the framebuffer to be either within video memory or + within AGP space */ static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv, u32 offset) { @@ -253,11 +251,11 @@ static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv, but this value is not being kept. 
This code is correct for now (does the same thing as the code that sets MC_FB_LOCATION) in radeon_cp.c */ - if (offset >= dev_priv->fb_location && - offset < (dev_priv->fb_location + dev_priv->fb_size)) + if ((offset >= dev_priv->fb_location) && + (offset < dev_priv->gart_vm_start)) return 0; - if (offset >= dev_priv->gart_vm_start && - offset < (dev_priv->gart_vm_start + dev_priv->gart_size)) + if ((offset >= dev_priv->gart_vm_start) && + (offset < dev_priv->gart_vm_start + dev_priv->gart_size)) return 0; return 1; } @@ -492,7 +490,6 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, return 0; } - static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { @@ -704,64 +701,6 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf) buf->used = 0; } -static int r300_scratch(drm_radeon_private_t *dev_priv, - drm_radeon_kcmd_buffer_t *cmdbuf, - drm_r300_cmd_header_t header) -{ - u32 *ref_age_base; - u32 i, buf_idx, h_pending; - RING_LOCALS; - - if (cmdbuf->bufsz < - (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { - return DRM_ERR(EINVAL); - } - - if (header.scratch.reg >= 5) { - return DRM_ERR(EINVAL); - } - - dev_priv->scratch_ages[header.scratch.reg]++; - - ref_age_base = *(u32 **)cmdbuf->buf; - - cmdbuf->buf += sizeof(u64); - cmdbuf->bufsz -= sizeof(u64); - - for (i=0; i < header.scratch.n_bufs; i++) { - buf_idx = *(u32 *)cmdbuf->buf; - buf_idx *= 2; /* 8 bytes per buf */ - - if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { - return DRM_ERR(EINVAL); - } - - if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { - return DRM_ERR(EINVAL); - } - - if (h_pending == 0) { - return DRM_ERR(EINVAL); - } - - h_pending--; - - if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { - return DRM_ERR(EINVAL); - } - - cmdbuf->buf += sizeof(buf_idx); - cmdbuf->bufsz -= sizeof(buf_idx); - } - - BEGIN_RING(2); - OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0)); - OUT_RING(dev_priv->scratch_ages[header.scratch.reg]); - ADVANCE_RING(); - - return 0; -} - /** * Parses and validates a user-supplied command buffer and emits appropriate * commands on the DMA ring buffer. @@ -899,15 +838,6 @@ int r300_do_cp_cmdbuf(drm_device_t *dev, } break; - case R300_CMD_SCRATCH: - DRM_DEBUG("R300_CMD_SCRATCH\n"); - ret = r300_scratch(dev_priv, cmdbuf, header); - if (ret) { - DRM_ERROR("r300_scratch failed\n"); - goto cleanup; - } - break; - default: DRM_ERROR("bad cmd_type %i at %p\n", header.header.cmd_type, diff --git a/trunk/drivers/char/drm/r300_reg.h b/trunk/drivers/char/drm/r300_reg.h index a881f96c983e..d1e19954406b 100644 --- a/trunk/drivers/char/drm/r300_reg.h +++ b/trunk/drivers/char/drm/r300_reg.h @@ -711,22 +711,8 @@ I am fairly certain that they are correct unless stated otherwise in comments. 
# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21) # define R300_TX_MAX_ANISO_MASK (14 << 21) -#define R300_TX_FILTER1_0 0x4440 -# define R300_CHROMA_KEY_MODE_DISABLE 0 -# define R300_CHROMA_KEY_FORCE 1 -# define R300_CHROMA_KEY_BLEND 2 -# define R300_MC_ROUND_NORMAL (0<<2) -# define R300_MC_ROUND_MPEG4 (1<<2) +#define R300_TX_UNK1_0 0x4440 # define R300_LOD_BIAS_MASK 0x1fff -# define R300_EDGE_ANISO_EDGE_DIAG (0<<13) -# define R300_EDGE_ANISO_EDGE_ONLY (1<<13) -# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14) -# define R300_MC_COORD_TRUNCATE_MPEG (1<<14) -# define R300_TX_TRI_PERF_0_8 (0<<15) -# define R300_TX_TRI_PERF_1_8 (1<<15) -# define R300_TX_TRI_PERF_1_4 (2<<15) -# define R300_TX_TRI_PERF_3_8 (3<<15) -# define R300_ANISO_THRESHOLD_MASK (7<<17) #define R300_TX_SIZE_0 0x4480 # define R300_TX_WIDTHMASK_SHIFT 0 @@ -736,8 +722,6 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_TX_UNK23 (1 << 23) # define R300_TX_SIZE_SHIFT 26 /* largest of width, height */ # define R300_TX_SIZE_MASK (15 << 26) -# define R300_TX_SIZE_PROJECTED (1<<30) -# define R300_TX_SIZE_TXPITCH_EN (1<<31) #define R300_TX_FORMAT_0 0x44C0 /* The interpretation of the format word by Wladimir van der Laan */ /* The X, Y, Z and W refer to the layout of the components. @@ -766,8 +750,7 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ # define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ /* 0x16 - some 16 bit green format.. ?? */ -# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ -# define R300_TX_FORMAT_CUBIC_MAP (1 << 26) +# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ /* gap */ /* Floating point formats */ @@ -817,20 +800,18 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_TX_FORMAT_YUV_MODE 0x00800000 -#define R300_TX_PITCH_0 0x4500 /* obvious missing in gap */ +#define R300_TX_PITCH_0 0x4500 #define R300_TX_OFFSET_0 0x4540 /* BEGIN: Guess from R200 */ # define R300_TXO_ENDIAN_NO_SWAP (0 << 0) # define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0) # define R300_TXO_ENDIAN_WORD_SWAP (2 << 0) # define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0) -# define R300_TXO_MACRO_TILE (1 << 2) -# define R300_TXO_MICRO_TILE (1 << 3) # define R300_TXO_OFFSET_MASK 0xffffffe0 # define R300_TXO_OFFSET_SHIFT 5 /* END */ -#define R300_TX_CHROMA_KEY_0 0x4580 /* 32 bit chroma key */ -#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 } +#define R300_TX_UNK4_0 0x4580 +#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 } /* END */ @@ -887,9 +868,7 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12) # define R300_PFS_NODE_TEX_END_SHIFT 17 # define R300_PFS_NODE_TEX_END_MASK (31 << 17) -/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */ -# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22) -# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23) +# define R300_PFS_NODE_LAST_NODE (1 << 22) /* TEX // As far as I can tell, texture instructions cannot write into output @@ -908,7 +887,6 @@ I am fairly certain that they are correct unless stated otherwise in comments. */ # define R300_FPITX_OPCODE_SHIFT 15 # define R300_FPITX_OP_TEX 1 -# define R300_FPITX_OP_KIL 2 # define R300_FPITX_OP_TXP 3 # define R300_FPITX_OP_TXB 4 @@ -984,11 +962,9 @@ I am fairly certain that they are correct unless stated otherwise in comments. 
# define R300_FPI1_SRC2C_CONST (1 << 17) # define R300_FPI1_DSTC_SHIFT 18 # define R300_FPI1_DSTC_MASK (31 << 18) -# define R300_FPI1_DSTC_REG_MASK_SHIFT 23 # define R300_FPI1_DSTC_REG_X (1 << 23) # define R300_FPI1_DSTC_REG_Y (1 << 24) # define R300_FPI1_DSTC_REG_Z (1 << 25) -# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26 # define R300_FPI1_DSTC_OUTPUT_X (1 << 26) # define R300_FPI1_DSTC_OUTPUT_Y (1 << 27) # define R300_FPI1_DSTC_OUTPUT_Z (1 << 28) @@ -1007,7 +983,6 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_FPI3_DSTA_MASK (31 << 18) # define R300_FPI3_DSTA_REG (1 << 23) # define R300_FPI3_DSTA_OUTPUT (1 << 24) -# define R300_FPI3_DSTA_DEPTH (1 << 27) #define R300_PFS_INSTR0_0 0x48C0 # define R300_FPI0_ARGC_SRC0C_XYZ 0 @@ -1061,7 +1036,7 @@ I am fairly certain that they are correct unless stated otherwise in comments. # define R300_FPI0_OUTC_FRC (9 << 23) # define R300_FPI0_OUTC_REPL_ALPHA (10 << 23) # define R300_FPI0_OUTC_SAT (1 << 30) -# define R300_FPI0_INSERT_NOP (1 << 31) +# define R300_FPI0_UNKNOWN_31 (1 << 31) #define R300_PFS_INSTR2_0 0x49C0 # define R300_FPI2_ARGA_SRC0C_X 0 diff --git a/trunk/drivers/char/drm/radeon_cp.c b/trunk/drivers/char/drm/radeon_cp.c index 7f949c9c9691..9bb8ae0c1c27 100644 --- a/trunk/drivers/char/drm/radeon_cp.c +++ b/trunk/drivers/char/drm/radeon_cp.c @@ -1118,20 +1118,14 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, { u32 ring_start, cur_read_ptr; u32 tmp; - - /* Initialize the memory controller. With new memory map, the fb location - * is not changed, it should have been properly initialized already. Part - * of the problem is that the code below is bogus, assuming the GART is - * always appended to the fb which is not necessarily the case - */ - if (!dev_priv->new_memmap) - RADEON_WRITE(RADEON_MC_FB_LOCATION, - ((dev_priv->gart_vm_start - 1) & 0xffff0000) - | (dev_priv->fb_location >> 16)); + + /* Initialize the memory controller */ + RADEON_WRITE(RADEON_MC_FB_LOCATION, + ((dev_priv->gart_vm_start - 1) & 0xffff0000) + | (dev_priv->fb_location >> 16)); #if __OS_HAS_AGP if (dev_priv->flags & CHIP_IS_AGP) { - RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); RADEON_WRITE(RADEON_MC_AGP_LOCATION, (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | @@ -1159,6 +1153,8 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, #if __OS_HAS_AGP if (dev_priv->flags & CHIP_IS_AGP) { + /* set RADEON_AGP_BASE here instead of relying on X from user space */ + RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - dev->agp->base + dev_priv->gart_vm_start); @@ -1178,17 +1174,6 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, entry->handle + tmp_ofs); } - /* Set ring buffer size */ -#ifdef __BIG_ENDIAN - RADEON_WRITE(RADEON_CP_RB_CNTL, - dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT); -#else - RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw); -#endif - - /* Start with assuming that writeback doesn't work */ - dev_priv->writeback_works = 0; - /* Initialize the scratch register pointer. This will cause * the scratch register values to be written out to memory * whenever they are updated. 
@@ -1205,9 +1190,28 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); - /* Turn on bus mastering */ - tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; - RADEON_WRITE(RADEON_BUS_CNTL, tmp); + /* Writeback doesn't seem to work everywhere, test it first */ + DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); + RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); + + for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { + if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == + 0xdeadbeef) + break; + DRM_UDELAY(1); + } + + if (tmp < dev_priv->usec_timeout) { + dev_priv->writeback_works = 1; + DRM_DEBUG("writeback test succeeded, tmp=%d\n", tmp); + } else { + dev_priv->writeback_works = 0; + DRM_DEBUG("writeback test failed\n"); + } + if (radeon_no_wb == 1) { + dev_priv->writeback_works = 0; + DRM_DEBUG("writeback forced off\n"); + } dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); @@ -1219,45 +1223,26 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); + /* Set ring buffer size */ +#ifdef __BIG_ENDIAN + RADEON_WRITE(RADEON_CP_RB_CNTL, + dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT); +#else + RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw); +#endif + radeon_do_wait_for_idle(dev_priv); + /* Turn on bus mastering */ + tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; + RADEON_WRITE(RADEON_BUS_CNTL, tmp); + /* Sync everything up */ RADEON_WRITE(RADEON_ISYNC_CNTL, (RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI)); - -} - -static void radeon_test_writeback(drm_radeon_private_t * dev_priv) -{ - u32 tmp; - - /* Writeback doesn't seem to work everywhere, test it here and possibly - * enable it if it appears to work - */ - DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); - RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); - - for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { - if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == - 0xdeadbeef) - break; - DRM_UDELAY(1); - } - - if (tmp < dev_priv->usec_timeout) { - dev_priv->writeback_works = 1; - DRM_INFO("writeback test succeeded in %d usecs\n", tmp); - } else { - dev_priv->writeback_works = 0; - DRM_INFO("writeback test failed\n"); - } - if (radeon_no_wb == 1) { - dev_priv->writeback_works = 0; - DRM_INFO("writeback forced off\n"); - } } /* Enable or disable PCI-E GART on the chip */ @@ -1332,14 +1317,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) DRM_DEBUG("\n"); - /* if we require new memory map but we don't have it fail */ - if ((dev_priv->flags & CHIP_NEW_MEMMAP) && !dev_priv->new_memmap) - { - DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX\n"); - radeon_do_cleanup_cp(dev); - return DRM_ERR(EINVAL); - } - if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP)) { DRM_DEBUG("Forcing AGP card to PCI mode\n"); @@ -1519,9 +1496,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff) << 16; - dev_priv->fb_size = - ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000) - - dev_priv->fb_location; dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | ((dev_priv->front_offset @@ 
-1536,46 +1510,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) + dev_priv->fb_location) >> 10)); dev_priv->gart_size = init->gart_size; - - /* New let's set the memory map ... */ - if (dev_priv->new_memmap) { - u32 base = 0; - - DRM_INFO("Setting GART location based on new memory map\n"); - - /* If using AGP, try to locate the AGP aperture at the same - * location in the card and on the bus, though we have to - * align it down. - */ -#if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { - base = dev->agp->base; - /* Check if valid */ - if ((base + dev_priv->gart_size) > dev_priv->fb_location && - base < (dev_priv->fb_location + dev_priv->fb_size)) { - DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", - dev->agp->base); - base = 0; - } - } -#endif - /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ - if (base == 0) { - base = dev_priv->fb_location + dev_priv->fb_size; - if (((base + dev_priv->gart_size) & 0xfffffffful) - < base) - base = dev_priv->fb_location - - dev_priv->gart_size; - } - dev_priv->gart_vm_start = base & 0xffc00000u; - if (dev_priv->gart_vm_start != base) - DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", - base, dev_priv->gart_vm_start); - } else { - DRM_INFO("Setting GART location based on old memory map\n"); - dev_priv->gart_vm_start = dev_priv->fb_location + - RADEON_READ(RADEON_CONFIG_APER_SIZE); - } + dev_priv->gart_vm_start = dev_priv->fb_location + + RADEON_READ(RADEON_CONFIG_APER_SIZE); #if __OS_HAS_AGP if (dev_priv->flags & CHIP_IS_AGP) @@ -1660,7 +1596,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) dev_priv->last_buf = 0; radeon_do_engine_reset(dev); - radeon_test_writeback(dev_priv); return 0; } diff --git a/trunk/drivers/char/drm/radeon_drm.h b/trunk/drivers/char/drm/radeon_drm.h index c8e279e89c2e..9c177a6b2a4c 100644 --- a/trunk/drivers/char/drm/radeon_drm.h +++ b/trunk/drivers/char/drm/radeon_drm.h @@ -222,7 +222,6 @@ typedef union { # define R300_WAIT_3D 0x2 # define R300_WAIT_2D_CLEAN 0x3 # define R300_WAIT_3D_CLEAN 0x4 -#define R300_CMD_SCRATCH 8 typedef union { unsigned int u; @@ -248,9 +247,6 @@ typedef union { struct { unsigned char cmd_type, flags, pad0, pad1; } wait; - struct { - unsigned char cmd_type, reg, n_bufs, flags; - } scratch; } drm_r300_cmd_header_t; #define RADEON_FRONT 0x1 @@ -701,7 +697,6 @@ typedef struct drm_radeon_setparam { #define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */ #define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */ #define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */ -#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ /* 1.14: Clients can allocate/free a surface */ diff --git a/trunk/drivers/char/drm/radeon_drv.h b/trunk/drivers/char/drm/radeon_drv.h index 78345cee8f8e..1f7d2ab8c4fc 100644 --- a/trunk/drivers/char/drm/radeon_drv.h +++ b/trunk/drivers/char/drm/radeon_drv.h @@ -38,7 +38,7 @@ #define DRIVER_NAME "radeon" #define DRIVER_DESC "ATI Radeon" -#define DRIVER_DATE "20060225" +#define DRIVER_DATE "20051229" /* Interface history: * @@ -91,11 +91,9 @@ * 1.20- Add support for r300 texrect * 1.21- Add support for card type getparam * 1.22- Add support for texture cache flushes (R300_TX_CNTL) - * 1.23- Add new radeon memory map work from benh - * 1.24- Add general-purpose packet for manipulating scratch registers (r300) */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 24 +#define DRIVER_MINOR 22 #define DRIVER_PATCHLEVEL 0 /* @@ -103,21 +101,20 @@ */ enum 
radeon_family { CHIP_R100, - CHIP_RV100, CHIP_RS100, + CHIP_RV100, CHIP_RV200, - CHIP_RS200, CHIP_R200, + CHIP_RS200, + CHIP_R250, + CHIP_RS250, CHIP_RV250, - CHIP_RS300, CHIP_RV280, CHIP_R300, + CHIP_RS300, CHIP_R350, CHIP_RV350, - CHIP_RV380, CHIP_R420, - CHIP_RV410, - CHIP_RS400, CHIP_LAST, }; @@ -139,11 +136,9 @@ enum radeon_chip_flags { CHIP_IS_AGP = 0x00080000UL, CHIP_HAS_HIERZ = 0x00100000UL, CHIP_IS_PCIE = 0x00200000UL, - CHIP_NEW_MEMMAP = 0x00400000UL, }; -#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \ - DRM_READ32( (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR)) +#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) #define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) ) typedef struct drm_radeon_freelist { @@ -204,8 +199,6 @@ typedef struct drm_radeon_private { drm_radeon_sarea_t *sarea_priv; u32 fb_location; - u32 fb_size; - int new_memmap; int gart_size; u32 gart_vm_start; @@ -279,8 +272,6 @@ typedef struct drm_radeon_private { unsigned long pcigart_offset; drm_ati_pcigart_info gart_info; - u32 scratch_ages[5]; - /* starting from here on, data is preserved accross an open */ uint32_t flags; /* see radeon_chip_flags */ } drm_radeon_private_t; diff --git a/trunk/drivers/char/drm/radeon_state.c b/trunk/drivers/char/drm/radeon_state.c index c5b8f774a599..7bc27516d425 100644 --- a/trunk/drivers/char/drm/radeon_state.c +++ b/trunk/drivers/char/drm/radeon_state.c @@ -45,53 +45,22 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * u32 off = *offset; struct drm_radeon_driver_file_fields *radeon_priv; - /* Hrm ... the story of the offset ... So this function converts - * the various ideas of what userland clients might have for an - * offset in the card address space into an offset into the card - * address space :) So with a sane client, it should just keep - * the value intact and just do some boundary checking. However, - * not all clients are sane. Some older clients pass us 0 based - * offsets relative to the start of the framebuffer and some may - * assume the AGP aperture it appended to the framebuffer, so we - * try to detect those cases and fix them up. - * - * Note: It might be a good idea here to make sure the offset lands - * in some "allowed" area to protect things like the PCIE GART... - */ - - /* First, the best case, the offset already lands in either the - * framebuffer or the GART mapped space - */ - if ((off >= dev_priv->fb_location && - off < (dev_priv->fb_location + dev_priv->fb_size)) || - (off >= dev_priv->gart_vm_start && - off < (dev_priv->gart_vm_start + dev_priv->gart_size))) + if (off >= dev_priv->fb_location && + off < (dev_priv->gart_vm_start + dev_priv->gart_size)) return 0; - /* Ok, that didn't happen... 
now check if we have a zero based - * offset that fits in the framebuffer + gart space, apply the - * magic offset we get from SETPARAM or calculated from fb_location - */ - if (off < (dev_priv->fb_size + dev_priv->gart_size)) { - radeon_priv = filp_priv->driver_priv; - off += radeon_priv->radeon_fb_delta; - } + radeon_priv = filp_priv->driver_priv; + off += radeon_priv->radeon_fb_delta; - /* Finally, assume we aimed at a GART offset if beyond the fb */ - if (off > (dev_priv->fb_location + dev_priv->fb_size)) - off = off - (dev_priv->fb_location + dev_priv->fb_size) + - dev_priv->gart_vm_start; + DRM_DEBUG("offset fixed up to 0x%x\n", off); - /* Now recheck and fail if out of bounds */ - if ((off >= dev_priv->fb_location && - off < (dev_priv->fb_location + dev_priv->fb_size)) || - (off >= dev_priv->gart_vm_start && - off < (dev_priv->gart_vm_start + dev_priv->gart_size))) { - DRM_DEBUG("offset fixed up to 0x%x\n", off); - *offset = off; - return 0; - } - return DRM_ERR(EINVAL); + if (off < dev_priv->fb_location || + off >= (dev_priv->gart_vm_start + dev_priv->gart_size)) + return DRM_ERR(EINVAL); + + *offset = off; + + return 0; } static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * @@ -1970,6 +1939,11 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS) drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_surface_alloc_t alloc; + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_surface_alloc_t __user *) data, sizeof(alloc)); @@ -1986,7 +1960,12 @@ static int radeon_surface_free(DRM_IOCTL_ARGS) drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_surface_free_t memfree; - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data, + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + + DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, sizeof(memfree)); if (free_surface(filp, dev_priv, memfree.address)) @@ -2121,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, filp); + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data, @@ -2205,6 +2189,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, filp); + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data, @@ -2351,6 +2340,11 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, filp); + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_COPY_FROM_USER_IOCTL(indirect, (drm_radeon_indirect_t __user *) data, sizeof(indirect)); @@ -2423,6 +2417,11 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, filp); + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data, @@ -2739,6 +2738,11 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) LOCK_TEST_WITH_RETURN(dev, filp); + if (!dev_priv) { + DRM_ERROR("%s called with no 
initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); DRM_COPY_FROM_USER_IOCTL(cmdbuf, @@ -2893,6 +2897,11 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) drm_radeon_getparam_t param; int value; + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data, sizeof(param)); @@ -2972,6 +2981,11 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) drm_radeon_setparam_t sp; struct drm_radeon_driver_file_fields *radeon_priv; + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data, @@ -2998,9 +3012,6 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) case RADEON_SETPARAM_PCIGART_LOCATION: dev_priv->pcigart_offset = sp.value; break; - case RADEON_SETPARAM_NEW_MEMMAP: - dev_priv->new_memmap = sp.value; - break; default: DRM_DEBUG("Invalid parameter %d\n", sp.param); return DRM_ERR(EINVAL); diff --git a/trunk/drivers/char/drm/sis_mm.c b/trunk/drivers/char/drm/sis_mm.c index 5e9936bc307f..6774d2fe3452 100644 --- a/trunk/drivers/char/drm/sis_mm.c +++ b/trunk/drivers/char/drm/sis_mm.c @@ -110,7 +110,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); - DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset); + DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset); return retval; } diff --git a/trunk/drivers/infiniband/core/mad.c b/trunk/drivers/infiniband/core/mad.c index ba54c856b0e5..f7854b65fd55 100644 --- a/trunk/drivers/infiniband/core/mad.c +++ b/trunk/drivers/infiniband/core/mad.c @@ -227,14 +227,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (!is_vendor_oui(mad_reg_req->oui)) goto error1; } - /* Make sure class supplied is consistent with RMPP */ - if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { - if (!rmpp_version) - goto error1; - } else { - if (rmpp_version) - goto error1; - } /* Make sure class supplied is consistent with QP type */ if (qp_type == IB_QPT_SMI) { if ((mad_reg_req->mgmt_class != @@ -898,35 +890,6 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, } EXPORT_SYMBOL(ib_create_send_mad); -int ib_get_mad_data_offset(u8 mgmt_class) -{ - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) - return IB_MGMT_SA_HDR; - else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || - (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || - (mgmt_class == IB_MGMT_CLASS_BIS)) - return IB_MGMT_DEVICE_HDR; - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) - return IB_MGMT_VENDOR_HDR; - else - return IB_MGMT_MAD_HDR; -} -EXPORT_SYMBOL(ib_get_mad_data_offset); - -int ib_is_mad_class_rmpp(u8 mgmt_class) -{ - if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || - (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || - (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || - (mgmt_class == IB_MGMT_CLASS_BIS) || - ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) - return 1; - return 0; -} -EXPORT_SYMBOL(ib_is_mad_class_rmpp); - void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) { struct ib_mad_send_wr_private *mad_send_wr; @@ -1059,13 +1022,6 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, goto error; } - if 
(!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { - if (mad_agent_priv->agent.rmpp_version) { - ret = -EINVAL; - goto error; - } - } - /* * Save pointer to next work request to post in case the * current one completes, and the user modifies the work @@ -1662,59 +1618,14 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); } -static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc) -{ - return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class == - rwc->recv_buf.mad->mad_hdr.mgmt_class; -} - -static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc ) -{ - struct ib_ah_attr attr; - u8 send_resp, rcv_resp; - - send_resp = ((struct ib_mad *)(wr->send_buf.mad))-> - mad_hdr.method & IB_MGMT_METHOD_RESP; - rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP; - - if (!send_resp && rcv_resp) - /* is request/response. GID/LIDs are both local (same). */ - return 1; - - if (send_resp == rcv_resp) - /* both requests, or both responses. GIDs different */ - return 0; - - if (ib_query_ah(wr->send_buf.ah, &attr)) - /* Assume not equal, to avoid false positives. */ - return 0; - - if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH)) - return attr.dlid == rwc->wc->slid; - else if ((attr.ah_flags & IB_AH_GRH) && - (rwc->wc->wc_flags & IB_WC_GRH)) - return memcmp(attr.grh.dgid.raw, - rwc->recv_buf.grh->sgid.raw, 16) == 0; - else - /* one has GID, other does not. Assume different */ - return 0; -} struct ib_mad_send_wr_private* -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *mad_recv_wc) +ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid) { struct ib_mad_send_wr_private *mad_send_wr; - struct ib_mad *mad; - - mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad; list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, agent_list) { - if ((mad_send_wr->tid == mad->mad_hdr.tid) && - rcv_has_same_class(mad_send_wr, mad_recv_wc) && - rcv_has_same_gid(mad_send_wr, mad_recv_wc)) + if (mad_send_wr->tid == tid) return mad_send_wr; } @@ -1725,10 +1636,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && - mad_send_wr->tid == mad->mad_hdr.tid && - mad_send_wr->timeout && - rcv_has_same_class(mad_send_wr, mad_recv_wc) && - rcv_has_same_gid(mad_send_wr, mad_recv_wc)) { + mad_send_wr->tid == tid && mad_send_wr->timeout) { /* Verify request has not been canceled */ return (mad_send_wr->status == IB_WC_SUCCESS) ? 
mad_send_wr : NULL; @@ -1753,6 +1661,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; + __be64 tid; INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); @@ -1768,8 +1677,9 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, /* Complete corresponding request */ if (response_mad(mad_recv_wc->recv_buf.mad)) { + tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid; spin_lock_irqsave(&mad_agent_priv->lock, flags); - mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); + mad_send_wr = ib_find_send_mad(mad_agent_priv, tid); if (!mad_send_wr) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); ib_free_recv_mad(mad_recv_wc); @@ -2498,11 +2408,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } } sg_list.addr = dma_map_single(qp_info->port_priv-> - device->dma_device, - &mad_priv->grh, - sizeof *mad_priv - - sizeof mad_priv->header, - DMA_FROM_DEVICE); + device->dma_device, + &mad_priv->grh, + sizeof *mad_priv - + sizeof mad_priv->header, + DMA_FROM_DEVICE); pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; mad_priv->header.mad_list.mad_queue = recv_queue; diff --git a/trunk/drivers/infiniband/core/mad_priv.h b/trunk/drivers/infiniband/core/mad_priv.h index 6c9c133d71ef..a7125d4b5ccf 100644 --- a/trunk/drivers/infiniband/core/mad_priv.h +++ b/trunk/drivers/infiniband/core/mad_priv.h @@ -216,8 +216,7 @@ extern kmem_cache_t *ib_mad_cache; int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); struct ib_mad_send_wr_private * -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *mad_recv_wc); +ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid); void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc); diff --git a/trunk/drivers/infiniband/core/mad_rmpp.c b/trunk/drivers/infiniband/core/mad_rmpp.c index dfd4e588ce03..bacfdd5bddad 100644 --- a/trunk/drivers/infiniband/core/mad_rmpp.c +++ b/trunk/drivers/infiniband/core/mad_rmpp.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005 Intel Inc. All rights reserved. - * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -100,6 +100,17 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) } } +static int data_offset(u8 mgmt_class) +{ + if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) + return IB_MGMT_SA_HDR; + else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) + return IB_MGMT_VENDOR_HDR; + else + return IB_MGMT_RMPP_HDR; +} + static void format_ack(struct ib_mad_send_buf *msg, struct ib_rmpp_mad *data, struct mad_rmpp_recv *rmpp_recv) @@ -126,7 +137,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_send_buf *msg; int ret, hdr_len; - hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); + hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL); @@ -152,7 +163,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, if (IS_ERR(ah)) return (void *) ah; - hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); + hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL); @@ -397,7 +408,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; - hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); + hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class); data_size = sizeof(struct ib_rmpp_mad) - hdr_size; pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > IB_MGMT_RMPP_DATA || pad < 0) @@ -551,15 +562,15 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) return ib_send_mad(mad_send_wr); } -static void abort_send(struct ib_mad_agent_private *agent, - struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status) +static void abort_send(struct ib_mad_agent_private *agent, __be64 tid, + u8 rmpp_status) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc wc; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); - mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); + mad_send_wr = ib_find_send_mad(agent, tid); if (!mad_send_wr) goto out; /* Unmatched send */ @@ -601,7 +612,8 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status) { - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); return; } @@ -609,13 +621,14 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (newwin < seg_num) { - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_W2S); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); return; } spin_lock_irqsave(&agent->lock, flags); - mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); + mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); if (!mad_send_wr) goto out; /* Unmatched ACK */ @@ -626,7 +639,8 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, if (seg_num > mad_send_wr->send_buf.seg_count || seg_num > 
mad_send_wr->newwin) { spin_unlock_irqrestore(&agent->lock, flags); - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_S2B); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); return; } @@ -714,10 +728,12 @@ static void process_rmpp_stop(struct ib_mad_agent_private *agent, rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else - abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); + abort_send(agent, rmpp_mad->mad_hdr.tid, + rmpp_mad->rmpp_hdr.rmpp_status); } static void process_rmpp_abort(struct ib_mad_agent_private *agent, @@ -729,10 +745,12 @@ static void process_rmpp_abort(struct ib_mad_agent_private *agent, if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else - abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); + abort_send(agent, rmpp_mad->mad_hdr.tid, + rmpp_mad->rmpp_hdr.rmpp_status); } struct ib_mad_recv_wc * @@ -746,7 +764,8 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, return mad_recv_wc; if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_UNV); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); goto out; } @@ -764,7 +783,8 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, process_rmpp_abort(agent, mad_recv_wc); break; default: - abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BADT); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); break; } diff --git a/trunk/drivers/infiniband/core/user_mad.c b/trunk/drivers/infiniband/core/user_mad.c index afe70a549c2f..fb6cd42601f9 100644 --- a/trunk/drivers/infiniband/core/user_mad.c +++ b/trunk/drivers/infiniband/core/user_mad.c @@ -177,6 +177,17 @@ static int queue_packet(struct ib_umad_file *file, return ret; } +static int data_offset(u8 mgmt_class) +{ + if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) + return IB_MGMT_SA_HDR; + else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) + return IB_MGMT_VENDOR_HDR; + else + return IB_MGMT_RMPP_HDR; +} + static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *send_wc) { @@ -272,7 +283,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet, */ return -ENOSPC; } - offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); + offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class); max_seg_payload = sizeof (struct ib_mad) - offset; for (left = packet->length - seg_payload, buf += seg_payload; @@ -430,14 +441,21 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, } rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; - hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); - if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) { - 
copy_offset = IB_MGMT_MAD_HDR; - rmpp_active = 0; - } else { + if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { + hdr_len = IB_MGMT_SA_HDR; copy_offset = IB_MGMT_RMPP_HDR; rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE; + } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START && + rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) { + hdr_len = IB_MGMT_VENDOR_HDR; + copy_offset = IB_MGMT_RMPP_HDR; + rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & + IB_MGMT_RMPP_FLAG_ACTIVE; + } else { + hdr_len = IB_MGMT_MAD_HDR; + copy_offset = IB_MGMT_MAD_HDR; + rmpp_active = 0; } data_len = count - sizeof (struct ib_user_mad) - hdr_len; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_av.c b/trunk/drivers/infiniband/hw/mthca/mthca_av.c index bc5bdcbe51b5..f023d3936518 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_av.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_av.c @@ -265,7 +265,7 @@ int __devinit mthca_init_av_table(struct mthca_dev *dev) return -ENOMEM; } -void mthca_cleanup_av_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_av_table(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) return; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_cq.c b/trunk/drivers/infiniband/hw/mthca/mthca_cq.c index 312cf90731ea..76aabc5bf371 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_cq.c @@ -973,7 +973,7 @@ int __devinit mthca_init_cq_table(struct mthca_dev *dev) return err; } -void mthca_cleanup_cq_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev) { mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); mthca_alloc_cleanup(&dev->cq_table.alloc); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_eq.c b/trunk/drivers/infiniband/hw/mthca/mthca_eq.c index 99f109c3815d..cbdc348fb689 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_eq.c @@ -765,7 +765,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev) } -static void mthca_unmap_eq_regs(struct mthca_dev *dev) +static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) { mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & @@ -821,7 +821,7 @@ int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) return ret; } -void mthca_unmap_eq_icm(struct mthca_dev *dev) +void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev) { u8 status; @@ -954,7 +954,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev) return err; } -void mthca_cleanup_eq_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev) { u8 status; int i; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mad.c b/trunk/drivers/infiniband/hw/mthca/mthca_mad.c index dfb482eac9a2..4ace6a392f41 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_mad.c @@ -271,7 +271,7 @@ int mthca_create_agents(struct mthca_dev *dev) return PTR_ERR(agent); } -void __devexit mthca_free_agents(struct mthca_dev *dev) +void mthca_free_agents(struct mthca_dev *dev) { struct ib_mad_agent *agent; int p, q; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c b/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c index 47ca8a9b7247..9965bda9afed 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c @@ -388,7 +388,7 @@ int __devinit 
mthca_init_mcg_table(struct mthca_dev *dev) return 0; } -void mthca_cleanup_mcg_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_mcg_table(struct mthca_dev *dev) { mthca_alloc_cleanup(&dev->mcg_table.alloc); } diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mr.c b/trunk/drivers/infiniband/hw/mthca/mthca_mr.c index 25e1c1db9a40..698b62125765 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_mr.c @@ -170,7 +170,7 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order) return -ENOMEM; } -static void mthca_buddy_cleanup(struct mthca_buddy *buddy) +static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy) { int i; @@ -866,7 +866,7 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) return err; } -void mthca_cleanup_mr_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev) { /* XXX check if any MRs are still allocated? */ if (dev->limits.fmr_reserved_mtts) diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_pd.c b/trunk/drivers/infiniband/hw/mthca/mthca_pd.c index 59df51614c85..105fc5faaddf 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_pd.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_pd.c @@ -77,7 +77,7 @@ int __devinit mthca_init_pd_table(struct mthca_dev *dev) dev->limits.reserved_pds); } -void mthca_cleanup_pd_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_pd_table(struct mthca_dev *dev) { /* XXX check if any PDs are still allocated? */ mthca_alloc_cleanup(&dev->pd_table.alloc); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c index 057c8e6af87b..1bc2678c2fae 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c @@ -2204,7 +2204,7 @@ int __devinit mthca_init_qp_table(struct mthca_dev *dev) return err; } -void mthca_cleanup_qp_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev) { int i; u8 status; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_srq.c b/trunk/drivers/infiniband/hw/mthca/mthca_srq.c index 2dd3aea05341..0cfd15802217 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_srq.c @@ -206,7 +206,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, roundup_pow_of_two(sizeof (struct mthca_next_seg) + srq->max_gs * sizeof (struct mthca_data_seg))); - if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) + if (ds > dev->limits.max_desc_sz) return -EINVAL; srq->wqe_shift = long_log2(ds); @@ -684,7 +684,7 @@ int __devinit mthca_init_srq_table(struct mthca_dev *dev) return err; } -void mthca_cleanup_srq_table(struct mthca_dev *dev) +void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev) { if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) return; diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index 9b0bd7c746ca..53a32f65788d 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -723,7 +723,7 @@ static int ipoib_hard_header(struct sk_buff *skb, * destination address onto the front of the skb so we can * figure out where to send the packet later. 
*/ - if ((!skb->dst || !skb->dst->neighbour) && daddr) { + if (!skb->dst || !skb->dst->neighbour) { struct ipoib_pseudoheader *phdr = (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index fd8a95a9c5d3..61924cc30e55 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -607,10 +607,10 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, */ if (likely(scmnd->use_sg)) { nents = scmnd->use_sg; - scat = scmnd->request_buffer; + scat = (struct scatterlist *) scmnd->request_buffer; } else { nents = 1; - scat = &req->fake_sg; + scat = (struct scatterlist *) scmnd->request_buffer; } dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, diff --git a/trunk/drivers/parisc/pdc_stable.c b/trunk/drivers/parisc/pdc_stable.c index a28e17898fbd..4e53be9c03ab 100644 --- a/trunk/drivers/parisc/pdc_stable.c +++ b/trunk/drivers/parisc/pdc_stable.c @@ -4,9 +4,8 @@ * Copyright (C) 2005-2006 Thibaut VARENE * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/trunk/drivers/scsi/libata-bmdma.c b/trunk/drivers/scsi/libata-bmdma.c index 835dff0bafdc..95d81d86d8b7 100644 --- a/trunk/drivers/scsi/libata-bmdma.c +++ b/trunk/drivers/scsi/libata-bmdma.c @@ -703,7 +703,6 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); int p = 0; - unsigned long bmdma; if (!probe_ent) return NULL; @@ -717,12 +716,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int probe_ent->port[p].altstatus_addr = probe_ent->port[p].ctl_addr = pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; - bmdma = pci_resource_start(pdev, 4); - if (bmdma) { - if (inb(bmdma + 2) & 0x80) - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; - probe_ent->port[p].bmdma_addr = bmdma; - } + probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4); ata_std_ports(&probe_ent->port[p]); p++; } @@ -732,13 +726,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int probe_ent->port[p].altstatus_addr = probe_ent->port[p].ctl_addr = pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; - bmdma = pci_resource_start(pdev, 4); - if (bmdma) { - bmdma += 8; - if(inb(bmdma + 2) & 0x80) - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; - probe_ent->port[p].bmdma_addr = bmdma; - } + probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8; ata_std_ports(&probe_ent->port[p]); p++; } @@ -752,7 +740,6 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num) { struct ata_probe_ent *probe_ent; - unsigned long bmdma; probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port); if (!probe_ent) @@ -779,13 +766,8 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, break; } - bmdma = pci_resource_start(pdev, 4); - if (bmdma != 0) { - bmdma += 8 * port_num; - 
probe_ent->port[0].bmdma_addr = bmdma; - if (inb(bmdma + 2) & 0x80) - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; - } + probe_ent->port[0].bmdma_addr = + pci_resource_start(pdev, 4) + 8 * port_num; ata_std_ports(&probe_ent->port[0]); return probe_ent; diff --git a/trunk/drivers/scsi/libata-core.c b/trunk/drivers/scsi/libata-core.c index 21b0ed583b8a..d279666dcb38 100644 --- a/trunk/drivers/scsi/libata-core.c +++ b/trunk/drivers/scsi/libata-core.c @@ -62,9 +62,7 @@ #include "libata.h" static unsigned int ata_dev_init_params(struct ata_port *ap, - struct ata_device *dev, - u16 heads, - u16 sectors); + struct ata_device *dev); static void ata_set_mode(struct ata_port *ap); static unsigned int ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); @@ -1083,8 +1081,9 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) * * Read ID data from the specified device. ATA_CMD_ID_ATA is * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI - * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS - * for pre-ATA4 drives. + * devices. This function also takes care of EDD signature + * misreporting (to be removed once EDD support is gone) and + * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives. * * LOCKING: * Kernel thread context (may sleep) @@ -1096,6 +1095,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, unsigned int *p_class, int post_reset, u16 **p_id) { unsigned int class = *p_class; + unsigned int using_edd; struct ata_taskfile tf; unsigned int err_mask = 0; u16 *id; @@ -1104,6 +1104,12 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno); + if (ap->ops->probe_reset || + ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) + using_edd = 0; + else + using_edd = 1; + ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL); @@ -1133,16 +1139,39 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, id, sizeof(id[0]) * ATA_ID_WORDS); + if (err_mask) { rc = -EIO; reason = "I/O error"; + + if (err_mask & ~AC_ERR_DEV) + goto err_out; + + /* + * arg! EDD works for all test cases, but seems to return + * the ATA signature for some ATAPI devices. Until the + * reason for this is found and fixed, we fix up the mess + * here. If IDENTIFY DEVICE returns command aborted + * (as ATAPI devices do), then we issue an + * IDENTIFY PACKET DEVICE. + * + * ATA software reset (SRST, the default) does not appear + * to have this problem. + */ + if ((using_edd) && (class == ATA_DEV_ATA)) { + u8 err = tf.feature; + if (err & ATA_ABORTED) { + class = ATA_DEV_ATAPI; + goto retry; + } + } goto err_out; } swap_buf_le16(id, ATA_ID_WORDS); /* sanity check */ - if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) { + if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) { rc = -EINVAL; reason = "device reports illegal type"; goto err_out; @@ -1158,7 +1187,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, * Some drives were very specific about that exact sequence. 
*/ if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { - err_mask = ata_dev_init_params(ap, dev, id[3], id[6]); + err_mask = ata_dev_init_params(ap, dev); if (err_mask) { rc = -EIO; reason = "INIT_DEV_PARAMS failed"; @@ -1411,11 +1440,7 @@ static int ata_bus_probe(struct ata_port *ap) if (!found) goto err_out_disable; - if (ap->ops->set_mode) - ap->ops->set_mode(ap); - else - ata_set_mode(ap); - + ata_set_mode(ap); if (ap->flags & ATA_FLAG_PORT_DISABLED) goto err_out_disable; @@ -1820,7 +1845,7 @@ static void ata_host_set_dma(struct ata_port *ap) */ static void ata_set_mode(struct ata_port *ap) { - int i, rc, used_dma = 0; + int i, rc; /* step 1: calculate xfer_mask */ for (i = 0; i < ATA_MAX_DEVICES; i++) { @@ -1838,9 +1863,6 @@ static void ata_set_mode(struct ata_port *ap) dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); dev->pio_mode = ata_xfer_mask2mode(pio_mask); dev->dma_mode = ata_xfer_mask2mode(dma_mask); - - if (dev->dma_mode) - used_dma = 1; } /* step 2: always set host PIO timings */ @@ -1862,17 +1884,6 @@ static void ata_set_mode(struct ata_port *ap) goto err_out; } - /* - * Record simplex status. If we selected DMA then the other - * host channels are not permitted to do so. - */ - - if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) - ap->host_set->simplex_claimed = 1; - - /* - * Chip specific finalisation - */ if (ap->ops->post_set_mode) ap->ops->post_set_mode(ap); @@ -1994,6 +2005,45 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) ap->ops->dev_select(ap, 0); } +/** + * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command. + * @ap: Port to reset and probe + * + * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and + * probe the bus. Not often used these days. + * + * LOCKING: + * PCI/etc. bus probe sem. + * Obtains host_set lock. + * + */ + +static unsigned int ata_bus_edd(struct ata_port *ap) +{ + struct ata_taskfile tf; + unsigned long flags; + + /* set up execute-device-diag (bus reset) taskfile */ + /* also, take interrupts to a known state (disabled) */ + DPRINTK("execute-device-diag\n"); + ata_tf_init(ap, &tf, 0); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_EDD; + tf.protocol = ATA_PROT_NODATA; + + /* do bus reset */ + spin_lock_irqsave(&ap->host_set->lock, flags); + ata_tf_to_host(ap, &tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + /* spec says at least 2ms. but who knows with those + * crazy ATAPI devices... + */ + msleep(150); + + return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); +} + static unsigned int ata_bus_softreset(struct ata_port *ap, unsigned int devmask) { @@ -2028,12 +2078,13 @@ static unsigned int ata_bus_softreset(struct ata_port *ap, */ msleep(150); + /* Before we perform post reset processing we want to see if - * the bus shows 0xFF because the odd clown forgets the D7 - * pulldown resistor. 
- */ + the bus shows 0xFF because the odd clown forgets the D7 pulldown + resistor */ + if (ata_check_status(ap) == 0xFF) - return AC_ERR_OTHER; + return 1; /* Positive is failure for some reason */ ata_bus_post_reset(ap, devmask); @@ -2065,7 +2116,7 @@ void ata_bus_reset(struct ata_port *ap) struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; u8 err; - unsigned int dev0, dev1 = 0, devmask = 0; + unsigned int dev0, dev1 = 0, rc = 0, devmask = 0; DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); @@ -2088,8 +2139,18 @@ void ata_bus_reset(struct ata_port *ap) /* issue bus reset */ if (ap->flags & ATA_FLAG_SRST) - if (ata_bus_softreset(ap, devmask)) - goto err_out; + rc = ata_bus_softreset(ap, devmask); + else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) { + /* set up device control */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + rc = ata_bus_edd(ap); + } + + if (rc) + goto err_out; /* * determine by signature whether we have ATA or ATAPI devices @@ -2162,9 +2223,9 @@ static int sata_phy_resume(struct ata_port *ap) * so makes reset sequence different from the original * ->phy_reset implementation and Jeff nervous. :-P */ -void ata_std_probeinit(struct ata_port *ap) +extern void ata_std_probeinit(struct ata_port *ap) { - if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { + if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) { sata_phy_resume(ap); if (sata_dev_present(ap)) ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); @@ -2653,23 +2714,18 @@ static int ata_dma_blacklisted(const struct ata_device *dev) * known limits including host controller limits, device * blacklist, etc... * - * FIXME: The current implementation limits all transfer modes to - * the fastest of the lowested device on the port. This is not - * required on most controllers. - * * LOCKING: * None. */ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) { - struct ata_host_set *hs = ap->host_set; unsigned long xfer_mask; int i; xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, ap->udma_mask); - /* FIXME: Use port-wide xfermask for now */ + /* use port-wide xfermask for now */ for (i = 0; i < ATA_MAX_DEVICES; i++) { struct ata_device *d = &ap->device[i]; if (!ata_dev_present(d)) @@ -2679,23 +2735,12 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) xfer_mask &= ata_id_xfermask(d->id); if (ata_dma_blacklisted(d)) xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - /* Apply cable rule here. 
Don't apply it early because when - we handle hot plug the cable type can itself change */ - if (ap->cbl == ATA_CBL_PATA40) - xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); } if (ata_dma_blacklisted(dev)) printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " "disabling DMA\n", ap->id, dev->devno); - if (hs->flags & ATA_HOST_SIMPLEX) { - if (hs->simplex_claimed) - xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - } - if (ap->ops->mode_filter) - xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); - ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, &dev->udma_mask); } @@ -2750,16 +2795,16 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap, */ static unsigned int ata_dev_init_params(struct ata_port *ap, - struct ata_device *dev, - u16 heads, - u16 sectors) + struct ata_device *dev) { struct ata_taskfile tf; unsigned int err_mask; + u16 sectors = dev->id[6]; + u16 heads = dev->id[3]; /* Number of sectors per track 1-255. Number of heads 1-16 */ if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) - return AC_ERR_INVALID; + return 0; /* set up init dev params taskfile */ DPRINTK("init dev params \n"); @@ -4491,14 +4536,6 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent, int rc; DPRINTK("ENTER\n"); - - if (!ent->port_ops->probe_reset && - !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { - printk(KERN_ERR "ata%u: no reset mechanism available\n", - port_no); - return NULL; - } - host = scsi_host_alloc(ent->sht, sizeof(struct ata_port)); if (!host) return NULL; @@ -4559,7 +4596,6 @@ int ata_device_add(const struct ata_probe_ent *ent) host_set->mmio_base = ent->mmio_base; host_set->private_data = ent->private_data; host_set->ops = ent->port_ops; - host_set->flags = ent->host_set_flags; /* register each port bound to this device */ for (i = 0; i < ent->n_ports; i++) { diff --git a/trunk/drivers/scsi/sata_mv.c b/trunk/drivers/scsi/sata_mv.c index fa901fd65085..275ed9bd898c 100644 --- a/trunk/drivers/scsi/sata_mv.c +++ b/trunk/drivers/scsi/sata_mv.c @@ -1010,7 +1010,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); - pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff); + pp->sg_tbl[i].flags_size = cpu_to_le32(len); sg_len -= len; addr += len; @@ -1350,6 +1350,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, { void __iomem *mmio = host_set->mmio_base; void __iomem *hc_mmio = mv_hc_base(mmio, hc); + struct ata_port *ap; struct ata_queued_cmd *qc; u32 hc_irq_cause; int shift, port, port0, hard_port, handled; @@ -1372,32 +1373,25 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { u8 ata_status = 0; - struct ata_port *ap = host_set->ports[port]; - struct mv_port_priv *pp = ap->private_data; - + ap = host_set->ports[port]; hard_port = port & MV_PORT_MASK; /* range 0-3 */ handled = 0; /* ensure ata_status is set if handled++ */ - /* Note that DEV_IRQ might happen spuriously during EDMA, - * and should be ignored in such cases. We could mask it, - * but it's pretty rare and may not be worth the overhead. 
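As a reading aid for the per-port interrupt dispatch that the sata_mv hunk above reverts to, a hedged sketch of the same classification, written as if it sat inside sata_mv.c (CRPB_DMA_DONE and DEV_IRQ are the driver's own cause-register masks); the enum and helper name are illustrative, not from the patch:

/*
 * Illustrative sketch only: classify the pending interrupt for one
 * hardware port (hard_port 0-3) from the host controller cause word.
 */
enum mv_irq_kind { MV_IRQ_NONE, MV_IRQ_CRPB_DONE, MV_IRQ_DEV };

static enum mv_irq_kind mv_classify_irq(u32 hc_irq_cause, unsigned int hard_port)
{
	if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
		return MV_IRQ_CRPB_DONE;	/* response queue entry completed */
	if ((DEV_IRQ << hard_port) & hc_irq_cause)
		return MV_IRQ_DEV;		/* plain device interrupt */
	return MV_IRQ_NONE;
}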
- */ - if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { - /* EDMA: check for response queue interrupt */ - if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { - ata_status = mv_get_crpb_status(ap); - handled = 1; - } - } else { - /* PIO: check for device (drive) interrupt */ - if ((DEV_IRQ << hard_port) & hc_irq_cause) { - ata_status = readb((void __iomem *) + if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { + /* new CRPB on the queue; just one at a time until NCQ + */ + ata_status = mv_get_crpb_status(ap); + handled++; + } else if ((DEV_IRQ << hard_port) & hc_irq_cause) { + /* received ATA IRQ; read the status reg to clear INTRQ + */ + ata_status = readb((void __iomem *) ap->ioaddr.status_addr); - handled = 1; - } + handled++; } - if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) + if (ap && + (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) continue; err_mask = ac_err_mask(ata_status); @@ -1409,12 +1403,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, if ((PORT0_ERR << shift) & relevant) { mv_err_intr(ap); err_mask |= AC_ERR_OTHER; - handled = 1; + handled++; } - if (handled) { + if (handled && ap) { qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) { + if (NULL != qc) { VPRINTK("port %u IRQ found for qc, " "ata_status 0x%x\n", port,ata_status); /* mark qc status appropriately */ diff --git a/trunk/fs/Makefile b/trunk/fs/Makefile index f3a4f7077175..080b3867be4d 100644 --- a/trunk/fs/Makefile +++ b/trunk/fs/Makefile @@ -10,7 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ - ioprio.o pnode.o drop_caches.o splice.o + ioprio.o pnode.o drop_caches.o obj-$(CONFIG_INOTIFY) += inotify.o obj-$(CONFIG_EPOLL) += eventpoll.o diff --git a/trunk/fs/ext2/file.c b/trunk/fs/ext2/file.c index 23e2c7ccec1d..509cceca04db 100644 --- a/trunk/fs/ext2/file.c +++ b/trunk/fs/ext2/file.c @@ -53,8 +53,6 @@ const struct file_operations ext2_file_operations = { .readv = generic_file_readv, .writev = generic_file_writev, .sendfile = generic_file_sendfile, - .splice_read = generic_file_splice_read, - .splice_write = generic_file_splice_write, }; #ifdef CONFIG_EXT2_FS_XIP diff --git a/trunk/fs/ext3/file.c b/trunk/fs/ext3/file.c index 1efefb630ea9..783a796220bb 100644 --- a/trunk/fs/ext3/file.c +++ b/trunk/fs/ext3/file.c @@ -119,8 +119,6 @@ const struct file_operations ext3_file_operations = { .release = ext3_release_file, .fsync = ext3_sync_file, .sendfile = generic_file_sendfile, - .splice_read = generic_file_splice_read, - .splice_write = generic_file_splice_write, }; struct inode_operations ext3_file_inode_operations = { diff --git a/trunk/fs/pipe.c b/trunk/fs/pipe.c index 109a102c150d..e2f4f1d9ffc2 100644 --- a/trunk/fs/pipe.c +++ b/trunk/fs/pipe.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -95,20 +94,11 @@ static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buff { struct page *page = buf->page; - /* - * If nobody else uses this page, and we don't already have a - * temporary page, let's keep track of it as a one-deep - * allocation cache - */ - if (page_count(page) == 1 && !info->tmp_page) { - info->tmp_page = page; + if (info->tmp_page) { + __free_page(page); return; } - - /* - * Otherwise just release our reference to it - */ - page_cache_release(page); + info->tmp_page = 
page; } static void *anon_pipe_buf_map(struct file *file, struct pipe_inode_info *info, struct pipe_buffer *buf) @@ -121,19 +111,11 @@ static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer kunmap(buf->page); } -static int anon_pipe_buf_steal(struct pipe_inode_info *info, - struct pipe_buffer *buf) -{ - buf->stolen = 1; - return 0; -} - static struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .map = anon_pipe_buf_map, .unmap = anon_pipe_buf_unmap, .release = anon_pipe_buf_release, - .steal = anon_pipe_buf_steal, }; static ssize_t @@ -170,11 +152,6 @@ pipe_readv(struct file *filp, const struct iovec *_iov, chars = total_len; addr = ops->map(filp, info, buf); - if (IS_ERR(addr)) { - if (!ret) - ret = PTR_ERR(addr); - break; - } error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars); ops->unmap(info, buf); if (unlikely(error)) { @@ -277,16 +254,8 @@ pipe_writev(struct file *filp, const struct iovec *_iov, struct pipe_buf_operations *ops = buf->ops; int offset = buf->offset + buf->len; if (ops->can_merge && offset + chars <= PAGE_SIZE) { - void *addr; - int error; - - addr = ops->map(filp, info, buf); - if (IS_ERR(addr)) { - error = PTR_ERR(addr); - goto out; - } - error = pipe_iov_copy_from_user(offset + addr, iov, - chars); + void *addr = ops->map(filp, info, buf); + int error = pipe_iov_copy_from_user(offset + addr, iov, chars); ops->unmap(info, buf); ret = error; do_wakeup = 1; diff --git a/trunk/fs/reiserfs/file.c b/trunk/fs/reiserfs/file.c index cf6e1cf40351..010094d14da6 100644 --- a/trunk/fs/reiserfs/file.c +++ b/trunk/fs/reiserfs/file.c @@ -1576,8 +1576,6 @@ const struct file_operations reiserfs_file_operations = { .sendfile = generic_file_sendfile, .aio_read = generic_file_aio_read, .aio_write = reiserfs_aio_write, - .splice_read = generic_file_splice_read, - .splice_write = generic_file_splice_write, }; struct inode_operations reiserfs_file_inode_operations = { diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c deleted file mode 100644 index 4a026f95884f..000000000000 --- a/trunk/fs/splice.c +++ /dev/null @@ -1,659 +0,0 @@ -/* - * "splice": joining two ropes together by interweaving their strands. - * - * This is the "extended pipe" functionality, where a pipe is used as - * an arbitrary in-memory buffer. Think of a pipe as a small kernel - * buffer that you can use to transfer data from one end to the other. - * - * The traditional unix read/write is extended with a "splice()" operation - * that transfers data buffers to or from a pipe buffer. - * - * Named by Larry McVoy, original implementation from Linus, extended by - * Jens to support splicing to files and fixing the initial implementation - * bugs. 
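For context on what is being removed below, a hedged userspace sketch of how this early splice() prototype would have been exercised: it takes only (fdin, fdout, len, flags), one of the two descriptors must be a pipe, and there is no libc wrapper, so the raw syscall number is used. The number 313 matches the i386 entry removed further down; everything else in the program is illustrative:

/* Illustrative only: splice up to 64 KiB from stdin (expected to be a
 * pipe, e.g. "producer | ./prog > file") to stdout using the early
 * 4-argument prototype this patch removes.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_sys_splice
#define __NR_sys_splice 313	/* i386 value from the table removed below */
#endif

int main(void)
{
	long n = syscall(__NR_sys_splice, 0, 1, 65536, 0);

	if (n < 0) {
		perror("splice");
		return 1;
	}
	fprintf(stderr, "spliced %ld bytes\n", n);
	return 0;
}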
- * - * Copyright (C) 2005 Jens Axboe - * Copyright (C) 2005 Linus Torvalds - * - */ -#include -#include -#include -#include -#include -#include - -/* - * Passed to the actors - */ -struct splice_desc { - unsigned int len, total_len; /* current and remaining length */ - unsigned int flags; /* splice flags */ - struct file *file; /* file to read/write */ - loff_t pos; /* file position */ -}; - -static int page_cache_pipe_buf_steal(struct pipe_inode_info *info, - struct pipe_buffer *buf) -{ - struct page *page = buf->page; - - WARN_ON(!PageLocked(page)); - WARN_ON(!PageUptodate(page)); - - if (!remove_mapping(page_mapping(page), page)) - return 1; - - if (PageLRU(page)) { - struct zone *zone = page_zone(page); - - spin_lock_irq(&zone->lru_lock); - BUG_ON(!PageLRU(page)); - __ClearPageLRU(page); - del_page_from_lru(zone, page); - spin_unlock_irq(&zone->lru_lock); - } - - buf->stolen = 1; - return 0; -} - -static void page_cache_pipe_buf_release(struct pipe_inode_info *info, - struct pipe_buffer *buf) -{ - page_cache_release(buf->page); - buf->page = NULL; - buf->stolen = 0; -} - -static void *page_cache_pipe_buf_map(struct file *file, - struct pipe_inode_info *info, - struct pipe_buffer *buf) -{ - struct page *page = buf->page; - - lock_page(page); - - if (!PageUptodate(page)) { - unlock_page(page); - return ERR_PTR(-EIO); - } - - if (!page->mapping) { - unlock_page(page); - return ERR_PTR(-ENODATA); - } - - return kmap(buf->page); -} - -static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info, - struct pipe_buffer *buf) -{ - if (!buf->stolen) - unlock_page(buf->page); - kunmap(buf->page); -} - -static struct pipe_buf_operations page_cache_pipe_buf_ops = { - .can_merge = 0, - .map = page_cache_pipe_buf_map, - .unmap = page_cache_pipe_buf_unmap, - .release = page_cache_pipe_buf_release, - .steal = page_cache_pipe_buf_steal, -}; - -static ssize_t move_to_pipe(struct inode *inode, struct page **pages, - int nr_pages, unsigned long offset, - unsigned long len) -{ - struct pipe_inode_info *info; - int ret, do_wakeup, i; - - ret = 0; - do_wakeup = 0; - i = 0; - - mutex_lock(PIPE_MUTEX(*inode)); - - info = inode->i_pipe; - for (;;) { - int bufs; - - if (!PIPE_READERS(*inode)) { - send_sig(SIGPIPE, current, 0); - if (!ret) - ret = -EPIPE; - break; - } - - bufs = info->nrbufs; - if (bufs < PIPE_BUFFERS) { - int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1); - struct pipe_buffer *buf = info->bufs + newbuf; - struct page *page = pages[i++]; - unsigned long this_len; - - this_len = PAGE_CACHE_SIZE - offset; - if (this_len > len) - this_len = len; - - buf->page = page; - buf->offset = offset; - buf->len = this_len; - buf->ops = &page_cache_pipe_buf_ops; - info->nrbufs = ++bufs; - do_wakeup = 1; - - ret += this_len; - len -= this_len; - offset = 0; - if (!--nr_pages) - break; - if (!len) - break; - if (bufs < PIPE_BUFFERS) - continue; - - break; - } - - if (signal_pending(current)) { - if (!ret) - ret = -ERESTARTSYS; - break; - } - - if (do_wakeup) { - wake_up_interruptible_sync(PIPE_WAIT(*inode)); - kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, - POLL_IN); - do_wakeup = 0; - } - - PIPE_WAITING_WRITERS(*inode)++; - pipe_wait(inode); - PIPE_WAITING_WRITERS(*inode)--; - } - - mutex_unlock(PIPE_MUTEX(*inode)); - - if (do_wakeup) { - wake_up_interruptible(PIPE_WAIT(*inode)); - kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN); - } - - while (i < nr_pages) - page_cache_release(pages[i++]); - - return ret; -} - -static int __generic_file_splice_read(struct file *in, struct inode *pipe, - 
size_t len) -{ - struct address_space *mapping = in->f_mapping; - unsigned int offset, nr_pages; - struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS]; - struct page *page; - pgoff_t index, pidx; - int i, j; - - index = in->f_pos >> PAGE_CACHE_SHIFT; - offset = in->f_pos & ~PAGE_CACHE_MASK; - nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - - if (nr_pages > PIPE_BUFFERS) - nr_pages = PIPE_BUFFERS; - - /* - * initiate read-ahead on this page range - */ - do_page_cache_readahead(mapping, in, index, nr_pages); - - /* - * Get as many pages from the page cache as possible.. - * Start IO on the page cache entries we create (we - * can assume that any pre-existing ones we find have - * already had IO started on them). - */ - i = find_get_pages(mapping, index, nr_pages, pages); - - /* - * common case - we found all pages and they are contiguous, - * kick them off - */ - if (i && (pages[i - 1]->index == index + i - 1)) - goto splice_them; - - /* - * fill shadow[] with pages at the right locations, so we only - * have to fill holes - */ - memset(shadow, 0, i * sizeof(struct page *)); - for (j = 0, pidx = index; j < i; pidx++, j++) - shadow[pages[j]->index - pidx] = pages[j]; - - /* - * now fill in the holes - */ - for (i = 0, pidx = index; i < nr_pages; pidx++, i++) { - int error; - - if (shadow[i]) - continue; - - /* - * no page there, look one up / create it - */ - page = find_or_create_page(mapping, pidx, - mapping_gfp_mask(mapping)); - if (!page) - break; - - if (PageUptodate(page)) - unlock_page(page); - else { - error = mapping->a_ops->readpage(in, page); - - if (unlikely(error)) { - page_cache_release(page); - break; - } - } - shadow[i] = page; - } - - if (!i) { - for (i = 0; i < nr_pages; i++) { - if (shadow[i]) - page_cache_release(shadow[i]); - } - return 0; - } - - memcpy(pages, shadow, i * sizeof(struct page *)); - - /* - * Now we splice them into the pipe.. - */ -splice_them: - return move_to_pipe(pipe, pages, i, offset, len); -} - -ssize_t generic_file_splice_read(struct file *in, struct inode *pipe, - size_t len, unsigned int flags) -{ - ssize_t spliced; - int ret; - - ret = 0; - spliced = 0; - while (len) { - ret = __generic_file_splice_read(in, pipe, len); - - if (ret <= 0) - break; - - in->f_pos += ret; - len -= ret; - spliced += ret; - } - - if (spliced) - return spliced; - - return ret; -} - -/* - * Send 'len' bytes to socket from 'file' at position 'pos' using sendpage(). - */ -static int pipe_to_sendpage(struct pipe_inode_info *info, - struct pipe_buffer *buf, struct splice_desc *sd) -{ - struct file *file = sd->file; - loff_t pos = sd->pos; - unsigned int offset; - ssize_t ret; - void *ptr; - - /* - * sub-optimal, but we are limited by the pipe ->map. we don't - * need a kmap'ed buffer here, we just want to make sure we - * have the page pinned if the pipe page originates from the - * page cache - */ - ptr = buf->ops->map(file, info, buf); - if (IS_ERR(ptr)) - return PTR_ERR(ptr); - - offset = pos & ~PAGE_CACHE_MASK; - - ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, - sd->len < sd->total_len); - - buf->ops->unmap(info, buf); - if (ret == sd->len) - return 0; - - return -EIO; -} - -/* - * This is a little more tricky than the file -> pipe splicing. There are - * basically three cases: - * - * - Destination page already exists in the address space and there - * are users of it. For that case we have no other option that - * copying the data. Tough luck. 
- * - Destination page already exists in the address space, but there - * are no users of it. Make sure it's uptodate, then drop it. Fall - * through to last case. - * - Destination page does not exist, we can add the pipe page to - * the page cache and avoid the copy. - * - * For now we just do the slower thing and always copy pages over, it's - * easier than migrating pages from the pipe to the target file. For the - * case of doing file | file splicing, the migrate approach had some LRU - * nastiness... - */ -static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, - struct splice_desc *sd) -{ - struct file *file = sd->file; - struct address_space *mapping = file->f_mapping; - unsigned int offset; - struct page *page; - pgoff_t index; - char *src; - int ret; - - /* - * after this, page will be locked and unmapped - */ - src = buf->ops->map(file, info, buf); - if (IS_ERR(src)) - return PTR_ERR(src); - - index = sd->pos >> PAGE_CACHE_SHIFT; - offset = sd->pos & ~PAGE_CACHE_MASK; - - /* - * reuse buf page, if SPLICE_F_MOVE is set - */ - if (sd->flags & SPLICE_F_MOVE) { - if (buf->ops->steal(info, buf)) - goto find_page; - - page = buf->page; - if (add_to_page_cache_lru(page, mapping, index, - mapping_gfp_mask(mapping))) - goto find_page; - } else { -find_page: - ret = -ENOMEM; - page = find_or_create_page(mapping, index, - mapping_gfp_mask(mapping)); - if (!page) - goto out; - - /* - * If the page is uptodate, it is also locked. If it isn't - * uptodate, we can mark it uptodate if we are filling the - * full page. Otherwise we need to read it in first... - */ - if (!PageUptodate(page)) { - if (sd->len < PAGE_CACHE_SIZE) { - ret = mapping->a_ops->readpage(file, page); - if (unlikely(ret)) - goto out; - - lock_page(page); - - if (!PageUptodate(page)) { - /* - * page got invalidated, repeat - */ - if (!page->mapping) { - unlock_page(page); - page_cache_release(page); - goto find_page; - } - ret = -EIO; - goto out; - } - } else { - WARN_ON(!PageLocked(page)); - SetPageUptodate(page); - } - } - } - - ret = mapping->a_ops->prepare_write(file, page, 0, sd->len); - if (ret) - goto out; - - if (!buf->stolen) { - char *dst = kmap_atomic(page, KM_USER0); - - memcpy(dst + offset, src + buf->offset, sd->len); - flush_dcache_page(page); - kunmap_atomic(dst, KM_USER0); - } - - ret = mapping->a_ops->commit_write(file, page, 0, sd->len); - if (ret < 0) - goto out; - - set_page_dirty(page); - ret = write_one_page(page, 0); -out: - if (ret < 0) - unlock_page(page); - if (!buf->stolen) - page_cache_release(page); - buf->ops->unmap(info, buf); - return ret; -} - -typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, - struct splice_desc *); - -static ssize_t move_from_pipe(struct inode *inode, struct file *out, - size_t len, unsigned int flags, - splice_actor *actor) -{ - struct pipe_inode_info *info; - int ret, do_wakeup, err; - struct splice_desc sd; - - ret = 0; - do_wakeup = 0; - - sd.total_len = len; - sd.flags = flags; - sd.file = out; - sd.pos = out->f_pos; - - mutex_lock(PIPE_MUTEX(*inode)); - - info = inode->i_pipe; - for (;;) { - int bufs = info->nrbufs; - - if (bufs) { - int curbuf = info->curbuf; - struct pipe_buffer *buf = info->bufs + curbuf; - struct pipe_buf_operations *ops = buf->ops; - - sd.len = buf->len; - if (sd.len > sd.total_len) - sd.len = sd.total_len; - - err = actor(info, buf, &sd); - if (err) { - if (!ret && err != -ENODATA) - ret = err; - - break; - } - - ret += sd.len; - buf->offset += sd.len; - buf->len -= sd.len; - if (!buf->len) { - 
buf->ops = NULL; - ops->release(info, buf); - curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1); - info->curbuf = curbuf; - info->nrbufs = --bufs; - do_wakeup = 1; - } - - sd.pos += sd.len; - sd.total_len -= sd.len; - if (!sd.total_len) - break; - } - - if (bufs) - continue; - if (!PIPE_WRITERS(*inode)) - break; - if (!PIPE_WAITING_WRITERS(*inode)) { - if (ret) - break; - } - - if (signal_pending(current)) { - if (!ret) - ret = -ERESTARTSYS; - break; - } - - if (do_wakeup) { - wake_up_interruptible_sync(PIPE_WAIT(*inode)); - kill_fasync(PIPE_FASYNC_WRITERS(*inode),SIGIO,POLL_OUT); - do_wakeup = 0; - } - - pipe_wait(inode); - } - - mutex_unlock(PIPE_MUTEX(*inode)); - - if (do_wakeup) { - wake_up_interruptible(PIPE_WAIT(*inode)); - kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); - } - - mutex_lock(&out->f_mapping->host->i_mutex); - out->f_pos = sd.pos; - mutex_unlock(&out->f_mapping->host->i_mutex); - return ret; - -} - -ssize_t generic_file_splice_write(struct inode *inode, struct file *out, - size_t len, unsigned int flags) -{ - return move_from_pipe(inode, out, len, flags, pipe_to_file); -} - -ssize_t generic_splice_sendpage(struct inode *inode, struct file *out, - size_t len, unsigned int flags) -{ - return move_from_pipe(inode, out, len, flags, pipe_to_sendpage); -} - -static long do_splice_from(struct inode *pipe, struct file *out, size_t len, - unsigned int flags) -{ - loff_t pos; - int ret; - - if (!out->f_op || !out->f_op->splice_write) - return -EINVAL; - - if (!(out->f_mode & FMODE_WRITE)) - return -EBADF; - - pos = out->f_pos; - ret = rw_verify_area(WRITE, out, &pos, len); - if (unlikely(ret < 0)) - return ret; - - return out->f_op->splice_write(pipe, out, len, flags); -} - -static long do_splice_to(struct file *in, struct inode *pipe, size_t len, - unsigned int flags) -{ - loff_t pos, isize, left; - int ret; - - if (!in->f_op || !in->f_op->splice_read) - return -EINVAL; - - if (!(in->f_mode & FMODE_READ)) - return -EBADF; - - pos = in->f_pos; - ret = rw_verify_area(READ, in, &pos, len); - if (unlikely(ret < 0)) - return ret; - - isize = i_size_read(in->f_mapping->host); - if (unlikely(in->f_pos >= isize)) - return 0; - - left = isize - in->f_pos; - if (left < len) - len = left; - - return in->f_op->splice_read(in, pipe, len, flags); -} - -static long do_splice(struct file *in, struct file *out, size_t len, - unsigned int flags) -{ - struct inode *pipe; - - pipe = in->f_dentry->d_inode; - if (pipe->i_pipe) - return do_splice_from(pipe, out, len, flags); - - pipe = out->f_dentry->d_inode; - if (pipe->i_pipe) - return do_splice_to(in, pipe, len, flags); - - return -EINVAL; -} - -asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags) -{ - long error; - struct file *in, *out; - int fput_in, fput_out; - - if (unlikely(!len)) - return 0; - - error = -EBADF; - in = fget_light(fdin, &fput_in); - if (in) { - if (in->f_mode & FMODE_READ) { - out = fget_light(fdout, &fput_out); - if (out) { - if (out->f_mode & FMODE_WRITE) - error = do_splice(in, out, len, flags); - fput_light(out, fput_out); - } - } - - fput_light(in, fput_in); - } - - return error; -} diff --git a/trunk/include/asm-i386/unistd.h b/trunk/include/asm-i386/unistd.h index 789e9bdd0a40..014e3562895b 100644 --- a/trunk/include/asm-i386/unistd.h +++ b/trunk/include/asm-i386/unistd.h @@ -318,9 +318,8 @@ #define __NR_unshare 310 #define __NR_set_robust_list 311 #define __NR_get_robust_list 312 -#define __NR_sys_splice 313 -#define NR_syscalls 314 +#define NR_syscalls 313 /* * user-visible error numbers 
are in the range -1 - -128: see diff --git a/trunk/include/asm-ia64/asmmacro.h b/trunk/include/asm-ia64/asmmacro.h index edf2cebb2969..d4cec32083d8 100644 --- a/trunk/include/asm-ia64/asmmacro.h +++ b/trunk/include/asm-ia64/asmmacro.h @@ -38,10 +38,6 @@ /* * Helper macros for accessing user memory. - * - * When adding any new .section/.previous entries here, make sure to - * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or - * unpleasant things will happen. */ .section "__ex_table", "a" // declare section & section attributes diff --git a/trunk/include/asm-ia64/unistd.h b/trunk/include/asm-ia64/unistd.h index 36070c1014d8..019956c613e4 100644 --- a/trunk/include/asm-ia64/unistd.h +++ b/trunk/include/asm-ia64/unistd.h @@ -285,13 +285,12 @@ #define __NR_faccessat 1293 /* 1294, 1295 reserved for pselect/ppoll */ #define __NR_unshare 1296 -#define __NR_splice 1297 #ifdef __KERNEL__ #include -#define NR_syscalls 274 /* length of syscall table */ +#define NR_syscalls 273 /* length of syscall table */ #define __ARCH_WANT_SYS_RT_SIGACTION diff --git a/trunk/include/asm-parisc/pdc_chassis.h b/trunk/include/asm-parisc/pdc_chassis.h index adac9ac2743f..a609273dc6bf 100644 --- a/trunk/include/asm-parisc/pdc_chassis.h +++ b/trunk/include/asm-parisc/pdc_chassis.h @@ -6,9 +6,8 @@ * * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/trunk/include/asm-powerpc/unistd.h b/trunk/include/asm-powerpc/unistd.h index 536ba0873052..1e990747dce7 100644 --- a/trunk/include/asm-powerpc/unistd.h +++ b/trunk/include/asm-powerpc/unistd.h @@ -301,9 +301,8 @@ #define __NR_pselect6 280 #define __NR_ppoll 281 #define __NR_unshare 282 -#define __NR_splice 283 -#define __NR_syscalls 284 +#define __NR_syscalls 283 #ifdef __KERNEL__ #define __NR__exit __NR_exit diff --git a/trunk/include/asm-x86_64/unistd.h b/trunk/include/asm-x86_64/unistd.h index f21ff2c1e960..fcc516353087 100644 --- a/trunk/include/asm-x86_64/unistd.h +++ b/trunk/include/asm-x86_64/unistd.h @@ -609,10 +609,8 @@ __SYSCALL(__NR_unshare, sys_unshare) __SYSCALL(__NR_set_robust_list, sys_set_robust_list) #define __NR_get_robust_list 274 __SYSCALL(__NR_get_robust_list, sys_get_robust_list) -#define __NR_splice 275 -__SYSCALL(__NR_splice, sys_splice) -#define __NR_syscall_max __NR_splice +#define __NR_syscall_max __NR_get_robust_list #ifndef __NO_STUBS diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index 20fa5f6d7269..408fe89498f4 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -1032,8 +1032,6 @@ struct file_operations { int (*check_flags)(int); int (*dir_notify)(struct file *filp, unsigned long arg); int (*flock) (struct file *, int, struct file_lock *); - ssize_t (*splice_write)(struct inode *, struct file *, size_t, unsigned int); - ssize_t (*splice_read)(struct file *, struct inode *, size_t, unsigned int); }; struct inode_operations { @@ -1611,8 +1609,6 @@ extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor extern void do_generic_mapping_read(struct address_space *mapping, struct file_ra_state *, struct file *, loff_t *, 
read_descriptor_t *, read_actor_t); -extern ssize_t generic_file_splice_read(struct file *, struct inode *, size_t, unsigned int); -extern ssize_t generic_file_splice_write(struct inode *, struct file *, size_t, unsigned int); extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h index 0d61357604d5..047192253c3a 100644 --- a/trunk/include/linux/libata.h +++ b/trunk/include/linux/libata.h @@ -160,10 +160,8 @@ enum { ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */ - /* host set flags */ - ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ - /* various lengths of time */ + ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ ATA_TMOUT_PIO = 30 * HZ, ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ @@ -281,7 +279,6 @@ struct ata_probe_ent { unsigned long irq; unsigned int irq_flags; unsigned long host_flags; - unsigned long host_set_flags; void __iomem *mmio_base; void *private_data; }; @@ -294,9 +291,6 @@ struct ata_host_set { unsigned int n_ports; void *private_data; const struct ata_port_operations *ops; - unsigned long flags; - int simplex_claimed; /* Keep seperate in case we - ever need to do this locked */ struct ata_port * ports[0]; }; @@ -426,7 +420,6 @@ struct ata_port_operations { void (*set_piomode) (struct ata_port *, struct ata_device *); void (*set_dmamode) (struct ata_port *, struct ata_device *); - unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long); void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); @@ -437,7 +430,6 @@ struct ata_port_operations { void (*dev_select)(struct ata_port *ap, unsigned int device); void (*phy_reset) (struct ata_port *ap); /* obsolete */ - void (*set_mode) (struct ata_port *ap); int (*probe_reset) (struct ata_port *ap, unsigned int *classes); void (*post_set_mode) (struct ata_port *ap); diff --git a/trunk/include/linux/pipe_fs_i.h b/trunk/include/linux/pipe_fs_i.h index 75c7f55023ab..b12e59c75752 100644 --- a/trunk/include/linux/pipe_fs_i.h +++ b/trunk/include/linux/pipe_fs_i.h @@ -9,7 +9,6 @@ struct pipe_buffer { struct page *page; unsigned int offset, len; struct pipe_buf_operations *ops; - unsigned int stolen; }; struct pipe_buf_operations { @@ -17,7 +16,6 @@ struct pipe_buf_operations { void * (*map)(struct file *, struct pipe_inode_info *, struct pipe_buffer *); void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); - int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); }; struct pipe_inode_info { @@ -55,10 +53,4 @@ void pipe_wait(struct inode * inode); struct inode* pipe_new(struct inode* inode); void free_pipe_info(struct inode* inode); -/* - * splice is tied to pipes as a transport (at least for now), so we'll just - * add the splice flags here. 
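The effect of the SPLICE_F_MOVE flag removed just below (steal the pipe page into the page cache when possible, fall back to copying otherwise) is the decision pipe_to_file() makes in the deleted fs/splice.c above. A hedged sketch of that decision, assuming linux/pipe_fs_i.h as it stood before this patch; the helper name splice_may_move is hypothetical:

/*
 * Illustrative sketch only: may pipe_to_file() move (steal) the pipe page
 * into the page cache instead of copying its contents?
 */
static int splice_may_move(unsigned int splice_flags,
			   struct pipe_inode_info *info,
			   struct pipe_buffer *buf)
{
	if (!(splice_flags & SPLICE_F_MOVE))
		return 0;			/* caller did not ask for a move */

	/* ->steal() returns 0 only when no one else still uses the page */
	return buf->ops->steal(info, buf) == 0;
}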
- */ -#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ - #endif diff --git a/trunk/include/linux/syscalls.h b/trunk/include/linux/syscalls.h index e78ffc7d5b56..e487e3b60f60 100644 --- a/trunk/include/linux/syscalls.h +++ b/trunk/include/linux/syscalls.h @@ -569,7 +569,5 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename, asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, int mode); asmlinkage long sys_unshare(unsigned long unshare_flags); -asmlinkage long sys_splice(int fdin, int fdout, size_t len, - unsigned int flags); #endif diff --git a/trunk/include/rdma/ib_mad.h b/trunk/include/rdma/ib_mad.h index 5ff77558013b..51ab8eddb295 100644 --- a/trunk/include/rdma/ib_mad.h +++ b/trunk/include/rdma/ib_mad.h @@ -3,7 +3,7 @@ * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -55,10 +55,6 @@ #define IB_MGMT_CLASS_DEVICE_MGMT 0x06 #define IB_MGMT_CLASS_CM 0x07 #define IB_MGMT_CLASS_SNMP 0x08 -#define IB_MGMT_CLASS_DEVICE_ADM 0x10 -#define IB_MGMT_CLASS_BOOT_MGMT 0x11 -#define IB_MGMT_CLASS_BIS 0x12 -#define IB_MGMT_CLASS_CONG_MGMT 0x21 #define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 #define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F @@ -121,8 +117,6 @@ enum { IB_MGMT_VENDOR_DATA = 216, IB_MGMT_SA_HDR = 56, IB_MGMT_SA_DATA = 200, - IB_MGMT_DEVICE_HDR = 64, - IB_MGMT_DEVICE_DATA = 192, }; struct ib_mad_hdr { @@ -608,25 +602,6 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, int hdr_len, int data_len, gfp_t gfp_mask); -/** - * ib_is_mad_class_rmpp - returns whether given management class - * supports RMPP. - * @mgmt_class: management class - * - * This routine returns whether the management class supports RMPP. - */ -int ib_is_mad_class_rmpp(u8 mgmt_class); - -/** - * ib_get_mad_data_offset - returns the data offset for a given - * management class. - * @mgmt_class: management class - * - * This routine returns the data offset in the MAD for the management - * class requested. - */ -int ib_get_mad_data_offset(u8 mgmt_class); - /** * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment. * @send_buf: Previously allocated send data buffer. diff --git a/trunk/net/socket.c b/trunk/net/socket.c index b13042f68c02..fcd77eac0ccf 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -119,9 +119,6 @@ static ssize_t sock_writev(struct file *file, const struct iovec *vector, static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); -extern ssize_t generic_splice_sendpage(struct inode *inode, struct file *out, - size_t len, unsigned int flags); - /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear @@ -144,8 +141,7 @@ static struct file_operations socket_file_ops = { .fasync = sock_fasync, .readv = sock_readv, .writev = sock_writev, - .sendpage = sock_sendpage, - .splice_write = generic_splice_sendpage, + .sendpage = sock_sendpage }; /*