diff --git a/[refs] b/[refs] index d01ed9071cbf..1d00bfcf947c 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: af575e2d2e09517a8190f1b425453cc3e0102f6c +refs/heads/master: 19a167af7c97248ec646552ebc9140bc6aa3552a diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig index fc95ee1bcf6f..943fe6930f77 100644 --- a/trunk/arch/alpha/Kconfig +++ b/trunk/arch/alpha/Kconfig @@ -68,9 +68,6 @@ config GENERIC_IOMAP bool default n -config GENERIC_HARDIRQS_NO__DO_IRQ - def_bool y - config GENERIC_HARDIRQS bool default y diff --git a/trunk/arch/alpha/include/asm/io.h b/trunk/arch/alpha/include/asm/io.h index 56ff96501350..eda9b909aa05 100644 --- a/trunk/arch/alpha/include/asm/io.h +++ b/trunk/arch/alpha/include/asm/io.h @@ -37,9 +37,8 @@ */ extern inline void __set_hae(unsigned long new_hae) { - unsigned long flags = swpipl(IPL_MAX); - - barrier(); + unsigned long flags; + local_irq_save(flags); alpha_mv.hae_cache = new_hae; *alpha_mv.hae_register = new_hae; @@ -47,8 +46,7 @@ extern inline void __set_hae(unsigned long new_hae) /* Re-read to make sure it was written. */ new_hae = *alpha_mv.hae_register; - setipl(flags); - barrier(); + local_irq_restore(flags); } extern inline void set_hae(unsigned long new_hae) diff --git a/trunk/arch/alpha/kernel/Makefile b/trunk/arch/alpha/kernel/Makefile index 9bb7b858ed23..1ee9b5b629b8 100644 --- a/trunk/arch/alpha/kernel/Makefile +++ b/trunk/arch/alpha/kernel/Makefile @@ -3,8 +3,8 @@ # extra-y := head.o vmlinux.lds -asflags-y := $(KBUILD_CFLAGS) -ccflags-y := -Werror -Wno-sign-compare +EXTRA_AFLAGS := $(KBUILD_CFLAGS) +EXTRA_CFLAGS := -Werror -Wno-sign-compare obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ irq_alpha.o signal.o setup.o ptrace.o time.o \ diff --git a/trunk/arch/alpha/kernel/irq.c b/trunk/arch/alpha/kernel/irq.c index 9ab234f48dd8..fe912984d9b1 100644 --- a/trunk/arch/alpha/kernel/irq.c +++ b/trunk/arch/alpha/kernel/irq.c @@ -44,11 +44,10 @@ static char irq_user_affinity[NR_IRQS]; int irq_select_affinity(unsigned int irq) { - struct irq_desc *desc = irq_to_desc[irq]; static int last_cpu; int cpu = last_cpu + 1; - if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) + if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq]) return 1; while (!cpu_possible(cpu) || @@ -56,8 +55,8 @@ int irq_select_affinity(unsigned int irq) cpu = (cpu < (NR_CPUS-1) ? 
cpu + 1 : 0); last_cpu = cpu; - cpumask_copy(desc->affinity, cpumask_of(cpu)); - get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); + cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); + irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu)); return 0; } #endif /* CONFIG_SMP */ @@ -68,7 +67,6 @@ show_interrupts(struct seq_file *p, void *v) int j; int irq = *(loff_t *) v; struct irqaction * action; - struct irq_desc *desc; unsigned long flags; #ifdef CONFIG_SMP @@ -81,13 +79,8 @@ show_interrupts(struct seq_file *p, void *v) #endif if (irq < ACTUAL_NR_IRQS) { - desc = irq_to_desc(irq); - - if (!desc) - return 0; - - raw_spin_lock_irqsave(&desc->lock, flags); - action = desc->action; + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); + action = irq_desc[irq].action; if (!action) goto unlock; seq_printf(p, "%3d: ", irq); @@ -97,7 +90,7 @@ show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); #endif - seq_printf(p, " %14s", get_irq_desc_chip(desc)->name); + seq_printf(p, " %14s", irq_desc[irq].chip->name); seq_printf(p, " %c%s", (action->flags & IRQF_DISABLED)?'+':' ', action->name); @@ -110,7 +103,7 @@ show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - raw_spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } else if (irq == ACTUAL_NR_IRQS) { #ifdef CONFIG_SMP seq_puts(p, "IPI: "); @@ -149,10 +142,8 @@ handle_irq(int irq) * handled by some other CPU. (or is disabled) */ static unsigned int illegal_count=0; - struct irq_desc *desc = irq_to_desc(irq); - if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS && - illegal_count < MAX_ILLEGAL_IRQS)) { + if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) { irq_err_count++; illegal_count++; printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", @@ -160,14 +151,14 @@ handle_irq(int irq) return; } + irq_enter(); /* - * From here we must proceed with IPL_MAX. Note that we do not + * __do_IRQ() must be called with IPL_MAX. Note that we do not * explicitly enable interrupts afterwards - some MILO PALcode * (namely LX164 one) seems to have severe problems with RTI * at IPL 0. */ local_irq_disable(); - irq_enter(); - generic_handle_irq_desc(irq, desc); + __do_IRQ(irq); irq_exit(); } diff --git a/trunk/arch/alpha/kernel/irq_alpha.c b/trunk/arch/alpha/kernel/irq_alpha.c index 2d0679b60939..4c8bb374eb0a 100644 --- a/trunk/arch/alpha/kernel/irq_alpha.c +++ b/trunk/arch/alpha/kernel/irq_alpha.c @@ -219,23 +219,31 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, * processed by PALcode, and comes in via entInt vector 1. 
*/ +static void rtc_enable_disable(unsigned int irq) { } +static unsigned int rtc_startup(unsigned int irq) { return 0; } + struct irqaction timer_irqaction = { .handler = timer_interrupt, .flags = IRQF_DISABLED, .name = "timer", }; +static struct irq_chip rtc_irq_type = { + .name = "RTC", + .startup = rtc_startup, + .shutdown = rtc_enable_disable, + .enable = rtc_enable_disable, + .disable = rtc_enable_disable, + .ack = rtc_enable_disable, + .end = rtc_enable_disable, +}; + void __init init_rtc_irq(void) { - struct irq_desc *desc = irq_to_desc(RTC_IRQ); - - if (desc) { - desc->status |= IRQ_DISABLED; - set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, - handle_simple_irq, "RTC"); - setup_irq(RTC_IRQ, &timer_irqaction); - } + irq_desc[RTC_IRQ].status = IRQ_DISABLED; + irq_desc[RTC_IRQ].chip = &rtc_irq_type; + setup_irq(RTC_IRQ, &timer_irqaction); } /* Dummy irqactions. */ diff --git a/trunk/arch/alpha/kernel/irq_i8259.c b/trunk/arch/alpha/kernel/irq_i8259.c index 956ea0ed1694..83a9ac280890 100644 --- a/trunk/arch/alpha/kernel/irq_i8259.c +++ b/trunk/arch/alpha/kernel/irq_i8259.c @@ -69,11 +69,28 @@ i8259a_mask_and_ack_irq(unsigned int irq) spin_unlock(&i8259_irq_lock); } +unsigned int +i8259a_startup_irq(unsigned int irq) +{ + i8259a_enable_irq(irq); + return 0; /* never anything pending */ +} + +void +i8259a_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + i8259a_enable_irq(irq); +} + struct irq_chip i8259a_irq_type = { .name = "XT-PIC", - .unmask = i8259a_enable_irq, - .mask = i8259a_disable_irq, - .mask_ack = i8259a_mask_and_ack_irq, + .startup = i8259a_startup_irq, + .shutdown = i8259a_disable_irq, + .enable = i8259a_enable_irq, + .disable = i8259a_disable_irq, + .ack = i8259a_mask_and_ack_irq, + .end = i8259a_end_irq, }; void __init @@ -90,7 +107,8 @@ init_i8259a_irqs(void) outb(0xff, 0xA1); /* mask all of 8259A-2 */ for (i = 0; i < 16; i++) { - set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED; + irq_desc[i].chip = &i8259a_irq_type; } setup_irq(2, &cascade); diff --git a/trunk/arch/alpha/kernel/irq_pyxis.c b/trunk/arch/alpha/kernel/irq_pyxis.c index 2863458c853e..989ce46a0cf3 100644 --- a/trunk/arch/alpha/kernel/irq_pyxis.c +++ b/trunk/arch/alpha/kernel/irq_pyxis.c @@ -40,6 +40,20 @@ pyxis_disable_irq(unsigned int irq) pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); } +static unsigned int +pyxis_startup_irq(unsigned int irq) +{ + pyxis_enable_irq(irq); + return 0; +} + +static void +pyxis_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + pyxis_enable_irq(irq); +} + static void pyxis_mask_and_ack_irq(unsigned int irq) { @@ -58,9 +72,12 @@ pyxis_mask_and_ack_irq(unsigned int irq) static struct irq_chip pyxis_irq_type = { .name = "PYXIS", - .mask_ack = pyxis_mask_and_ack_irq, - .mask = pyxis_disable_irq, - .unmask = pyxis_enable_irq, + .startup = pyxis_startup_irq, + .shutdown = pyxis_disable_irq, + .enable = pyxis_enable_irq, + .disable = pyxis_disable_irq, + .ack = pyxis_mask_and_ack_irq, + .end = pyxis_end_irq, }; void @@ -102,8 +119,8 @@ init_pyxis_irqs(unsigned long ignore_mask) for (i = 16; i < 48; ++i) { if ((ignore_mask >> i) & 1) continue; - set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &pyxis_irq_type; } setup_irq(16+7, &isa_cascade_irqaction); diff --git a/trunk/arch/alpha/kernel/irq_srm.c 
b/trunk/arch/alpha/kernel/irq_srm.c index 0e57e828b413..d63e93e1e8bf 100644 --- a/trunk/arch/alpha/kernel/irq_srm.c +++ b/trunk/arch/alpha/kernel/irq_srm.c @@ -33,12 +33,29 @@ srm_disable_irq(unsigned int irq) spin_unlock(&srm_irq_lock); } +static unsigned int +srm_startup_irq(unsigned int irq) +{ + srm_enable_irq(irq); + return 0; +} + +static void +srm_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + srm_enable_irq(irq); +} + /* Handle interrupts from the SRM, assuming no additional weirdness. */ static struct irq_chip srm_irq_type = { .name = "SRM", - .unmask = srm_enable_irq, - .mask = srm_disable_irq, - .mask_ack = srm_disable_irq, + .startup = srm_startup_irq, + .shutdown = srm_disable_irq, + .enable = srm_enable_irq, + .disable = srm_disable_irq, + .ack = srm_disable_irq, + .end = srm_end_irq, }; void __init @@ -51,8 +68,8 @@ init_srm_irqs(long max, unsigned long ignore_mask) for (i = 16; i < max; ++i) { if (i < 64 && ((ignore_mask >> i) & 1)) continue; - set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &srm_irq_type; } } diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c index fe698b5045e9..547e8b84b2f7 100644 --- a/trunk/arch/alpha/kernel/osf_sys.c +++ b/trunk/arch/alpha/kernel/osf_sys.c @@ -951,6 +951,9 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); } +#define MAX_SELECT_SECONDS \ + ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) + SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { diff --git a/trunk/arch/alpha/kernel/sys_alcor.c b/trunk/arch/alpha/kernel/sys_alcor.c index 7bef61768236..20a30b8b9655 100644 --- a/trunk/arch/alpha/kernel/sys_alcor.c +++ b/trunk/arch/alpha/kernel/sys_alcor.c @@ -65,6 +65,13 @@ alcor_mask_and_ack_irq(unsigned int irq) *(vuip)GRU_INT_CLEAR = 0; mb(); } +static unsigned int +alcor_startup_irq(unsigned int irq) +{ + alcor_enable_irq(irq); + return 0; +} + static void alcor_isa_mask_and_ack_irq(unsigned int irq) { @@ -75,11 +82,21 @@ alcor_isa_mask_and_ack_irq(unsigned int irq) *(vuip)GRU_INT_CLEAR = 0; mb(); } +static void +alcor_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + alcor_enable_irq(irq); +} + static struct irq_chip alcor_irq_type = { .name = "ALCOR", - .unmask = alcor_enable_irq, - .mask = alcor_disable_irq, - .mask_ack = alcor_mask_and_ack_irq, + .startup = alcor_startup_irq, + .shutdown = alcor_disable_irq, + .enable = alcor_enable_irq, + .disable = alcor_disable_irq, + .ack = alcor_mask_and_ack_irq, + .end = alcor_end_irq, }; static void @@ -125,8 +142,8 @@ alcor_init_irq(void) on while IRQ probing. 
*/ if (i >= 16+20 && i <= 16+30) continue; - set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &alcor_irq_type; } i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; diff --git a/trunk/arch/alpha/kernel/sys_cabriolet.c b/trunk/arch/alpha/kernel/sys_cabriolet.c index b0c916493aea..14c8898d19ec 100644 --- a/trunk/arch/alpha/kernel/sys_cabriolet.c +++ b/trunk/arch/alpha/kernel/sys_cabriolet.c @@ -57,11 +57,28 @@ cabriolet_disable_irq(unsigned int irq) cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); } +static unsigned int +cabriolet_startup_irq(unsigned int irq) +{ + cabriolet_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +cabriolet_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + cabriolet_enable_irq(irq); +} + static struct irq_chip cabriolet_irq_type = { .name = "CABRIOLET", - .unmask = cabriolet_enable_irq, - .mask = cabriolet_disable_irq, - .mask_ack = cabriolet_disable_irq, + .startup = cabriolet_startup_irq, + .shutdown = cabriolet_disable_irq, + .enable = cabriolet_enable_irq, + .disable = cabriolet_disable_irq, + .ack = cabriolet_disable_irq, + .end = cabriolet_end_irq, }; static void @@ -105,9 +122,8 @@ common_init_irq(void (*srm_dev_int)(unsigned long v)) outb(0xff, 0x806); for (i = 16; i < 35; ++i) { - set_irq_chip_and_handler(i, &cabriolet_irq_type, - handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &cabriolet_irq_type; } } diff --git a/trunk/arch/alpha/kernel/sys_dp264.c b/trunk/arch/alpha/kernel/sys_dp264.c index edad5f759ccd..4026502ab707 100644 --- a/trunk/arch/alpha/kernel/sys_dp264.c +++ b/trunk/arch/alpha/kernel/sys_dp264.c @@ -115,6 +115,20 @@ dp264_disable_irq(unsigned int irq) spin_unlock(&dp264_irq_lock); } +static unsigned int +dp264_startup_irq(unsigned int irq) +{ + dp264_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +dp264_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + dp264_enable_irq(irq); +} + static void clipper_enable_irq(unsigned int irq) { @@ -133,6 +147,20 @@ clipper_disable_irq(unsigned int irq) spin_unlock(&dp264_irq_lock); } +static unsigned int +clipper_startup_irq(unsigned int irq) +{ + clipper_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +clipper_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + clipper_enable_irq(irq); +} + static void cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) { @@ -172,17 +200,23 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) static struct irq_chip dp264_irq_type = { .name = "DP264", - .unmask = dp264_enable_irq, - .mask = dp264_disable_irq, - .mask_ack = dp264_disable_irq, + .startup = dp264_startup_irq, + .shutdown = dp264_disable_irq, + .enable = dp264_enable_irq, + .disable = dp264_disable_irq, + .ack = dp264_disable_irq, + .end = dp264_end_irq, .set_affinity = dp264_set_affinity, }; static struct irq_chip clipper_irq_type = { .name = "CLIPPER", - .unmask = clipper_enable_irq, - .mask = clipper_disable_irq, - .mask_ack = clipper_disable_irq, + .startup = clipper_startup_irq, + .shutdown = clipper_disable_irq, + .enable = clipper_enable_irq, + .disable = clipper_disable_irq, + .ack = clipper_disable_irq, + .end = clipper_end_irq, .set_affinity = 
clipper_set_affinity, }; @@ -268,8 +302,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, ops, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = ops; } } diff --git a/trunk/arch/alpha/kernel/sys_eb64p.c b/trunk/arch/alpha/kernel/sys_eb64p.c index ae5f29d127b0..df2090ce5e7f 100644 --- a/trunk/arch/alpha/kernel/sys_eb64p.c +++ b/trunk/arch/alpha/kernel/sys_eb64p.c @@ -55,11 +55,28 @@ eb64p_disable_irq(unsigned int irq) eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); } +static unsigned int +eb64p_startup_irq(unsigned int irq) +{ + eb64p_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +eb64p_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + eb64p_enable_irq(irq); +} + static struct irq_chip eb64p_irq_type = { .name = "EB64P", - .unmask = eb64p_enable_irq, - .mask = eb64p_disable_irq, - .mask_ack = eb64p_disable_irq, + .startup = eb64p_startup_irq, + .shutdown = eb64p_disable_irq, + .enable = eb64p_enable_irq, + .disable = eb64p_disable_irq, + .ack = eb64p_disable_irq, + .end = eb64p_end_irq, }; static void @@ -118,8 +135,8 @@ eb64p_init_irq(void) init_i8259a_irqs(); for (i = 16; i < 32; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &eb64p_irq_type; } common_init_isa_dma(); diff --git a/trunk/arch/alpha/kernel/sys_eiger.c b/trunk/arch/alpha/kernel/sys_eiger.c index 1121bc5c6c6c..3ca1dbcf4044 100644 --- a/trunk/arch/alpha/kernel/sys_eiger.c +++ b/trunk/arch/alpha/kernel/sys_eiger.c @@ -66,11 +66,28 @@ eiger_disable_irq(unsigned int irq) eiger_update_irq_hw(irq, mask); } +static unsigned int +eiger_startup_irq(unsigned int irq) +{ + eiger_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +eiger_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + eiger_enable_irq(irq); +} + static struct irq_chip eiger_irq_type = { .name = "EIGER", - .unmask = eiger_enable_irq, - .mask = eiger_disable_irq, - .mask_ack = eiger_disable_irq, + .startup = eiger_startup_irq, + .shutdown = eiger_disable_irq, + .enable = eiger_enable_irq, + .disable = eiger_disable_irq, + .ack = eiger_disable_irq, + .end = eiger_end_irq, }; static void @@ -136,8 +153,8 @@ eiger_init_irq(void) init_i8259a_irqs(); for (i = 16; i < 128; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &eiger_irq_type; } } diff --git a/trunk/arch/alpha/kernel/sys_jensen.c b/trunk/arch/alpha/kernel/sys_jensen.c index 34f55e03d331..7a7ae36fff91 100644 --- a/trunk/arch/alpha/kernel/sys_jensen.c +++ b/trunk/arch/alpha/kernel/sys_jensen.c @@ -62,6 +62,30 @@ * world. */ +static unsigned int +jensen_local_startup(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_startup_irq(1); + else + /* + * For all true local interrupts, set the flag that prevents + * the IPL from being dropped during handler processing. + */ + if (irq_desc[irq].action) + irq_desc[irq].action->flags |= IRQF_DISABLED; + return 0; +} + +static void +jensen_local_shutdown(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. 
*/ + if (irq == 7) + i8259a_disable_irq(1); +} + static void jensen_local_enable(unsigned int irq) { @@ -79,18 +103,29 @@ jensen_local_disable(unsigned int irq) } static void -jensen_local_mask_ack(unsigned int irq) +jensen_local_ack(unsigned int irq) { /* the parport is really hw IRQ 1, silly Jensen. */ if (irq == 7) i8259a_mask_and_ack_irq(1); } +static void +jensen_local_end(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_end_irq(1); +} + static struct irq_chip jensen_local_irq_type = { .name = "LOCAL", - .unmask = jensen_local_enable, - .mask = jensen_local_disable, - .mask_ack = jensen_local_mask_ack, + .startup = jensen_local_startup, + .shutdown = jensen_local_shutdown, + .enable = jensen_local_enable, + .disable = jensen_local_disable, + .ack = jensen_local_ack, + .end = jensen_local_end, }; static void @@ -123,7 +158,7 @@ jensen_device_interrupt(unsigned long vector) } /* If there is no handler yet... */ - if (!irq_has_action(irq)) { + if (irq_desc[irq].action == NULL) { /* If it is a local interrupt that cannot be masked... */ if (vector >= 0x900) { @@ -171,11 +206,11 @@ jensen_init_irq(void) { init_i8259a_irqs(); - set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq); - set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq); - set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq); - set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq); - set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq); + irq_desc[1].chip = &jensen_local_irq_type; + irq_desc[4].chip = &jensen_local_irq_type; + irq_desc[3].chip = &jensen_local_irq_type; + irq_desc[7].chip = &jensen_local_irq_type; + irq_desc[9].chip = &jensen_local_irq_type; common_init_isa_dma(); } diff --git a/trunk/arch/alpha/kernel/sys_marvel.c b/trunk/arch/alpha/kernel/sys_marvel.c index 2bfc9f1b1ddc..0bb3b5c4f693 100644 --- a/trunk/arch/alpha/kernel/sys_marvel.c +++ b/trunk/arch/alpha/kernel/sys_marvel.c @@ -143,6 +143,20 @@ io7_disable_irq(unsigned int irq) spin_unlock(&io7->irq_lock); } +static unsigned int +io7_startup_irq(unsigned int irq) +{ + io7_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +io7_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + io7_enable_irq(irq); +} + static void marvel_irq_noop(unsigned int irq) { @@ -157,22 +171,32 @@ marvel_irq_noop_return(unsigned int irq) static struct irq_chip marvel_legacy_irq_type = { .name = "LEGACY", - .mask = marvel_irq_noop, - .unmask = marvel_irq_noop, + .startup = marvel_irq_noop_return, + .shutdown = marvel_irq_noop, + .enable = marvel_irq_noop, + .disable = marvel_irq_noop, + .ack = marvel_irq_noop, + .end = marvel_irq_noop, }; static struct irq_chip io7_lsi_irq_type = { .name = "LSI", - .unmask = io7_enable_irq, - .mask = io7_disable_irq, - .mask_ack = io7_disable_irq, + .startup = io7_startup_irq, + .shutdown = io7_disable_irq, + .enable = io7_enable_irq, + .disable = io7_disable_irq, + .ack = io7_disable_irq, + .end = io7_end_irq, }; static struct irq_chip io7_msi_irq_type = { .name = "MSI", - .unmask = io7_enable_irq, - .mask = io7_disable_irq, + .startup = io7_startup_irq, + .shutdown = io7_disable_irq, + .enable = io7_enable_irq, + .disable = io7_disable_irq, .ack = marvel_irq_noop, + .end = io7_end_irq, }; static void @@ -280,8 +304,8 @@ init_io7_irqs(struct io7 *io7, /* Set up the lsi irqs. 
*/ for (i = 0; i < 128; ++i) { - irq_to_desc(base + i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); + irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[base + i].chip = lsi_ops; } /* Disable the implemented irqs in hardware. */ @@ -294,8 +318,8 @@ init_io7_irqs(struct io7 *io7, /* Set up the msi irqs. */ for (i = 128; i < (128 + 512); ++i) { - irq_to_desc(base + i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); + irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[base + i].chip = msi_ops; } for (i = 0; i < 16; ++i) @@ -312,8 +336,8 @@ marvel_init_irq(void) /* Reserve the legacy irqs. */ for (i = 0; i < 16; ++i) { - set_irq_chip_and_handler(i, &marvel_legacy_irq_type, - handle_level_irq); + irq_desc[i].status = IRQ_DISABLED; + irq_desc[i].chip = &marvel_legacy_irq_type; } /* Init the io7 irqs. */ diff --git a/trunk/arch/alpha/kernel/sys_mikasa.c b/trunk/arch/alpha/kernel/sys_mikasa.c index bcc1639e8efb..ee8865169811 100644 --- a/trunk/arch/alpha/kernel/sys_mikasa.c +++ b/trunk/arch/alpha/kernel/sys_mikasa.c @@ -54,11 +54,28 @@ mikasa_disable_irq(unsigned int irq) mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); } +static unsigned int +mikasa_startup_irq(unsigned int irq) +{ + mikasa_enable_irq(irq); + return 0; +} + +static void +mikasa_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + mikasa_enable_irq(irq); +} + static struct irq_chip mikasa_irq_type = { .name = "MIKASA", - .unmask = mikasa_enable_irq, - .mask = mikasa_disable_irq, - .mask_ack = mikasa_disable_irq, + .startup = mikasa_startup_irq, + .shutdown = mikasa_disable_irq, + .enable = mikasa_enable_irq, + .disable = mikasa_disable_irq, + .ack = mikasa_disable_irq, + .end = mikasa_end_irq, }; static void @@ -98,8 +115,8 @@ mikasa_init_irq(void) mikasa_update_irq_hw(0); for (i = 16; i < 32; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &mikasa_irq_type; } init_i8259a_irqs(); diff --git a/trunk/arch/alpha/kernel/sys_noritake.c b/trunk/arch/alpha/kernel/sys_noritake.c index e88f4ae1260e..86503fe73a88 100644 --- a/trunk/arch/alpha/kernel/sys_noritake.c +++ b/trunk/arch/alpha/kernel/sys_noritake.c @@ -59,11 +59,28 @@ noritake_disable_irq(unsigned int irq) noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); } +static unsigned int +noritake_startup_irq(unsigned int irq) +{ + noritake_enable_irq(irq); + return 0; +} + +static void +noritake_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + noritake_enable_irq(irq); +} + static struct irq_chip noritake_irq_type = { .name = "NORITAKE", - .unmask = noritake_enable_irq, - .mask = noritake_disable_irq, - .mask_ack = noritake_disable_irq, + .startup = noritake_startup_irq, + .shutdown = noritake_disable_irq, + .enable = noritake_enable_irq, + .disable = noritake_disable_irq, + .ack = noritake_disable_irq, + .end = noritake_end_irq, }; static void @@ -127,8 +144,8 @@ noritake_init_irq(void) outw(0, 0x54c); for (i = 16; i < 48; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &noritake_irq_type; } init_i8259a_irqs(); diff --git a/trunk/arch/alpha/kernel/sys_rawhide.c b/trunk/arch/alpha/kernel/sys_rawhide.c 
index 6a51364dd1cc..26c322bf89ee 100644 --- a/trunk/arch/alpha/kernel/sys_rawhide.c +++ b/trunk/arch/alpha/kernel/sys_rawhide.c @@ -121,11 +121,28 @@ rawhide_mask_and_ack_irq(unsigned int irq) spin_unlock(&rawhide_irq_lock); } +static unsigned int +rawhide_startup_irq(unsigned int irq) +{ + rawhide_enable_irq(irq); + return 0; +} + +static void +rawhide_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + rawhide_enable_irq(irq); +} + static struct irq_chip rawhide_irq_type = { .name = "RAWHIDE", - .unmask = rawhide_enable_irq, - .mask = rawhide_disable_irq, - .mask_ack = rawhide_mask_and_ack_irq, + .startup = rawhide_startup_irq, + .shutdown = rawhide_disable_irq, + .enable = rawhide_enable_irq, + .disable = rawhide_disable_irq, + .ack = rawhide_mask_and_ack_irq, + .end = rawhide_end_irq, }; static void @@ -177,8 +194,8 @@ rawhide_init_irq(void) } for (i = 16; i < 128; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &rawhide_irq_type; } init_i8259a_irqs(); diff --git a/trunk/arch/alpha/kernel/sys_rx164.c b/trunk/arch/alpha/kernel/sys_rx164.c index 89e7e37ec84c..be161129eab9 100644 --- a/trunk/arch/alpha/kernel/sys_rx164.c +++ b/trunk/arch/alpha/kernel/sys_rx164.c @@ -58,11 +58,28 @@ rx164_disable_irq(unsigned int irq) rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); } +static unsigned int +rx164_startup_irq(unsigned int irq) +{ + rx164_enable_irq(irq); + return 0; +} + +static void +rx164_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + rx164_enable_irq(irq); +} + static struct irq_chip rx164_irq_type = { .name = "RX164", - .unmask = rx164_enable_irq, - .mask = rx164_disable_irq, - .mask_ack = rx164_disable_irq, + .startup = rx164_startup_irq, + .shutdown = rx164_disable_irq, + .enable = rx164_enable_irq, + .disable = rx164_disable_irq, + .ack = rx164_disable_irq, + .end = rx164_end_irq, }; static void @@ -99,8 +116,8 @@ rx164_init_irq(void) rx164_update_irq_hw(0); for (i = 16; i < 40; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &rx164_irq_type; } init_i8259a_irqs(); diff --git a/trunk/arch/alpha/kernel/sys_sable.c b/trunk/arch/alpha/kernel/sys_sable.c index 5c4423d1b06c..b2abe27a23cf 100644 --- a/trunk/arch/alpha/kernel/sys_sable.c +++ b/trunk/arch/alpha/kernel/sys_sable.c @@ -474,6 +474,20 @@ sable_lynx_disable_irq(unsigned int irq) #endif } +static unsigned int +sable_lynx_startup_irq(unsigned int irq) +{ + sable_lynx_enable_irq(irq); + return 0; +} + +static void +sable_lynx_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + sable_lynx_enable_irq(irq); +} + static void sable_lynx_mask_and_ack_irq(unsigned int irq) { @@ -489,9 +503,12 @@ sable_lynx_mask_and_ack_irq(unsigned int irq) static struct irq_chip sable_lynx_irq_type = { .name = "SABLE/LYNX", - .unmask = sable_lynx_enable_irq, - .mask = sable_lynx_disable_irq, - .mask_ack = sable_lynx_mask_and_ack_irq, + .startup = sable_lynx_startup_irq, + .shutdown = sable_lynx_disable_irq, + .enable = sable_lynx_enable_irq, + .disable = sable_lynx_disable_irq, + .ack = sable_lynx_mask_and_ack_irq, + .end = sable_lynx_end_irq, }; static void @@ -518,9 +535,8 @@ sable_lynx_init_irq(int nr_of_irqs) long i; for (i = 0; i < nr_of_irqs; 
++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &sable_lynx_irq_type, - handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &sable_lynx_irq_type; } common_init_isa_dma(); diff --git a/trunk/arch/alpha/kernel/sys_takara.c b/trunk/arch/alpha/kernel/sys_takara.c index f8a1e8a862fb..4da596b6adbb 100644 --- a/trunk/arch/alpha/kernel/sys_takara.c +++ b/trunk/arch/alpha/kernel/sys_takara.c @@ -60,11 +60,28 @@ takara_disable_irq(unsigned int irq) takara_update_irq_hw(irq, mask); } +static unsigned int +takara_startup_irq(unsigned int irq) +{ + takara_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +takara_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + takara_enable_irq(irq); +} + static struct irq_chip takara_irq_type = { .name = "TAKARA", - .unmask = takara_enable_irq, - .mask = takara_disable_irq, - .mask_ack = takara_disable_irq, + .startup = takara_startup_irq, + .shutdown = takara_disable_irq, + .enable = takara_enable_irq, + .disable = takara_disable_irq, + .ack = takara_disable_irq, + .end = takara_end_irq, }; static void @@ -136,8 +153,8 @@ takara_init_irq(void) takara_update_irq_hw(i, -1); for (i = 16; i < 128; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = &takara_irq_type; } common_init_isa_dma(); diff --git a/trunk/arch/alpha/kernel/sys_titan.c b/trunk/arch/alpha/kernel/sys_titan.c index e02494bf5ef3..9008d0f20c53 100644 --- a/trunk/arch/alpha/kernel/sys_titan.c +++ b/trunk/arch/alpha/kernel/sys_titan.c @@ -129,6 +129,20 @@ titan_disable_irq(unsigned int irq) spin_unlock(&titan_irq_lock); } +static unsigned int +titan_startup_irq(unsigned int irq) +{ + titan_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +titan_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + titan_enable_irq(irq); +} + static void titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) { @@ -175,17 +189,20 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i, ops, handle_level_irq); + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].chip = ops; } } static struct irq_chip titan_irq_type = { - .name = "TITAN", - .unmask = titan_enable_irq, - .mask = titan_disable_irq, - .mask_ack = titan_disable_irq, - .set_affinity = titan_set_irq_affinity, + .name = "TITAN", + .startup = titan_startup_irq, + .shutdown = titan_disable_irq, + .enable = titan_enable_irq, + .disable = titan_disable_irq, + .ack = titan_disable_irq, + .end = titan_end_irq, + .set_affinity = titan_set_irq_affinity, }; static irqreturn_t diff --git a/trunk/arch/alpha/kernel/sys_wildfire.c b/trunk/arch/alpha/kernel/sys_wildfire.c index eec52594d410..62fd972e18ef 100644 --- a/trunk/arch/alpha/kernel/sys_wildfire.c +++ b/trunk/arch/alpha/kernel/sys_wildfire.c @@ -139,11 +139,32 @@ wildfire_mask_and_ack_irq(unsigned int irq) spin_unlock(&wildfire_irq_lock); } +static unsigned int +wildfire_startup_irq(unsigned int irq) +{ + wildfire_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +wildfire_end_irq(unsigned int irq) +{ +#if 0 + if (!irq_desc[irq].action) + printk("got irq %d\n", irq); +#endif + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) 
+ wildfire_enable_irq(irq); +} + static struct irq_chip wildfire_irq_type = { .name = "WILDFIRE", - .unmask = wildfire_enable_irq, - .mask = wildfire_disable_irq, - .mask_ack = wildfire_mask_and_ack_irq, + .startup = wildfire_startup_irq, + .shutdown = wildfire_disable_irq, + .enable = wildfire_enable_irq, + .disable = wildfire_disable_irq, + .ack = wildfire_mask_and_ack_irq, + .end = wildfire_end_irq, }; static void __init @@ -177,18 +198,15 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) for (i = 0; i < 16; ++i) { if (i == 2) continue; - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, - handle_level_irq); + irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i+irq_bias].chip = &wildfire_irq_type; } - irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, - handle_level_irq); + irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[36+irq_bias].chip = &wildfire_irq_type; for (i = 40; i < 64; ++i) { - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, - handle_level_irq); + irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i+irq_bias].chip = &wildfire_irq_type; } setup_irq(32+irq_bias, &isa_enable); diff --git a/trunk/arch/alpha/lib/Makefile b/trunk/arch/alpha/lib/Makefile index c0a83ab62b78..9b72c59c95be 100644 --- a/trunk/arch/alpha/lib/Makefile +++ b/trunk/arch/alpha/lib/Makefile @@ -2,8 +2,8 @@ # Makefile for alpha-specific library files.. # -asflags-y := $(KBUILD_CFLAGS) -ccflags-y := -Werror +EXTRA_AFLAGS := $(KBUILD_CFLAGS) +EXTRA_CFLAGS := -Werror # Many of these routines have implementations tuned for ev6. # Choose them iff we're targeting ev6 specifically. diff --git a/trunk/arch/alpha/math-emu/Makefile b/trunk/arch/alpha/math-emu/Makefile index 7f4671995245..359ef087e69e 100644 --- a/trunk/arch/alpha/math-emu/Makefile +++ b/trunk/arch/alpha/math-emu/Makefile @@ -2,7 +2,7 @@ # Makefile for the FPU instruction emulation. # -ccflags-y := -w +EXTRA_CFLAGS := -w obj-$(CONFIG_MATHEMU) += math-emu.o diff --git a/trunk/arch/alpha/mm/Makefile b/trunk/arch/alpha/mm/Makefile index c993d3f93cf6..09399c5386cb 100644 --- a/trunk/arch/alpha/mm/Makefile +++ b/trunk/arch/alpha/mm/Makefile @@ -2,7 +2,7 @@ # Makefile for the linux alpha-specific parts of the memory manager. # -ccflags-y := -Werror +EXTRA_CFLAGS := -Werror obj-y := init.o fault.o extable.o diff --git a/trunk/arch/alpha/oprofile/Makefile b/trunk/arch/alpha/oprofile/Makefile index 3473de751b03..4aa56247bdc6 100644 --- a/trunk/arch/alpha/oprofile/Makefile +++ b/trunk/arch/alpha/oprofile/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -Werror -Wno-sign-compare +EXTRA_CFLAGS := -Werror -Wno-sign-compare obj-$(CONFIG_OPROFILE) += oprofile.o diff --git a/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h index 4d6dd4c39b75..74b62f10d07f 100644 --- a/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -13,14 +13,6 @@ #include #include -/* - * Maxium size for a single dma descriptor - * Size is limited to 16 bits. 
- * Size is in the units of addr-widths (1,2,4,8 bytes) - * Larger transfers will be split up to multiple linked desc - */ -#define STEDMA40_MAX_SEG_SIZE 0xFFFF - /* dev types for memcpy */ #define STEDMA40_DEV_DST_MEMORY (-1) #define STEDMA40_DEV_SRC_MEMORY (-1) diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index 1c28816152fa..ef138731c0ea 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -200,16 +200,11 @@ config PL330_DMA platform_data for a dma-pl330 device. config PCH_DMA - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support" + tristate "Topcliff (Intel EG20T) PCH DMA support" depends on PCI && X86 select DMA_ENGINE help - Enable support for Intel EG20T PCH DMA engine. - - This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/ - Output Hub) which is for IVI(In-Vehicle Infotainment) use. - ML7213 is companion chip for Intel Atom E6xx series. - ML7213 is completely compatible for Intel EG20T PCH. + Enable support for the Topcliff (Intel EG20T) PCH DMA engine. config IMX_SDMA tristate "i.MX SDMA support" diff --git a/trunk/drivers/dma/amba-pl08x.c b/trunk/drivers/dma/amba-pl08x.c index 297f48b0cba9..b605cc9ac3a2 100644 --- a/trunk/drivers/dma/amba-pl08x.c +++ b/trunk/drivers/dma/amba-pl08x.c @@ -19,14 +19,14 @@ * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * The full GNU General Public License is in this distribution in the file - * called COPYING. + * The full GNU General Public License is iin this distribution in the + * file called COPYING. * * Documentation: ARM DDI 0196G == PL080 - * Documentation: ARM DDI 0218E == PL081 + * Documentation: ARM DDI 0218E == PL081 * - * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any - * channel. + * PL080 & PL081 both have 16 sets of DMA signals that can be routed to + * any channel. * * The PL080 has 8 channels available for simultaneous use, and the PL081 * has only two channels. So on these DMA controllers the number of channels @@ -53,23 +53,7 @@ * * ASSUMES default (little) endianness for DMA transfers * - * The PL08x has two flow control settings: - * - DMAC flow control: the transfer size defines the number of transfers - * which occur for the current LLI entry, and the DMAC raises TC at the - * end of every LLI entry. Observed behaviour shows the DMAC listening - * to both the BREQ and SREQ signals (contrary to documented), - * transferring data if either is active. The LBREQ and LSREQ signals - * are ignored. - * - * - Peripheral flow control: the transfer size is ignored (and should be - * zero). The data is transferred from the current LLI entry, until - * after the final transfer signalled by LBREQ or LSREQ. The DMAC - * will then move to the next LLI entry. - * - * Only the former works sanely with scatter lists, so we only implement - * the DMAC flow control method. However, peripherals which use the LBREQ - * and LSREQ signals (eg, MMCI) are unable to use this mode, which through - * these hardware restrictions prevents them from using scatter DMA. 
+ * Only DMAC flow control is implemented * * Global TODO: * - Break out common code from arch/arm/mach-s3c64xx and share @@ -77,39 +61,50 @@ #include #include #include +#include #include #include #include -#include #include +#include #include #include #include #include +#include +#include +#include +#include +#include #define DRIVER_NAME "pl08xdmac" /** - * struct vendor_data - vendor-specific config parameters for PL08x derivatives + * struct vendor_data - vendor-specific config parameters + * for PL08x derivates + * @name: the name of this specific variant * @channels: the number of channels available in this variant - * @dualmaster: whether this version supports dual AHB masters or not. + * @dualmaster: whether this version supports dual AHB masters + * or not. */ struct vendor_data { + char *name; u8 channels; bool dualmaster; }; /* * PL08X private data structures - * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit, - * start & end do not - their bus bit info is in cctl. Also note that these - * are fixed 32-bit quantities. + * An LLI struct - see pl08x TRM + * Note that next uses bit[0] as a bus bit, + * start & end do not - their bus bit info + * is in cctl */ -struct pl08x_lli { - u32 src; - u32 dst; - u32 lli; +struct lli { + dma_addr_t src; + dma_addr_t dst; + dma_addr_t next; u32 cctl; }; @@ -124,8 +119,6 @@ struct pl08x_lli { * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors * @pool_ctr: counter of LLIs in the pool - * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches - * @mem_buses: set to indicate memory transfers on AHB2. * @lock: a spinlock for this struct */ struct pl08x_driver_data { @@ -133,13 +126,11 @@ struct pl08x_driver_data { struct dma_device memcpy; void __iomem *base; struct amba_device *adev; - const struct vendor_data *vd; + struct vendor_data *vd; struct pl08x_platform_data *pd; struct pl08x_phy_chan *phy_chans; struct dma_pool *pool; int pool_ctr; - u8 lli_buses; - u8 mem_buses; spinlock_t lock; }; @@ -161,9 +152,9 @@ struct pl08x_driver_data { /* Size (bytes) of each LLI buffer allocated for one transfer */ # define PL08X_LLI_TSFR_SIZE 0x2000 -/* Maximum times we call dma_pool_alloc on this pool without freeing */ +/* Maximimum times we call dma_pool_alloc on this pool without freeing */ #define PL08X_MAX_ALLOCS 0x40 -#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) +#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli)) #define PL08X_ALIGN 8 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) @@ -171,11 +162,6 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) return container_of(chan, struct pl08x_dma_chan, chan); } -static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) -{ - return container_of(tx, struct pl08x_txd, tx); -} - /* * Physical channel handling */ @@ -191,47 +177,88 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) /* * Set the initial DMA register values i.e. those for the first LLI - * The next LLI pointer and the configuration interrupt bit have - * been set when the LLIs were constructed. Poke them into the hardware - * and start the transfer. 
+ * The next lli pointer and the configuration interrupt bit have + * been set when the LLIs were constructed */ -static void pl08x_start_txd(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_set_cregs(struct pl08x_driver_data *pl08x, + struct pl08x_phy_chan *ch) { - struct pl08x_driver_data *pl08x = plchan->host; + /* Wait for channel inactive */ + while (pl08x_phy_channel_busy(ch)) + ; + + dev_vdbg(&pl08x->adev->dev, + "WRITE channel %d: csrc=%08x, cdst=%08x, " + "cctl=%08x, clli=%08x, ccfg=%08x\n", + ch->id, + ch->csrc, + ch->cdst, + ch->cctl, + ch->clli, + ch->ccfg); + + writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR); + writel(ch->cdst, ch->base + PL080_CH_DST_ADDR); + writel(ch->clli, ch->base + PL080_CH_LLI); + writel(ch->cctl, ch->base + PL080_CH_CONTROL); + writel(ch->ccfg, ch->base + PL080_CH_CONFIG); +} + +static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan) +{ + struct pl08x_channel_data *cd = plchan->cd; struct pl08x_phy_chan *phychan = plchan->phychan; - struct pl08x_lli *lli = &txd->llis_va[0]; - u32 val; + struct pl08x_txd *txd = plchan->at; + + /* Copy the basic control register calculated at transfer config */ + phychan->csrc = txd->csrc; + phychan->cdst = txd->cdst; + phychan->clli = txd->clli; + phychan->cctl = txd->cctl; + + /* Assign the signal to the proper control registers */ + phychan->ccfg = cd->ccfg; + phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK; + phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK; + /* If it wasn't set from AMBA, ignore it */ + if (txd->direction == DMA_TO_DEVICE) + /* Select signal as destination */ + phychan->ccfg |= + (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT); + else if (txd->direction == DMA_FROM_DEVICE) + /* Select signal as source */ + phychan->ccfg |= + (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT); + /* Always enable error interrupts */ + phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK; + /* Always enable terminal interrupts */ + phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK; +} - plchan->at = txd; +/* + * Enable the DMA channel + * Assumes all other configuration bits have been set + * as desired before this code is called + */ +static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x, + struct pl08x_phy_chan *ch) +{ + u32 val; - /* Wait for channel inactive */ - while (pl08x_phy_channel_busy(phychan)) - cpu_relax(); + /* + * Do not access config register until channel shows as disabled + */ + while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id)) + ; - dev_vdbg(&pl08x->adev->dev, - "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " - "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", - phychan->id, lli->src, lli->dst, lli->lli, lli->cctl, - txd->ccfg); - - writel(lli->src, phychan->base + PL080_CH_SRC_ADDR); - writel(lli->dst, phychan->base + PL080_CH_DST_ADDR); - writel(lli->lli, phychan->base + PL080_CH_LLI); - writel(lli->cctl, phychan->base + PL080_CH_CONTROL); - writel(txd->ccfg, phychan->base + PL080_CH_CONFIG); - - /* Enable the DMA channel */ - /* Do not access config register until channel shows as disabled */ - while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) - cpu_relax(); - - /* Do not access config register until channel shows as inactive */ - val = readl(phychan->base + PL080_CH_CONFIG); + /* + * Do not access config register until channel shows as inactive + */ + val = readl(ch->base + PL080_CH_CONFIG); while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) - val = readl(phychan->base + PL080_CH_CONFIG); + val = readl(ch->base + PL080_CH_CONFIG); - 
writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); + writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG); } /* @@ -239,8 +266,10 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan, * * Disabling individual channels could lose data. * - * Disable the peripheral DMA after disabling the DMAC in order to allow - * the DMAC FIFO to drain, and hence allow the channel to show inactive + * Disable the peripheral DMA after disabling the DMAC + * in order to allow the DMAC FIFO to drain, and + * hence allow the channel to show inactive + * */ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) { @@ -253,7 +282,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) /* Wait for channel inactive */ while (pl08x_phy_channel_busy(ch)) - cpu_relax(); + ; } static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) @@ -304,56 +333,54 @@ static inline u32 get_bytes_in_cctl(u32 cctl) static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; + struct pl08x_txd *txdi = NULL; struct pl08x_txd *txd; unsigned long flags; - size_t bytes = 0; + u32 bytes = 0; spin_lock_irqsave(&plchan->lock, flags); + ch = plchan->phychan; txd = plchan->at; /* - * Follow the LLIs to get the number of remaining - * bytes in the currently active transaction. + * Next follow the LLIs to get the number of pending bytes in the + * currently active transaction. */ if (ch && txd) { - u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; + struct lli *llis_va = txd->llis_va; + struct lli *llis_bus = (struct lli *) txd->llis_bus; + u32 clli = readl(ch->base + PL080_CH_LLI); - /* First get the remaining bytes in the active transfer */ + /* First get the bytes in the current active LLI */ bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); if (clli) { - struct pl08x_lli *llis_va = txd->llis_va; - dma_addr_t llis_bus = txd->llis_bus; - int index; - - BUG_ON(clli < llis_bus || clli >= llis_bus + - sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS); - - /* - * Locate the next LLI - as this is an array, - * it's simple maths to find. - */ - index = (clli - llis_bus) / sizeof(struct pl08x_lli); + int i = 0; - for (; index < MAX_NUM_TSFR_LLIS; index++) { - bytes += get_bytes_in_cctl(llis_va[index].cctl); + /* Forward to the LLI pointed to by clli */ + while ((clli != (u32) &(llis_bus[i])) && + (i < MAX_NUM_TSFR_LLIS)) + i++; + while (clli) { + bytes += get_bytes_in_cctl(llis_va[i].cctl); /* - * A LLI pointer of 0 terminates the LLI list + * A clli of 0x00000000 will terminate the + * LLI list */ - if (!llis_va[index].lli) - break; + clli = llis_va[i].next; + i++; } } } /* Sum up all queued transactions */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *txdi; - list_for_each_entry(txdi, &plchan->pend_list, node) { + if (!list_empty(&plchan->desc_list)) { + list_for_each_entry(txdi, &plchan->desc_list, node) { bytes += txdi->len; } + } spin_unlock_irqrestore(&plchan->lock, flags); @@ -363,10 +390,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) /* * Allocate a physical channel for a virtual channel - * - * Try to locate a physical channel to be used for this transfer. If all - * are taken return NULL and the requester will have to cope by using - * some fallback PIO mode or retrying later. 
*/ static struct pl08x_phy_chan * pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, @@ -376,6 +399,12 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, unsigned long flags; int i; + /* + * Try to locate a physical channel to be used for + * this transfer. If all are taken return NULL and + * the requester will have to cope by using some fallback + * PIO mode or retrying later. + */ for (i = 0; i < pl08x->vd->channels; i++) { ch = &pl08x->phy_chans[i]; @@ -436,11 +465,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) } static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, - size_t tsize) + u32 tsize) { u32 retbits = cctl; - /* Remove all src, dst and transfer size bits */ + /* Remove all src, dst and transfersize bits */ retbits &= ~PL080_CONTROL_DWIDTH_MASK; retbits &= ~PL080_CONTROL_SWIDTH_MASK; retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; @@ -480,87 +509,95 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, return retbits; } -struct pl08x_lli_build_data { - struct pl08x_txd *txd; - struct pl08x_driver_data *pl08x; - struct pl08x_bus_data srcbus; - struct pl08x_bus_data dstbus; - size_t remainder; -}; - /* - * Autoselect a master bus to use for the transfer this prefers the - * destination bus if both available if fixed address on one bus the - * other will be chosen + * Autoselect a master bus to use for the transfer + * this prefers the destination bus if both available + * if fixed address on one bus the other will be chosen */ -static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, - struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) +void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus, + struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus, + struct pl08x_bus_data **sbus, u32 cctl) { if (!(cctl & PL080_CONTROL_DST_INCR)) { - *mbus = &bd->srcbus; - *sbus = &bd->dstbus; + *mbus = src_bus; + *sbus = dst_bus; } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { - *mbus = &bd->dstbus; - *sbus = &bd->srcbus; + *mbus = dst_bus; + *sbus = src_bus; } else { - if (bd->dstbus.buswidth == 4) { - *mbus = &bd->dstbus; - *sbus = &bd->srcbus; - } else if (bd->srcbus.buswidth == 4) { - *mbus = &bd->srcbus; - *sbus = &bd->dstbus; - } else if (bd->dstbus.buswidth == 2) { - *mbus = &bd->dstbus; - *sbus = &bd->srcbus; - } else if (bd->srcbus.buswidth == 2) { - *mbus = &bd->srcbus; - *sbus = &bd->dstbus; + if (dst_bus->buswidth == 4) { + *mbus = dst_bus; + *sbus = src_bus; + } else if (src_bus->buswidth == 4) { + *mbus = src_bus; + *sbus = dst_bus; + } else if (dst_bus->buswidth == 2) { + *mbus = dst_bus; + *sbus = src_bus; + } else if (src_bus->buswidth == 2) { + *mbus = src_bus; + *sbus = dst_bus; } else { - /* bd->srcbus.buswidth == 1 */ - *mbus = &bd->dstbus; - *sbus = &bd->srcbus; + /* src_bus->buswidth == 1 */ + *mbus = dst_bus; + *sbus = src_bus; } } } /* - * Fills in one LLI for a certain transfer descriptor and advance the counter + * Fills in one LLI for a certain transfer descriptor + * and advance the counter */ -static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, - int num_llis, int len, u32 cctl) +int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, + struct pl08x_txd *txd, int num_llis, int len, + u32 cctl, u32 *remainder) { - struct pl08x_lli *llis_va = bd->txd->llis_va; - dma_addr_t llis_bus = bd->txd->llis_bus; + struct lli *llis_va = txd->llis_va; + struct lli *llis_bus = (struct lli *) txd->llis_bus; BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); - 
llis_va[num_llis].cctl = cctl; - llis_va[num_llis].src = bd->srcbus.addr; - llis_va[num_llis].dst = bd->dstbus.addr; - llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); - if (bd->pl08x->lli_buses & PL08X_AHB2) - llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; + llis_va[num_llis].cctl = cctl; + llis_va[num_llis].src = txd->srcbus.addr; + llis_va[num_llis].dst = txd->dstbus.addr; + + /* + * On versions with dual masters, you can optionally AND on + * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read + * in new LLIs with that controller, but we always try to + * choose AHB1 to point into memory. The idea is to have AHB2 + * fixed on the peripheral and AHB1 messing around in the + * memory. So we don't manipulate this bit currently. + */ + + llis_va[num_llis].next = + (dma_addr_t)((u32) &(llis_bus[num_llis + 1])); if (cctl & PL080_CONTROL_SRC_INCR) - bd->srcbus.addr += len; + txd->srcbus.addr += len; if (cctl & PL080_CONTROL_DST_INCR) - bd->dstbus.addr += len; + txd->dstbus.addr += len; - BUG_ON(bd->remainder < len); + *remainder -= len; - bd->remainder -= len; + return num_llis + 1; } /* - * Return number of bytes to fill to boundary, or len. - * This calculation works for any value of addr. + * Return number of bytes to fill to boundary, or len */ -static inline size_t pl08x_pre_boundary(u32 addr, size_t len) +static inline u32 pl08x_pre_boundary(u32 addr, u32 len) { - size_t boundary_len = PL08X_BOUNDARY_SIZE - - (addr & (PL08X_BOUNDARY_SIZE - 1)); + u32 boundary; + + boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1) + << PL08X_BOUNDARY_SHIFT; - return min(boundary_len, len); + if (boundary < addr + len) + return boundary - addr; + else + return len; } /* @@ -571,13 +608,20 @@ static inline size_t pl08x_pre_boundary(u32 addr, size_t len) static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { + struct pl08x_channel_data *cd = txd->cd; struct pl08x_bus_data *mbus, *sbus; - struct pl08x_lli_build_data bd; + u32 remainder; int num_llis = 0; u32 cctl; - size_t max_bytes_per_lli; - size_t total_bytes = 0; - struct pl08x_lli *llis_va; + int max_bytes_per_lli; + int total_bytes = 0; + struct lli *llis_va; + struct lli *llis_bus; + + if (!txd) { + dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__); + return 0; + } txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); @@ -588,79 +632,121 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, pl08x->pool_ctr++; - /* Get the default CCTL */ - cctl = txd->cctl; + /* + * Initialize bus values for this transfer + * from the passed optimal values + */ + if (!cd) { + dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__); + return 0; + } - bd.txd = txd; - bd.pl08x = pl08x; - bd.srcbus.addr = txd->src_addr; - bd.dstbus.addr = txd->dst_addr; + /* Get the default CCTL from the platform data */ + cctl = cd->cctl; + + /* + * On the PL080 we have two bus masters and we + * should select one for source and one for + * destination. We try to use AHB2 for the + * bus which does not increment (typically the + * peripheral) else we just choose something. 
+ */ + cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); + if (pl08x->vd->dualmaster) { + if (cctl & PL080_CONTROL_SRC_INCR) + /* Source increments, use AHB2 for destination */ + cctl |= PL080_CONTROL_DST_AHB2; + else if (cctl & PL080_CONTROL_DST_INCR) + /* Destination increments, use AHB2 for source */ + cctl |= PL080_CONTROL_SRC_AHB2; + else + /* Just pick something, source AHB1 dest AHB2 */ + cctl |= PL080_CONTROL_DST_AHB2; + } /* Find maximum width of the source bus */ - bd.srcbus.maxwidth = + txd->srcbus.maxwidth = pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> PL080_CONTROL_SWIDTH_SHIFT); /* Find maximum width of the destination bus */ - bd.dstbus.maxwidth = + txd->dstbus.maxwidth = pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> PL080_CONTROL_DWIDTH_SHIFT); /* Set up the bus widths to the maximum */ - bd.srcbus.buswidth = bd.srcbus.maxwidth; - bd.dstbus.buswidth = bd.dstbus.maxwidth; + txd->srcbus.buswidth = txd->srcbus.maxwidth; + txd->dstbus.buswidth = txd->dstbus.maxwidth; dev_vdbg(&pl08x->adev->dev, "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", - __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); + __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); /* * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) */ - max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * + max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * PL080_CONTROL_TRANSFER_SIZE_MASK; dev_vdbg(&pl08x->adev->dev, - "%s max bytes per lli = %zu\n", + "%s max bytes per lli = %d\n", __func__, max_bytes_per_lli); /* We need to count this down to zero */ - bd.remainder = txd->len; + remainder = txd->len; dev_vdbg(&pl08x->adev->dev, - "%s remainder = %zu\n", - __func__, bd.remainder); + "%s remainder = %d\n", + __func__, remainder); /* * Choose bus to align to * - prefers destination bus if both available * - if fixed address on one bus chooses other + * - modifies cctl to choose an apropriate master + */ + pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus, + &mbus, &sbus, cctl); + + + /* + * The lowest bit of the LLI register + * is also used to indicate which master to + * use for reading the LLIs. 
*/ - pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); if (txd->len < mbus->buswidth) { - /* Less than a bus width available - send as single bytes */ - while (bd.remainder) { + /* + * Less than a bus width available + * - send as single bytes + */ + while (remainder) { dev_vdbg(&pl08x->adev->dev, "%s single byte LLIs for a transfer of " - "less than a bus width (remain 0x%08x)\n", - __func__, bd.remainder); + "less than a bus width (remain %08x)\n", + __func__, remainder); cctl = pl08x_cctl_bits(cctl, 1, 1, 1); - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); + num_llis = + pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1, + cctl, &remainder); total_bytes++; } } else { - /* Make one byte LLIs until master bus is aligned */ + /* + * Make one byte LLIs until master bus is aligned + * - slave will then be aligned also + */ while ((mbus->addr) % (mbus->buswidth)) { dev_vdbg(&pl08x->adev->dev, "%s adjustment lli for less than bus width " - "(remain 0x%08x)\n", - __func__, bd.remainder); + "(remain %08x)\n", + __func__, remainder); cctl = pl08x_cctl_bits(cctl, 1, 1, 1); - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); + num_llis = pl08x_fill_lli_for_desc + (pl08x, txd, num_llis, 1, cctl, &remainder); total_bytes++; } /* - * Master now aligned + * Master now aligned * - if slave is not then we must set its width down */ if (sbus->addr % sbus->buswidth) { @@ -675,51 +761,63 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, * Make largest possible LLIs until less than one bus * width left */ - while (bd.remainder > (mbus->buswidth - 1)) { - size_t lli_len, target_len, tsize, odd_bytes; + while (remainder > (mbus->buswidth - 1)) { + int lli_len, target_len; + int tsize; + int odd_bytes; /* * If enough left try to send max possible, * otherwise try to send the remainder */ - target_len = min(bd.remainder, max_bytes_per_lli); + target_len = remainder; + if (remainder > max_bytes_per_lli) + target_len = max_bytes_per_lli; /* - * Set bus lengths for incrementing buses to the - * number of bytes which fill to next memory boundary, - * limiting on the target length calculated above. + * Set bus lengths for incrementing busses + * to number of bytes which fill to next memory + * boundary */ if (cctl & PL080_CONTROL_SRC_INCR) - bd.srcbus.fill_bytes = - pl08x_pre_boundary(bd.srcbus.addr, - target_len); + txd->srcbus.fill_bytes = + pl08x_pre_boundary( + txd->srcbus.addr, + remainder); else - bd.srcbus.fill_bytes = target_len; + txd->srcbus.fill_bytes = + max_bytes_per_lli; if (cctl & PL080_CONTROL_DST_INCR) - bd.dstbus.fill_bytes = - pl08x_pre_boundary(bd.dstbus.addr, - target_len); + txd->dstbus.fill_bytes = + pl08x_pre_boundary( + txd->dstbus.addr, + remainder); else - bd.dstbus.fill_bytes = target_len; + txd->dstbus.fill_bytes = + max_bytes_per_lli; - /* Find the nearest */ - lli_len = min(bd.srcbus.fill_bytes, - bd.dstbus.fill_bytes); + /* + * Find the nearest + */ + lli_len = min(txd->srcbus.fill_bytes, + txd->dstbus.fill_bytes); - BUG_ON(lli_len > bd.remainder); + BUG_ON(lli_len > remainder); if (lli_len <= 0) { dev_err(&pl08x->adev->dev, - "%s lli_len is %zu, <= 0\n", + "%s lli_len is %d, <= 0\n", __func__, lli_len); return 0; } if (lli_len == target_len) { /* - * Can send what we wanted. 
- * Maintain alignment + * Can send what we wanted + */ + /* + * Maintain alignment */ lli_len = (lli_len/mbus->buswidth) * mbus->buswidth; @@ -727,14 +825,17 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, } else { /* * So now we know how many bytes to transfer - * to get to the nearest boundary. The next - * LLI will past the boundary. However, we - * may be working to a boundary on the slave - * bus. We need to ensure the master stays - * aligned, and that we are working in - * multiples of the bus widths. + * to get to the nearest boundary + * The next lli will past the boundary + * - however we may be working to a boundary + * on the slave bus + * We need to ensure the master stays aligned */ odd_bytes = lli_len % mbus->buswidth; + /* + * - and that we are working in multiples + * of the bus widths + */ lli_len -= odd_bytes; } @@ -754,38 +855,41 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, if (target_len != lli_len) { dev_vdbg(&pl08x->adev->dev, - "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", + "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n", __func__, target_len, lli_len, txd->len); } cctl = pl08x_cctl_bits(cctl, - bd.srcbus.buswidth, - bd.dstbus.buswidth, + txd->srcbus.buswidth, + txd->dstbus.buswidth, tsize); dev_vdbg(&pl08x->adev->dev, - "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", - __func__, lli_len, bd.remainder); - pl08x_fill_lli_for_desc(&bd, num_llis++, - lli_len, cctl); + "%s fill lli with single lli chunk of size %08x (remainder %08x)\n", + __func__, lli_len, remainder); + num_llis = pl08x_fill_lli_for_desc(pl08x, txd, + num_llis, lli_len, cctl, + &remainder); total_bytes += lli_len; } if (odd_bytes) { /* - * Creep past the boundary, maintaining - * master alignment + * Creep past the boundary, + * maintaining master alignment */ int j; for (j = 0; (j < mbus->buswidth) - && (bd.remainder); j++) { + && (remainder); j++) { cctl = pl08x_cctl_bits(cctl, 1, 1, 1); dev_vdbg(&pl08x->adev->dev, - "%s align with boundary, single byte (remain 0x%08zx)\n", - __func__, bd.remainder); - pl08x_fill_lli_for_desc(&bd, - num_llis++, 1, cctl); + "%s align with boundardy, single byte (remain %08x)\n", + __func__, remainder); + num_llis = + pl08x_fill_lli_for_desc(pl08x, + txd, num_llis, 1, + cctl, &remainder); total_bytes++; } } @@ -794,18 +898,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, /* * Send any odd bytes */ - while (bd.remainder) { + if (remainder < 0) { + dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n", + __func__, remainder); + return 0; + } + + while (remainder) { cctl = pl08x_cctl_bits(cctl, 1, 1, 1); dev_vdbg(&pl08x->adev->dev, - "%s align with boundary, single odd byte (remain %zu)\n", - __func__, bd.remainder); - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); + "%s align with boundardy, single odd byte (remain %d)\n", + __func__, remainder); + num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis, + 1, cctl, &remainder); total_bytes++; } } if (total_bytes != txd->len) { dev_err(&pl08x->adev->dev, - "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", + "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n", __func__, total_bytes, txd->len); return 0; } @@ -816,12 +927,41 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, __func__, (u32) MAX_NUM_TSFR_LLIS); return 0; } - + /* + * 
Decide whether this is a loop or a terminated transfer + */ llis_va = txd->llis_va; - /* The final LLI terminates the LLI. */ - llis_va[num_llis - 1].lli = 0; - /* The final LLI element shall also fire an interrupt. */ - llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; + llis_bus = (struct lli *) txd->llis_bus; + + if (cd->circular_buffer) { + /* + * Loop the circular buffer so that the next element + * points back to the beginning of the LLI. + */ + llis_va[num_llis - 1].next = + (dma_addr_t)((unsigned int)&(llis_bus[0])); + } else { + /* + * On non-circular buffers, the final LLI terminates + * the LLI. + */ + llis_va[num_llis - 1].next = 0; + /* + * The final LLI element shall also fire an interrupt + */ + llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; + } + + /* Now store the channel register values */ + txd->csrc = llis_va[0].src; + txd->cdst = llis_va[0].dst; + if (num_llis > 1) + txd->clli = llis_va[0].next; + else + txd->clli = 0; + + txd->cctl = llis_va[0].cctl; + /* ccfg will be set at physical channel allocation time */ #ifdef VERBOSE_DEBUG { @@ -829,13 +969,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, for (i = 0; i < num_llis; i++) { dev_vdbg(&pl08x->adev->dev, - "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", + "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", i, &llis_va[i], llis_va[i].src, llis_va[i].dst, llis_va[i].cctl, - llis_va[i].lli + llis_va[i].next ); } } @@ -848,8 +988,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { + if (!txd) + dev_err(&pl08x->adev->dev, + "%s no descriptor to free\n", + __func__); + /* Free the LLI */ - dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); + dma_pool_free(pl08x->pool, txd->llis_va, + txd->llis_bus); pl08x->pool_ctr--; @@ -862,12 +1008,13 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, struct pl08x_txd *txdi = NULL; struct pl08x_txd *next; - if (!list_empty(&plchan->pend_list)) { + if (!list_empty(&plchan->desc_list)) { list_for_each_entry_safe(txdi, - next, &plchan->pend_list, node) { + next, &plchan->desc_list, node) { list_del(&txdi->node); pl08x_free_txd(pl08x, txdi); } + } } @@ -922,12 +1069,6 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, return -EBUSY; } ch->signal = ret; - - /* Assign the flow control signal to this channel */ - if (txd->direction == DMA_TO_DEVICE) - txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; - else if (txd->direction == DMA_FROM_DEVICE) - txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; } dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", @@ -935,54 +1076,19 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, ch->signal, plchan->name); - plchan->phychan_hold++; plchan->phychan = ch; return 0; } -static void release_phy_channel(struct pl08x_dma_chan *plchan) -{ - struct pl08x_driver_data *pl08x = plchan->host; - - if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { - pl08x->pd->put_signal(plchan); - plchan->phychan->signal = -1; - } - pl08x_put_phy_channel(pl08x, plchan->phychan); - plchan->phychan = NULL; -} - static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) { struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); - struct pl08x_txd *txd = to_pl08x_txd(tx); - unsigned long flags; - spin_lock_irqsave(&plchan->lock, flags); - - plchan->chan.cookie += 1; - if (plchan->chan.cookie < 0) - 
plchan->chan.cookie = 1; - tx->cookie = plchan->chan.cookie; - - /* Put this onto the pending list */ - list_add_tail(&txd->node, &plchan->pend_list); - - /* - * If there was no physical channel available for this memcpy, - * stack the request up and indicate that the channel is waiting - * for a free physical channel. - */ - if (!plchan->slave && !plchan->phychan) { - /* Do this memcpy whenever there is a channel ready */ - plchan->state = PL08X_CHAN_WAITING; - plchan->waiting = txd; - } else { - plchan->phychan_hold--; - } - - spin_unlock_irqrestore(&plchan->lock, flags); + atomic_inc(&plchan->last_issued); + tx->cookie = atomic_read(&plchan->last_issued); + /* This unlock follows the lock in the prep() function */ + spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); return tx->cookie; } @@ -996,9 +1102,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( } /* - * Code accessing dma_async_is_complete() in a tight loop may give problems. - * If slaves are relying on interrupts to signal completion this function - * must not be called with interrupts disabled. + * Code accessing dma_async_is_complete() in a tight loop + * may give problems - could schedule where indicated. + * If slaves are relying on interrupts to signal completion this + * function must not be called with interrupts disabled */ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, @@ -1011,7 +1118,7 @@ pl08x_dma_tx_status(struct dma_chan *chan, enum dma_status ret; u32 bytesleft = 0; - last_used = plchan->chan.cookie; + last_used = atomic_read(&plchan->last_issued); last_complete = plchan->lc; ret = dma_async_is_complete(cookie, last_complete, last_used); @@ -1020,10 +1127,14 @@ pl08x_dma_tx_status(struct dma_chan *chan, return ret; } + /* + * schedule(); could be inserted here + */ + /* * This cookie not complete yet */ - last_used = plchan->chan.cookie; + last_used = atomic_read(&plchan->last_issued); last_complete = plchan->lc; /* Get number of bytes left in the active transactions and queue */ @@ -1088,35 +1199,37 @@ static const struct burst_table burst_sizes[] = { }, }; -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) +static void dma_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_channel_data *cd = plchan->cd; enum dma_slave_buswidth addr_width; - dma_addr_t addr; u32 maxburst; u32 cctl = 0; - int i; - - if (!plchan->slave) - return -EINVAL; + /* Mask out all except src and dst channel */ + u32 ccfg = cd->ccfg & 0x000003DEU; + int i = 0; /* Transfer direction */ plchan->runtime_direction = config->direction; if (config->direction == DMA_TO_DEVICE) { - addr = config->dst_addr; + plchan->runtime_addr = config->dst_addr; + cctl |= PL080_CONTROL_SRC_INCR; + ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; addr_width = config->dst_addr_width; maxburst = config->dst_maxburst; } else if (config->direction == DMA_FROM_DEVICE) { - addr = config->src_addr; + plchan->runtime_addr = config->src_addr; + cctl |= PL080_CONTROL_DST_INCR; + ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; addr_width = config->src_addr_width; maxburst = config->src_maxburst; } else { dev_err(&pl08x->adev->dev, "bad runtime_config: alien transfer direction\n"); - return -EINVAL; + return; } switch (addr_width) { @@ -1135,40 +1248,42 @@ static int dma_set_runtime_config(struct dma_chan *chan, 
default: dev_err(&pl08x->adev->dev, "bad runtime_config: alien address width\n"); - return -EINVAL; + return; } /* * Now decide on a maxburst: - * If this channel will only request single transfers, set this - * down to ONE element. Also select one element if no maxburst - * is specified. + * If this channel will only request single transfers, set + * this down to ONE element. */ - if (plchan->cd->single || maxburst == 0) { + if (plchan->cd->single) { cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); } else { - for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) + while (i < ARRAY_SIZE(burst_sizes)) { if (burst_sizes[i].burstwords <= maxburst) break; + i++; + } cctl |= burst_sizes[i].reg; } - plchan->runtime_addr = addr; + /* Access the cell in privileged mode, non-bufferable, non-cacheable */ + cctl &= ~PL080_CONTROL_PROT_MASK; + cctl |= PL080_CONTROL_PROT_SYS; /* Modify the default channel data to fit PrimeCell request */ cd->cctl = cctl; + cd->ccfg = ccfg; dev_dbg(&pl08x->adev->dev, "configured channel %s (%s) for %s, data width %d, " - "maxburst %d words, LE, CCTL=0x%08x\n", + "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", dma_chan_name(chan), plchan->name, (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", addr_width, maxburst, - cctl); - - return 0; + cctl, ccfg); } /* @@ -1178,26 +1293,35 @@ static int dma_set_runtime_config(struct dma_chan *chan, static void pl08x_issue_pending(struct dma_chan *chan) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_driver_data *pl08x = plchan->host; unsigned long flags; spin_lock_irqsave(&plchan->lock, flags); - /* Something is already active, or we're waiting for a channel... */ - if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { - spin_unlock_irqrestore(&plchan->lock, flags); - return; + /* Something is already active */ + if (plchan->at) { + spin_unlock_irqrestore(&plchan->lock, flags); + return; } + /* Didn't get a physical channel so waiting for it ... */ + if (plchan->state == PL08X_CHAN_WAITING) + return; + /* Take the first element in the queue and execute it */ - if (!list_empty(&plchan->pend_list)) { + if (!list_empty(&plchan->desc_list)) { struct pl08x_txd *next; - next = list_first_entry(&plchan->pend_list, + next = list_first_entry(&plchan->desc_list, struct pl08x_txd, node); list_del(&next->node); + plchan->at = next; plchan->state = PL08X_CHAN_RUNNING; - pl08x_start_txd(plchan, next); + /* Configure the physical channel for the active txd */ + pl08x_config_phychan_for_txd(plchan); + pl08x_set_cregs(pl08x, plchan->phychan); + pl08x_enable_phy_chan(pl08x, plchan->phychan); } spin_unlock_irqrestore(&plchan->lock, flags); @@ -1206,17 +1330,30 @@ static void pl08x_issue_pending(struct dma_chan *chan) static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, struct pl08x_txd *txd) { + int num_llis; struct pl08x_driver_data *pl08x = plchan->host; - unsigned long flags; - int num_llis, ret; + int ret; num_llis = pl08x_fill_llis_for_desc(pl08x, txd); - if (!num_llis) { - kfree(txd); + + if (!num_llis) return -EINVAL; - } - spin_lock_irqsave(&plchan->lock, flags); + spin_lock_irqsave(&plchan->lock, plchan->lockflags); + + /* + * If this device is not using a circular buffer then + * queue this new descriptor for transfer. + * The descriptor for a circular buffer continues + * to be used until the channel is freed. 
+ */ + if (txd->cd->circular_buffer) + dev_err(&pl08x->adev->dev, + "%s attempting to queue a circular buffer\n", + __func__); + else + list_add_tail(&txd->node, + &plchan->desc_list); /* * See if we already have a physical channel allocated, @@ -1225,73 +1362,44 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, ret = prep_phy_channel(plchan, txd); if (ret) { /* - * No physical channel was available. - * - * memcpy transfers can be sorted out at submission time. - * - * Slave transfers may have been denied due to platform - * channel muxing restrictions. Since there is no guarantee - * that this will ever be resolved, and the signal must be - * acquired AFTER acquiring the physical channel, we will let - * them be NACK:ed with -EBUSY here. The drivers can retry - * the prep() call if they are eager on doing this using DMA. + * No physical channel available, we will + * stack up the memcpy channels until there is a channel + * available to handle it whereas slave transfers may + * have been denied due to platform channel muxing restrictions + * and since there is no guarantee that this will ever be + * resolved, and since the signal must be aquired AFTER + * aquiring the physical channel, we will let them be NACK:ed + * with -EBUSY here. The drivers can alway retry the prep() + * call if they are eager on doing this using DMA. */ if (plchan->slave) { pl08x_free_txd_list(pl08x, plchan); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); return -EBUSY; } + /* Do this memcpy whenever there is a channel ready */ + plchan->state = PL08X_CHAN_WAITING; + plchan->waiting = txd; } else /* - * Else we're all set, paused and ready to roll, status - * will switch to PL08X_CHAN_RUNNING when we call - * issue_pending(). If there is something running on the - * channel already we don't change its state. + * Else we're all set, paused and ready to roll, + * status will switch to PL08X_CHAN_RUNNING when + * we call issue_pending(). If there is something + * running on the channel already we don't change + * its state. */ if (plchan->state == PL08X_CHAN_IDLE) plchan->state = PL08X_CHAN_PAUSED; - spin_unlock_irqrestore(&plchan->lock, flags); + /* + * Notice that we leave plchan->lock locked on purpose: + * it will be unlocked in the subsequent tx_submit() + * call. This is a consequence of the current API. + */ return 0; } -/* - * Given the source and destination available bus masks, select which - * will be routed to each port. We try to have source and destination - * on separate ports, but always respect the allowable settings. 
- */ -static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) -{ - u32 cctl = 0; - - if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) - cctl |= PL080_CONTROL_DST_AHB2; - if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) - cctl |= PL080_CONTROL_SRC_AHB2; - - return cctl; -} - -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, - unsigned long flags) -{ - struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); - - if (txd) { - dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = pl08x_tx_submit; - INIT_LIST_HEAD(&txd->node); - - /* Always enable error and terminal interrupts */ - txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | - PL080_CONFIG_TC_IRQ_MASK; - } - return txd; -} - /* * Initialize a descriptor to be used by memcpy submit */ @@ -1304,38 +1412,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_txd *txd; int ret; - txd = pl08x_get_txd(plchan, flags); + txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); return NULL; } + dma_async_tx_descriptor_init(&txd->tx, chan); txd->direction = DMA_NONE; - txd->src_addr = src; - txd->dst_addr = dest; - txd->len = len; + txd->srcbus.addr = src; + txd->dstbus.addr = dest; /* Set platform data for m2m */ - txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl & - ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); - + txd->cd = &pl08x->pd->memcpy_channel; /* Both to be incremented or the code will break */ - txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; - - if (pl08x->vd->dualmaster) - txd->cctl |= pl08x_select_bus(pl08x, - pl08x->mem_buses, pl08x->mem_buses); + txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; + txd->tx.tx_submit = pl08x_tx_submit; + txd->tx.callback = NULL; + txd->tx.callback_param = NULL; + txd->len = len; + INIT_LIST_HEAD(&txd->node); ret = pl08x_prep_channel_resources(plchan, txd); if (ret) return NULL; + /* + * NB: the channel lock is held at this point so tx_submit() + * must be called in direct succession. + */ return &txd->tx; } -static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( +struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long flags) @@ -1343,7 +1453,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_txd *txd; - u8 src_buses, dst_buses; int ret; /* @@ -1358,12 +1467,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sgl->length, plchan->name); - txd = pl08x_get_txd(plchan, flags); + txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } + dma_async_tx_descriptor_init(&txd->tx, chan); + if (direction != plchan->runtime_direction) dev_err(&pl08x->adev->dev, "%s DMA setup does not match " "the direction configured for the PrimeCell\n", @@ -1375,47 +1486,37 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( * channel target address dynamically at runtime. 
*/ txd->direction = direction; - txd->len = sgl->length; - - txd->cctl = plchan->cd->cctl & - ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | - PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | - PL080_CONTROL_PROT_MASK); - - /* Access the cell in privileged mode, non-bufferable, non-cacheable */ - txd->cctl |= PL080_CONTROL_PROT_SYS; - if (direction == DMA_TO_DEVICE) { - txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl |= PL080_CONTROL_SRC_INCR; - txd->src_addr = sgl->dma_address; + txd->srcbus.addr = sgl->dma_address; if (plchan->runtime_addr) - txd->dst_addr = plchan->runtime_addr; + txd->dstbus.addr = plchan->runtime_addr; else - txd->dst_addr = plchan->cd->addr; - src_buses = pl08x->mem_buses; - dst_buses = plchan->cd->periph_buses; + txd->dstbus.addr = plchan->cd->addr; } else if (direction == DMA_FROM_DEVICE) { - txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl |= PL080_CONTROL_DST_INCR; if (plchan->runtime_addr) - txd->src_addr = plchan->runtime_addr; + txd->srcbus.addr = plchan->runtime_addr; else - txd->src_addr = plchan->cd->addr; - txd->dst_addr = sgl->dma_address; - src_buses = plchan->cd->periph_buses; - dst_buses = pl08x->mem_buses; + txd->srcbus.addr = plchan->cd->addr; + txd->dstbus.addr = sgl->dma_address; } else { dev_err(&pl08x->adev->dev, "%s direction unsupported\n", __func__); return NULL; } - - txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); + txd->cd = plchan->cd; + txd->tx.tx_submit = pl08x_tx_submit; + txd->tx.callback = NULL; + txd->tx.callback_param = NULL; + txd->len = sgl->length; + INIT_LIST_HEAD(&txd->node); ret = pl08x_prep_channel_resources(plchan, txd); if (ret) return NULL; + /* + * NB: the channel lock is held at this point so tx_submit() + * must be called in direct succession. + */ return &txd->tx; } @@ -1430,8 +1531,10 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, /* Controls applicable to inactive channels */ if (cmd == DMA_SLAVE_CONFIG) { - return dma_set_runtime_config(chan, - (struct dma_slave_config *)arg); + dma_set_runtime_config(chan, + (struct dma_slave_config *) + arg); + return 0; } /* @@ -1455,8 +1558,16 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Mark physical channel as free and free any slave * signal */ - release_phy_channel(plchan); + if ((plchan->phychan->signal >= 0) && + pl08x->pd->put_signal) { + pl08x->pd->put_signal(plchan); + plchan->phychan->signal = -1; + } + pl08x_put_phy_channel(pl08x, plchan->phychan); + plchan->phychan = NULL; } + /* Stop any pending tasklet */ + tasklet_disable(&plchan->tasklet); /* Dequeue jobs and free LLIs */ if (plchan->at) { pl08x_free_txd(pl08x, plchan->at); @@ -1498,9 +1609,10 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) /* * Just check that the device is there and active - * TODO: turn this bit on/off depending on the number of physical channels - * actually used, if it is zero... well shut it off. That will save some - * power. Cut the clock at the same time. + * TODO: turn this bit on/off depending on the number of + * physical channels actually used, if it is zero... well + * shut it off. That will save some power. Cut the clock + * at the same time. 
*/ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) { @@ -1508,66 +1620,78 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) val = readl(pl08x->base + PL080_CONFIG); val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); - /* We implicitly clear bit 1 and that means little-endian mode */ + /* We implictly clear bit 1 and that means little-endian mode */ val |= PL080_CONFIG_ENABLE; writel(val, pl08x->base + PL080_CONFIG); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->tx.chan->device->dev; - - if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, txd->src_addr, txd->len, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, txd->src_addr, txd->len, - DMA_TO_DEVICE); - } - if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, txd->dst_addr, txd->len, - DMA_FROM_DEVICE); - else - dma_unmap_page(dev, txd->dst_addr, txd->len, - DMA_FROM_DEVICE); - } -} - static void pl08x_tasklet(unsigned long data) { struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; + struct pl08x_phy_chan *phychan = plchan->phychan; struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_txd *txd; - unsigned long flags; - spin_lock_irqsave(&plchan->lock, flags); + if (!plchan) + BUG(); - txd = plchan->at; - plchan->at = NULL; + spin_lock(&plchan->lock); - if (txd) { - /* Update last completed */ - plchan->lc = txd->tx.cookie; - } + if (plchan->at) { + dma_async_tx_callback callback = + plchan->at->tx.callback; + void *callback_param = + plchan->at->tx.callback_param; - /* If a new descriptor is queued, set it up plchan->at is NULL here */ - if (!list_empty(&plchan->pend_list)) { + /* + * Update last completed + */ + plchan->lc = + (plchan->at->tx.cookie); + + /* + * Callback to signal completion + */ + if (callback) + callback(callback_param); + + /* + * Device callbacks should NOT clear + * the current transaction on the channel + * Linus: sometimes they should? + */ + if (!plchan->at) + BUG(); + + /* + * Free the descriptor if it's not for a device + * using a circular buffer + */ + if (!plchan->at->cd->circular_buffer) { + pl08x_free_txd(pl08x, plchan->at); + plchan->at = NULL; + } + /* + * else descriptor for circular + * buffers only freed when + * client has disabled dma + */ + } + /* + * If a new descriptor is queued, set it up + * plchan->at is NULL here + */ + if (!list_empty(&plchan->desc_list)) { struct pl08x_txd *next; - next = list_first_entry(&plchan->pend_list, + next = list_first_entry(&plchan->desc_list, struct pl08x_txd, node); list_del(&next->node); - - pl08x_start_txd(plchan, next); - } else if (plchan->phychan_hold) { - /* - * This channel is still in use - we have a new txd being - * prepared and will soon be queued. Don't give up the - * physical channel. 
- */ + plchan->at = next; + /* Configure the physical channel for the next txd */ + pl08x_config_phychan_for_txd(plchan); + pl08x_set_cregs(pl08x, plchan->phychan); + pl08x_enable_phy_chan(pl08x, plchan->phychan); } else { struct pl08x_dma_chan *waiting = NULL; @@ -1575,14 +1699,20 @@ static void pl08x_tasklet(unsigned long data) * No more jobs, so free up the physical channel * Free any allocated signal on slave transfers too */ - release_phy_channel(plchan); + if ((phychan->signal >= 0) && pl08x->pd->put_signal) { + pl08x->pd->put_signal(plchan); + phychan->signal = -1; + } + pl08x_put_phy_channel(pl08x, phychan); + plchan->phychan = NULL; plchan->state = PL08X_CHAN_IDLE; /* - * And NOW before anyone else can grab that free:d up - * physical channel, see if there is some memcpy pending - * that seriously needs to start because of being stacked - * up while we were choking the physical channels with data. + * And NOW before anyone else can grab that free:d + * up physical channel, see if there is some memcpy + * pending that seriously needs to start because of + * being stacked up while we were choking the + * physical channels with data. */ list_for_each_entry(waiting, &pl08x->memcpy.channels, chan.device_node) { @@ -1594,7 +1724,6 @@ static void pl08x_tasklet(unsigned long data) ret = prep_phy_channel(waiting, waiting->waiting); BUG_ON(ret); - waiting->phychan_hold--; waiting->state = PL08X_CHAN_RUNNING; waiting->waiting = NULL; pl08x_issue_pending(&waiting->chan); @@ -1603,25 +1732,7 @@ static void pl08x_tasklet(unsigned long data) } } - spin_unlock_irqrestore(&plchan->lock, flags); - - if (txd) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - /* Don't try to unmap buffers on slave channels */ - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - /* Free the descriptor */ - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - - /* Callback to signal completion */ - if (callback) - callback(callback_param); - } + spin_unlock(&plchan->lock); } static irqreturn_t pl08x_irq(int irq, void *dev) @@ -1633,7 +1744,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev) val = readl(pl08x->base + PL080_ERR_STATUS); if (val) { - /* An error interrupt (on one or more channels) */ + /* + * An error interrupt (on one or more channels) + */ dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", __func__, val); @@ -1657,7 +1770,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev) mask |= (1 << i); } } - /* Clear only the terminal interrupts on channels we processed */ + /* + * Clear only the terminal interrupts on channels we processed + */ writel(mask, pl08x->base + PL080_TC_CLEAR); return mask ? 
IRQ_HANDLED : IRQ_NONE; @@ -1676,7 +1791,6 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, int i; INIT_LIST_HEAD(&dmadev->channels); - /* * Register as many many memcpy as we have physical channels, * we won't always be able to use all but the code will have @@ -1705,23 +1819,16 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } - if (chan->cd->circular_buffer) { - dev_err(&pl08x->adev->dev, - "channel %s: circular buffers not supported\n", - chan->name); - kfree(chan); - continue; - } dev_info(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); chan->chan.device = dmadev; - chan->chan.cookie = 0; - chan->lc = 0; + atomic_set(&chan->last_issued, 0); + chan->lc = atomic_read(&chan->last_issued); spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pend_list); + INIT_LIST_HEAD(&chan->desc_list); tasklet_init(&chan->tasklet, pl08x_tasklet, (unsigned long) chan); @@ -1791,7 +1898,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { - seq_printf(s, "%s\t\t%s\n", chan->name, + seq_printf(s, "%s\t\t\%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1799,7 +1906,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { - seq_printf(s, "%s\t\t%s\n", chan->name, + seq_printf(s, "%s\t\t\%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1835,7 +1942,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) static int pl08x_probe(struct amba_device *adev, struct amba_id *id) { struct pl08x_driver_data *pl08x; - const struct vendor_data *vd = id->data; + struct vendor_data *vd = id->data; int ret = 0; int i; @@ -1883,14 +1990,6 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) pl08x->adev = adev; pl08x->vd = vd; - /* By default, AHB1 only. 
If dualmaster, from platform */ - pl08x->lli_buses = PL08X_AHB1; - pl08x->mem_buses = PL08X_AHB1; - if (pl08x->vd->dualmaster) { - pl08x->lli_buses = pl08x->pd->lli_buses; - pl08x->mem_buses = pl08x->pd->mem_buses; - } - /* A DMA memory pool for LLIs, align on 1-byte boundary */ pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); @@ -1910,12 +2009,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) /* Turn on the PL08x */ pl08x_ensure_on(pl08x); - /* Attach the interrupt handler */ + /* + * Attach the interrupt handler + */ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, - DRIVER_NAME, pl08x); + vd->name, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", __func__, adev->irq[0]); @@ -1986,9 +2087,8 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) amba_set_drvdata(adev, pl08x); init_pl08x_debugfs(pl08x); - dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", - amba_part(adev), amba_rev(adev), - (unsigned long long)adev->res.start, adev->irq[0]); + dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", + vd->name, adev->res.start); return 0; out_no_slave_reg: @@ -2015,11 +2115,13 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) /* PL080 has 8 channels and the PL080 have just 2 */ static struct vendor_data vendor_pl080 = { + .name = "PL080", .channels = 8, .dualmaster = true, }; static struct vendor_data vendor_pl081 = { + .name = "PL081", .channels = 2, .dualmaster = false, }; @@ -2058,7 +2160,7 @@ static int __init pl08x_init(void) retval = amba_driver_register(&pl08x_amba_driver); if (retval) printk(KERN_WARNING DRIVER_NAME - "failed to register as an AMBA device (%d)\n", + "failed to register as an amba device (%d)\n", retval); return retval; } diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c index 3d7d705f026f..ea0ee81cff53 100644 --- a/trunk/drivers/dma/at_hdmac.c +++ b/trunk/drivers/dma/at_hdmac.c @@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* move myself to free_list */ list_move(&desc->desc_node, &atchan->free_list); - /* unmap dma addresses (not on slave channels) */ + /* unmap dma addresses */ if (!atchan->chan_common.private) { struct device *parent = chan2parent(&atchan->chan_common); if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { @@ -583,6 +583,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, desc->lli.ctrlb = ctrlb; desc->txd.cookie = 0; + async_tx_ack(&desc->txd); if (!first) { first = desc; @@ -603,7 +604,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); - first->txd.flags = flags; /* client is in control of this ack */ + desc->txd.flags = flags; /* client is in control of this ack */ return &first->txd; @@ -669,7 +670,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!desc) goto err_desc_get; - mem = sg_dma_address(sg); + mem = sg_phys(sg); len = sg_dma_len(sg); mem_width = 2; if (unlikely(mem & 3 || len & 3)) @@ -711,7 +712,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!desc) goto err_desc_get; - mem = sg_dma_address(sg); + mem = sg_phys(sg); len = sg_dma_len(sg); mem_width = 2; if (unlikely(mem & 3 || len & 3)) @@ -748,8 +749,8 @@ 
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first->txd.cookie = -EBUSY; first->len = total_len; - /* first link descriptor of list is responsible of flags */ - first->txd.flags = flags; /* client is in control of this ack */ + /* last link descriptor of list is responsible of flags */ + prev->txd.flags = flags; /* client is in control of this ack */ return &first->txd; @@ -853,11 +854,11 @@ static void atc_issue_pending(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "issue_pending\n"); - spin_lock_bh(&atchan->lock); if (!atc_chan_is_enabled(atchan)) { + spin_lock_bh(&atchan->lock); atc_advance_work(atchan); + spin_unlock_bh(&atchan->lock); } - spin_unlock_bh(&atchan->lock); } /** @@ -1209,7 +1210,7 @@ static int __init at_dma_init(void) { return platform_driver_probe(&at_dma_driver, at_dma_probe); } -subsys_initcall(at_dma_init); +module_init(at_dma_init); static void __exit at_dma_exit(void) { diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c index 4de947a450fc..e5e172d21692 100644 --- a/trunk/drivers/dma/fsldma.c +++ b/trunk/drivers/dma/fsldma.c @@ -1,7 +1,7 @@ /* * Freescale MPC85xx, MPC83xx DMA Engine support * - * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * Author: * Zhang Wei , Jul 2007 @@ -1324,8 +1324,6 @@ static int __devinit fsldma_of_probe(struct platform_device *op, fdev->common.device_control = fsl_dma_device_control; fdev->common.dev = &op->dev; - dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); - dev_set_drvdata(&op->dev, fdev); /* diff --git a/trunk/drivers/dma/intel_mid_dma.c b/trunk/drivers/dma/intel_mid_dma.c index 798f46a4590d..78266382797e 100644 --- a/trunk/drivers/dma/intel_mid_dma.c +++ b/trunk/drivers/dma/intel_mid_dma.c @@ -664,20 +664,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( /*calculate CTL_LO*/ ctl_lo.ctl_lo = 0; ctl_lo.ctlx.int_en = 1; + ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width; + ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; - /* - * Here we need some translation from "enum dma_slave_buswidth" - * to the format for our dma controller - * standard intel_mid_dmac's format - * 1 Byte 0b000 - * 2 Bytes 0b001 - * 4 Bytes 0b010 - */ - ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; - ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; - if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { ctl_lo.ctlx.tt_fc = 0; ctl_lo.ctlx.sinc = 0; @@ -755,18 +746,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( BUG_ON(!mids); if (!midc->dma->pimr_mask) { - /* We can still handle sg list with only one item */ - if (sg_len == 1) { - txd = intel_mid_dma_prep_memcpy(chan, - mids->dma_slave.dst_addr, - mids->dma_slave.src_addr, - sgl->length, - flags); - return txd; - } else { - pr_warn("MDMA: SG list is not supported by this controller\n"); - return NULL; - } + pr_debug("MDMA: SG list is not supported by this controller\n"); + return NULL; } pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", @@ -777,7 +758,6 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( pr_err("MDMA: Prep memcpy failed\n"); return NULL; } - desc = to_intel_mid_dma_desc(txd); desc->dirn = direction; ctl_lo.ctl_lo = desc->ctl_lo; @@ -1041,6 +1021,11 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) /*DMA Interrupt*/ 
pr_debug("MDMA:Got an interrupt on irq %d\n", irq); + if (!mid) { + pr_err("ERR_MDMA:null pointer mid\n"); + return -EINVAL; + } + pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); tfr_status &= mid->intr_mask; if (tfr_status) { diff --git a/trunk/drivers/dma/iop-adma.c b/trunk/drivers/dma/iop-adma.c index c6b01f535b29..161c452923b8 100644 --- a/trunk/drivers/dma/iop-adma.c +++ b/trunk/drivers/dma/iop-adma.c @@ -1261,7 +1261,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) return err; } -#ifdef CONFIG_RAID6_PQ +#ifdef CONFIG_MD_RAID6_PQ static int __devinit iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) { @@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { - #ifdef CONFIG_RAID6_PQ + #ifdef CONFIG_MD_RAID6_PQ ret = iop_adma_pq_zero_sum_self_test(adev); dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); #else diff --git a/trunk/drivers/dma/pch_dma.c b/trunk/drivers/dma/pch_dma.c index 1c38418ae61f..c064c89420d0 100644 --- a/trunk/drivers/dma/pch_dma.c +++ b/trunk/drivers/dma/pch_dma.c @@ -1,7 +1,6 @@ /* * Topcliff PCH DMA controller driver * Copyright (c) 2010 Intel Corporation - * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -922,19 +921,12 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) } /* PCI Device ID of DMA device */ -#define PCI_VENDOR_ID_ROHM 0x10DB -#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 -#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 -#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 -#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B -#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 +#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 +#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 static const struct pci_device_id pch_dma_id_table[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, - { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ - { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ - { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, { 0, }, }; @@ -962,7 +954,6 @@ static void __exit pch_dma_exit(void) module_init(pch_dma_init); module_exit(pch_dma_exit); -MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " - "DMA controller driver"); +MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); MODULE_AUTHOR("Yong Wang "); MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/dma/ste_dma40.c b/trunk/drivers/dma/ste_dma40.c index 6e1d46a65d0e..fab68a553205 100644 --- a/trunk/drivers/dma/ste_dma40.c +++ b/trunk/drivers/dma/ste_dma40.c @@ -1,6 +1,5 @@ /* - * Copyright (C) Ericsson AB 2007-2008 - * Copyright (C) ST-Ericsson SA 2008-2010 + * Copyright (C) ST-Ericsson SA 2007-2010 * Author: Per Forlin for ST-Ericsson * Author: Jonas Aaberg for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 @@ -555,67 +554,9 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c) return d; } -static int d40_psize_2_burst_size(bool is_log, int psize) -{ - if (is_log) { - if (psize == STEDMA40_PSIZE_LOG_1) - return 1; - } else { - if (psize == STEDMA40_PSIZE_PHY_1) - return 1; - } - - return 2 << 
psize; -} - -/* - * The dma only supports transmitting packages up to - * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of - * dma elements required to send the entire sg list - */ -static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) -{ - int dmalen; - u32 max_w = max(data_width1, data_width2); - u32 min_w = min(data_width1, data_width2); - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); - - if (seg_max > STEDMA40_MAX_SEG_SIZE) - seg_max -= (1 << max_w); - - if (!IS_ALIGNED(size, 1 << max_w)) - return -EINVAL; - - if (size <= seg_max) - dmalen = 1; - else { - dmalen = size / seg_max; - if (dmalen * seg_max < size) - dmalen++; - } - return dmalen; -} - -static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, - u32 data_width1, u32 data_width2) -{ - struct scatterlist *sg; - int i; - int len = 0; - int ret; - - for_each_sg(sgl, sg, sg_len, i) { - ret = d40_size_2_dmalen(sg_dma_len(sg), - data_width1, data_width2); - if (ret < 0) - return ret; - len += ret; - } - return len; -} - /* Support functions for logical channels */ + static int d40_channel_execute_command(struct d40_chan *d40c, enum d40_command command) { @@ -1300,21 +1241,6 @@ static int d40_validate_conf(struct d40_chan *d40c, res = -EINVAL; } - if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * - (1 << conf->src_info.data_width) != - d40_psize_2_burst_size(is_log, conf->dst_info.psize) * - (1 << conf->dst_info.data_width)) { - /* - * The DMAC hardware only supports - * src (burst x width) == dst (burst x width) - */ - - dev_err(&d40c->chan.dev->device, - "[%s] src (burst x width) != dst (burst x width)\n", - __func__); - res = -EINVAL; - } - return res; } @@ -1712,21 +1638,13 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (d40d == NULL) goto err; - d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); - goto err; - } - + d40d->lli_len = sgl_len; d40d->lli_current = 0; d40d->txd.flags = dma_flags; if (d40c->log_num != D40_PHY_CHAN) { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { dev_err(&d40c->chan.dev->device, "[%s] Out of memory\n", __func__); goto err; @@ -1736,17 +1654,15 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, sgl_len, d40d->lli_log.src, d40c->log_def.lcsp1, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); + d40c->dma_cfg.src_info.data_width); (void) d40_log_sg_to_lli(sgl_dst, sgl_len, d40d->lli_log.dst, d40c->log_def.lcsp3, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width); + d40c->dma_cfg.dst_info.data_width); } else { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { dev_err(&d40c->chan.dev->device, "[%s] Out of memory\n", __func__); goto err; @@ -1759,7 +1675,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, virt_to_phys(d40d->lli_phy.src), d40c->src_def_cfg, d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, d40c->dma_cfg.src_info.psize); if (res < 0) @@ -1772,7 +1687,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, virt_to_phys(d40d->lli_phy.dst), d40c->dst_def_cfg, d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, 
d40c->dma_cfg.dst_info.psize); if (res < 0) @@ -1912,6 +1826,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); unsigned long flags; + int err = 0; if (d40c->phy_chan == NULL) { dev_err(&d40c->chan.dev->device, @@ -1929,15 +1844,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, } d40d->txd.flags = dma_flags; - d40d->lli_len = d40_size_2_dmalen(size, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); - goto err; - } - dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -1945,40 +1851,37 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, if (d40c->log_num != D40_PHY_CHAN) { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40d, 1, true) < 0) { dev_err(&d40c->chan.dev->device, "[%s] Out of memory\n", __func__); goto err; } + d40d->lli_len = 1; d40d->lli_current = 0; - if (d40_log_buf_to_lli(d40d->lli_log.src, - src, - size, - d40c->log_def.lcsp1, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - true) == NULL) - goto err; + d40_log_fill_lli(d40d->lli_log.src, + src, + size, + d40c->log_def.lcsp1, + d40c->dma_cfg.src_info.data_width, + true); - if (d40_log_buf_to_lli(d40d->lli_log.dst, - dst, - size, - d40c->log_def.lcsp3, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - true) == NULL) - goto err; + d40_log_fill_lli(d40d->lli_log.dst, + dst, + size, + d40c->log_def.lcsp3, + d40c->dma_cfg.dst_info.data_width, + true); } else { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40d, 1, false) < 0) { dev_err(&d40c->chan.dev->device, "[%s] Out of memory\n", __func__); goto err; } - if (d40_phy_buf_to_lli(d40d->lli_phy.src, + err = d40_phy_fill_lli(d40d->lli_phy.src, src, size, d40c->dma_cfg.src_info.psize, @@ -1986,11 +1889,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40c->src_def_cfg, true, d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - false) == NULL) - goto err; + false); + if (err) + goto err_fill_lli; - if (d40_phy_buf_to_lli(d40d->lli_phy.dst, + err = d40_phy_fill_lli(d40d->lli_phy.dst, dst, size, d40c->dma_cfg.dst_info.psize, @@ -1998,9 +1901,10 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40c->dst_def_cfg, true, d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - false) == NULL) - goto err; + false); + + if (err) + goto err_fill_lli; (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, d40d->lli_pool.size, DMA_TO_DEVICE); @@ -2009,6 +1913,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; +err_fill_lli: + dev_err(&d40c->chan.dev->device, + "[%s] Failed filling in PHY LLI\n", __func__); err: if (d40d) d40_desc_free(d40c, d40d); @@ -2038,21 +1945,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, dma_addr_t dev_addr = 0; int total_size; - d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); - return -EINVAL; - } - - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if 
(d40_pool_lli_alloc(d40d, sg_len, true) < 0) { dev_err(&d40c->chan.dev->device, "[%s] Out of memory\n", __func__); return -ENOMEM; } + d40d->lli_len = sg_len; d40d->lli_current = 0; if (direction == DMA_FROM_DEVICE) @@ -2094,21 +1993,13 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, dma_addr_t dst_dev_addr; int res; - d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); - return -EINVAL; - } - - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { dev_err(&d40c->chan.dev->device, "[%s] Out of memory\n", __func__); return -ENOMEM; } + d40d->lli_len = sgl_len; d40d->lli_current = 0; if (direction == DMA_FROM_DEVICE) { @@ -2133,7 +2024,6 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, virt_to_phys(d40d->lli_phy.src), d40c->src_def_cfg, d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, d40c->dma_cfg.src_info.psize); if (res < 0) return res; @@ -2145,7 +2035,6 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, virt_to_phys(d40d->lli_phy.dst), d40c->dst_def_cfg, d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.psize); if (res < 0) return res; @@ -2355,8 +2244,6 @@ static void d40_set_runtime_config(struct dma_chan *chan, psize = STEDMA40_PSIZE_PHY_8; else if (config_maxburst >= 4) psize = STEDMA40_PSIZE_PHY_4; - else if (config_maxburst >= 2) - psize = STEDMA40_PSIZE_PHY_2; else psize = STEDMA40_PSIZE_PHY_1; } diff --git a/trunk/drivers/dma/ste_dma40_ll.c b/trunk/drivers/dma/ste_dma40_ll.c index 0b096a38322d..8557cb88b255 100644 --- a/trunk/drivers/dma/ste_dma40_ll.c +++ b/trunk/drivers/dma/ste_dma40_ll.c @@ -1,6 +1,6 @@ /* * Copyright (C) ST-Ericsson SA 2007-2010 - * Author: Per Forlin for ST-Ericsson + * Author: Per Friden for ST-Ericsson * Author: Jonas Aaberg for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ @@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, *dst_cfg = dst; } -static int d40_phy_fill_lli(struct d40_phy_lli *lli, - dma_addr_t data, - u32 data_size, - int psize, - dma_addr_t next_lli, - u32 reg_cfg, - bool term_int, - u32 data_width, - bool is_device) +int d40_phy_fill_lli(struct d40_phy_lli *lli, + dma_addr_t data, + u32 data_size, + int psize, + dma_addr_t next_lli, + u32 reg_cfg, + bool term_int, + u32 data_width, + bool is_device) { int num_elems; @@ -139,6 +139,13 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, else num_elems = 2 << psize; + /* + * Size is 16bit. data_width is 8, 16, 32 or 64 bit + * Block large than 64 KiB must be split. 
+ */ + if (data_size > (0xffff << data_width)) + return -EINVAL; + /* Must be aligned */ if (!IS_ALIGNED(data, 0x1 << data_width)) return -EINVAL; @@ -180,118 +187,55 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, return 0; } -static int d40_seg_size(int size, int data_width1, int data_width2) -{ - u32 max_w = max(data_width1, data_width2); - u32 min_w = min(data_width1, data_width2); - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); - - if (seg_max > STEDMA40_MAX_SEG_SIZE) - seg_max -= (1 << max_w); - - if (size <= seg_max) - return size; - - if (size <= 2 * seg_max) - return ALIGN(size / 2, 1 << max_w); - - return seg_max; -} - -struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, - dma_addr_t addr, - u32 size, - int psize, - dma_addr_t lli_phys, - u32 reg_cfg, - bool term_int, - u32 data_width1, - u32 data_width2, - bool is_device) -{ - int err; - dma_addr_t next = lli_phys; - int size_rest = size; - int size_seg = 0; - - do { - size_seg = d40_seg_size(size_rest, data_width1, data_width2); - size_rest -= size_seg; - - if (term_int && size_rest == 0) - next = 0; - else - next = ALIGN(next + sizeof(struct d40_phy_lli), - D40_LLI_ALIGN); - - err = d40_phy_fill_lli(lli, - addr, - size_seg, - psize, - next, - reg_cfg, - !next, - data_width1, - is_device); - - if (err) - goto err; - - lli++; - if (!is_device) - addr += size_seg; - } while (size_rest); - - return lli; - - err: - return NULL; -} - int d40_phy_sg_to_lli(struct scatterlist *sg, int sg_len, dma_addr_t target, - struct d40_phy_lli *lli_sg, + struct d40_phy_lli *lli, dma_addr_t lli_phys, u32 reg_cfg, - u32 data_width1, - u32 data_width2, + u32 data_width, int psize) { int total_size = 0; int i; struct scatterlist *current_sg = sg; + dma_addr_t next_lli_phys; dma_addr_t dst; - struct d40_phy_lli *lli = lli_sg; - dma_addr_t l_phys = lli_phys; + int err = 0; for_each_sg(sg, current_sg, sg_len, i) { total_size += sg_dma_len(current_sg); + /* If this scatter list entry is the last one, no next link */ + if (sg_len - 1 == i) + next_lli_phys = 0; + else + next_lli_phys = ALIGN(lli_phys + (i + 1) * + sizeof(struct d40_phy_lli), + D40_LLI_ALIGN); + if (target) dst = target; else dst = sg_phys(current_sg); - l_phys = ALIGN(lli_phys + (lli - lli_sg) * - sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - - lli = d40_phy_buf_to_lli(lli, - dst, - sg_dma_len(current_sg), - psize, - l_phys, - reg_cfg, - sg_len - 1 == i, - data_width1, - data_width2, - target == dst); - if (lli == NULL) - return -EINVAL; + err = d40_phy_fill_lli(&lli[i], + dst, + sg_dma_len(current_sg), + psize, + next_lli_phys, + reg_cfg, + !next_lli_phys, + data_width, + target == dst); + if (err) + goto err; } return total_size; +err: + return err; } @@ -371,20 +315,17 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, writel(lli_dst->lcsp13, &lcla[1].lcsp13); } -static void d40_log_fill_lli(struct d40_log_lli *lli, - dma_addr_t data, u32 data_size, - u32 reg_cfg, - u32 data_width, - bool addr_inc) +void d40_log_fill_lli(struct d40_log_lli *lli, + dma_addr_t data, u32 data_size, + u32 reg_cfg, + u32 data_width, + bool addr_inc) { lli->lcsp13 = reg_cfg; /* The number of elements to transfer */ lli->lcsp02 = ((data_size >> data_width) << D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; - - BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); - /* 16 LSBs address of the current element */ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; /* 16 MSBs address of the current element */ @@ -407,94 +348,55 @@ int d40_log_sg_to_dev(struct 
scatterlist *sg, int total_size = 0; struct scatterlist *current_sg = sg; int i; - struct d40_log_lli *lli_src = lli->src; - struct d40_log_lli *lli_dst = lli->dst; for_each_sg(sg, current_sg, sg_len, i) { total_size += sg_dma_len(current_sg); if (direction == DMA_TO_DEVICE) { - lli_src = - d40_log_buf_to_lli(lli_src, - sg_phys(current_sg), - sg_dma_len(current_sg), - lcsp->lcsp1, src_data_width, - dst_data_width, - true); - lli_dst = - d40_log_buf_to_lli(lli_dst, - dev_addr, - sg_dma_len(current_sg), - lcsp->lcsp3, dst_data_width, - src_data_width, - false); + d40_log_fill_lli(&lli->src[i], + sg_phys(current_sg), + sg_dma_len(current_sg), + lcsp->lcsp1, src_data_width, + true); + d40_log_fill_lli(&lli->dst[i], + dev_addr, + sg_dma_len(current_sg), + lcsp->lcsp3, dst_data_width, + false); } else { - lli_dst = - d40_log_buf_to_lli(lli_dst, - sg_phys(current_sg), - sg_dma_len(current_sg), - lcsp->lcsp3, dst_data_width, - src_data_width, - true); - lli_src = - d40_log_buf_to_lli(lli_src, - dev_addr, - sg_dma_len(current_sg), - lcsp->lcsp1, src_data_width, - dst_data_width, - false); + d40_log_fill_lli(&lli->dst[i], + sg_phys(current_sg), + sg_dma_len(current_sg), + lcsp->lcsp3, dst_data_width, + true); + d40_log_fill_lli(&lli->src[i], + dev_addr, + sg_dma_len(current_sg), + lcsp->lcsp1, src_data_width, + false); } } return total_size; } -struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, - dma_addr_t addr, - int size, - u32 lcsp13, /* src or dst*/ - u32 data_width1, - u32 data_width2, - bool addr_inc) -{ - struct d40_log_lli *lli = lli_sg; - int size_rest = size; - int size_seg = 0; - - do { - size_seg = d40_seg_size(size_rest, data_width1, data_width2); - size_rest -= size_seg; - - d40_log_fill_lli(lli, - addr, - size_seg, - lcsp13, data_width1, - addr_inc); - if (addr_inc) - addr += size_seg; - lli++; - } while (size_rest); - - return lli; -} - int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ - u32 data_width1, u32 data_width2) + u32 data_width) { int total_size = 0; struct scatterlist *current_sg = sg; int i; - struct d40_log_lli *lli = lli_sg; for_each_sg(sg, current_sg, sg_len, i) { total_size += sg_dma_len(current_sg); - lli = d40_log_buf_to_lli(lli, - sg_phys(current_sg), - sg_dma_len(current_sg), - lcsp13, - data_width1, data_width2, true); + + d40_log_fill_lli(&lli_sg[i], + sg_phys(current_sg), + sg_dma_len(current_sg), + lcsp13, data_width, + true); } return total_size; } diff --git a/trunk/drivers/dma/ste_dma40_ll.h b/trunk/drivers/dma/ste_dma40_ll.h index 9cc43495bea2..9e419b907544 100644 --- a/trunk/drivers/dma/ste_dma40_ll.h +++ b/trunk/drivers/dma/ste_dma40_ll.h @@ -292,20 +292,18 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, struct d40_phy_lli *lli, dma_addr_t lli_phys, u32 reg_cfg, - u32 data_width1, - u32 data_width2, + u32 data_width, int psize); -struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, - dma_addr_t data, - u32 data_size, - int psize, - dma_addr_t next_lli, - u32 reg_cfg, - bool term_int, - u32 data_width1, - u32 data_width2, - bool is_device); +int d40_phy_fill_lli(struct d40_phy_lli *lli, + dma_addr_t data, + u32 data_size, + int psize, + dma_addr_t next_lli, + u32 reg_cfg, + bool term_int, + u32 data_width, + bool is_device); void d40_phy_lli_write(void __iomem *virtbase, u32 phy_chan_num, @@ -314,12 +312,12 @@ void d40_phy_lli_write(void __iomem *virtbase, /* Logical channels */ -struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, - dma_addr_t addr, 
- int size, - u32 lcsp13, /* src or dst*/ - u32 data_width1, u32 data_width2, - bool addr_inc); +void d40_log_fill_lli(struct d40_log_lli *lli, + dma_addr_t data, + u32 data_size, + u32 reg_cfg, + u32 data_width, + bool addr_inc); int d40_log_sg_to_dev(struct scatterlist *sg, int sg_len, @@ -334,7 +332,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ - u32 data_width1, u32 data_width2); + u32 data_width); void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, struct d40_log_lli *lli_dst, diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h index 01bffc4412d2..46e32573b3a3 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -160,7 +160,6 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) #define NVOBJ_FLAG_VM (1 << 3) -#define NVOBJ_FLAG_VM_USER (1 << 4) #define NVOBJ_CINST_GLOBAL 0xdeadbeef @@ -1577,20 +1576,6 @@ nv_match_device(struct drm_device *dev, unsigned device, dev->pdev->subsystem_device == sub_device; } -/* returns 1 if device is one of the nv4x using the 0x4497 object class, - * helpful to determine a number of other hardware features - */ -static inline int -nv44_graph_class(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if ((dev_priv->chipset & 0xf0) == 0x60) - return 1; - - return !(0x0baf & (1 << (dev_priv->chipset & 0x0f))); -} - /* memory type/access flags, do not match hardware values */ #define NV_MEM_ACCESS_RO 1 #define NV_MEM_ACCESS_WO 2 diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 60769d2f9a66..6d56a54b6e2e 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -352,8 +352,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, FBINFO_HWACCEL_IMAGEBLIT; info->flags |= FBINFO_CAN_FORCE_OUTPUT; info->fbops = &nouveau_fbcon_sw_ops; - info->fix.smem_start = nvbo->bo.mem.bus.base + - nvbo->bo.mem.bus.offset; + info->fix.smem_start = dev->mode_config.fb_base + + (nvbo->bo.mem.start << PAGE_SHIFT); info->fix.smem_len = size; info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c index 26347b7cd872..69044eb104bb 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -742,24 +742,30 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) { struct nouveau_mm *mm = man->priv; struct nouveau_mm_node *r; - u32 total = 0, free = 0; + u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; + int i; mutex_lock(&mm->mutex); list_for_each_entry(r, &mm->nodes, nl_entry) { - printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n", - prefix, r->type, ((u64)r->offset << 12), + printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", + prefix, r->free ? 
"free" : "used", r->type, + ((u64)r->offset << 12), (((u64)r->offset + r->length) << 12)); - total += r->length; - if (!r->type) - free += r->length; + ttotal[r->type] += r->length; + if (r->free) + tfree[r->type] += r->length; + else + tused[r->type] += r->length; } mutex_unlock(&mm->mutex); - printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n", - prefix, (u64)total << 12, (u64)free << 12); - printk(KERN_DEBUG "%s block: 0x%08x\n", - prefix, mm->block_size << 12); + printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); + for (i = 0; i < 3; i++) { + printk(KERN_DEBUG "%s type %d: 0x%010llx, " + "used 0x%010llx, free 0x%010llx\n", prefix, + i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12); + } } const struct ttm_mem_type_manager_func nouveau_vram_manager = { diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c index 8844b50c3e54..cdbb11eb701b 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c @@ -48,76 +48,175 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) b->offset = a->offset; b->length = size; + b->free = a->free; b->type = a->type; a->offset += size; a->length -= size; list_add_tail(&b->nl_entry, &a->nl_entry); - if (b->type == 0) + if (b->free) list_add_tail(&b->fl_entry, &a->fl_entry); return b; } -#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \ - list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) +static struct nouveau_mm_node * +nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) +{ + struct nouveau_mm_node *prev, *next; + + /* try to merge with free adjacent entries of same type */ + prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.prev != &rmm->nodes) { + if (prev->free && prev->type == this->type) { + prev->length += this->length; + region_put(rmm, this); + this = prev; + } + } + + next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.next != &rmm->nodes) { + if (next->free && next->type == this->type) { + next->offset = this->offset; + next->length += this->length; + region_put(rmm, this); + this = next; + } + } + + return this; +} void nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) { - struct nouveau_mm_node *prev = node(this, prev); - struct nouveau_mm_node *next = node(this, next); + u32 block_s, block_l; + this->free = true; list_add(&this->fl_entry, &rmm->free); - this->type = 0; + this = nouveau_mm_merge(rmm, this); + + /* any entirely free blocks now? 
we'll want to remove typing + * on them now so they can be use for any memory allocation + */ + block_s = roundup(this->offset, rmm->block_size); + if (block_s + rmm->block_size > this->offset + this->length) + return; - if (prev && prev->type == 0) { - prev->length += this->length; - region_put(rmm, this); - this = prev; + /* split off any still-typed region at the start */ + if (block_s != this->offset) { + if (!region_split(rmm, this, block_s - this->offset)) + return; } - if (next && next->type == 0) { - next->offset = this->offset; - next->length += this->length; - region_put(rmm, this); + /* split off the soon-to-be-untyped block(s) */ + block_l = rounddown(this->length, rmm->block_size); + if (block_l != this->length) { + this = region_split(rmm, this, block_l); + if (!this) + return; } + + /* mark as having no type, and retry merge with any adjacent + * untyped blocks + */ + this->type = 0; + nouveau_mm_merge(rmm, this); } int nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, u32 align, struct nouveau_mm_node **pnode) { - struct nouveau_mm_node *prev, *this, *next; - u32 min = size_nc ? size_nc : size; - u32 align_mask = align - 1; - u32 splitoff; - u32 s, e; - - list_for_each_entry(this, &rmm->free, fl_entry) { - e = this->offset + this->length; - s = this->offset; - - prev = node(this, prev); - if (prev && prev->type != type) - s = roundup(s, rmm->block_size); - - next = node(this, next); - if (next && next->type != type) - e = rounddown(e, rmm->block_size); - - s = (s + align_mask) & ~align_mask; - e &= ~align_mask; - if (s > e || e - s < min) + struct nouveau_mm_node *this, *tmp, *next; + u32 splitoff, avail, alloc; + + list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { + next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.next == &rmm->nodes) + next = NULL; + + /* skip wrongly typed blocks */ + if (this->type && this->type != type) continue; - splitoff = s - this->offset; - if (splitoff && !region_split(rmm, this, splitoff)) - return -ENOMEM; + /* account for alignment */ + splitoff = this->offset & (align - 1); + if (splitoff) + splitoff = align - splitoff; - this = region_split(rmm, this, min(size, e - s)); - if (!this) + if (this->length <= splitoff) + continue; + + /* determine total memory available from this, and + * the next block (if appropriate) + */ + avail = this->length; + if (next && next->free && (!next->type || next->type == type)) + avail += next->length; + + avail -= splitoff; + + /* determine allocation size */ + if (size_nc) { + alloc = min(avail, size); + alloc = rounddown(alloc, size_nc); + if (alloc == 0) + continue; + } else { + alloc = size; + if (avail < alloc) + continue; + } + + /* untyped block, split off a chunk that's a multiple + * of block_size and type it + */ + if (!this->type) { + u32 block = roundup(alloc + splitoff, rmm->block_size); + if (this->length < block) + continue; + + this = region_split(rmm, this, block); + if (!this) + return -ENOMEM; + + this->type = type; + } + + /* stealing memory from adjacent block */ + if (alloc > this->length) { + u32 amount = alloc - (this->length - splitoff); + + if (!next->type) { + amount = roundup(amount, rmm->block_size); + + next = region_split(rmm, next, amount); + if (!next) + return -ENOMEM; + + next->type = type; + } + + this->length += amount; + next->offset += amount; + next->length -= amount; + if (!next->length) { + list_del(&next->nl_entry); + list_del(&next->fl_entry); + kfree(next); + } + } + + if (splitoff) { + if 
(!region_split(rmm, this, splitoff)) + return -ENOMEM; + } + + this = region_split(rmm, this, alloc); + if (this == NULL) return -ENOMEM; - this->type = type; + this->free = false; list_del(&this->fl_entry); *pnode = this; return 0; @@ -135,6 +234,7 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block) heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return -ENOMEM; + heap->free = true; heap->offset = roundup(offset, block); heap->length = rounddown(offset + length, block) - heap->offset; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mm.h b/trunk/drivers/gpu/drm/nouveau/nouveau_mm.h index 798eaf39691c..af3844933036 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_mm.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mm.h @@ -30,7 +30,9 @@ struct nouveau_mm_node { struct list_head fl_entry; struct list_head rl_entry; - u8 type; + bool free; + int type; + u32 offset; u32 length; }; diff --git a/trunk/drivers/gpu/drm/nouveau/nv40_graph.c b/trunk/drivers/gpu/drm/nouveau/nv40_graph.c index 8870d72388c8..19ef92a0375a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/trunk/drivers/gpu/drm/nouveau/nv40_graph.c @@ -451,7 +451,8 @@ nv40_graph_register(struct drm_device *dev) NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ /* curie */ - if (nv44_graph_class(dev)) + if (dev_priv->chipset >= 0x60 || + 0x00005450 & (1 << (dev_priv->chipset & 0x0f))) NVOBJ_CLASS(dev, 0x4497, GR); else NVOBJ_CLASS(dev, 0x4097, GR); diff --git a/trunk/drivers/gpu/drm/nouveau/nv40_grctx.c b/trunk/drivers/gpu/drm/nouveau/nv40_grctx.c index f70447d131d7..ce585093264e 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv40_grctx.c +++ b/trunk/drivers/gpu/drm/nouveau/nv40_grctx.c @@ -117,6 +117,17 @@ * - get vs count from 0x1540 */ +static int +nv40_graph_4097(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if ((dev_priv->chipset & 0xf0) == 0x60) + return 0; + + return !!(0x0baf & (1 << dev_priv->chipset)); +} + static int nv40_graph_vs_count(struct drm_device *dev) { @@ -208,7 +219,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx) gr_def(ctx, 0x4009dc, 0x80000000); } else { cp_ctx(ctx, 0x400840, 20); - if (nv44_graph_class(ctx->dev)) { + if (!nv40_graph_4097(ctx->dev)) { for (i = 0; i < 8; i++) gr_def(ctx, 0x400860 + (i * 4), 0x00000001); } @@ -217,7 +228,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx) gr_def(ctx, 0x400888, 0x00000040); cp_ctx(ctx, 0x400894, 11); gr_def(ctx, 0x400894, 0x00000040); - if (!nv44_graph_class(ctx->dev)) { + if (nv40_graph_4097(ctx->dev)) { for (i = 0; i < 8; i++) gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); } @@ -535,7 +546,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx) static void nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) { - int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684; + int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084; cp_out (ctx, 0x300000); cp_lsr (ctx, len - 4); @@ -571,11 +582,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx) } else { b0_offset = 0x1d40/4; /* 2200 */ b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ - vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4; + vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4; } cp_lsr(ctx, vs_len * vs_nr + 0x300/4); - cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041); + cp_out(ctx, nv40_graph_4097(dev) ? 
0x800041 : 0x800029); offset = ctx->ctxvals_pos; ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); diff --git a/trunk/drivers/gpu/drm/nouveau/nv40_mc.c b/trunk/drivers/gpu/drm/nouveau/nv40_mc.c index 03c0d4c3f355..e4e72c12ab6a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv40_mc.c +++ b/trunk/drivers/gpu/drm/nouveau/nv40_mc.c @@ -6,17 +6,27 @@ int nv40_mc_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t tmp; + /* Power up everything, resetting each individual unit will * be done later if needed. */ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); - if (nv44_graph_class(dev)) { - u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA); + switch (dev_priv->chipset) { + case 0x44: + case 0x46: /* G72 */ + case 0x4e: + case 0x4c: /* C51_G7X */ + tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA); nv_wr32(dev, NV40_PMC_1700, tmp); nv_wr32(dev, NV40_PMC_1704, 0); nv_wr32(dev, NV40_PMC_1708, 0); nv_wr32(dev, NV40_PMC_170C, tmp); + break; + default: + break; } return 0; diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..2e1b1cd19a4b 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -332,11 +332,8 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) gpuobj->vinst = node->vram->offset; if (gpuobj->flags & NVOBJ_FLAG_VM) { - u32 flags = NV_MEM_ACCESS_RW; - if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) - flags |= NV_MEM_ACCESS_SYS; - - ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags, + ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, + NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, &node->chan_vma); if (ret) { vram->put(dev, &node->vram); diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_graph.c b/trunk/drivers/gpu/drm/nouveau/nvc0_graph.c index e6ea7d83187f..5feacd5d5fa4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -105,8 +105,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) if (ret) return ret; - ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, - NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, + ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM, &grch->unk418810); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c b/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c index e4e83c2caf5b..4b9251bb0ff4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c @@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) phys >>= 8; phys |= 0x00000001; /* present */ - if (vma->access & NV_MEM_ACCESS_SYS) - phys |= 0x00000002; +// if (vma->access & NV_MEM_ACCESS_SYS) +// phys |= 0x00000002; phys |= ((u64)target << 32); phys |= ((u64)memtype << 36); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index a8973acb3987..7fe8ebdcdc0e 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -3002,6 +3002,31 @@ int evergreen_copy_blit(struct radeon_device *rdev, return 0; } +static bool evergreen_card_posted(struct radeon_device *rdev) +{ + u32 reg; + + /* first check CRTCs */ + if (rdev->flags & RADEON_IS_IGP) + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + else + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + 
EVERGREEN_CRTC1_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (reg & EVERGREEN_CRTC_MASTER_EN) + return true; + + /* then check MEM_SIZE, in case the crtcs are off */ + if (RREG32(CONFIG_MEMSIZE)) + return true; + + return false; +} + /* Plan is to move initialization in that function and use * helper function so that radeon_device_init pretty much * do nothing more than calling asic specific function. This @@ -3038,7 +3063,7 @@ int evergreen_init(struct radeon_device *rdev) if (radeon_asic_reset(rdev)) dev_warn(rdev->dev, "GPU reset failed !\n"); /* Post card if necessary */ - if (!radeon_card_posted(rdev)) { + if (!evergreen_card_posted(rdev)) { if (!rdev->bios) { dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; @@ -3133,9 +3158,6 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, speed_cntl; - if (radeon_pcie_gen2 == 0) - return; - if (rdev->flags & RADEON_IS_IGP) return; diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index 46da5142b131..f637595b14e1 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -2086,13 +2086,12 @@ int r100_asic_reset(struct radeon_device *rdev) { struct r100_mc_save save; u32 status, tmp; - int ret = 0; + r100_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); if (!G_000E40_GUI_ACTIVE(status)) { return 0; } - r100_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* stop CP */ @@ -2132,11 +2131,11 @@ int r100_asic_reset(struct radeon_device *rdev) G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { dev_err(rdev->dev, "failed to reset GPU\n"); rdev->gpu_lockup = true; - ret = -1; - } else - dev_info(rdev->dev, "GPU reset succeed\n"); + return -1; + } r100_mc_resume(rdev, &save); - return ret; + dev_info(rdev->dev, "GPU reset succeed\n"); + return 0; } void r100_set_common_regs(struct radeon_device *rdev) diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index cf862ca580bf..fae5e709f270 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -405,13 +405,12 @@ int r300_asic_reset(struct radeon_device *rdev) { struct r100_mc_save save; u32 status, tmp; - int ret = 0; + r100_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); if (!G_000E40_GUI_ACTIVE(status)) { return 0; } - r100_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* stop CP */ @@ -452,11 +451,11 @@ int r300_asic_reset(struct radeon_device *rdev) if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { dev_err(rdev->dev, "failed to reset GPU\n"); rdev->gpu_lockup = true; - ret = -1; - } else - dev_info(rdev->dev, "GPU reset succeed\n"); + return -1; + } r100_mc_resume(rdev, &save); - return ret; + dev_info(rdev->dev, "GPU reset succeed\n"); + return 0; } /* diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index aca2236268fa..6b50716267c0 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -2358,6 +2358,24 @@ void 
r600_clear_surface_reg(struct radeon_device *rdev, int reg) /* FIXME: implement */ } + +bool r600_card_posted(struct radeon_device *rdev) +{ + uint32_t reg; + + /* first check CRTCs */ + reg = RREG32(D1CRTC_CONTROL) | + RREG32(D2CRTC_CONTROL); + if (reg & CRTC_EN) + return true; + + /* then check MEM_SIZE, in case the crtcs are off */ + if (RREG32(CONFIG_MEMSIZE)) + return true; + + return false; +} + int r600_startup(struct radeon_device *rdev) { int r; @@ -2518,7 +2536,7 @@ int r600_init(struct radeon_device *rdev) if (r) return r; /* Post card if necessary */ - if (!radeon_card_posted(rdev)) { + if (!r600_card_posted(rdev)) { if (!rdev->bios) { dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; @@ -3640,9 +3658,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; u16 link_cntl2; - if (radeon_pcie_gen2 == 0) - return; - if (rdev->flags & RADEON_IS_IGP) return; diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 71d2a554bbe6..e9486630a467 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -92,7 +92,6 @@ extern int radeon_tv; extern int radeon_audio; extern int radeon_disp_priority; extern int radeon_hw_i2c; -extern int radeon_pcie_gen2; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index d5680a0c87af..be5cb4f28c29 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -104,7 +104,6 @@ int radeon_tv = 1; int radeon_audio = 1; int radeon_disp_priority = 0; int radeon_hw_i2c = 0; -int radeon_pcie_gen2 = 0; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -148,9 +147,6 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444); MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); -MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); -module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); - static int radeon_suspend(struct drm_device *dev, pm_message_t state) { drm_radeon_private_t *dev_priv = dev->dev_private; diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen b/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen index 9177f9191837..ac40fd39d787 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -439,7 +439,7 @@ evergreen 0x9400 0x000286EC SPI_COMPUTE_NUM_THREAD_X 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z -0x00028724 GDS_ADDR_SIZE +0x000286F8 GDS_ADDR_SIZE 0x00028780 CB_BLEND0_CONTROL 0x00028784 CB_BLEND1_CONTROL 0x00028788 CB_BLEND2_CONTROL diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index 5afe294ed51f..b4192acaab5f 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev) int rs600_asic_reset(struct radeon_device *rdev) { - struct rv515_mc_save save; u32 status, tmp; - int ret = 0; + struct rv515_mc_save save; + + /* Stops all mc clients */ + rv515_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); if (!G_000E40_GUI_ACTIVE(status)) { return 0; } - /* Stops all mc clients */ - 
rv515_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* stop CP */ @@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev) if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { dev_err(rdev->dev, "failed to reset GPU\n"); rdev->gpu_lockup = true; - ret = -1; - } else - dev_info(rdev->dev, "GPU reset succeed\n"); + return -1; + } rv515_mc_resume(rdev, &save); - return ret; + dev_info(rdev->dev, "GPU reset succeed\n"); + return 0; } /* diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 491dc9000655..3a264aa3a79a 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -1268,7 +1268,7 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; /* Post card if necessary */ - if (!radeon_card_posted(rdev)) { + if (!r600_card_posted(rdev)) { if (!rdev->bios) { dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; @@ -1372,9 +1372,6 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) u32 link_width_cntl, lanes, speed_cntl, tmp; u16 link_cntl2; - if (radeon_pcie_gen2 == 0) - return; - if (rdev->flags & RADEON_IS_IGP) return; diff --git a/trunk/fs/internal.h b/trunk/fs/internal.h index 12ccb86edef7..e8a0b245177d 100644 --- a/trunk/fs/internal.h +++ b/trunk/fs/internal.h @@ -70,6 +70,7 @@ extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, extern void release_mounts(struct list_head *); extern void umount_tree(struct vfsmount *, int, struct list_head *); extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); +extern int finish_automount(struct vfsmount *, struct path *); extern int do_add_mount(struct vfsmount *, struct path *, int); extern void mnt_clear_expiry(struct vfsmount *); diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 8f7b41a14882..b753192d8c3f 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -923,37 +923,13 @@ static int follow_automount(struct path *path, unsigned flags, if (!mnt) /* mount collision */ return 0; - /* The new mount record should have at least 2 refs to prevent it being - * expired before we get a chance to add it - */ - BUG_ON(mnt_get_count(mnt) < 2); - - if (mnt->mnt_sb == path->mnt->mnt_sb && - mnt->mnt_root == path->dentry) { - mnt_clear_expiry(mnt); - mntput(mnt); - mntput(mnt); - return -ELOOP; - } + err = finish_automount(mnt, path); - /* We need to add the mountpoint to the parent. The filesystem may - * have placed it on an expiry list, and so we need to make sure it - * won't be expired under us if do_add_mount() fails (do_add_mount() - * will eat a reference unconditionally). 
- */ - mntget(mnt); - err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ - err = 0; - default: - mnt_clear_expiry(mnt); - mntput(mnt); - mntput(mnt); - return err; + return 0; case 0: - mntput(mnt); dput(path->dentry); if (*need_mntput) mntput(path->mnt); @@ -961,7 +937,10 @@ static int follow_automount(struct path *path, unsigned flags, path->dentry = dget(mnt->mnt_root); *need_mntput = true; return 0; + default: + return err; } + } /* diff --git a/trunk/fs/namespace.c b/trunk/fs/namespace.c index 9f544f35ed34..bec51e4e0549 100644 --- a/trunk/fs/namespace.c +++ b/trunk/fs/namespace.c @@ -1895,6 +1895,39 @@ static int do_new_mount(struct path *path, char *type, int flags, return do_add_mount(mnt, path, mnt_flags); } +int finish_automount(struct vfsmount *m, struct path *path) +{ + int err; + /* The new mount record should have at least 2 refs to prevent it being + * expired before we get a chance to add it + */ + BUG_ON(mnt_get_count(m) < 2); + + if (m->mnt_sb == path->mnt->mnt_sb && + m->mnt_root == path->dentry) { + mnt_clear_expiry(m); + mntput(m); + mntput(m); + return -ELOOP; + } + + /* We need to add the mountpoint to the parent. The filesystem may + * have placed it on an expiry list, and so we need to make sure it + * won't be expired under us if do_add_mount() fails (do_add_mount() + * will eat a reference unconditionally). + */ + mntget(m); + err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE); + if (err) { + mnt_clear_expiry(m); + mntput(m); + mntput(m); + } else { + mntput(m); + } + return err; +} + /* * add a mount into a namespace's mount tree * - this unconditionally eats one of the caller's references to newmnt. diff --git a/trunk/include/linux/amba/pl08x.h b/trunk/include/linux/amba/pl08x.h index 3111385b8ca7..521a0f8974ac 100644 --- a/trunk/include/linux/amba/pl08x.h +++ b/trunk/include/linux/amba/pl08x.h @@ -12,6 +12,7 @@ * * Please credit ARM.com * Documentation: ARM DDI 0196D + * */ #ifndef AMBA_PL08X_H @@ -21,15 +22,6 @@ #include #include -struct pl08x_lli; -struct pl08x_driver_data; - -/* Bitmasks for selecting AHB ports for DMA transfers */ -enum { - PL08X_AHB1 = (1 << 0), - PL08X_AHB2 = (1 << 1) -}; - /** * struct pl08x_channel_data - data structure to pass info between * platform and PL08x driver regarding channel configuration @@ -54,10 +46,8 @@ enum { * @circular_buffer: whether the buffer passed in is circular and * shall simply be looped round round (like a record baby round * round round round) - * @single: the device connected to this channel will request single DMA - * transfers, not bursts. (Bursts are default.) - * @periph_buses: the device connected to this channel is accessible via - * these buses (use PL08X_AHB1 | PL08X_AHB2). + * @single: the device connected to this channel will request single + * DMA transfers, not bursts. (Bursts are default.) 
*/ struct pl08x_channel_data { char *bus_id; @@ -65,10 +55,10 @@ struct pl08x_channel_data { int max_signal; u32 muxval; u32 cctl; + u32 ccfg; dma_addr_t addr; bool circular_buffer; bool single; - u8 periph_buses; }; /** @@ -77,23 +67,24 @@ struct pl08x_channel_data { * @addr: current address * @maxwidth: the maximum width of a transfer on this bus * @buswidth: the width of this bus in bytes: 1, 2 or 4 - * @fill_bytes: bytes required to fill to the next bus memory boundary + * @fill_bytes: bytes required to fill to the next bus memory + * boundary */ struct pl08x_bus_data { dma_addr_t addr; u8 maxwidth; u8 buswidth; - size_t fill_bytes; + u32 fill_bytes; }; /** * struct pl08x_phy_chan - holder for the physical channels * @id: physical index to this channel * @lock: a lock to use when altering an instance of this struct - * @signal: the physical signal (aka channel) serving this physical channel - * right now - * @serving: the virtual channel currently being served by this physical - * channel + * @signal: the physical signal (aka channel) serving this + * physical channel right now + * @serving: the virtual channel currently being served by this + * physical channel */ struct pl08x_phy_chan { unsigned int id; @@ -101,6 +92,11 @@ struct pl08x_phy_chan { spinlock_t lock; int signal; struct pl08x_dma_chan *serving; + u32 csrc; + u32 cdst; + u32 clli; + u32 cctl; + u32 ccfg; }; /** @@ -112,23 +108,26 @@ struct pl08x_txd { struct dma_async_tx_descriptor tx; struct list_head node; enum dma_data_direction direction; - dma_addr_t src_addr; - dma_addr_t dst_addr; - size_t len; + struct pl08x_bus_data srcbus; + struct pl08x_bus_data dstbus; + int len; dma_addr_t llis_bus; - struct pl08x_lli *llis_va; - /* Default cctl value for LLIs */ - u32 cctl; + void *llis_va; + struct pl08x_channel_data *cd; + bool active; /* * Settings to be put into the physical channel when we - * trigger this txd. Other registers are in llis_va[0]. 
+ * trigger this txd */ - u32 ccfg; + u32 csrc; + u32 cdst; + u32 clli; + u32 cctl; }; /** - * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel - * states + * struct pl08x_dma_chan_state - holds the PL08x specific virtual + * channel states * @PL08X_CHAN_IDLE: the channel is idle * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport * channel and is running a transfer on it @@ -148,8 +147,6 @@ enum pl08x_dma_chan_state { * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel * @chan: wrappped abstract channel * @phychan: the physical channel utilized by this channel, if there is one - * @phychan_hold: if non-zero, hold on to the physical channel even if we - * have no pending entries * @tasklet: tasklet scheduled by the IRQ to handle actual work etc * @name: name of channel * @cd: channel platform data @@ -157,49 +154,53 @@ enum pl08x_dma_chan_state { * @runtime_direction: current direction of this channel according to * runtime config * @lc: last completed transaction on this channel - * @pend_list: queued transactions pending on this channel + * @desc_list: queued transactions pending on this channel * @at: active transaction on this channel + * @lockflags: sometimes we let a lock last between two function calls, + * especially prep/submit, and then we need to store the IRQ flags + * in the channel state, here * @lock: a lock for this channel data * @host: a pointer to the host (internal use) * @state: whether the channel is idle, paused, running etc * @slave: whether this channel is a device (slave) or for memcpy - * @waiting: a TX descriptor on this channel which is waiting for a physical - * channel to become available + * @waiting: a TX descriptor on this channel which is waiting for + * a physical channel to become available */ struct pl08x_dma_chan { struct dma_chan chan; struct pl08x_phy_chan *phychan; - int phychan_hold; struct tasklet_struct tasklet; char *name; struct pl08x_channel_data *cd; dma_addr_t runtime_addr; enum dma_data_direction runtime_direction; + atomic_t last_issued; dma_cookie_t lc; - struct list_head pend_list; + struct list_head desc_list; struct pl08x_txd *at; + unsigned long lockflags; spinlock_t lock; - struct pl08x_driver_data *host; + void *host; enum pl08x_dma_chan_state state; bool slave; struct pl08x_txd *waiting; }; /** - * struct pl08x_platform_data - the platform configuration for the PL08x - * PrimeCells. + * struct pl08x_platform_data - the platform configuration for the + * PL08x PrimeCells. * @slave_channels: the channels defined for the different devices on the * platform, all inclusive, including multiplexed channels. The available - * physical channels will be multiplexed around these signals as they are - * requested, just enumerate all possible channels. - * @get_signal: request a physical signal to be used for a DMA transfer - * immediately: if there is some multiplexing or similar blocking the use - * of the channel the transfer can be denied by returning less than zero, - * else it returns the allocated signal number + * physical channels will be multiplexed around these signals as they + * are requested, just enumerate all possible channels. 
+ * @get_signal: request a physical signal to be used for a DMA + * transfer immediately: if there is some multiplexing or similar blocking + * the use of the channel the transfer can be denied by returning + * less than zero, else it returns the allocated signal number * @put_signal: indicate to the platform that this physical signal is not * running any DMA transfer and multiplexing can be recycled - * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 - * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 + * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the + * LLI addresses are on 0/1 Master 1/2. */ struct pl08x_platform_data { struct pl08x_channel_data *slave_channels; @@ -207,8 +208,6 @@ struct pl08x_platform_data { struct pl08x_channel_data memcpy_channel; int (*get_signal)(struct pl08x_dma_chan *); void (*put_signal)(struct pl08x_dma_chan *); - u8 lli_buses; - u8 mem_buses; }; #ifdef CONFIG_AMBA_PL08X diff --git a/trunk/include/linux/dmaengine.h b/trunk/include/linux/dmaengine.h index 9bebd7f16ef1..8cd00ad98d37 100644 --- a/trunk/include/linux/dmaengine.h +++ b/trunk/include/linux/dmaengine.h @@ -532,7 +532,7 @@ static inline int dmaengine_resume(struct dma_chan *chan) return dmaengine_device_control(chan, DMA_RESUME, 0); } -static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) +static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc) { return desc->tx_submit(desc); }
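
A note on the final dmaengine.h hunk: the change reverts dmaengine_submit() from returning a dma_cookie_t back to a plain int, even though both variants simply forward the value of desc->tx_submit(desc). That value matters to DMA clients because it is the cookie that identifies the transfer afterwards, and a negative value signals a failed submit. The sketch below is a minimal, hypothetical slave-DMA client written against the dmaengine API of this kernel generation, not code from this patch; the helper name my_dev_start_tx and the already-mapped scatterlist it is handed are illustrative assumptions.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical caller of dmaengine_submit(): prepare a slave
 * transfer, submit it, and check the returned cookie.  The
 * scatterlist is assumed to have been DMA-mapped already.
 */
static int my_dev_start_tx(struct dma_chan *chan,
			   struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	/* tx_submit() hands back the cookie for this descriptor */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;

	/* actually start (or queue) the pending transfer */
	dma_async_issue_pending(chan);
	return 0;
}

Whichever return type the inline helper advertises, callers that keep the value around (for example to poll completion later via dma_async_is_tx_complete()) are treating it as a cookie, which is why the typed dma_cookie_t signature removed by this hunk was the more descriptive of the two.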