diff --git a/trunk/Documentation/DocBook/kernel-api.tmpl b/trunk/Documentation/DocBook/kernel-api.tmpl
index 096aed62c326..0519c9dc0065 100644
--- a/trunk/Documentation/DocBook/kernel-api.tmpl
+++ b/trunk/Documentation/DocBook/kernel-api.tmpl
@@ -386,7 +386,7 @@ X!Edrivers/pnp/system.c
Block Devices
-!Eblock/ll_rw_blk.c
+!Edrivers/block/ll_rw_blk.c
diff --git a/trunk/Documentation/block/biodoc.txt b/trunk/Documentation/block/biodoc.txt
index 0fe01c805480..2d65c2182161 100644
--- a/trunk/Documentation/block/biodoc.txt
+++ b/trunk/Documentation/block/biodoc.txt
@@ -1063,8 +1063,8 @@ Aside:
4.4 I/O contexts
I/O contexts provide a dynamically allocated per process data area. They may
be used in I/O schedulers, and in the block layer (could be used for IO statis,
-priorities for example). See *io_context in block/ll_rw_blk.c, and as-iosched.c
-for an example of usage in an i/o scheduler.
+priorities for example). See *io_context in drivers/block/ll_rw_blk.c, and
+as-iosched.c for an example of usage in an i/o scheduler.
5. Scalability related changes
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 24fe8edad304..429db4bf98ec 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -140,12 +140,3 @@ What: EXPORT_SYMBOL(lookup_hash)
When: January 2006
Why: Too low-level interface. Use lookup_one_len or lookup_create instead.
Who: Christoph Hellwig
-
----------------------------
-
-What: START_ARRAY ioctl for md
-When: July 2006
-Files: drivers/md/md.c
-Why: Not reliable by design - can fail when most needed.
- Alternatives exist
-Who: NeilBrown
diff --git a/trunk/Documentation/oops-tracing.txt b/trunk/Documentation/oops-tracing.txt
index 05960f8a748e..9f30ac6ca47b 100644
--- a/trunk/Documentation/oops-tracing.txt
+++ b/trunk/Documentation/oops-tracing.txt
@@ -32,10 +32,7 @@ the disk is not available then you have three options :-
has restarted. Messy but it is the only option if you have not
planned for a crash. Alternatively, you can take a picture of
the screen with a digital camera - not nice, but better than
- nothing. If the messages scroll off the top of the console, you
- may find that booting with a higher resolution (eg, vga=791)
- will allow you to read more of the text. (Caveat: This needs vesafb,
- so won't help for 'early' oopses)
+ nothing.
(2) Boot with a serial console (see Documentation/serial-console.txt),
run a null modem to a second machine and capture the output there
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 4b15f5f1e254..70b007e66926 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -652,11 +652,25 @@ endmenu
menu "Power management options"
-source "kernel/power/Kconfig"
+config PM
+ bool "Power Management support"
+ ---help---
+ "Power Management" means that parts of your computer are shut
+ off or put into a power conserving "sleep" mode if they are not
+ being used. There are two competing standards for doing this: APM
+ and ACPI. If you want to use either one, say Y here and then also
+ to the requisite support below.
+
+ Power Management is most important for battery powered laptop
+ computers; if you have a laptop, check out the Linux Laptop home
+ page on the WWW at or
+ Tuxmobil - Linux on Mobile Computers at
+ and the Battery Powered Linux mini-HOWTO, available from
+ .
config APM
tristate "Advanced Power Management Emulation"
- depends on PM_LEGACY
+ depends on PM
---help---
APM is a BIOS specification for saving power using several different
techniques. This is mostly useful for battery powered laptops with
diff --git a/trunk/arch/arm/common/sa1111.c b/trunk/arch/arm/common/sa1111.c
index 39a6eea300a2..7b07acb03f3b 100644
--- a/trunk/arch/arm/common/sa1111.c
+++ b/trunk/arch/arm/common/sa1111.c
@@ -1266,7 +1266,7 @@ static void __exit sa1111_exit(void)
bus_unregister(&sa1111_bus_type);
}
-subsys_initcall(sa1111_init);
+module_init(sa1111_init);
module_exit(sa1111_exit);
MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
diff --git a/trunk/arch/i386/kernel/crash.c b/trunk/arch/i386/kernel/crash.c
index 0248e084017c..af809ccf5fbe 100644
--- a/trunk/arch/i386/kernel/crash.c
+++ b/trunk/arch/i386/kernel/crash.c
@@ -21,7 +21,6 @@
#include
#include
#include
-#include
#include
@@ -148,7 +147,6 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
regs = &fixed_regs;
}
crash_save_this_cpu(regs, cpu);
- disable_local_APIC();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
halt();
@@ -188,7 +186,6 @@ static void nmi_shootdown_cpus(void)
}
/* Leave the nmi callback set */
- disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
@@ -213,9 +210,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
/* Make a note of crashing cpu. Will be used in NMI callback.*/
crashing_cpu = smp_processor_id();
nmi_shootdown_cpus();
- lapic_shutdown();
-#if defined(CONFIG_X86_IO_APIC)
- disable_IO_APIC();
-#endif
crash_save_self(regs);
}
diff --git a/trunk/arch/ia64/kernel/process.c b/trunk/arch/ia64/kernel/process.c
index 4305d2ba76f6..e92ea64d8040 100644
--- a/trunk/arch/ia64/kernel/process.c
+++ b/trunk/arch/ia64/kernel/process.c
@@ -202,9 +202,12 @@ default_idle (void)
{
local_irq_enable();
while (!need_resched()) {
- if (can_do_pal_halt)
- safe_halt();
- else
+ if (can_do_pal_halt) {
+ local_irq_disable();
+ if (!need_resched())
+ safe_halt();
+ local_irq_enable();
+ } else
cpu_relax();
}
}
@@ -269,14 +272,10 @@ cpu_idle (void)
{
void (*mark_idle)(int) = ia64_mark_idle;
int cpu = smp_processor_id();
+ set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while (1) {
- if (can_do_pal_halt)
- clear_thread_flag(TIF_POLLING_NRFLAG);
- else
- set_thread_flag(TIF_POLLING_NRFLAG);
-
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP
diff --git a/trunk/arch/powerpc/kernel/ppc_ksyms.c b/trunk/arch/powerpc/kernel/ppc_ksyms.c
index 59846b40d521..5dcf4ba05ee8 100644
--- a/trunk/arch/powerpc/kernel/ppc_ksyms.c
+++ b/trunk/arch/powerpc/kernel/ppc_ksyms.c
@@ -105,13 +105,6 @@ EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-#ifndef __powerpc64__
-EXPORT_SYMBOL(__ide_mm_insl);
-EXPORT_SYMBOL(__ide_mm_outsw);
-EXPORT_SYMBOL(__ide_mm_insw);
-EXPORT_SYMBOL(__ide_mm_outsl);
-#endif
-
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c
index 4bd7b0a70996..e2c95fcb8055 100644
--- a/trunk/arch/powerpc/mm/mem.c
+++ b/trunk/arch/powerpc/mm/mem.c
@@ -200,8 +200,6 @@ void show_mem(void)
unsigned long flags;
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; i++) {
- if (!pfn_valid(pgdat->node_start_pfn + i))
- continue;
page = pgdat_page_nr(pgdat, i);
total++;
if (PageHighMem(page))
@@ -338,7 +336,7 @@ void __init mem_init(void)
struct page *page;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
- num_physpages = lmb.memory.size >> PAGE_SHIFT;
+ num_physpages = max_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -350,13 +348,11 @@ void __init mem_init(void)
}
}
#else
- max_mapnr = max_pfn;
+ max_mapnr = num_physpages;
totalram_pages += free_all_bootmem();
#endif
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
- if (!pfn_valid(pgdat->node_start_pfn + i))
- continue;
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c
index f72cf87364cb..bd2cf1336885 100644
--- a/trunk/arch/powerpc/mm/numa.c
+++ b/trunk/arch/powerpc/mm/numa.c
@@ -483,7 +483,6 @@ static void __init setup_nonnuma(void)
{
unsigned long top_of_ram = lmb_end_of_DRAM();
unsigned long total_ram = lmb_phys_mem_size();
- unsigned int i;
printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
@@ -491,9 +490,7 @@ static void __init setup_nonnuma(void)
(top_of_ram - total_ram) >> 20);
map_cpu_to_node(boot_cpuid, 0);
- for (i = 0; i < lmb.memory.cnt; ++i)
- add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
- lmb_size_pages(&lmb.memory, i));
+ add_region(0, 0, lmb_end_of_DRAM() >> PAGE_SHIFT);
node_set_online(0);
}
diff --git a/trunk/arch/v850/Kconfig b/trunk/arch/v850/Kconfig
index 310865903234..89c053b6c2c4 100644
--- a/trunk/arch/v850/Kconfig
+++ b/trunk/arch/v850/Kconfig
@@ -23,14 +23,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_IRQ_PROBE
- bool
- default y
-
# Turn off some random 386 crap that can affect device config
config ISA
bool
diff --git a/trunk/arch/v850/kernel/irq.c b/trunk/arch/v850/kernel/irq.c
index 7a151c26f82e..9e85969ba976 100644
--- a/trunk/arch/v850/kernel/irq.c
+++ b/trunk/arch/v850/kernel/irq.c
@@ -1,8 +1,8 @@
/*
* arch/v850/kernel/irq.c -- High-level interrupt handling
*
- * Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation
- * Copyright (C) 2001,02,03,04,05 Miles Bader
+ * Copyright (C) 2001,02,03,04 NEC Electronics Corporation
+ * Copyright (C) 2001,02,03,04 Miles Bader
* Copyright (C) 1994-2000 Ralf Baechle
* Copyright (C) 1992 Linus Torvalds
*
@@ -27,15 +27,55 @@
#include
/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves, it doesn't deserve
- * a generic callback i think.
+ * Controller mappings for all interrupt sources:
*/
-void ack_bad_irq(unsigned int irq)
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
+
+/*
+ * Special irq handlers.
+ */
+
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_NONE;
+}
+
+/*
+ * Generic no controller code
+ */
+
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
{
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves, it doesn't deserve
+ * a generic callback i think.
+ */
printk("received IRQ %d with unknown interrupt type\n", irq);
}
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none disable_none
+#define end_none enable_none
+
+struct hw_interrupt_type no_irq_type = {
+ .typename = "none",
+ .startup = startup_none,
+ .shutdown = shutdown_none,
+ .enable = enable_none,
+ .disable = disable_none,
+ .ack = ack_none,
+ .end = end_none
+};
+
volatile unsigned long irq_err_count, spurious_count;
/*
@@ -44,68 +84,643 @@ volatile unsigned long irq_err_count, spurious_count;
int show_interrupts(struct seq_file *p, void *v)
{
- int irq = *(loff_t *) v;
+ int i = *(loff_t *) v;
+ struct irqaction * action;
+ unsigned long flags;
- if (irq == 0) {
- int cpu;
+ if (i == 0) {
seq_puts(p, " ");
- for (cpu=0; cpu < 1 /*smp_num_cpus*/; cpu++)
- seq_printf(p, "CPU%d ", cpu);
+ for (i=0; i < 1 /*smp_num_cpus*/; i++)
+ seq_printf(p, "CPU%d ", i);
seq_putc(p, '\n');
}
- if (irq < NR_IRQS) {
- unsigned long flags;
- struct irqaction *action;
+ if (i < NR_IRQS) {
+ int j, count, num;
+ const char *type_name = irq_desc[i].handler->typename;
+ spin_lock_irqsave(&irq_desc[j].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ count = 0;
+ num = -1;
+ for (j = 0; j < NR_IRQS; j++)
+ if (irq_desc[j].handler->typename == type_name) {
+ if (i == j)
+ num = count;
+ count++;
+ }
- action = irq_desc[irq].action;
- if (action) {
- int j;
- int count = 0;
- int num = -1;
- const char *type_name = irq_desc[irq].handler->typename;
-
- for (j = 0; j < NR_IRQS; j++)
- if (irq_desc[j].handler->typename == type_name){
- if (irq == j)
- num = count;
- count++;
- }
-
- seq_printf(p, "%3d: ",irq);
- seq_printf(p, "%10u ", kstat_irqs(irq));
- if (count > 1) {
- int prec = (num >= 100 ? 3 : num >= 10 ? 2 : 1);
- seq_printf(p, " %*s%d", 14 - prec,
- type_name, num);
- } else
- seq_printf(p, " %14s", type_name);
+ seq_printf(p, "%3d: ",i);
+ seq_printf(p, "%10u ", kstat_irqs(i));
+ if (count > 1) {
+ int prec = (num >= 100 ? 3 : num >= 10 ? 2 : 1);
+ seq_printf(p, " %*s%d", 14 - prec, type_name, num);
+ } else
+ seq_printf(p, " %14s", type_name);
- seq_printf(p, " %s", action->name);
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
- seq_putc(p, '\n');
- }
-
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
- } else if (irq == NR_IRQS)
+ seq_printf(p, " %s", action->name);
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[j].lock, flags);
+ } else if (i == NR_IRQS)
seq_printf(p, "ERR: %10lu\n", irq_err_count);
-
return 0;
}
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
+{
+ int status = 1; /* Force the "do bottom halves" bit */
+ int ret;
+
+ if (!(action->flags & SA_INTERRUPT))
+ local_irq_enable();
+
+ do {
+ ret = action->handler(irq, action->dev_id, regs);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ local_irq_disable();
+
+ return status;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables of an interrupt
+ * stack. Unlike disable_irq(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void inline disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables of an interrupt
+ * stack. That is for two disables you need two enables. This
+ * function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+
+void disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ synchronize_irq(irq);
+}
+
+/**
+ * enable_irq - enable interrupt handling on an irq
+ * @irq: Interrupt to enable
+ *
+ * Re-enables the processing of interrupts on this IRQ line
+ * providing no disable_irq calls are now in effect.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ desc->handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk("enable_irq(%u) unbalanced from %p\n", irq,
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
/* Handle interrupt IRQ. REGS are the registers at the time of ther
interrupt. */
unsigned int handle_irq (int irq, struct pt_regs *regs)
{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
+ int cpu = smp_processor_id();
+ irq_desc_t *desc = irq_desc + irq;
+ struct irqaction * action;
+ unsigned int status;
+
irq_enter();
- __do_IRQ(irq, regs);
+ kstat_cpu(cpu).irqs[irq]++;
+ spin_lock(&desc->lock);
+ desc->handler->ack(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ Since we set PENDING, if another processor is handling
+ a different instance of this same irq, the other processor
+ will take care of it.
+ */
+ if (unlikely(!action))
+ goto out;
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in handle_irq
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ handle_IRQ_event(irq, regs, action);
+ spin_lock(&desc->lock);
+
+ if (likely(!(desc->status & IRQ_PENDING)))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
+
irq_exit();
+
return 1;
}
+/**
+ * request_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. From the point this
+ * call is made your handler function may be invoked. Since
+ * your handler function must clear any interrupt the board
+ * raises, you must take care both to initialise your hardware
+ * and to set up the interrupt handler in the right order.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If your interrupt is shared you must pass a non NULL dev_id
+ * as this is required when freeing the interrupt.
+ *
+ * Flags:
+ *
+ * SA_SHIRQ Interrupt is shared
+ *
+ * SA_INTERRUPT Disable local interrupts while processing
+ *
+ * SA_SAMPLE_RANDOM The interrupt can be used for entropy
+ *
+ */
+
+int request_irq(unsigned int irq,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char * devname,
+ void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if (irqflags & SA_SHIRQ) {
+ if (!dev_id)
+ printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
+ }
+#endif
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ cpus_clear(action->mask);
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+/**
+ * free_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function. The function
+ * does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function may be called from interrupt context.
+ *
+ * Bugs: Attempting to free an irq in a handler for the same irq hangs
+ * the machine.
+ */
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ synchronize_irq(irq);
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return;
+ }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+
+static DECLARE_MUTEX(probe_sem);
+
+/**
+ * probe_irq_on - begin an interrupt autodetect
+ *
+ * Commence probing for an interrupt. The interrupts are scanned
+ * and a mask of potential interrupt lines is returned.
+ *
+ */
+
+unsigned long probe_irq_on(void)
+{
+ unsigned int i;
+ irq_desc_t *desc;
+ unsigned long val;
+ unsigned long delay;
+
+ down(&probe_sem);
+ /*
+ * something may have generated an irq long ago and we want to
+ * flush such a longstanding irq before considering it as spurious.
+ */
+ for (i = NR_IRQS-1; i > 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ barrier();
+
+ /*
+ * enable any unassigned irqs
+ * (we must startup again here because if a longstanding irq
+ * happened in the previous stage, it may have masked itself)
+ */
+ for (i = NR_IRQS-1; i > 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action) {
+ desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if (desc->handler->startup(i))
+ desc->status |= IRQ_PENDING;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ barrier();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ val = 0;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ } else
+ if (i < 32)
+ val |= 1 << i;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return val;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+/*
+ * Return a mask of triggered interrupts (this
+ * can handle only legacy ISA interrupts).
+ */
+
+/**
+ * probe_irq_mask - scan a bitmap of interrupt lines
+ * @val: mask of interrupts to consider
+ *
+ * Scan the ISA bus interrupt lines and return a bitmap of
+ * active interrupts. The interrupt probe logic state is then
+ * returned to its previous value.
+ *
+ * Note: we need to scan all the irq's even though we will
+ * only return ISA irq numbers - just so that we reset them
+ * all to a known state.
+ */
+unsigned int probe_irq_mask(unsigned long val)
+{
+ int i;
+ unsigned int mask;
+
+ mask = 0;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (i < 16 && !(status & IRQ_WAITING))
+ mask |= 1 << i;
+
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+ up(&probe_sem);
+
+ return mask & val;
+}
+
+/*
+ * Return the one interrupt that triggered (this can
+ * handle any interrupt source).
+ */
+
+/**
+ * probe_irq_off - end an interrupt autodetect
+ * @val: mask of potential interrupts (unused)
+ *
+ * Scans the unused interrupt lines and returns the line which
+ * appears to have triggered the interrupt. If no interrupt was
+ * found then zero is returned. If more than one interrupt is
+ * found then minus the first candidate is returned to indicate
+ * their is doubt.
+ *
+ * The interrupt probe logic state is returned to its previous
+ * value.
+ *
+ * BUGS: When used in a module (which arguably shouldnt happen)
+ * nothing prevents two IRQ probe callers from overlapping. The
+ * results of this are non-optimal.
+ */
+
+int probe_irq_off(unsigned long val)
+{
+ int i, irq_found, nr_irqs;
+
+ nr_irqs = 0;
+ irq_found = 0;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+ up(&probe_sem);
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+/* this was setup_x86_irq but it seems pretty generic */
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ unsigned long flags;
+ struct irqaction *old, **p;
+ irq_desc_t *desc = irq_desc + irq;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is attempted to be loaded, without actually
+ * installing a new handler, but is this really a problem,
+ * only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
+ desc->handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ /* register_irq_proc(irq); */
+ return 0;
+}
+
/* Initialize irq handling for IRQs.
BASE_IRQ, BASE_IRQ+INTERVAL, ..., BASE_IRQ+NUM*INTERVAL
to IRQ_TYPE. An IRQ_TYPE of 0 means to use a generic interrupt type. */
@@ -121,3 +736,9 @@ init_irq_handlers (int base_irq, int num, int interval,
base_irq += interval;
}
}
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+void init_irq_proc(void)
+{
+}
+#endif /* CONFIG_PROC_FS && CONFIG_SYSCTL */
diff --git a/trunk/drivers/block/cciss_scsi.c b/trunk/drivers/block/cciss_scsi.c
index 2942d32280a5..3226aa11c6ef 100644
--- a/trunk/drivers/block/cciss_scsi.c
+++ b/trunk/drivers/block/cciss_scsi.c
@@ -255,7 +255,7 @@ scsi_cmd_stack_free(int ctlr)
#define DEVICETYPE(n) (n<0 || n>MAX_SCSI_DEVICE_CODE) ? \
"Unknown" : scsi_device_types[n]
-#if 0
+#if 1
static int xmargin=8;
static int amargin=60;
diff --git a/trunk/drivers/char/ipmi/ipmi_msghandler.c b/trunk/drivers/char/ipmi/ipmi_msghandler.c
index 6b302a930e5f..d16bd4b5c117 100644
--- a/trunk/drivers/char/ipmi/ipmi_msghandler.c
+++ b/trunk/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
#define PFX "IPMI message handler: "
-#define IPMI_DRIVER_VERSION "38.0"
+#define IPMI_DRIVER_VERSION "36.0"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
diff --git a/trunk/drivers/isdn/hisax/hfc_usb.c b/trunk/drivers/isdn/hisax/hfc_usb.c
index f8457ef48826..32bf0d5d0f9a 100644
--- a/trunk/drivers/isdn/hisax/hfc_usb.c
+++ b/trunk/drivers/isdn/hisax/hfc_usb.c
@@ -71,68 +71,78 @@ typedef struct {
/****************************************/
static struct usb_device_id hfcusb_idtab[] = {
{
- USB_DEVICE(0x0959, 0x2bd0),
+ .idVendor = 0x0959,
+ .idProduct = 0x2bd0,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_OFF, {4, 0, 2, 1},
"ISDN USB TA (Cologne Chip HFC-S USB based)"}),
},
{
- USB_DEVICE(0x0675, 0x1688),
+ .idVendor = 0x0675,
+ .idProduct = 0x1688,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {1, 2, 0, 0},
"DrayTek miniVigor 128 USB ISDN TA"}),
},
{
- USB_DEVICE(0x07b0, 0x0007),
+ .idVendor = 0x07b0,
+ .idProduct = 0x0007,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {0x80, -64, -32, -16},
"Billion tiny USB ISDN TA 128"}),
},
{
- USB_DEVICE(0x0742, 0x2008),
+ .idVendor = 0x0742,
+ .idProduct = 0x2008,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {4, 0, 2, 1},
"Stollmann USB TA"}),
},
{
- USB_DEVICE(0x0742, 0x2009),
+ .idVendor = 0x0742,
+ .idProduct = 0x2009,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {4, 0, 2, 1},
"Aceex USB ISDN TA"}),
},
{
- USB_DEVICE(0x0742, 0x200A),
+ .idVendor = 0x0742,
+ .idProduct = 0x200A,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {4, 0, 2, 1},
"OEM USB ISDN TA"}),
},
{
- USB_DEVICE(0x08e3, 0x0301),
+ .idVendor = 0x08e3,
+ .idProduct = 0x0301,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {2, 0, 1, 4},
"Olitec USB RNIS"}),
},
{
- USB_DEVICE(0x07fa, 0x0846),
+ .idVendor = 0x07fa,
+ .idProduct = 0x0846,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {0x80, -64, -32, -16},
"Bewan Modem RNIS USB"}),
},
{
- USB_DEVICE(0x07fa, 0x0847),
+ .idVendor = 0x07fa,
+ .idProduct = 0x0847,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {0x80, -64, -32, -16},
"Djinn Numeris USB"}),
},
{
- USB_DEVICE(0x07b0, 0x0006),
+ .idVendor = 0x07b0,
+ .idProduct = 0x0006,
.driver_info = (unsigned long) &((hfcsusb_vdata)
{LED_SCHEME1, {0x80, -64, -32, -16},
"Twister ISDN TA"}),
},
- { }
};
+
/***************************************************************/
/* structure defining input+output fifos (interrupt/bulk mode) */
/***************************************************************/
diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c
index f3fed662f32e..adf960d8a7c9 100644
--- a/trunk/drivers/md/md.c
+++ b/trunk/drivers/md/md.c
@@ -3156,7 +3156,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
if (cnt > 0 ) {
printk(KERN_WARNING
"md: %s(pid %d) used deprecated START_ARRAY ioctl. "
- "This will not be supported beyond July 2006\n",
+ "This will not be supported beyond 2.6\n",
current->comm, current->pid);
cnt--;
}
@@ -3437,19 +3437,10 @@ static int md_thread(void * arg)
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
- /* We need to wait INTERRUPTIBLE so that
- * we don't add to the load-average.
- * That means we need to be sure no signals are
- * pending
- */
- if (signal_pending(current))
- flush_signals(current);
-
- wait_event_interruptible_timeout
- (thread->wqueue,
- test_bit(THREAD_WAKEUP, &thread->flags)
- || kthread_should_stop(),
- thread->timeout);
+ wait_event_timeout(thread->wqueue,
+ test_bit(THREAD_WAKEUP, &thread->flags)
+ || kthread_should_stop(),
+ thread->timeout);
try_to_freeze();
clear_bit(THREAD_WAKEUP, &thread->flags);
diff --git a/trunk/drivers/net/wan/sdladrv.c b/trunk/drivers/net/wan/sdladrv.c
index 032c0f81928e..7c2cf2e76300 100644
--- a/trunk/drivers/net/wan/sdladrv.c
+++ b/trunk/drivers/net/wan/sdladrv.c
@@ -1994,7 +1994,7 @@ static int detect_s514 (sdlahw_t* hw)
modname, hw->irq);
/* map the physical PCI memory to virtual memory */
- hw->dpmbase = ioremap((unsigned long)S514_mem_base_addr,
+ (void *)hw->dpmbase = ioremap((unsigned long)S514_mem_base_addr,
(unsigned long)MAX_SIZEOF_S514_MEMORY);
/* map the physical control register memory to virtual memory */
hw->vector = (unsigned long)ioremap(
diff --git a/trunk/drivers/scsi/ahci.c b/trunk/drivers/scsi/ahci.c
index 894e7113e0b3..4e96ec5f2ff9 100644
--- a/trunk/drivers/scsi/ahci.c
+++ b/trunk/drivers/scsi/ahci.c
@@ -565,17 +565,6 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
u32 tmp;
int work;
- printk(KERN_WARNING "ata%u: port reset, "
- "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
- ap->id,
- irq_stat,
- readl(mmio + HOST_IRQ_STAT),
- readl(port_mmio + PORT_IRQ_STAT),
- readl(port_mmio + PORT_CMD),
- readl(port_mmio + PORT_TFDATA),
- readl(port_mmio + PORT_SCR_STAT),
- readl(port_mmio + PORT_SCR_ERR));
-
/* stop DMA */
tmp = readl(port_mmio + PORT_CMD);
tmp &= ~PORT_CMD_START;
@@ -613,6 +602,8 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
tmp |= PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
+
+ printk(KERN_WARNING "ata%u: error occurred, port reset\n", ap->id);
}
static void ahci_eng_timeout(struct ata_port *ap)
@@ -623,17 +614,17 @@ static void ahci_eng_timeout(struct ata_port *ap)
struct ata_queued_cmd *qc;
unsigned long flags;
- printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+ DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
+ ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));
+
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
ap->id);
} else {
- ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));
-
/* hack alert! We cannot use the supplied completion
* function from inside the ->eh_strategy_handler() thread.
* libata is the only user of ->eh_strategy_handler() in
@@ -668,19 +659,9 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
}
if (status & PORT_IRQ_FATAL) {
- unsigned int err_mask;
- if (status & PORT_IRQ_TF_ERR)
- err_mask = AC_ERR_DEV;
- else if (status & PORT_IRQ_IF_ERR)
- err_mask = AC_ERR_ATA_BUS;
- else
- err_mask = AC_ERR_HOST_BUS;
-
- /* command processing has stopped due to error; restart */
ahci_intr_error(ap, status);
-
if (qc)
- ata_qc_complete(qc, err_mask);
+ ata_qc_complete(qc, AC_ERR_OTHER);
}
return 1;
diff --git a/trunk/drivers/scsi/libata-core.c b/trunk/drivers/scsi/libata-core.c
index ba1eb8b38e00..d81db3a3d4b9 100644
--- a/trunk/drivers/scsi/libata-core.c
+++ b/trunk/drivers/scsi/libata-core.c
@@ -1263,7 +1263,7 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
}
/* ATAPI-specific feature tests */
- else if (dev->class == ATA_DEV_ATAPI) {
+ else {
if (ata_id_is_ata(dev->id)) /* sanity check */
goto err_out_nosup;
@@ -2399,7 +2399,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_SINGLE)
assert(qc->n_elem == 1);
- VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+ DPRINTK("unmapping %u sg elements\n", qc->n_elem);
/* if we padded the buffer out to 32-bit bound, and data
* xfer direction is from-device, we must copy from the
@@ -2409,8 +2409,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
if (qc->flags & ATA_QCFLAG_SG) {
- if (qc->n_elem)
- dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+ dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
/* restore last sg */
sg[qc->orig_n_elem - 1].length += qc->pad_len;
if (pad_buf) {
@@ -2420,10 +2419,8 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
kunmap_atomic(psg->page, KM_IRQ0);
}
} else {
- if (sg_dma_len(&sg[0]) > 0)
- dma_unmap_single(ap->host_set->dev,
- sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
- dir);
+ dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
+ sg_dma_len(&sg[0]), dir);
/* restore sg */
sg->length += qc->pad_len;
if (pad_buf)
@@ -2622,11 +2619,6 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
sg->length, qc->pad_len);
}
- if (!sg->length) {
- sg_dma_address(sg) = 0;
- goto skip_map;
- }
-
dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
sg->length, dir);
if (dma_mapping_error(dma_address)) {
@@ -2636,7 +2628,6 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
}
sg_dma_address(sg) = dma_address;
-skip_map:
sg_dma_len(sg) = sg->length;
DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
@@ -2664,7 +2655,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->__sg;
struct scatterlist *lsg = &sg[qc->n_elem - 1];
- int n_elem, pre_n_elem, dir, trim_sg = 0;
+ int n_elem, dir;
VPRINTK("ENTER, ata%u\n", ap->id);
assert(qc->flags & ATA_QCFLAG_SG);
@@ -2698,24 +2689,13 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim last sg */
lsg->length -= qc->pad_len;
- if (lsg->length == 0)
- trim_sg = 1;
DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
qc->n_elem - 1, lsg->length, qc->pad_len);
}
- pre_n_elem = qc->n_elem;
- if (trim_sg && pre_n_elem)
- pre_n_elem--;
-
- if (!pre_n_elem) {
- n_elem = 0;
- goto skip_map;
- }
-
dir = qc->dma_dir;
- n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
+ n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
if (n_elem < 1) {
/* restore last sg */
lsg->length += qc->pad_len;
@@ -2724,7 +2704,6 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
DPRINTK("%d sg elements mapped\n", n_elem);
-skip_map:
qc->n_elem = n_elem;
return 0;
@@ -3284,11 +3263,32 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
+ struct ata_device *dev = qc->dev;
u8 host_stat = 0, drv_stat;
unsigned long flags;
DPRINTK("ENTER\n");
+ /* FIXME: doesn't this conflict with timeout handling? */
+ if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
+ struct scsi_cmnd *cmd = qc->scsicmd;
+
+ if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
+
+ /* finish completing original command */
+ spin_lock_irqsave(&host_set->lock, flags);
+ __ata_qc_complete(qc);
+ spin_unlock_irqrestore(&host_set->lock, flags);
+
+ atapi_request_sense(ap, dev, cmd);
+
+ cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
+ scsi_finish_command(cmd);
+
+ goto out;
+ }
+ }
+
spin_lock_irqsave(&host_set->lock, flags);
/* hack alert! We cannot use the supplied completion
@@ -3327,6 +3327,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
spin_unlock_irqrestore(&host_set->lock, flags);
+out:
DPRINTK("EXIT\n");
}
@@ -3410,11 +3411,16 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
qc = ata_qc_new(ap);
if (qc) {
+ qc->__sg = NULL;
+ qc->flags = 0;
qc->scsicmd = NULL;
qc->ap = ap;
qc->dev = dev;
+ qc->cursect = qc->cursg = qc->cursg_ofs = 0;
+ qc->nsect = 0;
+ qc->nbytes = qc->curbytes = 0;
- ata_qc_reinit(qc);
+ ata_tf_init(ap, &qc->tf, dev->devno);
}
return qc;
diff --git a/trunk/drivers/scsi/libata-scsi.c b/trunk/drivers/scsi/libata-scsi.c
index 3b4ca55a3332..0df4b682965d 100644
--- a/trunk/drivers/scsi/libata-scsi.c
+++ b/trunk/drivers/scsi/libata-scsi.c
@@ -1955,44 +1955,22 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
done(cmd);
}
-static int atapi_sense_complete(struct ata_queued_cmd *qc,unsigned int err_mask)
+void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_cmnd *cmd)
{
- if (err_mask && ((err_mask & AC_ERR_DEV) == 0))
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into
- * a sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
- ata_gen_ata_desc_sense(qc);
-
- qc->scsidone(qc->scsicmd);
- return 0;
-}
-
-/* is it pointless to prefer PIO for "safety reasons"? */
-static inline int ata_pio_use_silly(struct ata_port *ap)
-{
- return (ap->flags & ATA_FLAG_PIO_DMA);
-}
-
-static void atapi_request_sense(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
+ DECLARE_COMPLETION(wait);
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ int rc;
DPRINTK("ATAPI request sense\n");
+ qc = ata_qc_new_init(ap, dev);
+ BUG_ON(qc == NULL);
+
/* FIXME: is this needed? */
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
- ap->ops->tf_read(ap, &qc->tf);
-
- /* fill these in, for the case where they are -not- overwritten */
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = qc->tf.feature >> 4;
-
- ata_qc_reinit(qc);
-
ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
qc->dma_dir = DMA_FROM_DEVICE;
@@ -2003,20 +1981,22 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
qc->tf.command = ATA_CMD_PACKET;
- if (ata_pio_use_silly(ap)) {
- qc->tf.protocol = ATA_PROT_ATAPI_DMA;
- qc->tf.feature |= ATAPI_PKT_DMA;
- } else {
- qc->tf.protocol = ATA_PROT_ATAPI;
- qc->tf.lbam = (8 * 1024) & 0xff;
- qc->tf.lbah = (8 * 1024) >> 8;
- }
+ qc->tf.protocol = ATA_PROT_ATAPI;
+ qc->tf.lbam = (8 * 1024) & 0xff;
+ qc->tf.lbah = (8 * 1024) >> 8;
qc->nbytes = SCSI_SENSE_BUFFERSIZE;
- qc->complete_fn = atapi_sense_complete;
+ qc->waiting = &wait;
+ qc->complete_fn = ata_qc_complete_noop;
- if (ata_qc_issue(qc))
- ata_qc_complete(qc, AC_ERR_OTHER);
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ rc = ata_qc_issue(qc);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ if (rc)
+ ata_port_disable(ap);
+ else
+ wait_for_completion(&wait);
DPRINTK("EXIT\n");
}
@@ -2028,8 +2008,19 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
if (unlikely(err_mask & AC_ERR_DEV)) {
+ DPRINTK("request check condition\n");
+
+ /* FIXME: command completion with check condition
+ * but no sense causes the error handler to run,
+ * which then issues REQUEST SENSE, fills in the sense
+ * buffer, and completes the command (for the second
+ * time). We need to issue REQUEST SENSE some other
+ * way, to avoid completing the command twice.
+ */
cmd->result = SAM_STAT_CHECK_CONDITION;
- atapi_request_sense(qc);
+
+ qc->scsidone(cmd);
+
return 1;
}
diff --git a/trunk/drivers/scsi/libata.h b/trunk/drivers/scsi/libata.h
index 74a84e0ec0a4..fad051ca4672 100644
--- a/trunk/drivers/scsi/libata.h
+++ b/trunk/drivers/scsi/libata.h
@@ -54,6 +54,8 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
/* libata-scsi.c */
+extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_cmnd *cmd);
extern void ata_scsi_scan_host(struct ata_port *ap);
extern int ata_scsi_error(struct Scsi_Host *host);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
diff --git a/trunk/include/asm-v850/hardirq.h b/trunk/include/asm-v850/hardirq.h
index d98488cd5af1..5dfca8047cbe 100644
--- a/trunk/include/asm-v850/hardirq.h
+++ b/trunk/include/asm-v850/hardirq.h
@@ -5,8 +5,6 @@
#include
#include
-#include
-
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
@@ -24,6 +22,4 @@ typedef struct {
# error HARDIRQ_BITS is too low!
#endif
-void ack_bad_irq(unsigned int irq);
-
#endif /* __V850_HARDIRQ_H__ */
diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h
index f2dbb684ce9e..ad5996183ec2 100644
--- a/trunk/include/linux/libata.h
+++ b/trunk/include/linux/libata.h
@@ -59,8 +59,6 @@
#define VPRINTK(fmt, args...)
#endif /* ATA_DEBUG */
-#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
-
#ifdef ATA_NDEBUG
#define assert(expr)
#else
@@ -121,7 +119,6 @@ enum {
ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
* proper HSM is in place. */
- ATA_FLAG_DEBUGMSG = (1 << 10),
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
@@ -662,17 +659,6 @@ static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, uns
tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}
-static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
-{
- qc->__sg = NULL;
- qc->flags = 0;
- qc->cursect = qc->cursg = qc->cursg_ofs = 0;
- qc->nsect = 0;
- qc->nbytes = qc->curbytes = 0;
-
- ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
-}
-
/**
* ata_irq_on - Enable interrupts on a port.
diff --git a/trunk/include/linux/sysctl.h b/trunk/include/linux/sysctl.h
index 6bc03c911a83..64f203c45378 100644
--- a/trunk/include/linux/sysctl.h
+++ b/trunk/include/linux/sysctl.h
@@ -20,6 +20,7 @@
#include
#include
+#include
#include
struct file;
@@ -858,7 +859,6 @@ enum
};
#ifdef __KERNEL__
-#include
extern void sysctl_init(void);
diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c
index ddcf7754eec2..56a09a4ac410 100644
--- a/trunk/net/ipv6/addrconf.c
+++ b/trunk/net/ipv6/addrconf.c
@@ -1045,9 +1045,10 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
}
#endif
/* Rule 8: Use longest matching prefix */
- if (hiscore.rule < 8)
+ if (hiscore.rule < 8) {
hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr);
- score.rule++;
+ hiscore.rule++;
+ }
score.matchlen = ipv6_addr_diff(&ifa->addr, daddr);
if (score.matchlen > hiscore.matchlen) {
score.rule = 8;
diff --git a/trunk/net/sunrpc/svcsock.c b/trunk/net/sunrpc/svcsock.c
index c6a51911e71e..e50e7cf43737 100644
--- a/trunk/net/sunrpc/svcsock.c
+++ b/trunk/net/sunrpc/svcsock.c
@@ -1178,7 +1178,6 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
arg->tail[0].iov_len = 0;
try_to_freeze();
- cond_resched();
if (signalled())
return -EINTR;