---
r: 234768
b: refs/heads/master
c: b5faba2
h: refs/heads/master
v: v3
---
Thomas Gleixner committed Feb 25, 2011
1 parent b11eeae commit 2d09082
Showing 5 changed files with 116 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 1204e95689f9fbd245a4ce5c1b0cd0a9b77f8d25
refs/heads/master: b5faba21a6805c33b40e258d36f57997ee1de131
2 changes: 2 additions & 0 deletions trunk/include/linux/interrupt.h
@@ -99,6 +99,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @thread_fn: interrupt handler function for threaded interrupts
* @thread: thread pointer for threaded interrupts
* @thread_flags: flags related to @thread
* @thread_mask: bitmask for keeping track of @thread activity
*/
struct irqaction {
irq_handler_t handler;
@@ -109,6 +110,7 @@ struct irqaction {
irq_handler_t thread_fn;
struct task_struct *thread;
unsigned long thread_flags;
unsigned long thread_mask;
const char *name;
struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
2 changes: 2 additions & 0 deletions trunk/include/linux/irqdesc.h
@@ -28,6 +28,7 @@ struct timer_rand_state;
* @lock: locking for SMP
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
* @threads_oneshot: bitfield to handle shared oneshot threads
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
* @dir: /proc/irq/ procfs entry
@@ -86,6 +87,7 @@ struct irq_desc {
cpumask_var_t pending_mask;
#endif
#endif
unsigned long threads_oneshot;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PROC_FS
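Taken together, the two new fields implement one bookkeeping scheme: each irqaction on a shared oneshot line owns a single bit (its @thread_mask), and desc->threads_oneshot is the OR of the bits of all threads that still have work pending; the line may only be unmasked once that field reads zero. Below is a minimal userspace sketch of that relationship. The struct names and the printf() are stand-ins for the real kernel objects and the chip unmask callback, so read it as an illustration, not kernel code:

#include <stdio.h>

/* Illustrative stand-ins for struct irqaction and struct irq_desc. */
struct action_model {
        unsigned long thread_mask;      /* this handler's private bit */
};

struct desc_model {
        unsigned long threads_oneshot;  /* OR of all still-running threads */
};

/* Hard irq context: record that this action's thread has work to do. */
static void mark_thread_running(struct desc_model *d, struct action_model *a)
{
        d->threads_oneshot |= a->thread_mask;
}

/* Thread context: clear our bit. The line can only be unmasked after
 * every thread sharing it has finished, i.e. the field reads zero. */
static void mark_thread_done(struct desc_model *d, struct action_model *a)
{
        d->threads_oneshot &= ~a->thread_mask;
        if (!d->threads_oneshot)
                printf("all threads done: unmask the line\n");
}

int main(void)
{
        struct desc_model desc = { 0 };
        struct action_model a = { 1UL << 0 }, b = { 1UL << 1 };

        mark_thread_running(&desc, &a);
        mark_thread_running(&desc, &b);
        mark_thread_done(&desc, &a);    /* masked: b's thread still runs */
        mark_thread_done(&desc, &b);    /* now the line gets unmasked */
        return 0;
}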
76 changes: 63 additions & 13 deletions trunk/kernel/irq/handle.c
@@ -51,6 +51,68 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
"but no thread function available.", irq, action->name);
}

static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
/*
* Wake up the handler thread for this action. In case the
* thread crashed and was killed we just pretend that we
* handled the interrupt. The hardirq handler has disabled the
* device interrupt, so no irq storm is lurking. If the
* RUNTHREAD bit is already set, nothing to do.
*/
if (test_bit(IRQTF_DIED, &action->thread_flags) ||
test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
return;

/*
* It's safe to OR the mask lockless here. We have only two
* places which write to threads_oneshot: This code and the
* irq thread.
*
* This code is the hard irq context and can never run on two
* cpus in parallel. If it ever does we have more serious
* problems than this bitmask.
*
* The irq threads of this irq which clear their "running" bit
* in threads_oneshot are serialized via desc->lock against
* each other and they are serialized against this code by
* IRQS_INPROGRESS.
*
* Hard irq handler:
*
* spin_lock(desc->lock);
* desc->state |= IRQS_INPROGRESS;
* spin_unlock(desc->lock);
* set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
* desc->threads_oneshot |= mask;
* spin_lock(desc->lock);
* desc->state &= ~IRQS_INPROGRESS;
* spin_unlock(desc->lock);
*
* irq thread:
*
* again:
* spin_lock(desc->lock);
* if (desc->state & IRQS_INPROGRESS) {
* spin_unlock(desc->lock);
* while(desc->state & IRQS_INPROGRESS)
* cpu_relax();
* goto again;
* }
* if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
* desc->threads_oneshot &= ~mask;
* spin_unlock(desc->lock);
*
* So either the thread waits for us to clear IRQS_INPROGRESS
* or we are waiting in the flow handler for desc->lock to be
* released before we reach this point. The thread also checks
* IRQTF_RUNTHREAD under desc->lock. If set it leaves
* threads_oneshot untouched and runs the thread another time.
*/
desc->threads_oneshot |= action->thread_mask;
wake_up_process(action->thread);
}

irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
@@ -85,19 +147,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
break;
}

/*
* Wake up the handler thread for this
* action. In case the thread crashed and was
* killed we just pretend that we handled the
* interrupt. The hardirq handler above has
* disabled the device interrupt, so no irq
* storm is lurking.
*/
if (likely(!test_bit(IRQTF_DIED,
&action->thread_flags))) {
set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
wake_up_process(action->thread);
}
irq_wake_thread(desc, action);

/* Fall through to add to randomness */
case IRQ_HANDLED:
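The new irq_wake_thread() above folds two concerns into a single test: a thread that died must never be woken, and a second hard interrupt arriving before the thread has run must not wake it again. Here is a small userspace model of that guard using C11 atomics; test_bit_model() and test_and_set_bit_model() are illustrative substitutes for the kernel's bitops, not the real API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { IRQTF_RUNTHREAD_BIT, IRQTF_DIED_BIT };   /* illustrative bit numbers */

/* Models test_and_set_bit(): atomically set the bit, return its old value. */
static bool test_and_set_bit_model(int nr, atomic_ulong *flags)
{
        unsigned long mask = 1UL << nr;

        return atomic_fetch_or(flags, mask) & mask;
}

static bool test_bit_model(int nr, atomic_ulong *flags)
{
        return atomic_load(flags) & (1UL << nr);
}

/* Mirrors the early return in irq_wake_thread(): a dead thread is never
 * woken, and only the first of several hard irqs performs the wakeup. */
static void wake_thread_model(atomic_ulong *thread_flags)
{
        if (test_bit_model(IRQTF_DIED_BIT, thread_flags) ||
            test_and_set_bit_model(IRQTF_RUNTHREAD_BIT, thread_flags))
                return;
        printf("wake_up_process(action->thread)\n");
}

int main(void)
{
        atomic_ulong flags = 0;

        wake_thread_model(&flags);      /* first irq: wakes the thread */
        wake_thread_model(&flags);      /* RUNTHREAD already set: no-op */
        return 0;
}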
54 changes: 48 additions & 6 deletions trunk/kernel/irq/manage.c
@@ -617,8 +617,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
* handler has finished. Unmask if the interrupt has not been disabled
* and is marked MASKED.
*/
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
static void irq_finalize_oneshot(struct irq_desc *desc,
struct irqaction *action, bool force)
{
if (!(desc->istate & IRQS_ONESHOT))
return;
again:
chip_bus_lock(desc);
raw_spin_lock_irq(&desc->lock);
@@ -631,6 +634,11 @@ static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
* on the other CPU. If we unmask the irq line then the
* interrupt can come in again and masks the line, leaves due
* to IRQS_INPROGRESS and the irq line is masked forever.
*
* This also serializes the state of shared oneshot handlers
* versus "desc->threads_oneshot |= action->thread_mask;" in
* irq_wake_thread(). See the comment there which explains the
* serialization.
*/
if (unlikely(desc->istate & IRQS_INPROGRESS)) {
raw_spin_unlock_irq(&desc->lock);
@@ -639,11 +647,23 @@ static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
goto again;
}

if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
/*
* Now check again, whether the thread should run. Otherwise
* we would clear the threads_oneshot bit of this thread which
* was just set.
*/
if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
goto out_unlock;

desc->threads_oneshot &= ~action->thread_mask;

if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
(desc->istate & IRQS_MASKED)) {
irq_compat_clr_masked(desc);
desc->istate &= ~IRQS_MASKED;
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
out_unlock:
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(desc);
}
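The core of the rewritten finalize path is its final condition: the line is unmasked only when the last sharing thread has cleared its bit, nobody disabled the interrupt in the meantime, and the line is actually still masked. The force parameter serves the teardown paths (the irq_thread() exit and exit_irq_thread() below), which must clear the bit even though IRQTF_RUNTHREAD may still be set. A compact sketch of the unmask decision follows; the flag values are simplified stand-ins for the desc->istate bits:

#include <stdbool.h>
#include <stdio.h>

#define IRQS_DISABLED   (1U << 0)       /* illustrative flag values */
#define IRQS_MASKED     (1U << 1)

/* True when the finalize path may call the chip's irq_unmask callback. */
static bool may_unmask(unsigned long threads_oneshot, unsigned int istate)
{
        return !threads_oneshot &&              /* no sharing thread still runs */
               !(istate & IRQS_DISABLED) &&     /* irq was not disabled meanwhile */
               (istate & IRQS_MASKED);          /* and the line is really masked */
}

int main(void)
{
        printf("%d\n", may_unmask(0x2, IRQS_MASKED));   /* 0: a thread runs */
        printf("%d\n", may_unmask(0, IRQS_MASKED));     /* 1: safe to unmask */
        return 0;
}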
@@ -691,7 +711,7 @@ static int irq_thread(void *data)
};
struct irqaction *action = data;
struct irq_desc *desc = irq_to_desc(action->irq);
int wake, oneshot = desc->istate & IRQS_ONESHOT;
int wake;

sched_setscheduler(current, SCHED_FIFO, &param);
current->irqaction = action;
@@ -719,8 +739,7 @@ static int irq_thread(void *data)

action->thread_fn(action->irq, action->dev_id);

if (oneshot)
irq_finalize_oneshot(action->irq, desc);
irq_finalize_oneshot(desc, action, false);
}

wake = atomic_dec_and_test(&desc->threads_active);
@@ -729,6 +748,9 @@ static int irq_thread(void *data)
wake_up(&desc->wait_for_threads);
}

/* Prevent a stale desc->threads_oneshot */
irq_finalize_oneshot(desc, action, true);

/*
* Clear irqaction. Otherwise exit_irq_thread() would make
* fuzz about an active irq thread going into nirvana.
@@ -743,6 +765,7 @@ static int irq_thread(void *data)
void exit_irq_thread(void)
{
struct task_struct *tsk = current;
struct irq_desc *desc;

if (!tsk->irqaction)
return;
@@ -751,6 +774,14 @@
"exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

desc = irq_to_desc(tsk->irqaction->irq);

/*
* Prevent a stale desc->threads_oneshot. Must be called
* before setting the IRQTF_DIED flag.
*/
irq_finalize_oneshot(desc, tsk->irqaction, true);

/*
* Set the THREAD DIED flag to prevent further wakeups of the
* soon to be gone threaded handler.
@@ -767,7 +798,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
struct irqaction *old, **old_ptr;
const char *old_name = NULL;
unsigned long flags;
unsigned long flags, thread_mask = 0;
int ret, nested, shared = 0;
cpumask_var_t mask;

@@ -865,12 +896,23 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)

/* add new interrupt at end of irq queue */
do {
thread_mask |= old->thread_mask;
old_ptr = &old->next;
old = *old_ptr;
} while (old);
shared = 1;
}

/*
* Set up the thread mask for this irqaction. It is unlikely that
* 32 or 64 irqs share one line, but who knows.
*/
if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
ret = -EBUSY;
goto out_mask;
}
new->thread_mask = 1 << ffz(thread_mask);

if (!shared) {
irq_chip_set_defaults(desc->irq_data.chip);

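The bit allocation above is also where the scheme's hard limit lives: a thread_mask is a single bit in an unsigned long, so at most BITS_PER_LONG oneshot handlers can share one line, and __setup_irq() returns -EBUSY for an IRQF_ONESHOT request once every bit is taken. A standalone model of that allocator follows; ffz_model() imitates the kernel's ffz() ("find first zero bit") with a GCC/Clang builtin, and the error handling is simplified:

#include <errno.h>
#include <stdio.h>

/* Imitates the kernel's ffz(): index of the first zero bit in x.
 * The caller must guarantee that at least one bit is still zero. */
static unsigned long ffz_model(unsigned long x)
{
        return __builtin_ctzl(~x);      /* GCC/Clang builtin */
}

/* Hand out the first free bit, as __setup_irq() does after OR-ing
 * together the thread_mask of every action already on the line. */
static int alloc_thread_mask(unsigned long *used, unsigned long *mask)
{
        if (*used == ~0UL)
                return -EBUSY;          /* every bit taken: line is full */
        *mask = 1UL << ffz_model(*used);
        *used |= *mask;
        return 0;
}

int main(void)
{
        unsigned long used = 0, mask;
        int i;

        for (i = 0; i < 3; i++) {
                if (alloc_thread_mask(&used, &mask))
                        break;
                printf("action %d gets thread_mask %#lx\n", i, mask);
        }
        return 0;
}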
