genirq: reimplement exit_irq_thread() hook via task_work_add()
exit_irq_thread() and task->irq_thread are needed to handle the unexpected
(and unlikely) exit of an irq thread.

We can use task_work instead and make this all private to
kernel/irq/manage.c; this is a cleanup plus a micro-optimization.

1. rename exit_irq_thread() to irq_thread_dtor(), make it
   static, and move it up before irq_thread().

2. change irq_thread() to do task_work_add(irq_thread_dtor)
   at the start and task_work_cancel() before return.

   tracehook_notify_resume() can never play with kthreads,
   so only do_exit()->exit_task_work() can call the callback,
   which is exactly what we want (see the sketch after this list).

3. remove task_struct->irq_thread and the special hook
   in do_exit().
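
For orientation, the resulting pattern is roughly the sketch below,
using the same task_work calls as the kernel/irq/manage.c hunks
further down; my_dtor(), my_thread_fn() and do_one_iteration() are
illustrative names, not part of this patch.

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/task_work.h>

	/* Runs only via do_exit()->exit_task_work() if the thread dies early. */
	static void my_dtor(struct task_work *unused)
	{
		/* undo whatever state the exiting thread would otherwise leave behind */
	}

	static int my_thread_fn(void *data)
	{
		struct task_work on_exit_work;	/* on the stack: cancelled before return */

		init_task_work(&on_exit_work, my_dtor, NULL);
		task_work_add(current, &on_exit_work, false);	/* false: no notification needed */

		while (!kthread_should_stop())
			do_one_iteration(data);		/* stands in for the real work loop */

		/* Normal exit path: the destructor must not run. */
		task_work_cancel(current, my_dtor);
		return 0;
	}

The work item can safely live on the thread's stack because the normal
path cancels it before my_thread_fn() returns; only the abnormal
do_exit() path ever invokes my_dtor().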

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alexander Gordeev <agordeev@redhat.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Smith <dsmith@redhat.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Oleg Nesterov authored and Al Viro committed May 24, 2012
1 parent e73f895 commit 4d1d61a
Showing 4 changed files with 35 additions and 49 deletions.
4 changes: 0 additions & 4 deletions include/linux/interrupt.h
@@ -142,8 +142,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 extern int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);
-
-extern void exit_irq_thread(void);
 #else
 
 extern int __must_check
@@ -177,8 +175,6 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
 {
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
 }
-
-static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
10 changes: 2 additions & 8 deletions include/linux/sched.h
@@ -1301,22 +1301,16 @@ struct task_struct {
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	unsigned irq_thread:1;
-#endif
-
	pid_t pid;
	pid_t tgid;
 
 #ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
 #endif
-
-	/*
+	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
-	 * older sibling, respectively. (p->father can be replaced with
+	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
2 changes: 0 additions & 2 deletions kernel/exit.c
@@ -954,8 +954,6 @@ void do_exit(long code)
 
	exit_task_work(tsk);
 
-	exit_irq_thread();
-
	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
68 changes: 33 additions & 35 deletions kernel/irq/manage.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -773,11 +774,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
@@ -793,7 +822,9 @@ static int irq_thread(void *data)
		handler_fn = irq_thread_fn;
 
	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;
@@ -815,44 +846,11 @@ static int irq_thread(void *data)
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out currents thread_mask
	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
	if (!force_irqthreads)
