Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 350271
b: refs/heads/master
c: 111c225
h: refs/heads/master
i:
  350269: 8046f6a
  350267: 91550b8
  350263: 32df3be
  350255: afdd673
  350239: 33aa54a
  350207: 6c71417
v: v3
  • Loading branch information
Tejun Heo committed Jan 18, 2013
1 parent 569f937 commit 9a90fc1
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 023f27d3d6fcc9048754d879fe5e7d63402a5b16
refs/heads/master: 111c225a5f8d872bc9327ada18d13b75edaa34be
35 changes: 28 additions & 7 deletions trunk/kernel/workqueue.c
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,9 @@ struct worker {

/* for rebinding worker to CPU */
struct work_struct rebind_work; /* L: for busy worker */

/* used only by rescuers to point to the target workqueue */
struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
};

struct worker_pool {
Expand Down Expand Up @@ -763,12 +766,20 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
unsigned int cpu)
{
struct worker *worker = kthread_data(task), *to_wakeup = NULL;
struct worker_pool *pool = worker->pool;
atomic_t *nr_running = get_pool_nr_running(pool);
struct worker_pool *pool;
atomic_t *nr_running;

/*
 * Rescuers, which may not have all the fields set up like normal
 * workers, also reach here; let's not access anything before
 * checking NOT_RUNNING.
 */
if (worker->flags & WORKER_NOT_RUNNING)
return NULL;

pool = worker->pool;
nr_running = get_pool_nr_running(pool);

/* this can only happen on the local cpu */
BUG_ON(cpu != raw_smp_processor_id());

Expand Down Expand Up @@ -2357,7 +2368,7 @@ static int worker_thread(void *__worker)

/**
* rescuer_thread - the rescuer thread function
* @__wq: the associated workqueue
* @__rescuer: self
*
* Workqueue rescuer thread function. There's one rescuer for each
* workqueue that has WQ_RESCUER set.
Expand All @@ -2374,20 +2385,27 @@ static int worker_thread(void *__worker)
*
* This should happen rarely.
*/
static int rescuer_thread(void *__wq)
static int rescuer_thread(void *__rescuer)
{
struct workqueue_struct *wq = __wq;
struct worker *rescuer = wq->rescuer;
struct worker *rescuer = __rescuer;
struct workqueue_struct *wq = rescuer->rescue_wq;
struct list_head *scheduled = &rescuer->scheduled;
bool is_unbound = wq->flags & WQ_UNBOUND;
unsigned int cpu;

set_user_nice(current, RESCUER_NICE_LEVEL);

/*
 * Mark the rescuer as a worker too. As WORKER_PREP is never cleared,
 * it doesn't participate in concurrency management.
 */
rescuer->task->flags |= PF_WQ_WORKER;
repeat:
set_current_state(TASK_INTERRUPTIBLE);

if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
rescuer->task->flags &= ~PF_WQ_WORKER;
return 0;
}

Expand Down Expand Up @@ -2431,6 +2449,8 @@ static int rescuer_thread(void *__wq)
spin_unlock_irq(&gcwq->lock);
}

/* rescuers should never participate in concurrency management */
WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
schedule();
goto repeat;
}
Expand Down Expand Up @@ -3266,7 +3286,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
if (!rescuer)
goto err;

rescuer->task = kthread_create(rescuer_thread, wq, "%s",
rescuer->rescue_wq = wq;
rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
wq->name);
if (IS_ERR(rescuer->task))
goto err;
Expand Down

0 comments on commit 9a90fc1

Please sign in to comment.