---
r: 365475
b: refs/heads/master
c: f36dc67
h: refs/heads/master
i:
  365473: ee3222c
  365471: b528e8f
v: v3
Lai Jiangshan authored and Tejun Heo committed Mar 4, 2013
1 parent 8ed9811 commit a4f9151
Showing 2 changed files with 10 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f5faa0774e07eada85b0c55ec789b3f337d01412
+refs/heads/master: f36dc67b27a689eeb3631b11ebef17bbff257fbb
18 changes: 9 additions & 9 deletions trunk/kernel/workqueue.c
@@ -1504,8 +1504,10 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
@@ -1533,11 +1535,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-	struct worker_pool *pool = worker->pool;
-
 	while (true) {
 		/*
 		 * The following call may fail, succeed or succeed
@@ -1575,7 +1575,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
 	/* CPU may go down again inbetween, clear UNBOUND only on success */
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	/* rebind complete, become available again */
@@ -1593,7 +1593,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	spin_unlock_irq(&worker->pool->lock);
@@ -2038,7 +2038,7 @@ static bool manage_workers(struct worker *worker)
 		 * on @pool's current state. Try it and adjust
		 * %WORKER_UNBOUND accordingly.
		 */
-		if (worker_maybe_bind_and_lock(worker))
+		if (worker_maybe_bind_and_lock(pool))
			worker->flags &= ~WORKER_UNBOUND;
		else
			worker->flags |= WORKER_UNBOUND;
@@ -2358,7 +2358,7 @@ static int rescuer_thread(void *__rescuer)
 
		/* migrate to the target cpu if possible */
		rescuer->pool = pool;
-		worker_maybe_bind_and_lock(rescuer);
+		worker_maybe_bind_and_lock(pool);
 
		/*
		 * Slurp in all works issued via this workqueue and
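For readers skimming the diff: the updated kernel-doc above states the contract of worker_maybe_bind_and_lock() after this change — bind %current to @pool's CPU if the pool is still associated, take pool->lock either way, and report whether the bind succeeded. The snippet below is a rough user-space analogue of that retry-bind-then-lock pattern, not kernel code; every name in it (fake_pool, fake_maybe_bind_and_lock, the POOL_DISASSOCIATED value) is invented for illustration.

/*
 * User-space sketch only -- all names and values here are made up for
 * illustration; this is not the kernel implementation.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

#define POOL_DISASSOCIATED 0x1		/* illustrative flag value */

struct fake_pool {
	pthread_mutex_t lock;
	int cpu;			/* CPU this pool is (or was) tied to */
	unsigned int flags;
};

/*
 * Pin the calling thread to @pool->cpu if the pool is still associated,
 * then take @pool->lock.  Returns true if the pool turned out to be online
 * (caller is bound); false if it went offline (lock is still held).
 */
static bool fake_maybe_bind_and_lock(struct fake_pool *pool)
{
	for (;;) {
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(pool->cpu, &mask);

		/* May race with the pool being dissociated; recheck under the lock. */
		if (!(pool->flags & POOL_DISASSOCIATED))
			sched_setaffinity(0, sizeof(mask), &mask);

		pthread_mutex_lock(&pool->lock);
		if (pool->flags & POOL_DISASSOCIATED)
			return false;		/* offline: locked but not bound */
		if (sched_getcpu() == pool->cpu)
			return true;		/* bound to the pool's CPU and locked */
		pthread_mutex_unlock(&pool->lock);

		sched_yield();			/* lost a race; back off and retry */
	}
}

As in the kernel function, a false return still leaves the lock held, so a caller can inspect pool state before deciding how to proceed.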
