workqueue: factor out start_flush_work()
Factor out start_flush_work() from flush_work().  start_flush_work()
takes a @wait_executing argument which controls whether the barrier is
queued only if the work is pending, or also if it is executing.  As
flush_work() needs to wait for execution too, it passes %true.

This commit doesn't introduce any behavior change.  start_flush_work()
will be used to implement flush_work_sync().

Signed-off-by: Tejun Heo <tj@kernel.org>
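
The commit message notes that start_flush_work() will later back flush_work_sync().  As a rough illustration only (not code from this commit), a flush_work_sync() built on start_flush_work() with @wait_executing set to %false could look roughly like the sketch below, assuming it lives in kernel/workqueue.c and reuses the existing wait_on_work() helper from the cancel/flush paths:

/*
 * Hypothetical sketch only.  With @wait_executing == false the barrier
 * is queued only if @work is still pending; currently executing
 * instances are waited for separately via wait_on_work().
 */
bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool pending, waited;

	/* queue the flush barrier only if @work is still pending */
	pending = start_flush_work(work, &barr, false);

	/* wait for any instances that are already executing */
	waited = wait_on_work(work);

	/* if a barrier was queued, wait for it and destroy the on-stack work */
	if (pending) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return pending || waited;
}
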
Tejun Heo committed Sep 19, 2010
1 parent 401a8d0 commit baf5902
kernel/workqueue.c: 37 additions & 27 deletions
@@ -2326,35 +2326,17 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution. This function considers
- * only the last queueing instance of @work. If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
-	struct wq_barrier barr;
 
 	might_sleep();
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return 0;
+		return false;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -2367,26 +2349,54 @@ bool flush_work(struct work_struct *work)
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->gcwq))
 			goto already_gone;
-	} else {
+	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	}
+	} else
+		goto already_gone;
 
-	insert_wq_barrier(cwq, &barr, work, worker);
+	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
-
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
 	return false;
 }
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
+}
 EXPORT_SYMBOL_GPL(flush_work);
 
 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
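
To put the flush_work() kernel-doc above in caller terms: flush_work() waits only for the last queueing instance of the work item and returns whether it actually had to wait.  A minimal, hypothetical usage sketch (my_work, my_work_fn and my_example are made-up names, not part of this commit):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical caller-side example for illustration only. */
static void my_work_fn(struct work_struct *work)
{
	/* deferred processing runs in a workqueue worker */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_example(void)
{
	schedule_work(&my_work);	/* queue the work item */

	/*
	 * flush_work() returns %true if it waited for my_work to finish
	 * executing, %false if my_work was already idle.
	 */
	if (flush_work(&my_work))
		pr_debug("waited for my_work to finish\n");
}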
