
Commit b13553c

---
yaml
---
r: 213223
b: refs/heads/master
c: baf5902
h: refs/heads/master
i:
  213221: a0d04d2
  213219: 5f661f6
  213215: d2dea68
v: v3
Tejun Heo committed Sep 19, 2010
1 parent b06dca6 commit b13553c
Showing 2 changed files with 38 additions and 28 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 401a8d048eadfbe1b1c1bf53d3b614fcc894c61a
+refs/heads/master: baf59022c37d43f202e62d5130e4bac5e825b426
64 changes: 37 additions & 27 deletions trunk/kernel/workqueue.c
@@ -2326,35 +2326,17 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution. This function considers
- * only the last queueing instance of @work. If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
-	struct wq_barrier barr;
 
 	might_sleep();
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return 0;
+		return false;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -2367,26 +2349,54 @@ bool flush_work(struct work_struct *work)
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->gcwq))
 			goto already_gone;
-	} else {
+	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	}
+	} else
+		goto already_gone;
 
-	insert_wq_barrier(cwq, &barr, work, worker);
+	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
 
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
 	return false;
 }
 
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
+}
 EXPORT_SYMBOL_GPL(flush_work);
 
 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
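For context, here is a minimal, self-contained usage sketch (not part of this commit) illustrating the flush_work() semantics documented in the comment above. The demo_* names are hypothetical; DECLARE_WORK(), schedule_work() and flush_work() are the real workqueue APIs.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical demo module; only the workqueue calls are real APIs. */
static void demo_func(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_func);

static int __init demo_init(void)
{
	schedule_work(&demo_work);	/* queue on the system workqueue */

	/* Waits for the last queueing instance of demo_work. */
	if (flush_work(&demo_work))
		pr_info("flush_work() waited for demo_work\n");
	else
		pr_info("demo_work was already idle\n");
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);	/* ensure nothing is left running */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that flush_work() passes wait_executing=true to start_flush_work(), so it also waits for an already-running instance; the wait_executing=false path, visible in the diff, is left for callers that only need to wait for pending (queued but not yet running) work.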
