Revert "workqueue: allow work_on_cpu() to be called recursively"
This reverts commit c2fda50.

c2fda50 removed the lockdep annotation from work_on_cpu() to work around
the PCI path that calls work_on_cpu() from within a work_on_cpu() work item
(PF driver .probe() method -> pci_enable_sriov() -> add VFs -> VF driver
.probe() method).

961da7fb6b22 ("PCI: Avoid unnecessary CPU switch when calling driver
.probe() method") avoids that recursive work_on_cpu() use in a different
way, so this revert restores the work_on_cpu() lockdep annotation.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Tejun Heo <tj@kernel.org>
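
For context, the sketch below (an approximation, not the verbatim patch from the PCI commit referenced above) illustrates how the recursion is avoided on the PCI side: the probe path only hops to another CPU when the calling task is not already on the device's NUMA node, and because a VF sits on the same node as its PF, the nested VF .probe() runs locally instead of re-entering work_on_cpu(). The helper names (local_pci_probe(), struct drv_dev_and_id) follow drivers/pci/pci-driver.c and are assumed to be in scope.

/*
 * Rough sketch of the PCI-side fix (details approximate): switch CPUs
 * only when we are not already on the device's NUMA node, so a nested
 * VF probe never stacks a second work_on_cpu() on top of the PF's.
 */
static int pci_call_probe_sketch(struct pci_driver *drv, struct pci_dev *dev,
				 const struct pci_device_id *id)
{
	struct drv_dev_and_id ddi = { drv, dev, id };
	int node = dev_to_node(&dev->dev);
	int error;

	if (node >= 0 && node != numa_node_id()) {
		int cpu;

		get_online_cpus();
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
		if (cpu < nr_cpu_ids)
			error = work_on_cpu(cpu, local_pci_probe, &ddi);
		else
			error = local_pci_probe(&ddi);
		put_online_cpus();
	} else {
		error = local_pci_probe(&ddi);
	}
	return error;
}

With nested probes no longer re-entering work_on_cpu(), flush_work() can keep the lock_map_acquire()/lock_map_release() annotation that this revert restores below.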
Bjorn Helgaas committed Nov 25, 2013
1 parent 12c3156 commit 12997d1
Showing 1 changed file with 10 additions and 22 deletions.

kernel/workqueue.c
@@ -2840,19 +2840,6 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-	struct wq_barrier barr;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+	struct wq_barrier barr;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	return __flush_work(work);
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
 }
 EXPORT_SYMBOL_GPL(flush_work);

@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-
-	/*
-	 * The work item is on-stack and can't lead to deadlock through
-	 * flushing. Use __flush_work() to avoid spurious lockdep warnings
-	 * when work_on_cpu()s are nested.
-	 */
-	__flush_work(&wfc.work);
-
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
