
Commit 7321f23

---
r: 43324
b: refs/heads/master
c: 68380b5
h: refs/heads/master
v: v3

Linus Torvalds committed Dec 7, 2006
1 parent 287a436 commit 7321f23
Showing 4 changed files with 76 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2fd8507d14ef7af3ae05316b3277044cf6daa381
+refs/heads/master: 68380b581383c028830f79ec2670f4a193854aa6
3 changes: 1 addition & 2 deletions trunk/drivers/net/phy/phy.c
@@ -587,8 +587,7 @@ int phy_stop_interrupts(struct phy_device *phydev)
          * Finish any pending work; we might have been scheduled
          * to be called from keventd ourselves, though.
          */
-        if (!current_is_keventd())
-                flush_scheduled_work();
+        run_scheduled_work(&phydev->phy_queue);
 
         free_irq(phydev->irq, phydev);
 
1 change: 1 addition & 0 deletions trunk/include/linux/workqueue.h
@@ -162,6 +162,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
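With the declaration in place next to schedule_work(), a caller can queue a work item as usual and later force any still-pending instance of it to run synchronously. A minimal sketch of that pairing follows; the my_work_fn / my_work / my_device_event / my_device_stop names are hypothetical, and only the workqueue calls themselves are the interface added or used by this commit (2006-era work_struct API assumed):

#include <linux/workqueue.h>

/* Hypothetical deferred handler -- illustrative only, not part of this commit. */
static void my_work_fn(struct work_struct *work)
{
        /* processing that was deferred to keventd, e.g. from an interrupt path */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_device_event(void)
{
        schedule_work(&my_work);        /* defer to keventd as usual */
}

static void my_device_stop(void)
{
        int ran;

        /*
         * Run any pending instance of my_work right here, synchronously.
         * Unlike flush_scheduled_work(), this stays safe even when
         * my_device_stop() is itself being executed from keventd.
         */
        ran = run_scheduled_work(&my_work);
        /* ran == 1: the pending work was executed here; ran == 0: nothing was pending */
}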
73 changes: 73 additions & 0 deletions trunk/kernel/workqueue.c
@@ -108,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
         return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+        int ret = 0;
+        unsigned long flags;
+
+        spin_lock_irqsave(&cwq->lock, flags);
+        /*
+         * We need to re-validate the work info after we've gotten
+         * the cpu_workqueue lock. We can run the work now iff:
+         *
+         *  - the wq_data still matches the cpu_workqueue_struct
+         *  - AND the work is still marked pending
+         *  - AND the work is still on a list (which will be this
+         *    workqueue_struct list)
+         *
+         * All these conditions are important, because we
+         * need to protect against the work being run right
+         * now on another CPU (all but the last one might be
+         * true if it's currently running and has not been
+         * released yet, for example).
+         */
+        if (get_wq_data(work) == cwq
+            && work_pending(work)
+            && !list_empty(&work->entry)) {
+                work_func_t f = work->func;
+                list_del_init(&work->entry);
+                spin_unlock_irqrestore(&cwq->lock, flags);
+
+                if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+                        work_release(work);
+                f(work);
+
+                spin_lock_irqsave(&cwq->lock, flags);
+                cwq->remove_sequence++;
+                wake_up(&cwq->work_done);
+                ret = 1;
+        }
+        spin_unlock_irqrestore(&cwq->lock, flags);
+        return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+        for (;;) {
+                struct cpu_workqueue_struct *cwq;
+
+                if (!work_pending(work))
+                        return 0;
+                if (list_empty(&work->entry))
+                        return 0;
+                /* NOTE! This depends intimately on __queue_work! */
+                cwq = get_wq_data(work);
+                if (!cwq)
+                        return 0;
+                if (__run_work(cwq, work))
+                        return 1;
+        }
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
                          struct work_struct *work)
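The "depends intimately on __queue_work" note refers to the ordering that queueing establishes: the per-CPU cpu_workqueue_struct pointer is stored into the work item's wq_data bits before the item is linked onto that queue's list, all under cwq->lock. The sketch below is a from-memory illustration of that invariant, not a quote of the file (the real __queue_work() sits just below this hunk); the set_wq_data() helper and fields such as worklist and more_work are assumptions about this era's kernel/workqueue.c:

/* Illustrative sketch only -- shown to make explicit what
 * run_scheduled_work() relies on when it reads the work item. */
static void __queue_work_sketch(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        set_wq_data(work, cwq);                      /* get_wq_data() now returns this cwq */
        list_add_tail(&work->entry, &cwq->worklist); /* work->entry is now non-empty       */
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

run_scheduled_work() re-validates exactly these facts under cwq->lock in __run_work() before calling the function, so its unlocked pre-checks act only as a fast-path filter.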
