
Commit

---
---
r: 168816
b: refs/heads/master
c: 0160950
h: refs/heads/master
v: v3
Jens Axboe authored and David Howells committed Nov 19, 2009
1 parent 12b510a commit fb5c99d
Showing 4 changed files with 89 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 4d8bb2cbccf6dccaada509aafeb01c6205c9d8c4
refs/heads/master: 0160950297c08f8233c89b9f9e7dd59cfb080809
12 changes: 11 additions & 1 deletion trunk/Documentation/slow-work.txt
@@ -108,7 +108,17 @@ on the item, 0 otherwise.


The items are reference counted, so there ought to be no need for a flush
operation. When all a module's slow work items have been processed, and the
operation. But as the reference counting is optional, a means to cancel
existing work items is also included:

slow_work_cancel(&myitem);

can be used to cancel pending work. The cancel function waits for any
currently executing work to complete (or prevents it from being executed at
all, depending on timing).


When all a module's slow work items have been processed, and the
module has no further interest in the facility, it should unregister its
interest:

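For illustration only (not part of this commit): a minimal sketch of how a module might use the API described above. It relies only on the calls documented in slow-work.txt and declared in the header below; the names my_item, my_ops, my_execute, my_init and my_exit are invented, and because reference counting is optional only the execute callback is supplied.

#include <linux/module.h>
#include <linux/slow-work.h>

static void my_execute(struct slow_work *work)
{
	/* perform the slow operation here */
}

static const struct slow_work_ops my_ops = {
	.execute = my_execute,	/* get_ref/put_ref omitted: refcounting is optional */
};

static struct slow_work my_item;

static int __init my_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	slow_work_init(&my_item, &my_ops);
	ret = slow_work_enqueue(&my_item);
	if (ret < 0)
		slow_work_unregister_user(THIS_MODULE);
	return ret;
}

static void __exit my_exit(void)
{
	/* prevents execution, or waits for the item if it is already running */
	slow_work_cancel(&my_item);
	slow_work_unregister_user(THIS_MODULE);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");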
2 changes: 2 additions & 0 deletions trunk/include/linux/slow-work.h
@@ -51,6 +51,7 @@ struct slow_work {
#define SLOW_WORK_EXECUTING 1 /* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
const struct slow_work_ops *ops; /* operations table for this item */
struct list_head link; /* link in queue */
};
@@ -88,6 +89,7 @@ static inline void vslow_work_init(struct slow_work *work,
}

extern int slow_work_enqueue(struct slow_work *work);
extern void slow_work_cancel(struct slow_work *work);
extern int slow_work_register_user(struct module *owner);
extern void slow_work_unregister_user(struct module *owner);

81 changes: 75 additions & 6 deletions trunk/kernel/slow-work.c
@@ -236,12 +236,17 @@ static bool slow_work_execute(int id)
if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
BUG();

work->ops->execute(work);
/* don't execute if the work is in the process of being cancelled */
if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
work->ops->execute(work);

if (very_slow)
atomic_dec(&vslow_work_executing_count);
clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

/* wake up anyone waiting for this work to be complete */
wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);

/* if someone tried to enqueue the item whilst we were executing it,
* then it'll be left unenqueued to avoid multiple threads trying to
* execute it simultaneously
@@ -314,11 +319,16 @@ static bool slow_work_execute(int id)
* allowed to pick items to execute. This ensures that very slow items won't
* overly block ones that are just ordinarily slow.
*
* Returns 0 if successful, -EAGAIN if not.
* Returns 0 if successful, -EAGAIN if not (or -ECANCELED if an attempt is made
* to queue work that is being cancelled)
*/
int slow_work_enqueue(struct slow_work *work)
{
unsigned long flags;
int ret;

if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
return -ECANCELED;

BUG_ON(slow_work_user_count <= 0);
BUG_ON(!work);
@@ -335,6 +345,9 @@ int slow_work_enqueue(struct slow_work *work)
if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
spin_lock_irqsave(&slow_work_queue_lock, flags);

if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
goto cancelled;

/* we promise that we will not attempt to execute the work
* function in more than one thread simultaneously
*
@@ -352,8 +365,9 @@ int slow_work_enqueue(struct slow_work *work)
if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
} else {
if (slow_work_get_ref(work) < 0)
goto cant_get_ref;
ret = slow_work_get_ref(work);
if (ret < 0)
goto failed;
if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
list_add_tail(&work->link, &vslow_work_queue);
else
@@ -365,12 +379,67 @@ int slow_work_enqueue(struct slow_work *work)
}
return 0;

cant_get_ref:
cancelled:
ret = -ECANCELED;
failed:
spin_unlock_irqrestore(&slow_work_queue_lock, flags);
return -EAGAIN;
return ret;
}
EXPORT_SYMBOL(slow_work_enqueue);

static int slow_work_wait(void *word)
{
schedule();
return 0;
}

/**
* slow_work_cancel - Cancel a slow work item
* @work: The work item to cancel
*
* This function will cancel a previously enqueued work item. If we cannot
cancel the work item, it is guaranteed to have run when this function
* returns.
*/
void slow_work_cancel(struct slow_work *work)
{
bool wait = true, put = false;

set_bit(SLOW_WORK_CANCELLING, &work->flags);

spin_lock_irq(&slow_work_queue_lock);

if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
!list_empty(&work->link)) {
/* the link in the pending queue holds a reference on the item
* that we will need to release */
list_del_init(&work->link);
wait = false;
put = true;
clear_bit(SLOW_WORK_PENDING, &work->flags);

} else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
/* the executor is holding our only reference on the item, so
* we merely need to wait for it to finish executing */
clear_bit(SLOW_WORK_PENDING, &work->flags);
}

spin_unlock_irq(&slow_work_queue_lock);

/* the EXECUTING flag is set by the executor whilst the spinlock is held
* and before the item is dequeued - so assuming the above doesn't
* actually dequeue it, simply waiting for the EXECUTING flag to be
* released here should be sufficient */
if (wait)
wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
TASK_UNINTERRUPTIBLE);

clear_bit(SLOW_WORK_CANCELLING, &work->flags);
if (put)
slow_work_put_ref(work);
}
EXPORT_SYMBOL(slow_work_cancel);

/*
* Schedule a cull of the thread pool at some time in the near future
*/
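A hedged sketch of caller-side handling for the new enqueue return value (hypothetical helper, not from this patch): with this change slow_work_enqueue() can fail with -ECANCELED when it races with slow_work_cancel(), in addition to the existing reference-acquisition failure path.

/* Hypothetical caller: requeue an item another thread may be cancelling. */
static int my_requeue(struct slow_work *work)
{
	int ret = slow_work_enqueue(work);

	if (ret == -ECANCELED) {
		/* slow_work_cancel() got in first; nothing further to do */
		return 0;
	}
	if (ret < 0) {
		/* could not get a reference on the item; back off and retry */
		return ret;
	}
	return 0;
}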
