
Commit

---
yaml
---
r: 69118
b: refs/heads/master
c: 8cbbe86
h: refs/heads/master
v: v3
Andi Kleen authored and Ingo Molnar committed Oct 15, 2007
1 parent bd8aba1 · commit 7f56bf9
Showing 2 changed files with 50 additions and 140 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3a5c359a58c39801d838c508f127bdb228af28b0
+refs/heads/master: 8cbbe86dfcfd68ad69916164bdc838d9e09adca8
trunk/kernel/sched.c: 188 changes (49 additions, 139 deletions)
@@ -3697,206 +3697,116 @@ void fastcall complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
-void fastcall __sched wait_for_completion(struct completion *x)
+static inline long __sched
+do_wait_for_common(struct completion *x, long timeout, int state)
 {
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
 
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			__set_current_state(TASK_UNINTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			schedule();
-			spin_lock_irq(&x->wait.lock);
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-	spin_unlock_irq(&x->wait.lock);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-unsigned long fastcall __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
-{
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			__set_current_state(TASK_UNINTERRUPTIBLE);
+			if (state == TASK_INTERRUPTIBLE &&
+			    signal_pending(current)) {
+				__remove_wait_queue(&x->wait, &wait);
+				return -ERESTARTSYS;
+			}
+			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
 			if (!timeout) {
 				__remove_wait_queue(&x->wait, &wait);
-				goto out;
+				return timeout;
 			}
 		} while (!x->done);
 		__remove_wait_queue(&x->wait, &wait);
 	}
 	x->done--;
-out:
-	spin_unlock_irq(&x->wait.lock);
 	return timeout;
 }
-EXPORT_SYMBOL(wait_for_completion_timeout);
 
-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
 {
-	int ret = 0;
-
 	might_sleep();
 
 	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			if (signal_pending(current)) {
-				ret = -ERESTARTSYS;
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-			__set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			schedule();
-			spin_lock_irq(&x->wait.lock);
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-out:
+	timeout = do_wait_for_common(x, timeout, state);
 	spin_unlock_irq(&x->wait.lock);
+	return timeout;
+}
 
-	return ret;
+void fastcall __sched wait_for_completion(struct completion *x)
+{
+	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible);
+EXPORT_SYMBOL(wait_for_completion);
 
 unsigned long fastcall __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
-					  unsigned long timeout)
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			if (signal_pending(current)) {
-				timeout = -ERESTARTSYS;
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-			__set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			timeout = schedule_timeout(timeout);
-			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-out:
-	spin_unlock_irq(&x->wait.lock);
-	return timeout;
+	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+EXPORT_SYMBOL(wait_for_completion_timeout);
 
-static inline void
-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+int __sched wait_for_completion_interruptible(struct completion *x)
 {
-	spin_lock_irqsave(&q->lock, *flags);
-	__add_wait_queue(q, wait);
-	spin_unlock(&q->lock);
+	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible);
 
-static inline void
-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+unsigned long fastcall __sched
+wait_for_completion_interruptible_timeout(struct completion *x,
+					  unsigned long timeout)
 {
-	spin_lock_irq(&q->lock);
-	__remove_wait_queue(q, wait);
-	spin_unlock_irqrestore(&q->lock, *flags);
+	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
+static long __sched
+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
 	unsigned long flags;
 	wait_queue_t wait;
 
 	init_waitqueue_entry(&wait, current);
 
-	current->state = TASK_INTERRUPTIBLE;
+	__set_current_state(state);
 
-	sleep_on_head(q, &wait, &flags);
-	schedule();
-	sleep_on_tail(q, &wait, &flags);
+	spin_lock_irqsave(&q->lock, flags);
+	__add_wait_queue(q, &wait);
+	spin_unlock(&q->lock);
+	timeout = schedule_timeout(timeout);
+	spin_lock_irq(&q->lock);
+	__remove_wait_queue(q, &wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return timeout;
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
 long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_INTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	timeout = schedule_timeout(timeout);
-	sleep_on_tail(q, &wait, &flags);
-
-	return timeout;
+	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
 void __sched sleep_on(wait_queue_head_t *q)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_UNINTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	schedule();
-	sleep_on_tail(q, &wait, &flags);
+	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(sleep_on);
 
 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_UNINTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	timeout = schedule_timeout(timeout);
-	sleep_on_tail(q, &wait, &flags);
-
-	return timeout;
+	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(sleep_on_timeout);
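
For context: after this commit every wait_for_completion*() variant is a thin wrapper that passes a (timeout, state) pair to wait_for_common(), and every sleep_on*() variant does the same through sleep_on_common(). Below is a minimal, hypothetical caller-side sketch, not part of the commit; the setup_done completion and the my_driver_* functions are invented for illustration of how the exported wrappers are typically used.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical example: one context signals completion, another waits. */
static DECLARE_COMPLETION(setup_done);

/* Runs in the context that finishes the work. */
static void my_driver_setup_finished(void)
{
	complete(&setup_done);	/* wakes one waiter queued in do_wait_for_common() */
}

/* Runs in the context that must wait for the work. */
static int my_driver_wait_for_setup(void)
{
	unsigned long left;

	/*
	 * Uninterruptible wait with a 5 second timeout; after this commit it
	 * reduces to wait_for_common(&setup_done, 5 * HZ, TASK_UNINTERRUPTIBLE).
	 * A return value of 0 means the timeout expired before complete() ran.
	 */
	left = wait_for_completion_timeout(&setup_done, 5 * HZ);
	if (!left)
		return -ETIMEDOUT;

	return 0;
}

Plain wait_for_completion() maps to the same helper with MAX_SCHEDULE_TIMEOUT and TASK_UNINTERRUPTIBLE, while the *_interruptible variants pass TASK_INTERRUPTIBLE so that do_wait_for_common() can return -ERESTARTSYS when a signal is pending.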

