Commit
---
yaml
---
r: 140189
b: refs/heads/master
c: 109d927
h: refs/heads/master
i:
  140187: 2b655ea
v: v3
David Howells committed Apr 3, 2009
1 parent ac1dc8a commit 98c97ba
Showing 2 changed files with 138 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 07fe7cb7c7c179f473fd9c823348fd3eb5dad369
+refs/heads/master: 109d9272c423f46604d45fedfe87e21ee0b25180
138 changes: 137 additions & 1 deletion trunk/kernel/slow-work.c
@@ -16,6 +16,14 @@
 #include <linux/wait.h>
 #include <asm/system.h>
 
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)  /* cull threads 5s after running out of
+                                          * things to do */
+#define SLOW_WORK_OOM_TIMEOUT  (5 * HZ)  /* can't start new threads for 5s after
+                                          * OOM */
+
+static void slow_work_cull_timeout(unsigned long);
+static void slow_work_oom_timeout(unsigned long);
+
 /*
  * The pool of threads has at least min threads in it as long as someone is
  * using the facility, and may have as many as max.
@@ -29,6 +37,12 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
 static atomic_t slow_work_thread_count;
 static atomic_t vslow_work_executing_count;
 
+static bool slow_work_may_not_start_new_thread;
+static bool slow_work_cull; /* cull a thread due to lack of activity */
+static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
+static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
+static struct slow_work slow_work_new_thread; /* new thread starter */
+
 /*
  * The queues of work items and the lock governing access to them. These are
  * shared between all the CPUs. It doesn't make sense to have per-CPU queues
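
The hunk above uses the 2009-era kernel timer API: DEFINE_TIMER(name, function, expires, data) statically initialises a struct timer_list whose handler takes an unsigned long cookie (later kernels, around v4.15, instead pass a struct timer_list *), and mod_timer() (re)arms it. As a rough user-space analogue of the cull timer, and purely as an illustration that is not part of the patch, here is a minimal sketch using POSIX timers (compile with -lrt on older glibc):

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile bool cull_pending;              /* analogue of slow_work_cull */

/* Analogue of slow_work_cull_timeout(): record that one worker may exit. */
static void cull_timeout(union sigval sv)
{
        (void)sv;
        cull_pending = true;
        printf("cull timer expired: one idle worker may exit\n");
}

int main(void)
{
        timer_t timer;
        struct sigevent sev;
        struct itimerspec when;

        memset(&sev, 0, sizeof(sev));
        sev.sigev_notify = SIGEV_THREAD;
        sev.sigev_notify_function = cull_timeout;
        timer_create(CLOCK_MONOTONIC, &sev, &timer);

        memset(&when, 0, sizeof(when));
        when.it_value.tv_sec = 5;               /* like SLOW_WORK_CULL_TIMEOUT */
        timer_settime(timer, 0, &when, NULL);   /* like mod_timer() */

        sleep(6);                               /* give the timer time to fire */
        return cull_pending ? 0 : 1;
}
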
@@ -89,6 +103,14 @@ static bool slow_work_execute(void)
 
         vsmax = slow_work_calc_vsmax();
 
+        /* see if we can schedule a new thread to be started if we're not
+         * keeping up with the work */
+        if (!waitqueue_active(&slow_work_thread_wq) &&
+            (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
+            atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
+            !slow_work_may_not_start_new_thread)
+                slow_work_enqueue(&slow_work_new_thread);
+
         /* find something to execute */
         spin_lock_irq(&slow_work_queue_lock);
         if (!list_empty(&vslow_work_queue) &&
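
The pool therefore only grows when all four conditions in the hunk above hold at once: no worker is idle on the wait queue, at least one of the two queues has work pending, the pool is below slow_work_max_threads, and the post-OOM suppression window is not in force. A condensed restatement as a stand-alone C program; every name below is an invented stub, not a kernel interface:

#include <stdbool.h>

/* Stubs standing in for the kernel state; all names here are invented. */
static bool all_workers_busy(void)  { return true;  } /* !waitqueue_active(...) */
static bool work_is_queued(void)    { return true;  } /* either queue non-empty */
static int  thread_count(void)      { return 2;     }
static int  max_threads(void)       { return 4;     }
static bool oom_suppressed(void)    { return false; } /* slow_work_may_not_start_new_thread */

/* Mirrors the condition guarding slow_work_enqueue(&slow_work_new_thread). */
static bool should_start_new_thread(void)
{
        return all_workers_busy() &&
               work_is_queued() &&
               thread_count() < max_threads() &&
               !oom_suppressed();
}

int main(void)
{
        return should_start_new_thread() ? 0 : 1;
}
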
@@ -242,6 +264,33 @@ int slow_work_enqueue(struct slow_work *work)
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+/*
+ * Worker thread culling algorithm
+ */
+static bool slow_work_cull_thread(void)
+{
+        unsigned long flags;
+        bool do_cull = false;
+
+        spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+        if (slow_work_cull) {
+                slow_work_cull = false;
+
+                if (list_empty(&slow_work_queue) &&
+                    list_empty(&vslow_work_queue) &&
+                    atomic_read(&slow_work_thread_count) >
+                    slow_work_min_threads) {
+                        mod_timer(&slow_work_cull_timer,
+                                  jiffies + SLOW_WORK_CULL_TIMEOUT);
+                        do_cull = true;
+                }
+        }
+
+        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+        return do_cull;
+}
+
 /*
  * Determine if there is slow work available for dispatch
  */
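
Note the handshake in slow_work_cull_thread() above: the timer callback merely sets slow_work_cull, and the worker that notices the flag consumes it, then re-checks under slow_work_queue_lock that both queues are still empty and that the pool is above slow_work_min_threads before volunteering to exit, re-arming the timer so idle threads die one per timeout rather than all at once. A user-space sketch of the same decide-under-lock pattern, assuming pthreads (the names are the sketch's own, not the kernel's):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cull = true;        /* set asynchronously by a timer */
static int  queued_items;       /* 0 == nothing left to do */
static int  nthreads = 3, min_threads = 2;

/* Returns true if the calling worker should exit.  Mirrors
 * slow_work_cull_thread(): consume the flag, then re-check the
 * state under the lock before committing to the cull. */
static bool cull_thread(void)
{
        bool do_cull = false;

        pthread_mutex_lock(&queue_lock);
        if (cull) {
                cull = false;   /* consume the request */
                if (queued_items == 0 && nthreads > min_threads) {
                        /* the kernel re-arms slow_work_cull_timer here,
                         * so idle threads die one per timeout */
                        nthreads--;
                        do_cull = true;
                }
        }
        pthread_mutex_unlock(&queue_lock);
        return do_cull;
}

int main(void)
{
        return cull_thread() ? 0 : 1;
}
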
@@ -273,7 +322,8 @@ static int slow_work_thread(void *_data)
                                   TASK_INTERRUPTIBLE);
                 if (!freezing(current) &&
                     !slow_work_threads_should_exit &&
-                    !slow_work_available(vsmax))
+                    !slow_work_available(vsmax) &&
+                    !slow_work_cull)
                         schedule();
                 finish_wait(&slow_work_thread_wq, &wait);
 
@@ -285,18 +335,98 @@
 
                 if (slow_work_available(vsmax) && slow_work_execute()) {
                         cond_resched();
+                        if (list_empty(&slow_work_queue) &&
+                            list_empty(&vslow_work_queue) &&
+                            atomic_read(&slow_work_thread_count) >
+                            slow_work_min_threads)
+                                mod_timer(&slow_work_cull_timer,
+                                          jiffies + SLOW_WORK_CULL_TIMEOUT);
                         continue;
                 }
 
                 if (slow_work_threads_should_exit)
                         break;
+
+                if (slow_work_cull && slow_work_cull_thread())
+                        break;
         }
 
         if (atomic_dec_and_test(&slow_work_thread_count))
                 complete_and_exit(&slow_work_last_thread_exited, 0);
         return 0;
 }
 
+/*
+ * Handle thread cull timer expiration
+ */
+static void slow_work_cull_timeout(unsigned long data)
+{
+        slow_work_cull = true;
+        wake_up(&slow_work_thread_wq);
+}
+
+/*
+ * Get a reference on slow work thread starter
+ */
+static int slow_work_new_thread_get_ref(struct slow_work *work)
+{
+        return 0;
+}
+
+/*
+ * Drop a reference on slow work thread starter
+ */
+static void slow_work_new_thread_put_ref(struct slow_work *work)
+{
+}
+
+/*
+ * Start a new slow work thread
+ */
+static void slow_work_new_thread_execute(struct slow_work *work)
+{
+        struct task_struct *p;
+
+        if (slow_work_threads_should_exit)
+                return;
+
+        if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
+                return;
+
+        if (!mutex_trylock(&slow_work_user_lock))
+                return;
+
+        slow_work_may_not_start_new_thread = true;
+        atomic_inc(&slow_work_thread_count);
+        p = kthread_run(slow_work_thread, NULL, "kslowd");
+        if (IS_ERR(p)) {
+                printk(KERN_DEBUG "Slow work thread pool: OOM\n");
+                if (atomic_dec_and_test(&slow_work_thread_count))
+                        BUG(); /* we're running on a slow work thread... */
+                mod_timer(&slow_work_oom_timer,
+                          jiffies + SLOW_WORK_OOM_TIMEOUT);
+        } else {
+                /* ratelimit the starting of new threads */
+                mod_timer(&slow_work_oom_timer, jiffies + 1);
+        }
+
+        mutex_unlock(&slow_work_user_lock);
+}
+
+static const struct slow_work_ops slow_work_new_thread_ops = {
+        .get_ref = slow_work_new_thread_get_ref,
+        .put_ref = slow_work_new_thread_put_ref,
+        .execute = slow_work_new_thread_execute,
+};
+
+/*
+ * post-OOM new thread start suppression expiration
+ */
+static void slow_work_oom_timeout(unsigned long data)
+{
+        slow_work_may_not_start_new_thread = false;
+}
+
 /**
  * slow_work_register_user - Register a user of the facility
  *
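
One detail of slow_work_new_thread_execute() above is easy to miss: slow_work_may_not_start_new_thread is raised before kthread_run(), and the same flag-and-timer pair then serves two purposes. On failure, the OOM timer keeps the suppression up for a full SLOW_WORK_OOM_TIMEOUT; on success, it is armed for a single jiffy, so the flag merely ratelimits how fast the pool can grow. A compressed user-space restatement, where spawn_worker() and arm_timer_ms() are stand-ins invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

#define OOM_TIMEOUT_MS 5000

static bool may_not_start_new_thread;

/* Stand-ins for kthread_run() and mod_timer(); both names are invented. */
static bool spawn_worker(void)   { return true; }
static void arm_timer_ms(int ms) { printf("timer armed for %d ms\n", ms); }

static void start_new_thread(void)
{
        may_not_start_new_thread = true; /* suppress until the timer clears it */
        if (!spawn_worker()) {
                /* OOM: keep the suppression up for the whole window */
                arm_timer_ms(OOM_TIMEOUT_MS);
        } else {
                /* success: minimal expiry, so this only ratelimits growth */
                arm_timer_ms(1);
        }
        /* the timer handler's sole job: may_not_start_new_thread = false; */
}

int main(void)
{
        start_new_thread();
        return 0;
}
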
@@ -316,6 +446,10 @@ int slow_work_register_user(void)
         init_completion(&slow_work_last_thread_exited);
 
         slow_work_threads_should_exit = false;
+        slow_work_init(&slow_work_new_thread,
+                       &slow_work_new_thread_ops);
+        slow_work_may_not_start_new_thread = false;
+        slow_work_cull = false;
 
         /* start the minimum number of threads */
         for (loop = 0; loop < slow_work_min_threads; loop++) {
@@ -369,6 +503,8 @@ void slow_work_unregister_user(void)
                        " Shut down complete\n");
         }
 
+        del_timer_sync(&slow_work_cull_timer);
+
         mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
