From 12b510aaff0a43c71e2c399f3c97b228553febfd Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 19 Nov 2009 18:10:39 +0000
Subject: [PATCH] --- yaml ---

r: 168815
b: refs/heads/master
c: 4d8bb2cbccf6dccaada509aafeb01c6205c9d8c4
h: refs/heads/master
i:
  168813: 92e24402c8184c9f231b4c1c25d23fbf7034e9be
  168811: f43aac991e0230fc85dcc96114da8c8d9c78589b
  168807: cc1162694c534df8409d80a997f0948d6744813a
  168799: 0f522de4a00acf384b7212b6c979143d458cb28e
v: v3
---
 [refs]                            |  2 +-
 trunk/Documentation/slow-work.txt |  2 +-
 trunk/kernel/slow-work.c          | 36 ++++++++++++++-----------------
 3 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/[refs] b/[refs]
index 94dfcc1904dc..bac4efd02452 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3d7a641e544e428191667e8b1f83f96fa46dbd65
+refs/heads/master: 4d8bb2cbccf6dccaada509aafeb01c6205c9d8c4
diff --git a/trunk/Documentation/slow-work.txt b/trunk/Documentation/slow-work.txt
index f12fda31dcdc..c655c517fc68 100644
--- a/trunk/Documentation/slow-work.txt
+++ b/trunk/Documentation/slow-work.txt
@@ -125,7 +125,7 @@ ITEM OPERATIONS
 ===============
 
 Each work item requires a table of operations of type struct slow_work_ops.
-All members are required:
+Only ->execute() is required, getting and putting of a reference are optional.
 
  (*) Get a reference on an item:
 
diff --git a/trunk/kernel/slow-work.c b/trunk/kernel/slow-work.c
index dd08f376e406..fccf421eb5c1 100644
--- a/trunk/kernel/slow-work.c
+++ b/trunk/kernel/slow-work.c
@@ -145,6 +145,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
 static int slow_work_user_count;
 static DEFINE_MUTEX(slow_work_user_lock);
 
+static inline int slow_work_get_ref(struct slow_work *work)
+{
+	if (work->ops->get_ref)
+		return work->ops->get_ref(work);
+
+	return 0;
+}
+
+static inline void slow_work_put_ref(struct slow_work *work)
+{
+	if (work->ops->put_ref)
+		work->ops->put_ref(work);
+}
+
 /*
  * Calculate the maximum number of active threads in the pool that are
  * permitted to process very slow work items.
@@ -248,7 +262,7 @@ static bool slow_work_execute(int id)
 	}
 
 	/* sort out the race between module unloading and put_ref() */
-	work->ops->put_ref(work);
+	slow_work_put_ref(work);
 
 #ifdef CONFIG_MODULES
 	module = slow_work_thread_processing[id];
@@ -309,7 +323,6 @@ int slow_work_enqueue(struct slow_work *work)
 	BUG_ON(slow_work_user_count <= 0);
 	BUG_ON(!work);
 	BUG_ON(!work->ops);
-	BUG_ON(!work->ops->get_ref);
 
 	/* when honouring an enqueue request, we only promise that we will run
 	 * the work function in the future; we do not promise to run it once
@@ -339,7 +352,7 @@ int slow_work_enqueue(struct slow_work *work)
 		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 		} else {
-			if (work->ops->get_ref(work) < 0)
+			if (slow_work_get_ref(work) < 0)
 				goto cant_get_ref;
 			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 				list_add_tail(&work->link, &vslow_work_queue);
@@ -479,21 +492,6 @@ static void slow_work_cull_timeout(unsigned long data)
 	wake_up(&slow_work_thread_wq);
 }
 
-/*
- * Get a reference on slow work thread starter
- */
-static int slow_work_new_thread_get_ref(struct slow_work *work)
-{
-	return 0;
-}
-
-/*
- * Drop a reference on slow work thread starter
- */
-static void slow_work_new_thread_put_ref(struct slow_work *work)
-{
-}
-
 /*
  * Start a new slow work thread
  */
@@ -529,8 +527,6 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 
 static const struct slow_work_ops slow_work_new_thread_ops = {
 	.owner = THIS_MODULE,
-	.get_ref = slow_work_new_thread_get_ref,
-	.put_ref = slow_work_new_thread_put_ref,
 	.execute = slow_work_new_thread_execute,
 };
 
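
Not part of the patch: for illustration, a minimal sketch of what the change permits on the caller
side. After this commit a slow-work user whose items need no reference counting can supply an ops
table containing only ->owner and ->execute. The names below (my_work, my_work_execute, my_init,
my_exit) are made up for the example, and the boilerplate assumes the slow-work API as it stands in
this tree (slow_work_init(), slow_work_enqueue(), and slow_work_register_user()/
slow_work_unregister_user() taking a struct module pointer, per the parent commit); treat it as a
sketch, not code from the patch.

/* Illustrative sketch only -- not from the patch; names are made up. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slow-work.h>

static struct slow_work my_work;

/* Runs in a slow-work thread; it may sleep. */
static void my_work_execute(struct slow_work *work)
{
	printk(KERN_INFO "example slow work item executed\n");
}

/*
 * No .get_ref/.put_ref: with this patch the wrappers treat a NULL
 * ->get_ref as success (0) and a NULL ->put_ref as a no-op.
 */
static const struct slow_work_ops my_work_ops = {
	.owner   = THIS_MODULE,
	.execute = my_work_execute,
};

static int __init my_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	slow_work_init(&my_work, &my_work_ops);

	ret = slow_work_enqueue(&my_work);
	if (ret < 0)
		slow_work_unregister_user(THIS_MODULE);
	return ret;
}

static void __exit my_exit(void)
{
	slow_work_unregister_user(THIS_MODULE);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

The point of interest is the ops table: before this patch, the now-removed
BUG_ON(!work->ops->get_ref) in slow_work_enqueue() would have tripped on such a table.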