Input: serio - convert to common workqueue instead of a thread
Instead of creating an exclusive thread to handle serio events (which
happen rarely), let's switch to using the common workqueue. With the
arrival of the concurrency-managed workqueue infrastructure we no longer
have to worry about our callers or callees also using the workqueue
(no deadlocks anymore), and it should reduce the total number of threads
in the system.

Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
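For context, the kthread-to-workqueue conversion pattern applied by this patch looks roughly like the sketch below. It is a minimal illustration, not the actual serio code: the demo_* names and the stripped-down event structure are stand-ins for serio_event, serio_event_list and friends, and error handling is trimmed to the essentials.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Illustrative event type; the real serio_event carries more state. */
struct demo_event {
	struct list_head node;
	int type;
};

static DEFINE_SPINLOCK(demo_event_lock);	/* protects demo_event_list */
static LIST_HEAD(demo_event_list);

/* Runs in process context on the shared kernel workqueue. */
static void demo_handle_events(struct work_struct *work)
{
	unsigned long flags;

	for (;;) {
		struct demo_event *event = NULL;

		spin_lock_irqsave(&demo_event_lock, flags);
		if (!list_empty(&demo_event_list)) {
			event = list_first_entry(&demo_event_list,
						 struct demo_event, node);
			list_del_init(&event->node);
		}
		spin_unlock_irqrestore(&demo_event_lock, flags);

		if (!event)
			break;

		/* ... act on event->type here ... */
		kfree(event);
	}
}

static DECLARE_WORK(demo_event_work, demo_handle_events);

/* Producer side: append the event and kick the shared workqueue. */
static int demo_queue_event(int type)
{
	struct demo_event *event;
	unsigned long flags;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->type = type;

	spin_lock_irqsave(&demo_event_lock, flags);
	list_add_tail(&event->node, &demo_event_list);
	schedule_work(&demo_event_work);
	spin_unlock_irqrestore(&demo_event_lock, flags);

	return 0;
}

Compared with the old kseriod thread, nothing has to be started at init time; the shared workqueue is always available, and the only teardown obligation is cancelling any pending work, which is what the patch does in serio_exit() with cancel_work_sync().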
Dmitry Torokhov committed Nov 18, 2010
1 parent ce16a47 commit 8ee294c
Showing 1 changed file with 69 additions and 86 deletions.
155 changes: 69 additions & 86 deletions drivers/input/serio/serio.c
@@ -32,10 +32,9 @@
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
@@ -44,7 +43,7 @@ MODULE_LICENSE("GPL");

/*
* serio_mutex protects entire serio subsystem and is taken every time
* serio port or driver registrered or unregistered.
* serio port or driver registered or unregistered.
*/
static DEFINE_MUTEX(serio_mutex);

@@ -165,58 +164,22 @@ struct serio_event {

static DEFINE_SPINLOCK(serio_event_lock); /* protects serio_event_list */
static LIST_HEAD(serio_event_list);
static DECLARE_WAIT_QUEUE_HEAD(serio_wait);
static struct task_struct *serio_task;

static int serio_queue_event(void *object, struct module *owner,
enum serio_event_type event_type)
static struct serio_event *serio_get_event(void)
{
struct serio_event *event = NULL;
unsigned long flags;
struct serio_event *event;
int retval = 0;

spin_lock_irqsave(&serio_event_lock, flags);

/*
* Scan event list for the other events for the same serio port,
* starting with the most recent one. If event is the same we
* do not need add new one. If event is of different type we
* need to add this event and should not look further because
* we need to preseve sequence of distinct events.
*/
list_for_each_entry_reverse(event, &serio_event_list, node) {
if (event->object == object) {
if (event->type == event_type)
goto out;
break;
}
}

event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
if (!event) {
pr_err("Not enough memory to queue event %d\n", event_type);
retval = -ENOMEM;
goto out;
}

if (!try_module_get(owner)) {
pr_warning("Can't get module reference, dropping event %d\n",
event_type);
kfree(event);
retval = -EINVAL;
goto out;
if (!list_empty(&serio_event_list)) {
event = list_first_entry(&serio_event_list,
struct serio_event, node);
list_del_init(&event->node);
}

event->type = event_type;
event->object = object;
event->owner = owner;

list_add_tail(&event->node, &serio_event_list);
wake_up(&serio_wait);

out:
spin_unlock_irqrestore(&serio_event_lock, flags);
return retval;
return event;
}

static void serio_free_event(struct serio_event *event)
@@ -250,25 +213,7 @@ static void serio_remove_duplicate_events(struct serio_event *event)
spin_unlock_irqrestore(&serio_event_lock, flags);
}


static struct serio_event *serio_get_event(void)
{
struct serio_event *event = NULL;
unsigned long flags;

spin_lock_irqsave(&serio_event_lock, flags);

if (!list_empty(&serio_event_list)) {
event = list_first_entry(&serio_event_list,
struct serio_event, node);
list_del_init(&event->node);
}

spin_unlock_irqrestore(&serio_event_lock, flags);
return event;
}

static void serio_handle_event(void)
static void serio_handle_event(struct work_struct *work)
{
struct serio_event *event;

@@ -307,6 +252,59 @@ static void serio_handle_event(void)
mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
enum serio_event_type event_type)
{
unsigned long flags;
struct serio_event *event;
int retval = 0;

spin_lock_irqsave(&serio_event_lock, flags);

/*
* Scan event list for the other events for the same serio port,
* starting with the most recent one. If event is the same we
* do not need add new one. If event is of different type we
* need to add this event and should not look further because
* we need to preseve sequence of distinct events.
*/
list_for_each_entry_reverse(event, &serio_event_list, node) {
if (event->object == object) {
if (event->type == event_type)
goto out;
break;
}
}

event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
if (!event) {
pr_err("Not enough memory to queue event %d\n", event_type);
retval = -ENOMEM;
goto out;
}

if (!try_module_get(owner)) {
pr_warning("Can't get module reference, dropping event %d\n",
event_type);
kfree(event);
retval = -EINVAL;
goto out;
}

event->type = event_type;
event->object = object;
event->owner = owner;

list_add_tail(&event->node, &serio_event_list);
schedule_work(&serio_event_work);

out:
spin_unlock_irqrestore(&serio_event_lock, flags);
return retval;
}

/*
* Remove all events that have been submitted for a given
* object, be it serio port or driver.
@@ -356,18 +354,6 @@ static struct serio *serio_get_pending_child(struct serio *parent)
return child;
}

static int serio_thread(void *nothing)
{
do {
serio_handle_event();
wait_event_interruptible(serio_wait,
kthread_should_stop() || !list_empty(&serio_event_list));
} while (!kthread_should_stop());

return 0;
}


/*
* Serio port operations
*/
@@ -1040,21 +1026,18 @@ static int __init serio_init(void)
return error;
}

serio_task = kthread_run(serio_thread, NULL, "kseriod");
if (IS_ERR(serio_task)) {
bus_unregister(&serio_bus);
error = PTR_ERR(serio_task);
pr_err("Failed to start kseriod, error: %d\n", error);
return error;
}

return 0;
}

static void __exit serio_exit(void)
{
bus_unregister(&serio_bus);
kthread_stop(serio_task);

/*
* There should not be any outstanding events but work may
* still be scheduled so simply cancel it.
*/
cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);