Skip to content

Commit

Permalink
---
Browse the repository at this point in the history
yaml
---
r: 140512
b: refs/heads/master
c: 337fff8
h: refs/heads/master
v: v3
  • Loading branch information
Thomas Gleixner committed Mar 17, 2009
1 parent 56ddcb7 commit ec507fb
Show file tree
Hide file tree
Showing 2 changed files with 42 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 1be1cb7b47f0744141ed61cdb25648819ae1a56f
refs/heads/master: 337fff8b5ed0573ea106491c6de47bd7fe623500
53 changes: 41 additions & 12 deletions trunk/lib/debugobjects.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@ static int debug_objects_enabled __read_mostly

static struct debug_obj_descr *descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
debug_objects_enabled = 1;
Expand Down Expand Up @@ -154,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
}

/*
* Put the object back into the pool or give it back to kmem_cache:
* workqueue function to free objects.
*/
static void free_object(struct debug_obj *obj)
static void free_obj_work(struct work_struct *work)
{
unsigned long idx = (unsigned long)(obj - obj_static_pool);
struct debug_obj *obj;
unsigned long flags;

if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
spin_unlock_irqrestore(&pool_lock, flags);
} else {
spin_lock_irqsave(&pool_lock, flags);
obj_pool_used--;
spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
obj_pool_free--;
/*
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
spin_lock_irqsave(&pool_lock, flags);
}
spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 *
 * Objects are always returned to the pool here; the actual
 * kmem_cache_free() of surplus objects is deferred to debug_obj_work
 * (a workqueue item), so this function stays usable from contexts
 * where freeing directly would be inappropriate.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;	/* set when the free worker needs to be kicked */

	spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		/* work_pending() avoids queueing the work item twice */
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	spin_unlock_irqrestore(&pool_lock, flags);
	/* schedule_work() is called only after pool_lock is dropped */
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
Expand Down

0 comments on commit ec507fb

Please sign in to comment.