lockdep: lock_set_subclass - reset a held lock's subclass
This can be used to reset a held lock's subclass, for arbitrary-depth
iterated data structures such as trees or lists that have per-node
locks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Aug 11, 2008
1 parent 5e710e3 commit 64aa348
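
To make the intended use concrete, here is a minimal, hypothetical sketch of the pattern this API enables; it is not part of this commit. In hand-over-hand traversal of a list with per-node locks, each next node's lock is acquired in a nested subclass so lockdep does not flag it against the still-held previous lock, and is then reset to subclass 0 once it is the only node lock held. The struct node type and walk_nodes() helper are invented for illustration; spin_lock_nested(), SINGLE_DEPTH_NESTING and _RET_IP_ are pre-existing kernel facilities.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical node type, for illustration only. */
struct node {
	spinlock_t	lock;
	struct node	*next;
};

/*
 * Hand-over-hand traversal: at most two node locks are held at any
 * time. Without the lock_set_subclass() downgrade, every lock after
 * the first would stay in SINGLE_DEPTH_NESTING, and the following
 * spin_lock_nested() would look like same-subclass recursion to
 * lockdep.
 */
static void walk_nodes(struct node *head)
{
	struct node *cur = head, *next;

	spin_lock(&cur->lock);		/* subclass 0 */
	while ((next = cur->next) != NULL) {
		spin_lock_nested(&next->lock, SINGLE_DEPTH_NESTING);
		spin_unlock(&cur->lock);
		/* now the only held node lock: downgrade to subclass 0 */
		lock_set_subclass(&next->lock.dep_map, 0, _RET_IP_);
		cur = next;
	}
	spin_unlock(&cur->lock);
}

Note that lock_set_subclass() only rewrites the lockdep annotation of a lock that is already held: as the kernel/lockdep.c hunk below shows, it rewinds the held-lock stack to the matching entry, re-registers the lock's class under the new subclass, and re-acquires the stacked entries for revalidation, without touching the underlying lock.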
Showing 2 changed files with 73 additions and 0 deletions.
include/linux/lockdep.h: 4 additions, 0 deletions
@@ -300,6 +300,9 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+			      unsigned long ip);
+
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -316,6 +319,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, i)		do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
kernel/lockdep.c: 69 additions, 0 deletions
@@ -2660,6 +2660,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+		    unsigned int subclass, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class *class;
+	unsigned int depth;
+	int i;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return 0;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+	class = register_lock_class(lock, subclass, 0);
+	hlock->class = class;
+
+	curr->lockdep_depth = i;
+	curr->curr_chain_key = hlock->prev_chain_key;
+
+	for (; i < depth; i++) {
+		hlock = curr->held_locks + i;
+		if (!__lock_acquire(hlock->instance,
+				hlock->class->subclass, hlock->trylock,
+				hlock->read, hlock->check, hlock->hardirqs_off,
+				hlock->acquire_ip))
+			return 0;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+		return 0;
+	return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2824,6 +2873,26 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+		  unsigned int subclass, unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	current->lockdep_recursion = 1;
+	check_flags(flags);
+	if (__lock_set_subclass(lock, subclass, ip))
+		check_chain_key(current);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
