locking, kprobes: Annotate the hash locks and kretprobe.lock as raw
The kprobe locks can be taken in atomic context and therefore
must not become sleeping locks on -rt - annotate them as raw.

In mainline this change merely documents the low-level nature of
these locks - otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Thomas Gleixner authored and Ingo Molnar committed Sep 13, 2011
1 parent 76bf687 commit ec48460
Showing 2 changed files with 18 additions and 18 deletions.
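
Background, not part of the commit: on PREEMPT_RT a plain spinlock_t is substituted by a sleeping rtmutex-based lock, while a raw_spinlock_t always remains a true spinning lock and may therefore be taken where sleeping is forbidden. Kprobe and kretprobe handlers run in breakpoint (trap) context, which is atomic, hence the annotation below. A minimal sketch of the pattern; the demo_* names are hypothetical:

/*
 * Hypothetical illustration only - not from this commit.
 * A raw_spinlock_t stays a spinning lock even on PREEMPT_RT, so it
 * is safe to take from atomic context; a plain spinlock_t would be
 * converted to a sleeping lock there and must not be used this way.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical */
static int demo_hits;			/* hypothetical */

/* Safe to call from atomic context: only raw_spin_* primitives. */
static void demo_count_hit(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_hits++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}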
2 changes: 1 addition & 1 deletion include/linux/kprobes.h
@@ -181,7 +181,7 @@ struct kretprobe {
 	int nmissed;
 	size_t data_size;
 	struct hlist_head free_instances;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
34 changes: 17 additions & 17 deletions kernel/kprobes.c
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
 	return &(kretprobe_table_locks[hash].lock);
 }
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 	hlist_del(&ri->hlist);
 	INIT_HLIST_NODE(&ri->hlist);
 	if (likely(rp)) {
-		spin_lock(&rp->lock);
+		raw_spin_lock(&rp->lock);
 		hlist_add_head(&ri->hlist, &rp->free_instances);
-		spin_unlock(&rp->lock);
+		raw_spin_unlock(&rp->lock);
 	} else
 		/* Unregistering */
 		hlist_add_head(&ri->hlist, head);
@@ -1026,38 +1026,38 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 __acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	*head = &kretprobe_inst_table[hash];
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 	unsigned long *flags)
 __acquires(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	unsigned long *flags)
 __releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
 	unsigned long *flags)
 __releases(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
 	hash = hash_ptr(current, KPROBE_HASH_BITS);
-	spin_lock_irqsave(&rp->lock, flags);
+	raw_spin_lock_irqsave(&rp->lock, flags);
 	if (!hlist_empty(&rp->free_instances)) {
 		ri = hlist_entry(rp->free_instances.first,
 				struct kretprobe_instance, hlist);
 		hlist_del(&ri->hlist);
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 
 		ri->rp = rp;
 		ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		kretprobe_table_unlock(hash, &flags);
 	} else {
 		rp->nmissed++;
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 	}
 	return 0;
 }
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 		rp->maxactive = num_possible_cpus();
 #endif
 	}
-	spin_lock_init(&rp->lock);
+	raw_spin_lock_init(&rp->lock);
 	INIT_HLIST_HEAD(&rp->free_instances);
 	for (i = 0; i < rp->maxactive; i++) {
 		inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-		spin_lock_init(&(kretprobe_table_locks[i].lock));
+		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
 	}
 
 	/*
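
For context on how these locks are exercised at runtime, here is a minimal kretprobe user in the style of samples/kprobes/kretprobe_example.c: register_kretprobe() initializes rp->lock via the raw_spin_lock_init() shown above, and the return handler then runs under the (now raw) kretprobe hash locks in atomic context. The demo_* names and the probed symbol are illustrative assumptions, not part of the commit:

/* Hypothetical example module - not from this commit. */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Runs on return from the probed function, in atomic context. */
static int demo_ret_handler(struct kretprobe_instance *ri,
			    struct pt_regs *regs)
{
	pr_info("probed function returned %ld\n",
		regs_return_value(regs));
	return 0;
}

static struct kretprobe demo_rp = {
	.handler	= demo_ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumed probe target */
	.maxactive	= 20,		/* max concurrent instances */
};

static int __init demo_init(void)
{
	/* Takes the path through raw_spin_lock_init(&rp->lock). */
	return register_kretprobe(&demo_rp);
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&demo_rp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");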
