Skip to content

Commit

Permalink
[PATCH] kprobes: changed from using spinlock to mutex
Browse files Browse the repository at this point in the history
Since the Kprobes runtime exception handler is now lock-free — this code path
now uses RCU to walk through the list — there is no need for
register/unregister{_kprobe} to use spin_{lock/unlock}_irq{save/restore}.
Serialization during registration/unregistration is now possible using just a
mutex.

In the process, this patch also fixes a minor memory leak on x86_64 and
powerpc.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
  • Loading branch information
Anil S Keshavamurthy authored and Linus Torvalds committed Jan 10, 2006
1 parent 41dead4 commit 49a2a1b
Show file tree
Hide file tree
Showing 7 changed files with 53 additions and 77 deletions.
6 changes: 1 addition & 5 deletions arch/i386/kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -57,14 +57,10 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
Expand Down
14 changes: 6 additions & 8 deletions arch/powerpc/kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -60,13 +60,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (!p->ainsn.insn)
ret = -ENOMEM;
}
return ret;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
if (!ret) {
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}

return ret;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
Expand All @@ -85,9 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
down(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
up(&kprobe_mutex);
}

static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
Expand Down
6 changes: 1 addition & 5 deletions arch/sparc64/kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,11 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
p->opcode = *p->addr;
return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
Expand Down
7 changes: 2 additions & 5 deletions arch/x86_64/kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>

static DECLARE_MUTEX(kprobe_mutex);
void jprobe_return_end(void);
void __kprobes arch_copy_kprobe(struct kprobe *p);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
Expand All @@ -69,12 +69,11 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on x86_64. */
down(&kprobe_mutex);
p->ainsn.insn = get_insn_slot();
up(&kprobe_mutex);
if (!p->ainsn.insn) {
return -ENOMEM;
}
arch_copy_kprobe(p);
return 0;
}

Expand Down Expand Up @@ -223,9 +222,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
down(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
up(&kprobe_mutex);
}

static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
Expand Down
5 changes: 0 additions & 5 deletions include/asm-ia64/kprobes.h
Original file line number Diff line number Diff line change
Expand Up @@ -110,11 +110,6 @@ struct arch_specific_insn {
unsigned short target_br_reg;
};

/* ia64 does not need this */
static inline void arch_copy_kprobe(struct kprobe *p)
{
}

extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);

Expand Down
1 change: 0 additions & 1 deletion include/linux/kprobes.h
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,6 @@ struct kretprobe_instance {

extern spinlock_t kretprobe_lock;
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_copy_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern void arch_remove_kprobe(struct kprobe *p);
Expand Down
91 changes: 43 additions & 48 deletions kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

static DEFINE_SPINLOCK(kprobe_lock); /* Protects kprobe_table */
static DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

Expand Down Expand Up @@ -167,7 +167,7 @@ static inline void reset_kprobe_instance(void)

/*
* This routine is called either:
* - under the kprobe_lock spinlock - during kprobe_[un]register()
* - under the kprobe_mutex - during kprobe_[un]register()
* OR
* - with preemption disabled - from arch/xxx/kernel/kprobes.c
*/
Expand Down Expand Up @@ -420,7 +420,6 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
/*
* This is the second or subsequent kprobe at the address - handle
* the intricacies
* TODO: Move kcalloc outside the spin_lock
*/
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
struct kprobe *p)
Expand All @@ -442,25 +441,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
arch_disarm_kprobe(p);
hlist_del_rcu(&p->hlist);
spin_unlock_irqrestore(&kprobe_lock, flags);
arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
struct kprobe *p, unsigned long flags)
{
list_del_rcu(&p->list);
if (list_empty(&old_p->list))
cleanup_kprobe(old_p, flags);
else
spin_unlock_irqrestore(&kprobe_lock, flags);
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
if (addr >= (unsigned long)__kprobes_text_start
Expand All @@ -472,7 +452,6 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
int __kprobes register_kprobe(struct kprobe *p)
{
int ret = 0;
unsigned long flags = 0;
struct kprobe *old_p;
struct module *mod;

Expand All @@ -484,59 +463,75 @@ int __kprobes register_kprobe(struct kprobe *p)
(unlikely(!try_module_get(mod))))
return -EINVAL;

if ((ret = arch_prepare_kprobe(p)) != 0)
goto rm_kprobe;

p->nmissed = 0;
spin_lock_irqsave(&kprobe_lock, flags);
down(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
goto out;
}

arch_copy_kprobe(p);
if ((ret = arch_prepare_kprobe(p)) != 0)
goto out;

INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

arch_arm_kprobe(p);

out:
spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
if (ret == -EEXIST)
arch_remove_kprobe(p);
up(&kprobe_mutex);

if (ret && mod)
module_put(mod);
return ret;
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
unsigned long flags;
struct kprobe *old_p;
struct module *mod;
struct kprobe *old_p, *cleanup_p;

spin_lock_irqsave(&kprobe_lock, flags);
down(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
if (old_p->pre_handler == aggr_pre_handler)
cleanup_aggr_kprobe(old_p, p, flags);
else
cleanup_kprobe(p, flags);
if (unlikely(!old_p)) {
up(&kprobe_mutex);
return;
}

synchronize_sched();
if ((old_p->pre_handler == aggr_pre_handler) &&
(p->list.next == &old_p->list) &&
(p->list.prev == &old_p->list)) {
/* Only one element in the aggregate list */
arch_disarm_kprobe(p);
hlist_del_rcu(&old_p->hlist);
cleanup_p = old_p;
} else if (old_p == p) {
/* Only one kprobe element in the hash list */
arch_disarm_kprobe(p);
hlist_del_rcu(&p->hlist);
cleanup_p = p;
} else {
list_del_rcu(&p->list);
cleanup_p = NULL;
}

if ((mod = module_text_address((unsigned long)p->addr)))
module_put(mod);
up(&kprobe_mutex);

if (old_p->pre_handler == aggr_pre_handler &&
list_empty(&old_p->list))
synchronize_sched();
if ((mod = module_text_address((unsigned long)p->addr)))
module_put(mod);

if (cleanup_p) {
if (cleanup_p->pre_handler == aggr_pre_handler) {
list_del_rcu(&p->list);
kfree(old_p);
} else
spin_unlock_irqrestore(&kprobe_lock, flags);
}
down(&kprobe_mutex);
arch_remove_kprobe(p);
up(&kprobe_mutex);
}
}

static struct notifier_block kprobe_exceptions_nb = {
Expand Down

0 comments on commit 49a2a1b

Please sign in to comment.