uprobes: Reuse return_instances between multiple uretprobes within task
Instead of constantly allocating and freeing very short-lived
struct return_instance objects, reuse them as much as possible within the
current task. For that, store a linked list of reusable return_instances
within current->utask.

The only complication is that ri_timer() might still be processing such
a return_instance. So even though the main uretprobe processing logic
might already be done with the return_instance and would otherwise be
free to reuse it for the next uretprobe, it's not correct to reuse it
unconditionally.

Instead, we make sure that ri_timer() can't possibly be processing it by
using a seqcount_t, with ri_timer() being "the writer" and
free_ret_instance() being "the reader". If, after we unlink the return
instance from the utask->return_instances list, we know that ri_timer()
hasn't gotten to processing utask->return_instances yet, then we can be
sure that immediate return_instance reuse is OK, and so we put it
onto utask->ri_pool for future (potentially almost immediate) reuse.
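
In seqlock terms this is the classic even/odd sequence check: ri_timer()
bumps the counter before and after its scan, and free_ret_instance() only
reuses the instance if it observes an even (idle) counter. The sketch
below is a self-contained userspace analogue of that handshake, using C11
atomics and hypothetical helper names (timer_scan_begin(), reuse_is_safe(),
etc.) in place of the kernel's seqcount_t and raw_seqcount_try_begin(); it
illustrates the decision logic only, not the actual kernel implementation:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* even: no timer scan in progress; odd: scan in progress */
	static atomic_uint ri_seqcount;

	static void timer_scan_begin(void) { atomic_fetch_add(&ri_seqcount, 1); }
	static void timer_scan_end(void)   { atomic_fetch_add(&ri_seqcount, 1); }

	/* rough analogue of raw_seqcount_try_begin(): true iff no writer is active */
	static bool reuse_is_safe(void)
	{
		return (atomic_load(&ri_seqcount) & 1) == 0;
	}

	int main(void)
	{
		/* timer idle: immediate reuse (push onto the pool) is fine */
		printf("idle: %s\n", reuse_is_safe() ? "reuse" : "RCU-delayed free");

		/* timer mid-scan: play it safe and defer freeing instead */
		timer_scan_begin();
		printf("busy: %s\n", reuse_is_safe() ? "reuse" : "RCU-delayed free");
		timer_scan_end();

		return 0;
	}

The even/odd check is what lets the free path skip the RCU grace period in
the common case while still deferring to RCU-delayed freeing whenever the
timer might be mid-scan.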

This change shows improvements both in single-CPU performance (by
avoiding the relatively expensive kmalloc()/kfree() combo) and in terms of
multi-CPU scalability, where per-CPU throughput doesn't decline as
steeply as the number of CPUs grows (a decline that profiling had
previously attributed to kmalloc()/kfree()):

	BASELINE (latest perf/core)
	===========================
	uretprobe-nop         ( 1 cpus):    1.898 ± 0.002M/s  (  1.898M/s/cpu)
	uretprobe-nop         ( 2 cpus):    3.574 ± 0.011M/s  (  1.787M/s/cpu)
	uretprobe-nop         ( 3 cpus):    5.279 ± 0.066M/s  (  1.760M/s/cpu)
	uretprobe-nop         ( 4 cpus):    6.824 ± 0.047M/s  (  1.706M/s/cpu)
	uretprobe-nop         ( 5 cpus):    8.339 ± 0.060M/s  (  1.668M/s/cpu)
	uretprobe-nop         ( 6 cpus):    9.812 ± 0.047M/s  (  1.635M/s/cpu)
	uretprobe-nop         ( 7 cpus):   11.030 ± 0.048M/s  (  1.576M/s/cpu)
	uretprobe-nop         ( 8 cpus):   12.453 ± 0.126M/s  (  1.557M/s/cpu)
	uretprobe-nop         (10 cpus):   14.838 ± 0.044M/s  (  1.484M/s/cpu)
	uretprobe-nop         (12 cpus):   17.092 ± 0.115M/s  (  1.424M/s/cpu)
	uretprobe-nop         (14 cpus):   19.576 ± 0.022M/s  (  1.398M/s/cpu)
	uretprobe-nop         (16 cpus):   22.264 ± 0.015M/s  (  1.391M/s/cpu)
	uretprobe-nop         (24 cpus):   33.534 ± 0.078M/s  (  1.397M/s/cpu)
	uretprobe-nop         (32 cpus):   43.262 ± 0.127M/s  (  1.352M/s/cpu)
	uretprobe-nop         (40 cpus):   53.252 ± 0.080M/s  (  1.331M/s/cpu)
	uretprobe-nop         (48 cpus):   55.778 ± 0.045M/s  (  1.162M/s/cpu)
	uretprobe-nop         (56 cpus):   56.850 ± 0.227M/s  (  1.015M/s/cpu)
	uretprobe-nop         (64 cpus):   62.005 ± 0.077M/s  (  0.969M/s/cpu)
	uretprobe-nop         (72 cpus):   66.445 ± 0.236M/s  (  0.923M/s/cpu)
	uretprobe-nop         (80 cpus):   68.353 ± 0.180M/s  (  0.854M/s/cpu)

	THIS PATCHSET (on top of latest perf/core)
	==========================================
	uretprobe-nop         ( 1 cpus):    2.253 ± 0.004M/s  (  2.253M/s/cpu)
	uretprobe-nop         ( 2 cpus):    4.281 ± 0.003M/s  (  2.140M/s/cpu)
	uretprobe-nop         ( 3 cpus):    6.389 ± 0.027M/s  (  2.130M/s/cpu)
	uretprobe-nop         ( 4 cpus):    8.328 ± 0.005M/s  (  2.082M/s/cpu)
	uretprobe-nop         ( 5 cpus):   10.353 ± 0.001M/s  (  2.071M/s/cpu)
	uretprobe-nop         ( 6 cpus):   12.513 ± 0.010M/s  (  2.086M/s/cpu)
	uretprobe-nop         ( 7 cpus):   14.525 ± 0.017M/s  (  2.075M/s/cpu)
	uretprobe-nop         ( 8 cpus):   15.633 ± 0.013M/s  (  1.954M/s/cpu)
	uretprobe-nop         (10 cpus):   19.532 ± 0.011M/s  (  1.953M/s/cpu)
	uretprobe-nop         (12 cpus):   21.405 ± 0.009M/s  (  1.784M/s/cpu)
	uretprobe-nop         (14 cpus):   24.857 ± 0.020M/s  (  1.776M/s/cpu)
	uretprobe-nop         (16 cpus):   26.466 ± 0.018M/s  (  1.654M/s/cpu)
	uretprobe-nop         (24 cpus):   40.513 ± 0.222M/s  (  1.688M/s/cpu)
	uretprobe-nop         (32 cpus):   54.180 ± 0.074M/s  (  1.693M/s/cpu)
	uretprobe-nop         (40 cpus):   66.100 ± 0.082M/s  (  1.652M/s/cpu)
	uretprobe-nop         (48 cpus):   70.544 ± 0.068M/s  (  1.470M/s/cpu)
	uretprobe-nop         (56 cpus):   74.494 ± 0.055M/s  (  1.330M/s/cpu)
	uretprobe-nop         (64 cpus):   79.317 ± 0.029M/s  (  1.239M/s/cpu)
	uretprobe-nop         (72 cpus):   84.875 ± 0.020M/s  (  1.179M/s/cpu)
	uretprobe-nop         (80 cpus):   92.318 ± 0.224M/s  (  1.154M/s/cpu)

For reference, with uprobe-nop we hit the following throughput:

	uprobe-nop            (80 cpus):  143.485 ± 0.035M/s  (  1.794M/s/cpu)

So now uretprobe stays a bit closer to that performance.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20241206002417.3295533-5-andrii@kernel.org
Andrii Nakryiko authored and Ingo Molnar committed Dec 9, 2024
1 parent 0cf981d commit 8622e45
Showing 2 changed files with 75 additions and 14 deletions.
6 changes: 5 additions & 1 deletion include/linux/uprobes.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/seqlock.h>

struct uprobe;
struct vm_area_struct;
@@ -124,6 +125,10 @@ struct uprobe_task {
unsigned int depth;
struct return_instance *return_instances;

struct return_instance *ri_pool;
struct timer_list ri_timer;
seqcount_t ri_seqcount;

union {
struct {
struct arch_uprobe_task autask;
@@ -137,7 +142,6 @@ struct uprobe_task {
};

struct uprobe *active_uprobe;
struct timer_list ri_timer;
unsigned long xol_vaddr;

struct arch_uprobe *auprobe;
83 changes: 70 additions & 13 deletions kernel/events/uprobes.c
@@ -1888,17 +1888,57 @@ unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
return instruction_pointer(regs);
}

static void free_ret_instance(struct return_instance *ri, bool cleanup_hprobe)
static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
{
ri->cons_cnt = 0;
ri->next = utask->ri_pool;
utask->ri_pool = ri;
}

static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
{
struct return_instance *ri = utask->ri_pool;

if (likely(ri))
utask->ri_pool = ri->next;

return ri;
}

static void ri_free(struct return_instance *ri)
{
kfree(ri->extra_consumers);
kfree_rcu(ri, rcu);
}

static void free_ret_instance(struct uprobe_task *utask,
struct return_instance *ri, bool cleanup_hprobe)
{
unsigned seq;

if (cleanup_hprobe) {
enum hprobe_state hstate;

(void)hprobe_consume(&ri->hprobe, &hstate);
hprobe_finalize(&ri->hprobe, hstate);
}

kfree(ri->extra_consumers);
kfree_rcu(ri, rcu);
/*
* At this point return_instance is unlinked from utask's
* return_instances list and this has become visible to ri_timer().
* If seqcount now indicates that ri_timer's return instance
* processing loop isn't active, we can return ri into the pool of
* to-be-reused return instances for future uretprobes. If ri_timer()
* happens to be running right now, though, we fall back to safety and
* just perform RCU-delayed freeing of ri.
*/
if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
/* immediate reuse of ri without RCU GP is OK */
ri_pool_push(utask, ri);
} else {
/* we might be racing with ri_timer(), so play it safe */
ri_free(ri);
}
}

/*
@@ -1920,7 +1960,15 @@ void uprobe_free_utask(struct task_struct *t)
ri = utask->return_instances;
while (ri) {
ri_next = ri->next;
free_ret_instance(ri, true /* cleanup_hprobe */);
free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}

/* free_ret_instance() above might add to ri_pool, so this loop should come last */
ri = utask->ri_pool;
while (ri) {
ri_next = ri->next;
ri_free(ri);
ri = ri_next;
}

@@ -1943,8 +1991,12 @@ static void ri_timer(struct timer_list *timer)
/* RCU protects return_instance from freeing. */
guard(rcu)();

write_seqcount_begin(&utask->ri_seqcount);

for_each_ret_instance_rcu(ri, utask->return_instances)
hprobe_expire(&ri->hprobe, false);

write_seqcount_end(&utask->ri_seqcount);
}

static struct uprobe_task *alloc_utask(void)
@@ -1956,6 +2008,7 @@ static struct uprobe_task *alloc_utask(void)
return NULL;

timer_setup(&utask->ri_timer, ri_timer, 0);
seqcount_init(&utask->ri_seqcount);

return utask;
}
@@ -1975,10 +2028,14 @@ static struct uprobe_task *get_utask(void)
return current->utask;
}

static struct return_instance *alloc_return_instance(void)
static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
{
struct return_instance *ri;

ri = ri_pool_pop(utask);
if (ri)
return ri;

ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return ZERO_SIZE_PTR;
@@ -2119,7 +2176,7 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
rcu_assign_pointer(utask->return_instances, ri_next);
utask->depth--;

free_ret_instance(ri, true /* cleanup_hprobe */);
free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}
}
@@ -2186,7 +2243,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,

return;
free:
kfree(ri);
ri_free(ri);
}

/* Prepare to single-step probed instruction out of line. */
@@ -2385,8 +2442,7 @@ static struct return_instance *push_consumer(struct return_instance *ri, __u64 i
if (unlikely(ri->cons_cnt > 0)) {
ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
if (!ric) {
kfree(ri->extra_consumers);
kfree_rcu(ri, rcu);
ri_free(ri);
return ZERO_SIZE_PTR;
}
ri->extra_consumers = ric;
@@ -2428,8 +2484,9 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
struct uprobe_consumer *uc;
bool has_consumers = false, remove = true;
struct return_instance *ri = NULL;
struct uprobe_task *utask = current->utask;

current->utask->auprobe = &uprobe->arch;
utask->auprobe = &uprobe->arch;

list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
bool session = uc->handler && uc->ret_handler;
@@ -2449,12 +2506,12 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
continue;

if (!ri)
ri = alloc_return_instance();
ri = alloc_return_instance(utask);

if (session)
ri = push_consumer(ri, uc->id, cookie);
}
current->utask->auprobe = NULL;
utask->auprobe = NULL;

if (!ZERO_OR_NULL_PTR(ri))
prepare_uretprobe(uprobe, regs, ri);
@@ -2554,7 +2611,7 @@ void uprobe_handle_trampoline(struct pt_regs *regs)
hprobe_finalize(&ri->hprobe, hstate);

/* We already took care of hprobe, no need to waste more time on that. */
free_ret_instance(ri, false /* !cleanup_hprobe */);
free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
ri = ri_next;
} while (ri != next_chain);
} while (!valid);
