
Commit

Merge tag 'trace-v4.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull ftrace fixes from Steven Rostedt:
 "There's been a few memory issues found with ftrace.

  One was simply a memory leak: not everything that should have been
  freed was freed when releasing a file pointer on set_graph_function.

  Then Thomas found that the ftrace trampolines were marked for
  read/write as well as execute. To shrink the possible attack surface,
  he added calls to set them to read-only, which also uncovered some
  other issues with freeing module-allocated memory whose permissions
  had been changed.

  Kprobes had a similar issue, which is fixed, and a selftest was added
  to trigger that issue again"

* tag 'trace-v4.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  x86/ftrace: Make sure that ftrace trampolines are not RWX
  x86/mm/ftrace: Do not bug in early boot on irqs_disabled in cpu_flush_range()
  selftests/ftrace: Add a testcase for many kprobe events
  kprobes/x86: Fix to set RWX bits correctly before releasing trampoline
  ftrace: Fix memory leak in ftrace_graph_release()
Linus Torvalds committed May 27, 2017
2 parents c86daad + 6ee98ff commit 77d6465
Showing 6 changed files with 47 additions and 9 deletions.
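
The common thread in the x86 fixes below is that memory handed out by module_alloc() is executable, so it must be flipped back to non-executable and writable before module_memfree() can take it back, and made read-only once patching of the live code is finished. A condensed sketch of that pattern follows; the wrapper names are made up for illustration, while module_alloc(), module_memfree() and the set_memory_*() helpers are the ones the hunks below actually use (their declaring header differs between kernel versions):

#include <linux/mm.h>             /* PAGE_ALIGN, PAGE_SHIFT */
#include <linux/moduleloader.h>   /* module_alloc(), module_memfree() */
#include <asm/set_memory.h>       /* set_memory_ro/rw/nx(); <asm/cacheflush.h> on older kernels */

/* Once the trampoline/slot has been written and patched, lock it down. */
static void exec_region_seal(void *addr, int size)
{
        int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        set_memory_ro((unsigned long)addr, npages);
}

/*
 * Before freeing, make the region non-executable and writable again, so
 * the allocator never ends up holding (or re-handing-out) RO/X pages.
 */
static void exec_region_free(void *addr, int size)
{
        int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        set_memory_nx((unsigned long)addr, npages);
        set_memory_rw((unsigned long)addr, npages);
        module_memfree(addr);
}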
20 changes: 14 additions & 6 deletions arch/x86/kernel/ftrace.c
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
{
return module_alloc(size);
}
-static inline void tramp_free(void *tramp)
+static inline void tramp_free(void *tramp, int size)
{
+int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+set_memory_nx((unsigned long)tramp, npages);
+set_memory_rw((unsigned long)tramp, npages);
module_memfree(tramp);
}
#else
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
{
return NULL;
}
-static inline void tramp_free(void *tramp) { }
+static inline void tramp_free(void *tramp, int size) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
if (WARN_ON(ret < 0)) {
-tramp_free(trampoline);
+tramp_free(trampoline, *tramp_size);
return 0;
}

@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)

/* Are we pointing to the reference? */
if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
-tramp_free(trampoline);
+tramp_free(trampoline, *tramp_size);
return 0;
}

@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
unsigned long offset;
unsigned long ip;
unsigned int size;
-int ret;
+int ret, npages;

if (ops->trampoline) {
/*
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
*/
if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
+npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
+set_memory_rw(ops->trampoline, npages);
} else {
ops->trampoline = create_trampoline(ops, &size);
if (!ops->trampoline)
return;
ops->trampoline_size = size;
+npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
}

offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
+set_memory_ro(ops->trampoline, npages);

/* The update should never fail */
WARN_ON(ret);
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;

-tramp_free((void *)ops->trampoline);
+tramp_free((void *)ops->trampoline, ops->trampoline_size);
ops->trampoline = 0;
}

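The new ftrace.c code sizes every set_memory_*() call as PAGE_ALIGN(size) >> PAGE_SHIFT, i.e. the number of whole pages covering the trampoline. A small stand-alone illustration of that arithmetic (user-space C, assuming 4 KiB pages; the macros are redefined locally just for this demo):

#include <stdio.h>

#define PAGE_SHIFT 12                               /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long sizes[] = { 1, 320, 4096, 4097, 12288 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size %5lu -> %lu page(s)\n",
                       sizes[i], PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);

        /* prints 1 -> 1, 320 -> 1, 4096 -> 1, 4097 -> 2, 12288 -> 3 */
        return 0;
}
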
9 changes: 9 additions & 0 deletions arch/x86/kernel/kprobes/core.c
@@ -52,6 +52,7 @@
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
+#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
}
}

+/* Recover page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+module_memfree(page);
+}
+
static int arch_copy_kprobe(struct kprobe *p)
{
struct insn insn;
2 changes: 1 addition & 1 deletion arch/x86/mm/pageattr.c
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
unsigned int i, level;
unsigned long addr;

-BUG_ON(irqs_disabled());
+BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
WARN_ON(PAGE_ALIGN(start) != start);

on_each_cpu(__cpa_flush_range, NULL, 1);
2 changes: 1 addition & 1 deletion kernel/kprobes.c
@@ -122,7 +122,7 @@ static void *alloc_insn_page(void)
return module_alloc(PAGE_SIZE);
}

-static void free_insn_page(void *page)
+void __weak free_insn_page(void *page)
{
module_memfree(page);
}
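The one-line change above turns the generic free_insn_page() into a weak default, so the x86 definition added earlier in this commit overrides it at link time. A minimal user-space sketch of how a __weak fallback and a strong override interact (the macro, names and single-file layout are for illustration only):

#include <stdio.h>
#include <stdlib.h>

#define __weak __attribute__((weak))   /* same idea as the kernel's __weak */

/* Generic fallback: used only if no strong definition is linked in. */
void __weak free_insn_page(void *page)
{
        puts("generic free_insn_page");
        free(page);
}

/*
 * An arch object file could instead provide a strong definition, e.g.
 *     void free_insn_page(void *page) { reset page permissions; free it; }
 * and the linker would silently prefer it over the weak fallback above.
 */

int main(void)
{
        free_insn_page(malloc(64));
        return 0;
}
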
2 changes: 1 addition & 1 deletion kernel/trace/ftrace.c
@@ -5063,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
}

out:
-kfree(fgd->new_hash);
+free_ftrace_hash(fgd->new_hash);
kfree(fgd);

return ret;
21 changes: 21 additions & 0 deletions tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -0,0 +1,21 @@
#!/bin/sh
# description: Register/unregister many kprobe events

# ftrace fentry skip size depends on the machine architecture.
# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc
case `uname -m` in
x86_64|i[3456]86) OFFS=5;;
ppc*) OFFS=4;;
*) OFFS=0;;
esac

echo "Setup up to 256 kprobes"
grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:

echo 1 > events/kprobes/enable
echo 0 > events/kprobes/enable
echo > kprobe_events
echo "Waiting for unoptimizing & freeing"
sleep 5
echo "Done"
