Skip to content

Commit

Permalink
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/ke…
Browse files Browse the repository at this point in the history
…rnel/git/rostedt/linux-2.6-trace into perf/core
  • Loading branch information
Ingo Molnar committed Jul 21, 2011
2 parents 492f73a + 14a8fd7 commit 40bcea7
Show file tree
Hide file tree
Showing 26 changed files with 1,663 additions and 1,003 deletions.
9 changes: 5 additions & 4 deletions Documentation/trace/kprobetrace.txt
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,15 @@ current_tracer. Instead of that, add probe points via

Synopsis of kprobe_events
-------------------------
p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe
r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe
p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
r[:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
-:[GRP/]EVENT : Clear a probe

GRP : Group name. If omitted, use "kprobes" for it.
EVENT : Event name. If omitted, the event name is generated
based on SYMBOL+offs or MEMADDR.
SYMBOL[+offs] : Symbol+offset where the probe is inserted.
based on SYM+offs or MEMADDR.
MOD : Module name which has given SYM.
SYM[+offs] : Symbol+offset where the probe is inserted.
MEMADDR : Address where the probe is inserted.

FETCHARGS : Arguments. Each probe can have up to 128 args.
Expand Down
33 changes: 33 additions & 0 deletions arch/x86/include/asm/perf_event_p4.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,14 @@
#define P4_CONFIG_HT_SHIFT 63
#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)

/*
* If an event has alias it should be marked
* with a special bit. (Don't forget to check
* P4_PEBS_CONFIG_MASK and related bits on
* modification.)
*/
#define P4_CONFIG_ALIASABLE (1 << 9)

/*
* The bits we allow to pass for RAW events
*/
Expand All @@ -123,6 +131,31 @@
(p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \
(p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))

/*
* In case of event aliasing we need to preserve some
* caller bits otherwise the mapping won't be complete.
*/
#define P4_CONFIG_EVENT_ALIAS_MASK \
(p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \
p4_config_pack_cccr(P4_CCCR_EDGE | \
P4_CCCR_THRESHOLD_MASK | \
P4_CCCR_COMPLEMENT | \
P4_CCCR_COMPARE))

#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS \
((P4_CONFIG_HT) | \
p4_config_pack_escr(P4_ESCR_T0_OS | \
P4_ESCR_T0_USR | \
P4_ESCR_T1_OS | \
P4_ESCR_T1_USR) | \
p4_config_pack_cccr(P4_CCCR_OVF | \
P4_CCCR_CASCADE | \
P4_CCCR_FORCE_OVF | \
P4_CCCR_THREAD_ANY | \
P4_CCCR_OVF_PMI_T0 | \
P4_CCCR_OVF_PMI_T1 | \
P4_CONFIG_ALIASABLE))

static inline bool p4_is_event_cascaded(u64 config)
{
u32 cccr = p4_config_unpack_cccr(config);
Expand Down
7 changes: 0 additions & 7 deletions arch/x86/kernel/cpu/perf_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,6 @@ struct x86_pmu {
void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
void (*hw_watchdog_set_attr)(struct perf_event_attr *attr);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
Expand Down Expand Up @@ -360,12 +359,6 @@ static u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Give the active x86 PMU a chance to override the hw NMI watchdog
 * event attributes via its hw_watchdog_set_attr callback; no-op if
 * the PMU does not provide one.
 */
void hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
	if (x86_pmu.hw_watchdog_set_attr)
		x86_pmu.hw_watchdog_set_attr(wd_attr);
}

/*
* Propagate event elapsed time into the generic event.
* Can only be executed on the CPU where the event is active.
Expand Down
135 changes: 106 additions & 29 deletions arch/x86/kernel/cpu/perf_event_p4.c
Original file line number Diff line number Diff line change
Expand Up @@ -570,11 +570,92 @@ static __initconst const u64 p4_hw_cache_event_ids
},
};

/*
 * Because of Netburst being quite restricted in how
 * many of the same events can run simultaneously, we use
 * event aliases, ie different events which have the
 * same functionality but use non-intersecting resources
 * (ESCR/CCCR/counter registers). This allows us to run
 * two or more semi-same events together. It is done
 * transparently to a user space.
 *
 * Never set any custom internal bits such as P4_CONFIG_HT,
 * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
 * either updated automatically or not applicable
 * at all.
 *
 * And be really careful choosing aliases!
 */
struct p4_event_alias {
	u64 orig;	/* packed config of the original event */
	u64 alter;	/* packed config of the interchangeable alias */
} p4_event_aliases[] = {
	{
		/*
		 * Non-halted cycles can be substituted with
		 * non-sleeping cycles (see Intel SDM Vol3b for
		 * details).
		 */
	.orig	=
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)		|
				    P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
	.alter	=
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT)		|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0)	|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1)	|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2)	|
				    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))|
		p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT	|
				    P4_CCCR_COMPARE),
	},
};

/*
 * Map an aliasable event config to its counterpart alias config
 * (in either direction), or return 0 when no alias applies.
 */
static u64 p4_get_alias_event(u64 config)
{
	u64 masked;
	int idx;

	/*
	 * Probably we're lucky and don't have to do
	 * matching over all config bits: only events
	 * explicitly marked aliasable have a mapping.
	 */
	if (!(config & P4_CONFIG_ALIASABLE))
		return 0;

	masked = config & P4_CONFIG_EVENT_ALIAS_MASK;

	/*
	 * If an event was previously swapped to the alter config
	 * we should swap it back, otherwise contention on registers
	 * will return back.
	 */
	for (idx = 0; idx < ARRAY_SIZE(p4_event_aliases); idx++) {
		struct p4_event_alias *alias = &p4_event_aliases[idx];
		u64 swapped;

		if (masked == alias->orig)
			swapped = alias->alter;
		else if (masked == alias->alter)
			swapped = alias->orig;
		else
			continue;

		/* Keep caller-owned immutable bits intact across the swap. */
		return swapped | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS);
	}

	return 0;
}

static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
/* non-halted CPU clocks */
[PERF_COUNT_HW_CPU_CYCLES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) |
P4_CONFIG_ALIASABLE,

/*
* retired instructions
Expand Down Expand Up @@ -719,31 +800,6 @@ static int p4_validate_raw_event(struct perf_event *event)
return 0;
}

/*
 * Rewrite the NMI watchdog's generic HW_CPU_CYCLES attribute into a
 * raw Netburst "non-sleeping" cycle event; mutates *wd_attr in place.
 */
static void p4_hw_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
	/*
	 * Watchdog ticks are special on Netburst, we use
	 * the so-called "non-sleeping" ticks as recommended
	 * by Intel SDM Vol3b.
	 */
	WARN_ON_ONCE(wd_attr->type	!= PERF_TYPE_HARDWARE ||
		     wd_attr->config	!= PERF_COUNT_HW_CPU_CYCLES);

	wd_attr->type	= PERF_TYPE_RAW;
	wd_attr->config	=
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))	|
		p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT	|
				    P4_CCCR_COMPARE);
}

static int p4_hw_config(struct perf_event *event)
{
int cpu = get_cpu();
Expand Down Expand Up @@ -1159,6 +1215,8 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
struct p4_event_bind *bind;
unsigned int i, thread, num;
int cntr_idx, escr_idx;
u64 config_alias;
int pass;

bitmap_zero(used_mask, X86_PMC_IDX_MAX);
bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
Expand All @@ -1167,6 +1225,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign

hwc = &cpuc->event_list[i]->hw;
thread = p4_ht_thread(cpu);
pass = 0;

again:
/*
* Aliases are swappable so we may hit circular
* lock if both original config and alias need
* resources (MSR registers) which already busy.
*/
if (pass > 2)
goto done;

bind = p4_config_get_bind(hwc->config);
escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
if (unlikely(escr_idx == -1))
Expand All @@ -1180,8 +1249,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
}

cntr_idx = p4_next_cntr(thread, used_mask, bind);
if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
goto done;
if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
/*
* Probably an event alias is still available.
*/
config_alias = p4_get_alias_event(hwc->config);
if (!config_alias)
goto done;
hwc->config = config_alias;
pass++;
goto again;
}

p4_pmu_swap_config_ts(hwc, cpu);
if (assign)
Expand Down Expand Up @@ -1218,7 +1296,6 @@ static __initconst const struct x86_pmu p4_pmu = {
.cntval_bits = ARCH_P4_CNTRVAL_BITS,
.cntval_mask = ARCH_P4_CNTRVAL_MASK,
.max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
.hw_watchdog_set_attr = p4_hw_watchdog_set_attr,
.hw_config = p4_hw_config,
.schedule_events = p4_pmu_schedule_events,
/*
Expand Down
1 change: 1 addition & 0 deletions include/linux/ftrace_event.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ struct trace_iterator {
struct trace_entry *ent;
unsigned long lost_events;
int leftover;
int ent_size;
int cpu;
u64 ts;

Expand Down
33 changes: 23 additions & 10 deletions kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -1255,19 +1255,29 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
/*
* If we have a symbol_name argument, look it up and add the offset field
* to it. This way, we can specify a relative address to a symbol.
* This returns encoded errors if it fails to look up symbol or invalid
* combination of parameters.
*/
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
kprobe_opcode_t *addr = p->addr;

if ((p->symbol_name && p->addr) ||
(!p->symbol_name && !p->addr))
goto invalid;

if (p->symbol_name) {
if (addr)
return NULL;
kprobe_lookup_name(p->symbol_name, addr);
if (!addr)
return ERR_PTR(-ENOENT);
}

if (!addr)
return NULL;
return (kprobe_opcode_t *)(((char *)addr) + p->offset);
addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
if (addr)
return addr;

invalid:
return ERR_PTR(-EINVAL);
}

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
Expand Down Expand Up @@ -1311,8 +1321,8 @@ int __kprobes register_kprobe(struct kprobe *p)
kprobe_opcode_t *addr;

addr = kprobe_addr(p);
if (!addr)
return -EINVAL;
if (IS_ERR(addr))
return PTR_ERR(addr);
p->addr = addr;

ret = check_kprobe_rereg(p);
Expand All @@ -1335,6 +1345,8 @@ int __kprobes register_kprobe(struct kprobe *p)
*/
probed_mod = __module_text_address((unsigned long) p->addr);
if (probed_mod) {
/* Return -ENOENT if fail. */
ret = -ENOENT;
/*
* We must hold a refcount of the probed module while updating
* its code to prohibit unexpected unloading.
Expand All @@ -1351,6 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
module_put(probed_mod);
goto fail_with_jump_label;
}
/* ret will be updated by following code */
}
preempt_enable();
jump_label_unlock();
Expand Down Expand Up @@ -1399,7 +1412,7 @@ int __kprobes register_kprobe(struct kprobe *p)
fail_with_jump_label:
preempt_enable();
jump_label_unlock();
return -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

Expand Down Expand Up @@ -1686,8 +1699,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)

if (kretprobe_blacklist_size) {
addr = kprobe_addr(&rp->kp);
if (!addr)
return -EINVAL;
if (IS_ERR(addr))
return PTR_ERR(addr);

for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
if (kretprobe_blacklist[i].addr == addr)
Expand Down
Loading

0 comments on commit 40bcea7

Please sign in to comment.