
Commit 84c1951
---
r: 257064
b: refs/heads/master
c: a8b0ca1
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jul 1, 2011
1 parent 78eefb0 commit 84c1951
Showing 47 changed files with 120 additions and 142 deletions.
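
The hunks below are mechanical: every call site drops the `nmi` flag that used to sit between the event/count and the sample data, so `perf_event_overflow(event, nmi, &data, regs)` becomes `perf_event_overflow(event, &data, regs)` and `perf_sw_event(id, nr, nmi, regs, addr)` becomes `perf_sw_event(id, nr, regs, addr)`. The following standalone C sketch mirrors only those narrowed parameter lists; the struct bodies, function bodies, and the numeric event id are placeholders so it compiles outside the kernel, not kernel code.

```c
/*
 * Stubbed sketch of the narrowed calling convention shown in this diff:
 * the `nmi` argument is gone from perf_event_overflow() and perf_sw_event().
 * Struct/function bodies are placeholders; only the parameter lists mirror
 * the hunks below.
 */
#include <stdio.h>

struct perf_event       { int throttled; };    /* placeholder */
struct perf_sample_data { long period; };      /* placeholder */
struct pt_regs          { unsigned long pc; }; /* placeholder */

/* New form: (event, data, regs) -- previously (event, nmi, data, regs). */
static int perf_event_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
{
    (void)data; (void)regs;
    return event->throttled;   /* non-zero asks the caller to throttle */
}

/* New form: (id, nr, regs, addr) -- previously (id, nr, nmi, regs, addr). */
static void perf_sw_event(unsigned int event_id, unsigned long nr,
                          struct pt_regs *regs, unsigned long addr)
{
    printf("sw event %u: count %lu at %#lx (pc=%#lx)\n",
           event_id, nr, addr, regs->pc);
}

int main(void)
{
    struct perf_event ev = { .throttled = 0 };
    struct perf_sample_data data = { .period = 1 };
    struct pt_regs regs = { .pc = 0x1000 };

    if (perf_event_overflow(&ev, &data, &regs))
        puts("would disable the counter for a while");
    perf_sw_event(3 /* stand-in for PERF_COUNT_SW_PAGE_FAULTS */, 1, &regs, 0x2000);
    return 0;
}
```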
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1880c4ae182afb5650c5678949ecfe7ff66a724e
+refs/heads/master: a8b0ca17b80e92faab46ee7179ba9e99ccb61233
2 changes: 1 addition & 1 deletion trunk/arch/alpha/kernel/perf_event.c
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
     data.period = event->hw.last_period;
 
     if (alpha_perf_event_set_period(event, hwc, idx)) {
-        if (perf_event_overflow(event, 1, &data, regs)) {
+        if (perf_event_overflow(event, &data, regs)) {
             /* Interrupts coming too quickly; "throttle" the
              * counter, i.e., disable it for a little while.
              */
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/perf_event_v6.c
@@ -479,7 +479,7 @@ armv6pmu_handle_irq(int irq_num,
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
 
-        if (perf_event_overflow(event, 0, &data, regs))
+        if (perf_event_overflow(event, &data, regs))
             armpmu->disable(hwc, idx);
     }
 
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/perf_event_v7.c
@@ -787,7 +787,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
 
-        if (perf_event_overflow(event, 0, &data, regs))
+        if (perf_event_overflow(event, &data, regs))
             armpmu->disable(hwc, idx);
     }
 
4 changes: 2 additions & 2 deletions trunk/arch/arm/kernel/perf_event_xscale.c
@@ -251,7 +251,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
 
-        if (perf_event_overflow(event, 0, &data, regs))
+        if (perf_event_overflow(event, &data, regs))
             armpmu->disable(hwc, idx);
     }
 
@@ -583,7 +583,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
         if (!armpmu_event_set_period(event, hwc, idx))
             continue;
 
-        if (perf_event_overflow(event, 0, &data, regs))
+        if (perf_event_overflow(event, &data, regs))
             armpmu->disable(hwc, idx);
     }
 
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/ptrace.c
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
 /*
  * Handle hitting a HW-breakpoint.
  */
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs)
 {
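
The same narrowing applies to overflow callbacks such as `ptrace_hbptriggered()` above: the handler now takes just the event, the sample data, and the registers. Below is a compilable sketch of that three-argument shape; the typedef name `perf_overflow_handler_t`, the stub structs, and the demo invocation are assumptions for illustration, since the header change itself is not among the hunks shown on this page.

```c
/* Sketch of the three-argument overflow-handler shape used by the
 * ptrace/hw-breakpoint hunks in this commit.  Stub types and the typedef
 * name are assumptions; only the parameter list reflects the diff. */
#include <stdio.h>

struct perf_event       { const char *name; }; /* placeholder */
struct perf_sample_data { long period; };      /* placeholder */
struct pt_regs          { unsigned long pc; }; /* placeholder */

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *);

/* What ptrace_hbptriggered() now looks like: no `int unused`/`int nmi`. */
static void hbp_triggered(struct perf_event *bp,
                          struct perf_sample_data *data,
                          struct pt_regs *regs)
{
    printf("breakpoint '%s' hit at pc=%#lx (period %ld)\n",
           bp->name, regs->pc, data->period);
}

int main(void)
{
    struct perf_event bp = { .name = "watchpoint-0" };
    struct perf_sample_data data = { .period = 1 };
    struct pt_regs regs = { .pc = 0x4000 };

    perf_overflow_handler_t handler = hbp_triggered;
    handler(&bp, &data, &regs);  /* what the perf core would do on overflow */
    return 0;
}
```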
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
     unsigned int address, destreg, data, type;
     unsigned int res = 0;
 
-    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
     if (current->pid != previous_pid) {
         pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
6 changes: 3 additions & 3 deletions trunk/arch/arm/mm/fault.c
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
     fault = __do_page_fault(mm, addr, fsr, tsk);
     up_read(&mm->mmap_sem);
 
-    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
+    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
     if (fault & VM_FAULT_MAJOR)
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
     else if (fault & VM_FAULT_MINOR)
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
 
     /*
      * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
2 changes: 1 addition & 1 deletion trunk/arch/mips/kernel/perf_event.c
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
     if (!mipspmu_event_set_period(event, hwc, idx))
         return;
 
-    if (perf_event_overflow(event, 0, data, regs))
+    if (perf_event_overflow(event, data, regs))
         mipspmu->disable_event(idx);
 }
 
8 changes: 4 additions & 4 deletions trunk/arch/mips/kernel/traps.c
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
     if ((opcode & OPCODE) == LL) {
         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                1, 0, regs, 0);
+                1, regs, 0);
         return simulate_ll(regs, opcode);
     }
     if ((opcode & OPCODE) == SC) {
         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                1, 0, regs, 0);
+                1, regs, 0);
         return simulate_sc(regs, opcode);
     }
 
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
     int rd = (opcode & RD) >> 11;
     int rt = (opcode & RT) >> 16;
     perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-            1, 0, regs, 0);
+            1, regs, 0);
     switch (rd) {
     case 0:    /* CPU number */
         regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
     if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                1, 0, regs, 0);
+                1, regs, 0);
         return 0;
     }
 
5 changes: 2 additions & 3 deletions trunk/arch/mips/kernel/unaligned.c
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
     unsigned long value;
     unsigned int res;
 
-    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-            1, 0, regs, 0);
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
     /*
      * This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
     mm_segment_t seg;
 
     perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
-            1, 0, regs, regs->cp0_badvaddr);
+            1, regs, regs->cp0_badvaddr);
     /*
      * Did we catch a fault trying to load an instruction?
      * Or are we running in MIPS16 mode?
3 changes: 1 addition & 2 deletions trunk/arch/mips/math-emu/cp1emu.c
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
     }
 
 emul:
-    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-            1, 0, xcp, 0);
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
     MIPS_FPU_EMU_INC_STATS(emulated);
     switch (MIPSInst_OPCODE(ir)) {
     case ldc1_op:{
8 changes: 3 additions & 5 deletions trunk/arch/mips/mm/fault.c
@@ -145,7 +145,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
      * the fault.
      */
     fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     if (unlikely(fault & VM_FAULT_ERROR)) {
         if (fault & VM_FAULT_OOM)
             goto out_of_memory;
@@ -154,12 +154,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
         BUG();
     }
     if (fault & VM_FAULT_MAJOR) {
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
         tsk->maj_flt++;
     } else {
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                1, 0, regs, address);
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
         tsk->min_flt++;
     }
 
4 changes: 2 additions & 2 deletions trunk/arch/powerpc/include/asm/emulated_ops.h
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
 #define PPC_WARN_EMULATED(type, regs) \
     do { \
         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
-                1, 0, regs, 0); \
+                1, regs, 0); \
         __PPC_WARN_EMULATED(type); \
     } while (0)
 
 #define PPC_WARN_ALIGNMENT(type, regs) \
     do { \
         perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
-                1, 0, regs, regs->dar); \
+                1, regs, regs->dar); \
         __PPC_WARN_EMULATED(type); \
     } while (0)
 
6 changes: 3 additions & 3 deletions trunk/arch/powerpc/kernel/perf_event.c
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_event *event, unsigned long val,
-                               struct pt_regs *regs, int nmi)
+                               struct pt_regs *regs)
 {
     u64 period = event->hw.sample_period;
     s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         if (event->attr.sample_type & PERF_SAMPLE_ADDR)
             perf_get_data_addr(regs, &data.addr);
 
-        if (perf_event_overflow(event, nmi, &data, regs))
+        if (perf_event_overflow(event, &data, regs))
             power_pmu_stop(event, 0);
     }
 }
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
             if ((int)val < 0) {
                 /* event has overflowed */
                 found = 1;
-                record_and_restart(event, val, regs, nmi);
+                record_and_restart(event, val, regs);
             }
         }
 
6 changes: 3 additions & 3 deletions trunk/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_event *event, unsigned long val,
-                               struct pt_regs *regs, int nmi)
+                               struct pt_regs *regs)
 {
     u64 period = event->hw.sample_period;
     s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         perf_sample_data_init(&data, 0);
         data.period = event->hw.last_period;
 
-        if (perf_event_overflow(event, nmi, &data, regs))
+        if (perf_event_overflow(event, &data, regs))
             fsl_emb_pmu_stop(event, 0);
     }
 }
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
         if (event) {
             /* event has overflowed */
             found = 1;
-            record_and_restart(event, val, regs, nmi);
+            record_and_restart(event, val, regs);
         } else {
             /*
              * Disabled counter is negative,
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/ptrace.c
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
 }
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
                       struct perf_sample_data *data, struct pt_regs *regs)
 {
     struct perf_event_attr attr;
6 changes: 3 additions & 3 deletions trunk/arch/powerpc/mm/fault.c
@@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
         die("Weird page fault", regs, SIGSEGV);
     }
 
-    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
     /* When running in the kernel we expect faults to occur only to
      * addresses in user space. All other faults represent errors in the
@@ -319,7 +319,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
     }
     if (ret & VM_FAULT_MAJOR) {
         current->maj_flt++;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                       regs, address);
 #ifdef CONFIG_PPC_SMLPAR
         if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -330,7 +330,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif
     } else {
         current->min_flt++;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                       regs, address);
     }
     up_read(&mm->mmap_sem);
6 changes: 3 additions & 3 deletions trunk/arch/s390/mm/fault.c
@@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
         goto out;
 
     address = trans_exc_code & __FAIL_ADDR_MASK;
-    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
     flags = FAULT_FLAG_ALLOW_RETRY;
     if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
         flags |= FAULT_FLAG_WRITE;
@@ -345,11 +345,11 @@ static inline int do_exception(struct pt_regs *regs, int access,
     if (flags & FAULT_FLAG_ALLOW_RETRY) {
         if (fault & VM_FAULT_MAJOR) {
             tsk->maj_flt++;
-            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                           regs, address);
         } else {
             tsk->min_flt++;
-            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                           regs, address);
         }
         if (fault & VM_FAULT_RETRY) {
2 changes: 1 addition & 1 deletion trunk/arch/sh/kernel/ptrace_32.c
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
     return 0;
 }
 
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
                       struct perf_sample_data *data, struct pt_regs *regs)
 {
     struct perf_event_attr attr;
2 changes: 1 addition & 1 deletion trunk/arch/sh/kernel/traps_32.c
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
      */
     if (!expected) {
         unaligned_fixups_notify(current, instruction, regs);
-        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
+        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
                       regs, address);
     }
 
8 changes: 4 additions & 4 deletions trunk/arch/sh/kernel/traps_64.c
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
         return error;
     }
 
-    perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 
     destreg = (opcode >> 4) & 0x3f;
     if (user_mode(regs)) {
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
         return error;
     }
 
-    perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 
     srcreg = (opcode >> 4) & 0x3f;
     if (user_mode(regs)) {
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
         return error;
     }
 
-    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
 
     destreg = (opcode >> 4) & 0x3f;
     if (user_mode(regs)) {
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
         return error;
     }
 
-    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
 
     srcreg = (opcode >> 4) & 0x3f;
     if (user_mode(regs)) {
2 changes: 1 addition & 1 deletion trunk/arch/sh/math-emu/math.c
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
     struct task_struct *tsk = current;
     struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
 
-    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+    perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
     if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
         /* initialize once. */
6 changes: 3 additions & 3 deletions trunk/arch/sh/mm/fault_32.c
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
     if ((regs->sr & SR_IMASK) != SR_IMASK)
         local_irq_enable();
 
-    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
     /*
      * If we're in an interrupt, have no user context or are running
@@ -210,11 +210,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
     }
     if (fault & VM_FAULT_MAJOR) {
         tsk->maj_flt++;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                       regs, address);
     } else {
         tsk->min_flt++;
-        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                       regs, address);
     }
 
