Merge branch 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (60 commits)
  perf tools: Avoid unnecessary work in directory lookups
  perf stat: Clean up statistics calculations a bit more
  perf stat: More advanced variance computation
  perf stat: Use stddev_mean instead of stddev (see the sketch after the changed-files summary below)
  perf stat: Remove the limit on repeat
  perf stat: Change noise calculation to use stddev
  x86, perf_counter, bts: Do not allow kernel BTS tracing for now
  x86, perf_counter, bts: Correct pointer-to-u64 casts
  x86, perf_counter, bts: Fail if BTS is not available
  perf_counter: Fix output-sharing error path
  perf trace: Fix read_string()
  perf trace: Print out in nanoseconds
  perf tools: Seek to the end of the header area
  perf trace: Fix parsing of perf.data
  perf trace: Sample timestamps as well
  perf_counter: Introduce new (non-)paranoia level to allow raw tracepoint access
  perf trace: Sample the CPU too
  perf tools: Work around strict aliasing related warnings
  perf tools: Clean up warnings list in the Makefile
  perf tools: Complete support for dynamic strings
  ...
Linus Torvalds committed Sep 11, 2009
2 parents b9356c5 + 6b58e7f commit 4f0ac85
Showing 54 changed files with 7,110 additions and 1,405 deletions. (Only the first four file diffs are reproduced below.)
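Several of the perf stat commits listed above ("Change noise calculation to use stddev", "More advanced variance computation", "Use stddev_mean instead of stddev") revolve around one piece of arithmetic: when a run is repeated n times, the noise quoted next to the averaged counts should be the standard error of the mean, stddev/sqrt(n), rather than the raw standard deviation. The following is a minimal, self-contained C sketch of that computation, for illustration only -- the struct and function names here are this note's, not perf's:

#include <math.h>
#include <stddef.h>
#include <stdio.h>

/* Running mean/variance via Welford's method (illustrative only). */
struct stats {
	size_t n;
	double mean;
	double M2;	/* sum of squared deviations from the mean */
};

static void update_stats(struct stats *s, double x)
{
	double delta = x - s->mean;

	s->n++;
	s->mean += delta / s->n;
	s->M2 += delta * (x - s->mean);
}

static double stddev_stats(const struct stats *s)
{
	return s->n > 1 ? sqrt(s->M2 / (s->n - 1)) : 0.0;
}

/* Standard error of the mean: what "stddev_mean" refers to. */
static double stddev_mean_stats(const struct stats *s)
{
	return s->n ? stddev_stats(s) / sqrt((double)s->n) : 0.0;
}

int main(void)
{
	/* e.g. five repeated runs, as with "perf stat -r 5" */
	static const double runs[] = { 10.1, 10.3, 9.9, 10.2, 10.0 };
	struct stats s = { 0, 0.0, 0.0 };
	size_t i;

	for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++)
		update_stats(&s, runs[i]);

	printf("%.3f +- %.3f (raw stddev %.3f)\n",
	       s.mean, stddev_mean_stats(&s), stddev_stats(&s));
	return 0;
}

The running formulation stays O(1) per sample no matter how many runs accumulate, which fits with the companion commit removing the limit on repeat.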
6 changes: 3 additions & 3 deletions arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
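The comment in the first hunk above describes the trick used when a 32-bit kernel has 64-bit PTEs: the store cannot be atomic, so the two 32-bit halves are written in a fixed order with a barrier in between, relying on the hash code having pre-invalidated any PTE that was already hashed. A hedged C sketch of that ordering idea -- the union layout, the placement of the valid bit, and the barrier below are illustrative assumptions, not powerpc's actual definitions:

#include <stdint.h>
#include <stdio.h>

/* Compiler barrier as a stand-in; real code needs a CPU store
 * barrier here (powerpc uses eieio-class instructions). */
#define wmb()	__asm__ __volatile__("" ::: "memory")

typedef union {
	uint64_t pte;
	struct {
		uint32_t lo;	/* assumed to carry the valid/present bit */
		uint32_t hi;
	} words;
} pte64_t;

/* Write the half without the valid bit first, then the half that
 * makes the PTE visible, so no observer sees a torn-but-valid PTE. */
static void set_pte_two_halves(volatile pte64_t *ptep, uint64_t val)
{
	pte64_t tmp = { .pte = val };

	ptep->words.hi = tmp.words.hi;	/* harmless half first */
	wmb();				/* order the two stores */
	ptep->words.lo = tmp.words.lo;	/* valid bit lands last */
}

int main(void)
{
	pte64_t pte = { .pte = 0 };

	set_pte_two_halves(&pte, 0x12345678deadbeefULL);
	printf("pte = %#llx\n", (unsigned long long)pte.pte);
	return 0;
}

The pre-invalidation mentioned in the comment is what makes this sufficient: the ordering only guards against a half-written entry being treated as valid, not against racing with a still-live hashed entry.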
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)		+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
2 changes: 2 additions & 0 deletions arch/powerpc/kernel/asm-offsets.c
@@ -67,6 +67,8 @@ int main(void)
 	DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
 #ifdef CONFIG_PPC64
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(NMI_MASK, NMI_MASK);
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
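The two DEFINE() lines added here export the numeric values of SIGSEGV and NMI_MASK to assembly code; the exceptions-64s.S hunk below uses both (andis. ...,NMI_MASK@h and li r5,SIGSEGV). As a rough sketch of how the asm-offsets mechanism works in general -- the kernel's actual macro and the build script that consumes its output may differ in detail -- compiling a file like this one with gcc -S leaves marker lines in the assembly whose operands are the wanted constants:

#include <signal.h>

/* Emits a "->NAME <value> NAME" marker into the generated .s file;
 * a build-time script scrapes such lines into asm-offsets.h as
 * "#define NAME <value>" so .S files can use the constant. */
#define DEFINE(sym, val) \
	__asm__ __volatile__("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(SIGSEGV, SIGSEGV);	/* ends up as: #define SIGSEGV 11 */
	return 0;
}

Nothing from this file runs at boot; it exists only to be compiled, so that constants and structure offsets (via offsetof(), as in the surrounding lines) stay in sync between C and assembly.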
19 changes: 19 additions & 0 deletions arch/powerpc/kernel/exceptions-64s.S
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION
 	bne-	do_ste_alloc		/* If so handle it */
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
+	clrrdi	r11,r1,THREAD_SHIFT
+	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
+	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
+	bne	77f			/* then don't call hash_page now */
+
 	/*
 	 * On iSeries, we soft-disable interrupts here, then
 	 * hard-enable interrupts so that the hash_page code can spin on
@@ -833,6 +838,20 @@ handle_page_fault:
 	bl	.low_hash_fault
 	b	.ret_from_except
 
+/*
+ * We come here as a result of a DSI at a point where we don't want
+ * to call hash_page, such as when we are accessing memory (possibly
+ * user memory) inside a PMU interrupt that occurred while interrupts
+ * were soft-disabled. We want to invoke the exception handler for
+ * the access, or panic if there isn't a handler.
+ */
+77:	bl	.save_nvgprs
+	mr	r4,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	li	r5,SIGSEGV
+	bl	.bad_page_fault
+	b	.ret_from_except
+
 /* here we have a segment miss */
 do_ste_alloc:
 	bl	.ste_allocate		/* try to insert stab entry */
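The block comment added before label 77 above states the rule: a DSI taken while in an "NMI" (a PMU interrupt that arrived while interrupts were soft-disabled) must not call hash_page, and instead goes straight to the exception handler for the access, or panics if there is none. Below is a compilable C paraphrase of that control flow -- every type, constant, and helper is a stand-in chosen for the sketch; the authoritative logic is the assembly above:

#include <stdio.h>

#define NMI_MASK	0x00100000u	/* assumed bit placement in preempt_count */
#define SIGSEGV		11

struct pt_regs { unsigned long nip; };

static unsigned int preempt_count;	/* stand-in for TI_PREEMPT(r11) */

static int hash_page(unsigned long addr, struct pt_regs *regs)
{
	(void)addr; (void)regs;
	return 1;	/* pretend the hash insert could not handle it */
}

static void bad_page_fault(struct pt_regs *regs, unsigned long addr, int sig)
{
	printf("bad_page_fault: addr=%#lx sig=%d nip=%#lx\n",
	       addr, sig, regs->nip);
}

static void handle_page_fault(struct pt_regs *regs, unsigned long addr)
{
	printf("handle_page_fault: addr=%#lx nip=%#lx\n", addr, regs->nip);
}

/* Mirrors the added checks: in NMI context, skip hash_page entirely
 * and raise the fault (label 77 in the assembly); otherwise try the
 * hash table first and fall back to the normal fault path. */
static void dsi_exception(struct pt_regs *regs, unsigned long addr)
{
	if (preempt_count & NMI_MASK) {
		bad_page_fault(regs, addr, SIGSEGV);
		return;
	}
	if (hash_page(addr, regs))
		handle_page_fault(regs, addr);
}

int main(void)
{
	struct pt_regs regs = { .nip = 0xc0000000UL };

	dsi_exception(&regs, 0x10000UL);	/* normal path */
	preempt_count |= NMI_MASK;
	dsi_exception(&regs, 0x10000UL);	/* "NMI" path: no hash_page */
	return 0;
}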