Commit

---
r: 375543
b: refs/heads/master
c: 416821d
h: refs/heads/master
i:
  375541: ee904eb
  375539: 69e3921
  375535: c7b66fb
v: v3
Helge Deller committed May 11, 2013
1 parent b2eb683 commit 9ca30e0
Showing 206 changed files with 3,006 additions and 12,599 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: dbbffe6898fd0d7bac66ded5d3c58835b13ddefc
refs/heads/master: 416821d3d68164909b2cbcf398e4ba0797f5f8a2
4 changes: 2 additions & 2 deletions trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION =
NAME = Unicycling Gorilla

# *DOCUMENTATION*
2 changes: 1 addition & 1 deletion trunk/arch/parisc/Kconfig
@@ -245,7 +245,7 @@ config SMP

config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
default n
default y
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
9 changes: 9 additions & 0 deletions trunk/arch/parisc/include/asm/hardirq.h
@@ -11,10 +11,18 @@
#include <linux/threads.h>
#include <linux/irq.h>

#ifdef CONFIG_IRQSTACKS
#define __ARCH_HAS_DO_SOFTIRQ
#endif

typedef struct {
unsigned int __softirq_pending;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
unsigned int kernel_stack_usage;
#ifdef CONFIG_IRQSTACKS
unsigned int irq_stack_usage;
unsigned int irq_stack_counter;
#endif
#endif
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
@@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING
3 changes: 3 additions & 0 deletions trunk/arch/parisc/include/asm/processor.h
@@ -63,10 +63,13 @@
*/
#ifdef __KERNEL__

#include <linux/spinlock_types.h>

#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */

union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
raw_spinlock_t lock;
};

DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
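
The union is the interesting part of this hunk: the raw_spinlock_t is overlaid on the lowest bytes of the 16 KiB per-CPU IRQ stack, so a held lock doubles as an "IRQ stack in use" marker without spending any extra per-CPU storage. A small user-space sketch of that layout, with a stand-in lock type (fake_raw_spinlock_t and the printed addresses are purely illustrative, not kernel code):

/* Sketch only: show how the lock shares storage with the bottom of the
 * dedicated IRQ stack. Builds as ordinary user-space C. */
#include <stdio.h>

#define IRQ_STACK_SIZE (4096 << 2)              /* 16k, as in processor.h */

typedef struct { unsigned int slock; } fake_raw_spinlock_t;    /* stand-in */

union irq_stack_union {
    unsigned long stack[IRQ_STACK_SIZE / sizeof(unsigned long)];
    fake_raw_spinlock_t lock;   /* occupies the lowest bytes of the stack */
};

int main(void)
{
    static union irq_stack_union u;

    /* The lock aliases stack[0]; usable stack space starts just above it,
     * which is why irq.c skips sizeof(...lock) before aligning the pointer. */
    printf("stack base %p, lock %p, first usable byte %p\n",
           (void *)u.stack, (void *)&u.lock,
           (void *)((char *)u.stack + sizeof(u.lock)));
    return 0;
}
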
101 changes: 87 additions & 14 deletions trunk/arch/parisc/kernel/irq.c
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%*s: ", prec, "STK");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
seq_printf(p, " Kernel stack usage\n");
seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
seq_printf(p, "%*s: ", prec, "IST");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
seq_puts(p, " Interrupt stack usage\n");
seq_printf(p, "%*s: ", prec, "ISC");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
seq_puts(p, " Interrupt stack usage counter\n");
# endif
#endif
#ifdef CONFIG_SMP
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
seq_printf(p, " Rescheduling interrupts\n");
seq_puts(p, " Rescheduling interrupts\n");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
seq_printf(p, " Function call interrupts\n");
seq_puts(p, " Function call interrupts\n");
#endif
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
seq_puts(p, " TLB shootdowns\n");
return 0;
}

@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
unsigned long sp = regs->gr[30];
unsigned long stack_usage;
unsigned int *last_usage;
int cpu = smp_processor_id();

/* if sr7 != 0, we interrupted a userspace process which we do not want
* to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)

/* calculate kernel stack usage */
stack_usage = sp - stack_start;
last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
#ifdef CONFIG_IRQSTACKS
if (likely(stack_usage <= THREAD_SIZE))
goto check_kernel_stack; /* found kernel stack */

/* check irq stack usage */
stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
stack_usage = sp - stack_start;

last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;

if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
return;

pr_emerg("stackcheck: %s will most likely overflow irq stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
goto panic_check;

check_kernel_stack:
#endif

/* check kernel stack usage */
last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs)
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
#endif
}

#ifdef CONFIG_IRQSTACKS
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
};

static void execute_on_irq_stack(void *func, unsigned long param1)
{
unsigned long *irq_stack_start;
union irq_stack_union *union_ptr;
unsigned long irq_stack;
int cpu = smp_processor_id();
raw_spinlock_t *irq_stack_in_use;

irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
irq_stack = (unsigned long) irq_stack_start;
irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
irq_stack = (unsigned long) &union_ptr->stack;
irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
64); /* align for stack frame usage */

BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
*irq_stack_start = 1;
/* We may be called recursive. If we are already using the irq stack,
* just continue to use it. Use spinlocks to serialize
* the irq stack usage.
*/
irq_stack_in_use = &union_ptr->lock;
if (!raw_spin_trylock(irq_stack_in_use)) {
void (*direct_call)(unsigned long p1) = func;

/* We are using the IRQ stack already.
* Do direct call on current stack. */
direct_call(param1);
return;
}

/* This is where we switch to the IRQ stack. */
call_on_stack(param1, func, irq_stack);

*irq_stack_start = 0;
__inc_irq_stat(irq_stack_counter);

/* free up irq stack usage. */
do_raw_spin_unlock(irq_stack_in_use);
}

asmlinkage void do_softirq(void)
{
__u32 pending;
unsigned long flags;

if (in_interrupt())
return;

local_irq_save(flags);

pending = local_softirq_pending();

if (pending)
execute_on_irq_stack(__do_softirq, 0);

local_irq_restore(flags);
}
#endif /* CONFIG_IRQSTACKS */
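
The recursion guard above is the core of the patch: execute_on_irq_stack() takes the lock embedded at the base of irq_stack_union with raw_spin_trylock(); if the trylock fails, the CPU is already running on its IRQ stack and the handler is simply called on the current stack instead of switching again. The same helper is what lets do_softirq() run softirqs off the small process stack. A stripped-down user-space sketch of that pattern (C11; atomic_bool and the printf stand in for the raw spinlock and the call_on_stack assembly helper, with a single fake "CPU"):

/* Sketch only: recursion-safe hand-off onto a dedicated stack, modelled on
 * execute_on_irq_stack(). No real stack switch is performed here. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IRQ_STACK_SIZE (4096 << 2)

static union {
    unsigned long stack[IRQ_STACK_SIZE / sizeof(unsigned long)];
    atomic_bool in_use;             /* plays the role of the raw_spinlock_t */
} irq_stack;

static void run_on_irq_stack(void (*func)(unsigned long), unsigned long arg)
{
    /* First usable address: skip the embedded lock, round up to 64 bytes. */
    uintptr_t sp = ((uintptr_t)irq_stack.stack + sizeof(irq_stack.in_use) + 63)
                   & ~(uintptr_t)63;

    if (atomic_exchange(&irq_stack.in_use, true)) {
        func(arg);                  /* already on the IRQ stack: call directly */
        return;
    }

    /* The kernel switches the stack pointer to 'sp' here via call_on_stack;
     * this sketch only reports the address and calls the handler. */
    printf("would switch stack pointer to %#lx\n", (unsigned long)sp);
    func(arg);

    atomic_store(&irq_stack.in_use, false);   /* IRQ stack is free again */
}

static void handler(unsigned long n) { printf("handler(%lu)\n", n); }

int main(void) { run_on_irq_stack(handler, 1); return 0; }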

4 changes: 2 additions & 2 deletions trunk/arch/parisc/mm/init.c
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
{
int do_recycle;

inc_irq_stat(irq_tlb_count);
__inc_irq_stat(irq_tlb_count);
do_recycle = 0;
spin_lock(&sid_lock);
if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
#else
void flush_tlb_all(void)
{
inc_irq_stat(irq_tlb_count);
__inc_irq_stat(irq_tlb_count);
spin_lock(&sid_lock);
flush_tlb_all_local(NULL);
recycle_sids();
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/entry_64.S
@@ -150,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne syscall_dotrace
bne- syscall_dotrace
.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
3 changes: 0 additions & 3 deletions trunk/arch/x86/include/uapi/asm/msr-index.h
@@ -120,9 +120,6 @@
#define MSR_CORE_C6_RESIDENCY 0x000003fd
#define MSR_CORE_C7_RESIDENCY 0x000003fe
#define MSR_PKG_C2_RESIDENCY 0x0000060d
#define MSR_PKG_C8_RESIDENCY 0x00000630
#define MSR_PKG_C9_RESIDENCY 0x00000631
#define MSR_PKG_C10_RESIDENCY 0x00000632

/* Run Time Average Power Limiting (RAPL) Interface */

5 changes: 3 additions & 2 deletions trunk/arch/x86/pci/xen.c
@@ -295,10 +295,11 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int pos;
u32 table_offset, bir;

pos = dev->msix_cap;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);

map_irq.table_base = pci_resource_start(dev, bir);
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
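
For reference, this hunk goes back to looking up the MSI-X capability with pci_find_capability() and masking the Table Offset/BIR register with PCI_MSIX_FLAGS_BIRMASK: the low three bits of that register select the BAR holding the MSI-X table, and the remaining bits are the table's byte offset inside that BAR. A hedged sketch of the lookup as a stand-alone helper (msix_table_base() is hypothetical and not part of this commit; the PCI helpers and constants are the ones the hunk itself uses):

#include <linux/pci.h>

/* Illustrative helper, not from this commit: resolve the physical base
 * address of a device's MSI-X table from the capability registers. */
static resource_size_t msix_table_base(struct pci_dev *dev)
{
    int pos;
    u32 table;
    u8 bir;

    pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
    if (!pos)
        return 0;                               /* no MSI-X capability */

    pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, &table);
    bir = (u8)(table & PCI_MSIX_FLAGS_BIRMASK); /* BAR index, bits 2:0 */

    /* The table lives at this offset inside the BAR selected by the BIR. */
    return pci_resource_start(dev, bir) + (table & ~PCI_MSIX_FLAGS_BIRMASK);
}
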
50 changes: 1 addition & 49 deletions trunk/arch/x86/xen/enlighten.c
@@ -85,29 +85,7 @@

EXPORT_SYMBOL_GPL(hypercall_page);

/*
* Pointer to the xen_vcpu_info structure or
* &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
* to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
* acknowledge pending events.
* Also more subtly it is used by the patched version of irq enable/disable
* e.g. xen_irq_enable_direct and xen_iret in PV mode.
*
* The desire to be able to do those mask/unmask operations as a single
* instruction by using the per-cpu offset held in %gs is the real reason
* vcpu info is in a per-cpu pointer and the original reason for this
* hypercall.
*
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
* Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
* hypercall. This can be used both in PV and PVHVM mode. The structure
* overrides the default per_cpu(xen_vcpu, cpu) value.
*/
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
@@ -179,21 +157,6 @@ static void xen_vcpu_setup(int cpu)

BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

/*
* This path is called twice on PVHVM - first during bootup via
* smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
* hotplugged: cpu_up -> xen_hvm_cpu_notify.
* As we can only do the VCPUOP_register_vcpu_info once lets
* not over-write its result.
*
* For PV it is called during restore (xen_vcpu_restore) and bootup
* (xen_setup_vcpu_info_placement). The hotplug mechanism does not
* use this function.
*/
if (xen_hvm_domain()) {
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
return;
}
if (cpu < MAX_VIRT_CPUS)
per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

@@ -209,12 +172,7 @@

/* Check to see if the hypervisor will put the vcpu_info
structure where we want it, which allows direct access via
a percpu-variable.
N.B. This hypercall can _only_ be called once per CPU. Subsequent
calls will error out with -EINVAL. This is due to the fact that
hypervisor has no unregister variant and this hypercall does not
allow to over-write info.mfn and info.offset.
*/
a percpu-variable. */
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

if (err) {
@@ -429,9 +387,6 @@ static void __init xen_init_cpuid_mask(void)
cpuid_leaf1_edx_mask &=
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */

cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

ax = 1;
cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
@@ -1648,9 +1603,6 @@ void __ref xen_hvm_init_shared_info(void)
* online but xen_hvm_init_shared_info is run at resume time too and
* in that case multiple vcpus might be online. */
for_each_online_cpu(cpu) {
/* Leave it to be NULL. */
if (cpu >= MAX_VIRT_CPUS)
continue;
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}
}
2 changes: 1 addition & 1 deletion trunk/arch/x86/xen/spinlock.c
@@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
int irq;
const char *name;

WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu));

/*
5 changes: 0 additions & 5 deletions trunk/drivers/gpu/drm/drm_crtc.c
@@ -78,10 +78,6 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;

/* Locking is currently fubar in the panic handler. */
if (oops_in_progress)
return;

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));

@@ -250,7 +246,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
else
return "unknown";
}
EXPORT_SYMBOL(drm_get_connector_status_name);

/**
* drm_mode_object_get - allocate a new modeset identifier