Commit aca239b

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
  [IA64] run rest drivers/misc/sgi-xp through scripts/Lindent
  [IA64] run some drivers/misc/sgi-xp through scripts/Lindent
  [IA64] move XP and XPC to drivers/misc/sgi-xp
  [IA64] minor irq handler cleanups
  [IA64] simplify notify hooks in mca.c
  [IA64] do notify DIE_MCA_MONARCH_PROCESS for each monarchs
  [IA64] disable interrupts on exit of ia64_trace_syscall
Linus Torvalds committed Apr 22, 2008
2 parents 16abef0 + 2c2b94f commit aca239b
Showing 20 changed files with 965 additions and 1,420 deletions.

11 changes: 0 additions & 11 deletions arch/ia64/Kconfig
@@ -266,17 +266,6 @@ config IOSAPIC
depends on !IA64_HP_SIM
default y

-config IA64_SGI_SN_XP
-tristate "Support communication between SGI SSIs"
-depends on IA64_GENERIC || IA64_SGI_SN2
-select IA64_UNCACHED_ALLOCATOR
-help
-An SGI machine can be divided into multiple Single System
-Images which act independently of each other and have
-hardware based memory protection from the others. Enabling
-this feature will allow for direct communication between SSIs
-based on a network adapter and DMA messaging.
-
config FORCE_MAX_ZONEORDER
int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
range 11 17 if !HUGETLB_PAGE

4 changes: 2 additions & 2 deletions arch/ia64/kernel/crash.c
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
unw_init_running(kdump_cpu_freeze, NULL);
break;
case DIE_MCA_MONARCH_LEAVE:
-/* die_register->signr indicate if MCA is recoverable */
-if (kdump_on_fatal_mca && !args->signr) {
+/* *(nd->data) indicate if MCA is recoverable */
+if (kdump_on_fatal_mca && !(*(nd->data))) {
atomic_set(&kdump_in_progress, 1);
*(nd->monarch_cpu) = -1;
machine_kdump_on_init();
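The hunk above moves kdump's recoverability check from the generic die_args signr field to the MCA-specific nd->data pointer, which (per the mca.c hunk below) now points at the handler's local recover flag. For orientation only, a minimal out-of-tree die notifier consuming that flag could look like the sketch below; the function and variable names and the printk text are illustrative, while the structures, events, and registration call are the existing kernel API.

#include <linux/kdebug.h>	/* register_die_notifier(), struct die_args */
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/mca.h>		/* struct ia64_mca_notify_die */

/* Illustrative callback: report whether the MCA was recoverable. */
static int example_mca_event(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = data;
	struct ia64_mca_notify_die *nd;

	if (val != DIE_MCA_MONARCH_LEAVE)
		return NOTIFY_DONE;

	/* notify_die() passes (long)&nd as the err argument. */
	nd = (struct ia64_mca_notify_die *)args->err;

	/* After this merge, *(nd->data) is the MCA handler's recover flag. */
	if (!(*(nd->data)))
		printk(KERN_ERR "example: unrecoverable MCA reported\n");

	return NOTIFY_DONE;
}

static struct notifier_block example_mca_nb = {
	.notifier_call = example_mca_event,
};

static int __init example_init(void)
{
	return register_die_notifier(&example_mca_nb);
}
module_init(example_init);

This mirrors kdump_init_notifier above: the int behind nd->data is the recover value that ia64_mca_handler now exports via .data = &recover in the mca.c hunk further down.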

1 change: 1 addition & 0 deletions arch/ia64/kernel/entry.S
@@ -570,6 +570,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+(pUStk) rsm psr.i // disable interrupts
br.cond.sptk .work_pending_syscall_end

strace_error:

77 changes: 37 additions & 40 deletions arch/ia64/kernel/mca.c
@@ -109,6 +109,20 @@
# define IA64_MCA_DEBUG(fmt...)
#endif

+#define NOTIFY_INIT(event, regs, arg, spin) \
+do { \
+if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \
+== NOTIFY_STOP) && ((spin) == 1)) \
+ia64_mca_spin(__func__); \
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin) \
+do { \
+if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \
+== NOTIFY_STOP) && ((spin) == 1)) \
+ia64_mca_spin(__func__); \
+} while (0)
+
/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,27 +780,22 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)

/* Mask all interrupts */
local_irq_save(flags);
-if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-(long)&nd, 0, 0) == NOTIFY_STOP)
-ia64_mca_spin(__func__);
+
+NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
/* Register with the SAL monarch that the slave has
* reached SAL
*/
ia64_sal_mc_rendez();

-if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-(long)&nd, 0, 0) == NOTIFY_STOP)
-ia64_mca_spin(__func__);
+NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);

/* Wait for the monarch cpu to exit. */
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */

-if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-(long)&nd, 0, 0) == NOTIFY_STOP)
-ia64_mca_spin(__func__);
+NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);

ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
/* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
int recover, cpu = smp_processor_id();
struct task_struct *previous_current;
struct ia64_mca_notify_die nd =
-{ .sos = sos, .monarch_cpu = &monarch_cpu };
+{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
static atomic_t mca_count;
static cpumask_t mca_cpu;

@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,

previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

-if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
+NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
if (sos->monarch) {
@@ -1288,13 +1295,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
* does not work.
*/
ia64_mca_wakeup_all();
-if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
} else {
while (cpu_isset(cpu, mca_cpu))
cpu_relax(); /* spin until monarch wakes us */
-}
+}
+
+NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

/* Get the MCA error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
mca_insert_tr(0x2); /*Reload dynamic itrs*/
}

-if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
+NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

if (atomic_dec_return(&mca_count) > 0) {
int i;
@@ -1643,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
struct ia64_mca_notify_die nd =
{ .sos = sos, .monarch_cpu = &monarch_cpu };

-(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);

mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
while (monarch_cpu == -1)
cpu_relax(); /* spin until monarch enters */
-if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
-if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
+
+NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
-if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
+
+NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
mprintk("Slave on cpu %d returning to normal service.\n", cpu);
set_curr_task(cpu, previous_current);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
}

monarch_cpu = cpu;
-if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
+NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);

/*
* Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1716,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
* to default_monarch_init_process() above and just print all the
* tasks.
*/
-if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
-if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-== NOTIFY_STOP)
-ia64_mca_spin(__func__);
+NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
atomic_dec(&monarchs);
set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@ ia64_mca_init(void)
printk(KERN_INFO "Increasing MCA rendezvous timeout from "
"%ld to %ld milliseconds\n", timeout, isrv.v0);
timeout = isrv.v0;
-(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
continue;
}
printk(KERN_ERR "Failed to register rendezvous interrupt "

4 changes: 2 additions & 2 deletions arch/ia64/kernel/perfmon.c
@@ -5511,7 +5511,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
}

static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
struct task_struct *task;
pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@ pfm_interrupt_handler(int irq, void *arg)

start_cycles = ia64_get_itc();

-ret = pfm_do_interrupt_handler(irq, arg, regs);
+ret = pfm_do_interrupt_handler(arg, regs);

total_cycles = ia64_get_itc();


7 changes: 1 addition & 6 deletions arch/ia64/sn/kernel/Makefile
@@ -4,7 +4,7 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
#

EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_SGI_TIOCX) += tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
-xp-y := xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
-xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
obj-$(CONFIG_PCI_MSI) += msi_sn.o

4 changes: 2 additions & 2 deletions arch/ia64/sn/kernel/huberror.c
@@ -187,8 +187,8 @@ void hub_error_init(struct hubdev_info *hubdev_info)
{

if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
"SN_hub_error", (void *)hubdev_info)) {
printk("hub_error_init: Failed to request_irq for 0x%p\n",
"SN_hub_error", hubdev_info)) {
printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
return;
}

6 changes: 4 additions & 2 deletions arch/ia64/sn/pci/tioce_provider.c
@@ -655,7 +655,8 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
*
* Simply call tioce_do_dma_map() to create a map with the barrier bit set
* in the address.
-*/ static u64
+*/
+static u64
tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@ tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma
*
* Handle a CE error interrupt. Simply a wrapper around a SAL call which
* defers processing to the SGI prom.
-*/ static irqreturn_t
+*/
+static irqreturn_t
tioce_error_intr_handler(int irq, void *arg)
{
struct tioce_common *soft = arg;

12 changes: 12 additions & 0 deletions drivers/misc/Kconfig
@@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES
driver (SCSI/ATA) which supports enclosures
or a SCSI enclosure device (SES) to use these services.

+config SGI_XP
+tristate "Support communication between SGI SSIs"
+depends on IA64_GENERIC || IA64_SGI_SN2
+select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+---help---
+An SGI machine can be divided into multiple Single System
+Images which act independently of each other and have
+hardware based memory protection from the others. Enabling
+this feature will allow for direct communication between SSIs
+based on a network adapter and DMA messaging.
+
endif # MISC_DEVICES

1 change: 1 addition & 0 deletions drivers/misc/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+obj-$(CONFIG_SGI_XP) += sgi-xp/

11 changes: 11 additions & 0 deletions drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for SGI's XP devices.
+#
+
+obj-$(CONFIG_SGI_XP) += xp.o
+xp-y := xp_main.o xp_nofault.o
+
+obj-$(CONFIG_SGI_XP) += xpc.o
+xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
+
+obj-$(CONFIG_SGI_XP) += xpnet.o

(diffs for the remaining 9 changed files were not loaded)