From 10c26e44fc1d048e1e5d9fdda6aa2f559de2af6a Mon Sep 17 00:00:00 2001
From: Russ Anderson
Date: Fri, 18 May 2007 17:17:17 -0500
Subject: [PATCH]

--- yaml ---
r: 59193
b: refs/heads/master
c: 1612b18ccb2318563ba51268289dc3271a6052f7
h: refs/heads/master
i:
  59191: 87921d5d2e3bca2ee2ec1adae3b8dfbcc9862873
v: v3
---
 [refs]                           |  2 +-
 trunk/arch/ia64/kernel/mca.c     | 60 ++++++++++++++++++++++++++++----
 trunk/arch/ia64/kernel/mca_asm.S | 12 -------
 trunk/include/asm-ia64/mca.h     |  1 +
 4 files changed, 55 insertions(+), 20 deletions(-)

diff --git a/[refs] b/[refs]
index 42496a68cc4d..f764c834fc93 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 256a7e097ba3d1179867b4c9aba1b75fb32d44f2
+refs/heads/master: 1612b18ccb2318563ba51268289dc3271a6052f7
diff --git a/trunk/arch/ia64/kernel/mca.c b/trunk/arch/ia64/kernel/mca.c
index 1ead5ea6c5ce..4b5daa3cc0fe 100644
--- a/trunk/arch/ia64/kernel/mca.c
+++ b/trunk/arch/ia64/kernel/mca.c
@@ -57,6 +57,9 @@
  *
  * 2006-09-15 Hidetoshi Seto
  *            Add printing support for MCA/INIT.
+ *
+ * 2007-04-27 Russ Anderson
+ *            Support multiple cpus going through OS_MCA in the same event.
  */
 #include
 #include
@@ -96,7 +99,6 @@
 #endif
 
 /* Used by mca_asm.S */
-u32 ia64_mca_serialize;
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
@@ -963,11 +965,12 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 		goto no_mod;
 	}
 
+	if (r13 != sos->prev_IA64_KR_CURRENT) {
+		msg = "inconsistent previous current and r13";
+		goto no_mod;
+	}
+
 	if (!mca_recover_range(ms->pmsa_iip)) {
-		if (r13 != sos->prev_IA64_KR_CURRENT) {
-			msg = "inconsistent previous current and r13";
-			goto no_mod;
-		}
 		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
 			msg = "inconsistent r12 and r13";
 			goto no_mod;
@@ -1187,6 +1190,13 @@ ia64_wait_for_slaves(int monarch, const char *type)
  * further MCA logging is enabled by clearing logs.
  * Monarch also has the duty of sending wakeup-IPIs to pull the
  * slave processors out of rendezvous spinloop.
+ *
+ * If multiple processors call into OS_MCA, the first will become
+ * the monarch.  Subsequent cpus will be recorded in the mca_cpu
+ * bitmask.  After the first monarch has processed its MCA, it
+ * will wake up the next cpu in the mca_cpu bitmask and then go
+ * into the rendezvous loop.  When all processors have serviced
+ * their MCA, the last monarch frees up the rest of the processors.
  */
 void
 ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
@@ -1196,16 +1206,32 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+	static atomic_t mca_count;
+	static cpumask_t mca_cpu;
 
+	if (atomic_add_return(1, &mca_count) == 1) {
+		monarch_cpu = cpu;
+		sos->monarch = 1;
+	} else {
+		cpu_set(cpu, mca_cpu);
+		sos->monarch = 0;
+	}
 	mprintk(KERN_INFO "Entered OS MCA handler.  PSP=%lx cpu=%d "
 		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
-	monarch_cpu = cpu;
+
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	ia64_wait_for_slaves(cpu, "MCA");
+
+	if (sos->monarch) {
+		ia64_wait_for_slaves(cpu, "MCA");
+	} else {
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+		while (cpu_isset(cpu, mca_cpu))
+			cpu_relax();	/* spin until monarch wakes us */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+	}
 
 	/* Wakeup all the processors which are spinning in the rendezvous loop.
 	 * They will leave SAL, then spin in the OS with interrupts disabled
@@ -1244,6 +1270,26 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
+
+	if (atomic_dec_return(&mca_count) > 0) {
+		int i;
+
+		/* wake up the next monarch cpu,
+		 * and put this cpu in the rendez loop.
+		 */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+		for_each_online_cpu(i) {
+			if (cpu_isset(i, mca_cpu)) {
+				monarch_cpu = i;
+				cpu_clear(i, mca_cpu);	/* wake next cpu */
+				while (monarch_cpu != -1)
+					cpu_relax();	/* spin until last cpu leaves */
+				ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+				set_curr_task(cpu, previous_current);
+				return;
+			}
+		}
+	}
 	set_curr_task(cpu, previous_current);
 	monarch_cpu = -1;
 }
diff --git a/trunk/arch/ia64/kernel/mca_asm.S b/trunk/arch/ia64/kernel/mca_asm.S
index 8c9c26aa6ae0..0f5965fcdf85 100644
--- a/trunk/arch/ia64/kernel/mca_asm.S
+++ b/trunk/arch/ia64/kernel/mca_asm.S
@@ -133,14 +133,6 @@ ia64_do_tlb_purge:
 //StartMain////////////////////////////////////////////////////////////////////
 
 ia64_os_mca_dispatch:
-	// Serialize all MCA processing
-	mov	r3=1;;
-	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
-	xchg4	r4=[r2],r3;;
-	cmp.ne	p6,p0=r4,r0
-(p6)	br	ia64_os_mca_spin
-
 	mov	r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
 	LOAD_PHYSICAL(p0,r2,1f)			// return address
 	mov	r19=1				// All MCA events are treated as monarch (for now)
@@ -291,10 +283,6 @@ END(ia64_os_mca_virtual_begin)
 
 	mov		b0=r12			// SAL_CHECK return address
 
-	// release lock
-	LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
-	st4.rel		[r3]=r0
-
 	br		b0
 
 //EndMain//////////////////////////////////////////////////////////////////////
diff --git a/trunk/include/asm-ia64/mca.h b/trunk/include/asm-ia64/mca.h
index 41098f459684..edd5d01028df 100644
--- a/trunk/include/asm-ia64/mca.h
+++ b/trunk/include/asm-ia64/mca.h
@@ -48,6 +48,7 @@ enum {
 	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
 	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1,
 	IA64_MCA_RENDEZ_CHECKIN_INIT	=	0x2,
+	IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA	=	0x3,
 };
 
 /* Information maintained by the MC infrastructure */
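
For illustration only (not part of the patch): the monarch hand-off described in the comment block added above can be sketched with ordinary C11 threads and atomics. The names mca_count, mca_cpu and monarch_cpu mirror the kernel code; everything else (pthreads, NCPUS, the bit scan) is hypothetical scaffolding for demonstration and makes no claim about SAL or kernel internals. Unlike the kernel, this sketch has the outgoing monarch wait until the next cpu has actually registered itself in mca_cpu, since a user-space demo has no rendezvous to guarantee that ordering.

/* Minimal user-space sketch of the monarch hand-off (assumed names/setup). */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4					/* simulated cpus */

static atomic_int   mca_count;			/* cpus currently in "OS_MCA" */
static atomic_ulong mca_cpu;			/* bitmask of cpus waiting their turn */
static atomic_int   monarch_cpu = -1;		/* cpu currently acting as monarch */

static void *os_mca(void *arg)
{
	int cpu = (int)(long)arg;

	if (atomic_fetch_add(&mca_count, 1) == 0) {
		atomic_store(&monarch_cpu, cpu);	/* first cpu in: monarch */
	} else {
		atomic_fetch_or(&mca_cpu, 1UL << cpu);	/* record ourselves */
		while (atomic_load(&mca_cpu) & (1UL << cpu))
			sched_yield();			/* spin until promoted */
	}

	printf("cpu %d servicing its MCA\n", cpu);	/* stands in for the real MCA work */

	if (atomic_fetch_sub(&mca_count, 1) > 1) {
		int next = -1;

		/* Another cpu is in OS_MCA: wait for it to show up in mca_cpu,
		 * promote it, then spin until the last monarch releases everyone.
		 */
		while (next < 0) {
			unsigned long mask = atomic_load(&mca_cpu);
			for (int i = 0; i < NCPUS; i++) {
				if (mask & (1UL << i)) {
					next = i;
					break;
				}
			}
			sched_yield();
		}
		atomic_store(&monarch_cpu, next);
		atomic_fetch_and(&mca_cpu, ~(1UL << next));	/* wake next cpu */
		while (atomic_load(&monarch_cpu) != -1)
			sched_yield();				/* until last cpu leaves */
		return NULL;
	}
	atomic_store(&monarch_cpu, -1);		/* last one out frees the rest */
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, os_mca, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Built with "cc -std=c11 -pthread", each simulated cpu prints once, one at a time, in hand-off order: the first thread to arrive acts as monarch, later arrivals park in the mca_cpu bitmask, and the last one to finish clears monarch_cpu so the earlier monarchs can leave, matching the flow the patch adds to ia64_mca_handler().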