From 7d9e4c7778c9ac8d159b256d32f4919adda89454 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Thu, 26 Jan 2012 15:47:37 +0000
Subject: [PATCH]

--- yaml ---
r: 287139
b: refs/heads/master
c: fc395b9291925b1880e0afc61274fe2f6ddc1269
h: refs/heads/master
i:
  287137: 254ebb9b7e5262e1498c070da7027d1bed59de8c
  287135: 8fb9c7698bc13bbe720908d60cfbe228cbcf8757
v: v3
---
 [refs]                                  |  2 +-
 trunk/arch/x86/Kconfig                  |  1 -
 trunk/arch/x86/boot/compressed/misc.c   |  2 ++
 trunk/arch/x86/include/asm/cmpxchg.h    |  6 ++---
 trunk/arch/x86/include/asm/cpufeature.h |  1 +
 trunk/arch/x86/include/asm/uv/uv_hub.h  |  4 +--
 trunk/arch/x86/kernel/microcode_amd.c   | 24 +++++++++++++++--
 trunk/arch/x86/platform/uv/tlb_uv.c     |  2 ++
 trunk/arch/x86/platform/uv/uv_irq.c     |  2 +-
 trunk/kernel/exit.c                     | 16 ------------
 trunk/kernel/sched/core.c               | 19 +++++++++-----
 trunk/kernel/sched/fair.c               | 34 ++++---------------------
 trunk/kernel/sched/rt.c                 |  5 ----
 13 files changed, 51 insertions(+), 67 deletions(-)

diff --git a/[refs] b/[refs]
index da445c25f842..d6953f7d4587 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cb297a3e433dbdcf7ad81e0564e7b804c941ff0d
+refs/heads/master: fc395b9291925b1880e0afc61274fe2f6ddc1269
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 864cc6e6ac8e..5bed94e189fa 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -360,7 +360,6 @@ config X86_NUMACHIP
 	depends on NUMA
 	depends on SMP
 	depends on X86_X2APIC
-	depends on !EDAC_AMD64
 	---help---
 	  Adds support for Numascale NumaChip large-SMP systems. Needed to
 	  enable more than ~168 cores.
diff --git a/trunk/arch/x86/boot/compressed/misc.c b/trunk/arch/x86/boot/compressed/misc.c
index 3a19d04cebeb..7116dcba0c9e 100644
--- a/trunk/arch/x86/boot/compressed/misc.c
+++ b/trunk/arch/x86/boot/compressed/misc.c
@@ -321,6 +321,8 @@ static void parse_elf(void *output)
 		default: /* Ignore other PT_* */ break;
 		}
 	}
+
+	free(phdrs);
 }
 
 asmlinkage void decompress_kernel(void *rmode, memptr heap,
diff --git a/trunk/arch/x86/include/asm/cmpxchg.h b/trunk/arch/x86/include/asm/cmpxchg.h
index 0c9fa2745f13..b3b733262909 100644
--- a/trunk/arch/x86/include/asm/cmpxchg.h
+++ b/trunk/arch/x86/include/asm/cmpxchg.h
@@ -145,13 +145,13 @@ extern void __add_wrong_size(void)
 
 #ifdef __HAVE_ARCH_CMPXCHG
 #define cmpxchg(ptr, old, new)						\
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+	__cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
 #define sync_cmpxchg(ptr, old, new)					\
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
 #define cmpxchg_local(ptr, old, new)					\
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
+	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
 #endif
 
 /*
diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h
index 17c5d4bdee5e..8d67d428b0f9 100644
--- a/trunk/arch/x86/include/asm/cpufeature.h
+++ b/trunk/arch/x86/include/asm/cpufeature.h
@@ -159,6 +159,7 @@
 #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
 #define X86_FEATURE_LWP		(6*32+15) /* Light Weight Profiling */
 #define X86_FEATURE_FMA4	(6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE		(6*32+17) /* translation cache extension */
 #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
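The cmpxchg.h hunk above changes two things at once: it drops the parentheses around ptr, old and new (safe, because the inner __cmpxchg*() macros parenthesize their arguments themselves) and it moves the parentheses inside the sizeof, which is the semantically important part, since sizeof(*ptr) mis-expands for pointer expressions. A minimal userspace sketch of that hazard, with hypothetical BAD_SIZE()/GOOD_SIZE() macros standing in for the real kernel ones:

    /*
     * Why sizeof(*(ptr)) rather than sizeof(*ptr): with an expression
     * argument such as "p + 1", the unparenthesized form dereferences p
     * first and then adds 1 to the result, so the operand of sizeof is
     * an int (after integer promotion), not the pointed-to object.
     */
    #include <stdio.h>

    #define BAD_SIZE(ptr)   sizeof(*ptr)    /* expands to sizeof(*p + 1)   */
    #define GOOD_SIZE(ptr)  sizeof(*(ptr))  /* expands to sizeof(*(p + 1)) */

    int main(void)
    {
            short buf[2] = { 0, 0 };
            short *p = buf;

            printf("bad:  %zu\n", BAD_SIZE(p + 1));  /* sizeof(int), typically 4 */
            printf("good: %zu\n", GOOD_SIZE(p + 1)); /* sizeof(short), 2 */
            return 0;
    }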
diff --git a/trunk/arch/x86/include/asm/uv/uv_hub.h b/trunk/arch/x86/include/asm/uv/uv_hub.h
index 54a13aaebc40..21f7385badb8 100644
--- a/trunk/arch/x86/include/asm/uv/uv_hub.h
+++ b/trunk/arch/x86/include/asm/uv/uv_hub.h
@@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa)
 /* UV global physical address --> socket phys RAM */
 static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
 {
-	unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+	unsigned long paddr;
 	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
 	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
 
 	gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
 		((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
-	gpa = gpa & uv_hub_info->gpa_mask;
+	paddr = gpa & uv_hub_info->gpa_mask;
 	if (paddr >= remap_base && paddr < remap_base + remap_top)
 		paddr -= remap_base;
 	return paddr;
diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c
index fe86493f3ed1..ac0417be9131 100644
--- a/trunk/arch/x86/kernel/microcode_amd.c
+++ b/trunk/arch/x86/kernel/microcode_amd.c
@@ -311,13 +311,33 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
 	return state;
 }
 
+/*
+ * AMD microcode firmware naming convention, up to family 15h they are in
+ * the legacy file:
+ *
+ *    amd-ucode/microcode_amd.bin
+ *
+ * This legacy file is always smaller than 2K in size.
+ *
+ * Starting at family 15h they are in family specific firmware files:
+ *
+ *    amd-ucode/microcode_amd_fam15h.bin
+ *    amd-ucode/microcode_amd_fam16h.bin
+ *    ...
+ *
+ * These might be larger than 2K.
+ */
 static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 {
-	const char *fw_name = "amd-ucode/microcode_amd.bin";
+	char fw_name[36] = "amd-ucode/microcode_amd.bin";
 	const struct firmware *fw;
 	enum ucode_state ret = UCODE_NFOUND;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86 >= 0x15)
+		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
 
-	if (request_firmware(&fw, fw_name, device)) {
+	if (request_firmware(&fw, (const char *)fw_name, device)) {
 		pr_err("failed to load file %s\n", fw_name);
 		goto out;
 	}
diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c
index 9be4cff00a2d..3ae0e61abd23 100644
--- a/trunk/arch/x86/platform/uv/tlb_uv.c
+++ b/trunk/arch/x86/platform/uv/tlb_uv.c
@@ -1851,6 +1851,8 @@ static void __init init_per_cpu_tunables(void)
 		bcp->cong_reps			= congested_reps;
 		bcp->cong_period		= congested_period;
 		bcp->clocks_per_100_usec	= usec_2_cycles(100);
+		spin_lock_init(&bcp->queue_lock);
+		spin_lock_init(&bcp->uvhub_lock);
 	}
 }
 
diff --git a/trunk/arch/x86/platform/uv/uv_irq.c b/trunk/arch/x86/platform/uv/uv_irq.c
index 374a05d8ad22..f25c2765a5c9 100644
--- a/trunk/arch/x86/platform/uv/uv_irq.c
+++ b/trunk/arch/x86/platform/uv/uv_irq.c
@@ -25,7 +25,7 @@ struct uv_irq_2_mmr_pnode{
 	int			irq;
 };
 
-static spinlock_t		uv_irq_lock;
+static DEFINE_SPINLOCK(uv_irq_lock);
 static struct rb_root		uv_irq_root;
 
 static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
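The microcode_amd.c hunk above selects a family-specific firmware file for family 0x15 and later, and falls back to the legacy name otherwise. A standalone sketch of just that name-selection rule; build_fw_name() is a hypothetical helper introduced for illustration (the kernel does the same snprintf() inline in request_microcode_amd()):

    #include <stdio.h>

    /* Mirror of the naming rule described in the comment block above. */
    static void build_fw_name(char *buf, size_t len, unsigned int family)
    {
            if (family >= 0x15)
                    snprintf(buf, len,
                             "amd-ucode/microcode_amd_fam%.2xh.bin", family);
            else
                    snprintf(buf, len, "amd-ucode/microcode_amd.bin");
    }

    int main(void)
    {
            static const unsigned int families[] = { 0x10, 0x14, 0x15, 0x16 };
            char name[36];  /* same size the patch uses for fw_name */
            size_t i;

            for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
                    build_fw_name(name, sizeof(name), families[i]);
                    printf("family 0x%02x -> %s\n", families[i], name);
            }
            return 0;
    }

Families below 0x15 print the legacy "amd-ucode/microcode_amd.bin"; 0x15 and 0x16 print the per-family names, matching why the patch widens fw_name to a 36-byte buffer.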
diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c
index 4b4042f9bc6a..294b1709170d 100644
--- a/trunk/kernel/exit.c
+++ b/trunk/kernel/exit.c
@@ -1038,22 +1038,6 @@ void do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
-
-	/*
-	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
-	 * when the following two conditions become true.
-	 *   - There is race condition of mmap_sem (It is acquired by
-	 *     exit_mm()), and
-	 *   - SMI occurs before setting TASK_RUNINNG.
-	 *     (or hypervisor of virtual machine switches to other guest)
-	 *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
-	 *
-	 * To avoid it, we have to wait for releasing tsk->pi_lock which
-	 * is held by try_to_wake_up()
-	 */
-	smp_mb();
-	raw_spin_unlock_wait(&tsk->pi_lock);
-
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 5255c9d2e053..df00cb09263e 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -74,7 +74,6 @@
 #include
 #include
-#include
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
 
@@ -724,6 +723,9 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
+/*
+ * activate_task - move a task to the runqueue.
+ */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -732,6 +734,9 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4129,7 +4134,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		dequeue_task(rq, p, 0);
+		deactivate_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4142,7 +4147,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, p, 0);
+		activate_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4993,9 +4998,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		dequeue_task(rq_src, p, 0);
+		deactivate_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		enqueue_task(rq_dest, p, 0);
+		activate_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7027,10 +7032,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		dequeue_task(rq, p, 0);
+		deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		enqueue_task(rq, p, 0);
+		activate_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
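The sched/core.c hunks above replace open-coded dequeue_task()/enqueue_task() pairs with deactivate_task()/activate_task(), and the newly added comments state the intent: the wrappers also keep rq->nr_uninterruptible in sync for tasks that contribute to load. A reduced model of that relationship; struct rq and struct task here are simplified stand-ins for illustration, not the real scheduler types:

    #include <stdbool.h>

    struct rq {
            long nr_uninterruptible;        /* feeds the load average */
    };

    struct task {
            bool contributes_to_load;       /* e.g. TASK_UNINTERRUPTIBLE */
    };

    /* Raw queue operations; the real ones call into the sched class. */
    static void enqueue_task(struct rq *rq, struct task *p) { (void)rq; (void)p; }
    static void dequeue_task(struct rq *rq, struct task *p) { (void)rq; (void)p; }

    /* activate_task - move a task to the runqueue */
    static void activate_task(struct rq *rq, struct task *p)
    {
            if (p->contributes_to_load)
                    rq->nr_uninterruptible--;  /* one fewer blocked task */
            enqueue_task(rq, p);
    }

    /* deactivate_task - remove a task from the runqueue */
    static void deactivate_task(struct rq *rq, struct task *p)
    {
            if (p->contributes_to_load)
                    rq->nr_uninterruptible++;  /* one more blocked task */
            dequeue_task(rq, p);
    }

    int main(void)
    {
            struct rq src = { 0 }, dst = { 0 };
            struct task t = { .contributes_to_load = false };

            /* The __migrate_task() pattern from the hunk above. */
            deactivate_task(&src, &t);
            activate_task(&dst, &t);
            return 0;
    }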
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index 7c6414fc669d..84adb2d66cbd 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -4866,15 +4866,6 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
-static inline void clear_nohz_tick_stopped(int cpu)
-{
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-		atomic_dec(&nohz.nr_cpus);
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-	}
-}
-
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
@@ -4913,12 +4904,6 @@ void select_nohz_load_balancer(int stop_tick)
 {
 	int cpu = smp_processor_id();
 
-	/*
-	 * If this cpu is going down, then nothing needs to be done.
-	 */
-	if (!cpu_active(cpu))
-		return;
-
 	if (stop_tick) {
 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 			return;
@@ -4929,18 +4914,6 @@ void select_nohz_load_balancer(int stop_tick)
 	}
 	return;
 }
-
-static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DYING:
-		clear_nohz_tick_stopped(smp_processor_id());
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5097,7 +5070,11 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
 	set_cpu_sd_state_busy();
-	clear_nohz_tick_stopped(cpu);
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+	}
 
 	/*
 	 * None are in tickless mode and hence no need for NOHZ idle load
@@ -5613,7 +5590,6 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
diff --git a/trunk/kernel/sched/rt.c b/trunk/kernel/sched/rt.c
index f42ae7fb5ec5..3640ebbb466b 100644
--- a/trunk/kernel/sched/rt.c
+++ b/trunk/kernel/sched/rt.c
@@ -1587,11 +1587,6 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	if (unlikely(task_running(rq, next_task)))
-		return 0;
-#endif
-
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
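The fair.c change folds clear_nohz_tick_stopped() into nohz_kick_needed() and, notably, clears NOHZ_TICK_STOPPED before touching the idle mask and counter, the reverse of the removed helper's order. A sketch of that bookkeeping using C11 atomics in place of the kernel's bitops and cpumask API; struct nohz_state and nohz_busy_tick() are hypothetical stand-ins for the real per-cpu flags and global nohz structure:

    #include <stdatomic.h>

    #define NOHZ_TICK_STOPPED       0UL     /* bit index in the flag word */

    struct nohz_state {
            atomic_ulong flags;             /* NOHZ flag word (per-cpu in the kernel) */
            atomic_ulong idle_cpus_mask;    /* stand-in for the real cpumask */
            atomic_int nr_cpus;             /* count of tickless idle CPUs */
    };

    /* Called when a CPU takes a busy tick again (cpu < 64 assumed). */
    static void nohz_busy_tick(struct nohz_state *ns, unsigned int cpu)
    {
            unsigned long stopped = 1UL << NOHZ_TICK_STOPPED;

            if (!(atomic_load(&ns->flags) & stopped))
                    return;         /* the tick was never stopped */

            /* Flag first, then mask and count, as in the patched code. */
            atomic_fetch_and(&ns->flags, ~stopped);
            atomic_fetch_and(&ns->idle_cpus_mask, ~(1UL << cpu));
            atomic_fetch_sub(&ns->nr_cpus, 1);
    }

    int main(void)
    {
            struct nohz_state ns = { 1UL << NOHZ_TICK_STOPPED, 1UL << 3, 1 };

            nohz_busy_tick(&ns, 3); /* CPU 3 leaves tickless idle */
            return 0;
    }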