diff --git a/[refs] b/[refs]
index 1e4cec6ce02d..441130c7e7c4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 09d314425f5bc69fcf793c7890d9e6a3cdcb44be
+refs/heads/master: 46befd6b38d802dfc5998e7d7938854578b45d9d
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 1b71f6ceae0a..d1d9ae6173b8 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -5910,7 +5910,7 @@ M: Ingo Molnar
 M: Peter Zijlstra
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S: Maintained
-F: kernel/sched/
+F: kernel/sched*
 F: include/linux/sched.h
 
 SCORE ARCHITECTURE
diff --git a/trunk/arch/arm/mach-exynos/pm_domains.c b/trunk/arch/arm/mach-exynos/pm_domains.c
index 373c3c00d24c..e9fafcf163de 100644
--- a/trunk/arch/arm/mach-exynos/pm_domains.c
+++ b/trunk/arch/arm/mach-exynos/pm_domains.c
@@ -119,9 +119,7 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
                                                 struct exynos_pm_domain *pd)
 {
         if (pdev->dev.bus) {
-                if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
-                        pm_genpd_dev_need_restore(&pdev->dev, true);
-                else
+                if (pm_genpd_add_device(&pd->pd, &pdev->dev))
                         pr_info("%s: error in adding %s device to %s power"
                                 "domain\n", __func__, dev_name(&pdev->dev),
                                 pd->name);
@@ -153,12 +151,9 @@ static __init int exynos4_pm_init_power_domain(void)
         if (of_have_populated_dt())
                 return exynos_pm_dt_parse_domains();
 
-        for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
-                struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
-                int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
-
-                pm_genpd_init(&pd->pd, NULL, !on);
-        }
+        for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
+                pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
+                                exynos4_pm_domains[idx]->is_off);
 
 #ifdef CONFIG_S5P_DEV_FIMD0
         exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
diff --git a/trunk/arch/arm/mach-s3c24xx/clock-s3c2440.c b/trunk/arch/arm/mach-s3c24xx/clock-s3c2440.c
index cb2883d553b5..414364eb426c 100644
--- a/trunk/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/trunk/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
 static struct clk s3c2440_clk_ac97 = {
         .name           = "ac97",
         .enable         = s3c2410_clkcon_enable,
-        .ctrlbit        = S3C2440_CLKCON_AC97,
+        .ctrlbit        = S3C2440_CLKCON_CAMERA,
 };
 
 static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
diff --git a/trunk/arch/arm/plat-samsung/adc.c b/trunk/arch/arm/plat-samsung/adc.c
index b1e05ccff3ac..33ecd0c9f0c3 100644
--- a/trunk/arch/arm/plat-samsung/adc.c
+++ b/trunk/arch/arm/plat-samsung/adc.c
@@ -157,12 +157,10 @@ int s3c_adc_start(struct s3c_adc_client *client,
                 return -EINVAL;
         }
 
-        spin_lock_irqsave(&adc->lock, flags);
-
-        if (client->is_ts && adc->ts_pend) {
-                spin_unlock_irqrestore(&adc->lock, flags);
+        if (client->is_ts && adc->ts_pend)
                 return -EAGAIN;
-        }
+
+        spin_lock_irqsave(&adc->lock, flags);
 
         client->channel = channel;
         client->nr_samples = nr_samples;
diff --git a/trunk/arch/arm/plat-samsung/devs.c b/trunk/arch/arm/plat-samsung/devs.c
index 6303974c2ee0..1d214cb9d770 100644
--- a/trunk/arch/arm/plat-samsung/devs.c
+++ b/trunk/arch/arm/plat-samsung/devs.c
@@ -126,8 +126,7 @@ struct platform_device s3c_device_adc = {
 #ifdef CONFIG_CPU_S3C2440
 static struct resource s3c_camif_resource[] = {
         [0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
-        [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
-        [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
+        [1] = DEFINE_RES_IRQ(IRQ_CAM),
 };
 
 struct platform_device s3c_device_camif = {
diff --git a/trunk/arch/arm/plat-samsung/s5p-clock.c b/trunk/arch/arm/plat-samsung/s5p-clock.c
index 48a159911037..031a61899bef 100644
--- a/trunk/arch/arm/plat-samsung/s5p-clock.c
+++ b/trunk/arch/arm/plat-samsung/s5p-clock.c
@@ -37,7 +37,6 @@ struct clk clk_ext_xtal_mux = {
 struct clk clk_xusbxti = {
         .name           = "xusbxti",
         .id             = -1,
-        .rate           = 24000000,
 };
 
 struct clk s5p_clk_27m = {
diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c
index 43b39d61b538..88e466b159dc 100644
--- a/trunk/arch/um/drivers/mconsole_kern.c
+++ b/trunk/arch/um/drivers/mconsole_kern.c
@@ -705,6 +705,7 @@ static void stack_proc(void *arg)
         struct task_struct *from = current, *to = arg;
 
         to->thread.saved_task = from;
+        rcu_switch_from(from);
         switch_to(from, to, from);
 }
 
diff --git a/trunk/arch/x86/kernel/vsyscall_64.c b/trunk/arch/x86/kernel/vsyscall_64.c
index 5db36caf4289..08a18d0dcc5a 100644
--- a/trunk/arch/x86/kernel/vsyscall_64.c
+++ b/trunk/arch/x86/kernel/vsyscall_64.c
@@ -139,7 +139,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
         return nr;
 }
 
-#ifdef CONFIG_SECCOMP
 static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
 {
         if (!seccomp_mode(&tsk->seccomp))
@@ -148,9 +147,6 @@ static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
         task_pt_regs(tsk)->ax = syscall_nr;
         return __secure_computing(syscall_nr);
 }
-#else
-#define vsyscall_seccomp(_tsk, _nr) 0
-#endif
 
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
diff --git a/trunk/drivers/acpi/acpica/nspredef.c b/trunk/drivers/acpi/acpica/nspredef.c
index 23ce09686418..fe6626035495 100644
--- a/trunk/drivers/acpi/acpica/nspredef.c
+++ b/trunk/drivers/acpi/acpica/nspredef.c
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
                 /* Create the new outer package and populate it */
 
                 status =
-                    acpi_ns_wrap_with_package(data, *elements,
+                    acpi_ns_wrap_with_package(data, return_object,
                                               return_object_ptr);
                 if (ACPI_FAILURE(status)) {
                         return (status);
diff --git a/trunk/drivers/acpi/processor_core.c b/trunk/drivers/acpi/processor_core.c
index eff722278ff5..c850de4c9a14 100644
--- a/trunk/drivers/acpi/processor_core.c
+++ b/trunk/drivers/acpi/processor_core.c
@@ -189,12 +189,10 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
          *      Processor (CPU3, 0x03, 0x00000410, 0x06) {}
          * }
          *
-         * Ignores apic_id and always returns 0 for the processor
-         * handle with acpi id 0 if nr_cpu_ids is 1.
-         * This should be the case if SMP tables are not found.
+         * Ignores apic_id and always return 0 for CPU0's handle.
          * Return -1 for other CPU's handle.
          */
-        if (nr_cpu_ids <= 1 && acpi_id == 0)
+        if (acpi_id == 0)
                 return acpi_id;
         else
                 return apic_id;
diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h
index 9cac722b169c..26d1a47591f1 100644
--- a/trunk/include/linux/rcupdate.h
+++ b/trunk/include/linux/rcupdate.h
@@ -184,6 +184,7 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
diff --git a/trunk/include/linux/rcutiny.h b/trunk/include/linux/rcutiny.h
index 4e56a9c69a35..854dc4c5c271 100644
--- a/trunk/include/linux/rcutiny.h
+++ b/trunk/include/linux/rcutiny.h
@@ -87,10 +87,6 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
-static inline void rcu_preempt_note_context_switch(void)
-{
-}
-
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
         *delta_jiffies = ULONG_MAX;
@@ -99,7 +95,6 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
-void rcu_preempt_note_context_switch(void);
 int rcu_preempt_needs_cpu(void);
 
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
@@ -113,7 +108,6 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 static inline void rcu_note_context_switch(int cpu)
 {
         rcu_sched_qs(cpu);
-        rcu_preempt_note_context_switch();
 }
 
 /*
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 4a1f493e0fef..4059c0f33f07 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -1871,12 +1871,22 @@ static inline void rcu_copy_process(struct task_struct *p)
         INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+        if (prev->rcu_read_lock_nesting != 0)
+                rcu_preempt_note_context_switch();
+}
+
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_SMP
@@ -1899,14 +1909,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
-#ifdef CONFIG_NO_HZ
-void calc_load_enter_idle(void);
-void calc_load_exit_idle(void);
-#else
-static inline void calc_load_enter_idle(void) { }
-static inline void calc_load_exit_idle(void) { }
-#endif /* CONFIG_NO_HZ */
-
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c
index f00e319d8376..ab5211b9e622 100644
--- a/trunk/kernel/fork.c
+++ b/trunk/kernel/fork.c
@@ -304,17 +304,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
         }
 
         err = arch_dup_task_struct(tsk, orig);
-
-        /*
-         * We defer looking at err, because we will need this setup
-         * for the clean up path to work correctly.
-         */
-        tsk->stack = ti;
-        setup_thread_stack(tsk, orig);
-
         if (err)
                 goto out;
 
+        tsk->stack = ti;
+
+        setup_thread_stack(tsk, orig);
         clear_user_return_notifier(tsk);
         clear_tsk_need_resched(tsk);
         stackend = end_of_stack(tsk);
diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c
index 4b97bba7396e..38ecdda3f55f 100644
--- a/trunk/kernel/rcutree.c
+++ b/trunk/kernel/rcutree.c
@@ -201,7 +201,6 @@ void rcu_note_context_switch(int cpu)
 {
         trace_rcu_utilization("Start context switch");
         rcu_sched_qs(cpu);
-        rcu_preempt_note_context_switch(cpu);
         trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h
index 19b61ac1079f..ea056495783e 100644
--- a/trunk/kernel/rcutree.h
+++ b/trunk/kernel/rcutree.h
@@ -444,7 +444,6 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
-static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h
index 3e4899459f3d..5271a020887e 100644
--- a/trunk/kernel/rcutree_plugin.h
+++ b/trunk/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+void rcu_preempt_note_context_switch(void)
 {
         struct task_struct *t = current;
         unsigned long flags;
@@ -164,7 +164,7 @@ static void rcu_preempt_note_context_switch(int cpu)
             (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                 /* Possibly blocking in an RCU read-side critical section. */
-                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+                rdp = __this_cpu_ptr(rcu_preempt_state.rda);
                 rnp = rdp->mynode;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ static void rcu_preempt_note_context_switch(int cpu)
          * means that we continue to block the current grace period.
          */
         local_irq_save(flags);
-        rcu_preempt_qs(cpu);
+        rcu_preempt_qs(smp_processor_id());
         local_irq_restore(flags);
 }
 
@@ -1001,14 +1001,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
- */
-static void rcu_preempt_note_context_switch(int cpu)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 468bdd44c1ba..d5594a4268d4 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -2081,6 +2081,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
         /* Here we just switch the register state and the stack. */
+        rcu_switch_from(prev);
         switch_to(prev, next, prev);
 
         barrier();
@@ -2160,73 +2161,11 @@ unsigned long this_cpu_load(void)
 }
 
 
-/*
- * Global load-average calculations
- *
- * We take a distributed and async approach to calculating the global load-avg
- * in order to minimize overhead.
- *
- * The global load average is an exponentially decaying average of nr_running +
- * nr_uninterruptible.
- *
- * Once every LOAD_FREQ:
- *
- *   nr_active = 0;
- *   for_each_possible_cpu(cpu)
- *      nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
- *
- *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
- *
- * Due to a number of reasons the above turns in the mess below:
- *
- *  - for_each_possible_cpu() is prohibitively expensive on machines with
- *    serious number of cpus, therefore we need to take a distributed approach
- *    to calculating nr_active.
- *
- *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
- *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
- *
- *    So assuming nr_active := 0 when we start out -- true per definition, we
- *    can simply take per-cpu deltas and fold those into a global accumulate
- *    to obtain the same result. See calc_load_fold_active().
- *
- *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
- *    across the machine, we assume 10 ticks is sufficient time for every
- *    cpu to have completed this task.
- *
- *    This places an upper-bound on the IRQ-off latency of the machine. Then
- *    again, being late doesn't loose the delta, just wrecks the sample.
- *
- *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
- *    this would add another cross-cpu cacheline miss and atomic operation
- *    to the wakeup path. Instead we increment on whatever cpu the task ran
- *    when it went into uninterruptible state and decrement on whatever cpu
- *    did the wakeup. This means that only the sum of nr_uninterruptible over
- *    all cpus yields the correct result.
- *
- *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
- */
-
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun); /* should be removed */
-
-/**
- * get_avenrun - get the load average array
- * @loads: pointer to dest load array
- * @offset: offset to add
- * @shift: shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-        loads[0] = (avenrun[0] + offset) << shift;
-        loads[1] = (avenrun[1] + offset) << shift;
-        loads[2] = (avenrun[2] + offset) << shift;
-}
+EXPORT_SYMBOL(avenrun);
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -2243,9 +2182,6 @@ static long calc_load_fold_active(struct rq *this_rq)
         return delta;
 }
 
-/*
- * a1 = a0 * e + a * (1 - e)
- */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
@@ -2257,118 +2193,30 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * Handle NO_HZ for the global load-average.
- *
- * Since the above described distributed algorithm to compute the global
- * load-average relies on per-cpu sampling from the tick, it is affected by
- * NO_HZ.
- *
- * The basic idea is to fold the nr_active delta into a global idle-delta upon
- * entering NO_HZ state such that we can include this as an 'extra' cpu delta
- * when we read the global state.
- *
- * Obviously reality has to ruin such a delightfully simple scheme:
- *
- *  - When we go NO_HZ idle during the window, we can negate our sample
- *    contribution, causing under-accounting.
- *
- *    We avoid this by keeping two idle-delta counters and flipping them
- *    when the window starts, thus separating old and new NO_HZ load.
- *
- *    The only trick is the slight shift in index flip for read vs write.
- *
- *        0s            5s            10s           15s
- *          +10           +10           +10           +10
- *        |-|-----------|-|-----------|-|-----------|-|
- *    r:0 0 1           1 0           0 1           1 0
- *    w:0 1 1           0 0           1 1           0 0
- *
- *    This ensures we'll fold the old idle contribution in this window while
- *    accumlating the new one.
- *
- *  - When we wake up from NO_HZ idle during the window, we push up our
- *    contribution, since we effectively move our sample point to a known
- *    busy state.
- *
- *    This is solved by pushing the window forward, and thus skipping the
- *    sample, for this cpu (effectively using the idle-delta for this cpu which
- *    was in effect at the time the window opened). This also solves the issue
- *    of having to deal with a cpu having been in NOHZ idle for multiple
- *    LOAD_FREQ intervals.
+ * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_idle[2];
-static int calc_load_idx;
+static atomic_long_t calc_load_tasks_idle;
 
-static inline int calc_load_write_idx(void)
+void calc_load_account_idle(struct rq *this_rq)
 {
-        int idx = calc_load_idx;
-
-        /*
-         * See calc_global_nohz(), if we observe the new index, we also
-         * need to observe the new update time.
-         */
-        smp_rmb();
-
-        /*
-         * If the folding window started, make sure we start writing in the
-         * next idle-delta.
-         */
-        if (!time_before(jiffies, calc_load_update))
-                idx++;
-
-        return idx & 1;
-}
-
-static inline int calc_load_read_idx(void)
-{
-        return calc_load_idx & 1;
-}
-
-void calc_load_enter_idle(void)
-{
-        struct rq *this_rq = this_rq();
         long delta;
 
-        /*
-         * We're going into NOHZ mode, if there's any pending delta, fold it
-         * into the pending idle delta.
-         */
         delta = calc_load_fold_active(this_rq);
-        if (delta) {
-                int idx = calc_load_write_idx();
-                atomic_long_add(delta, &calc_load_idle[idx]);
-        }
-}
-
-void calc_load_exit_idle(void)
-{
-        struct rq *this_rq = this_rq();
-
-        /*
-         * If we're still before the sample window, we're done.
-         */
-        if (time_before(jiffies, this_rq->calc_load_update))
-                return;
-
-        /*
-         * We woke inside or after the sample window, this means we're already
-         * accounted through the nohz accounting, so skip the entire deal and
-         * sync up for the next window.
-         */
-        this_rq->calc_load_update = calc_load_update;
-        if (time_before(jiffies, this_rq->calc_load_update + 10))
-                this_rq->calc_load_update += LOAD_FREQ;
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks_idle);
 }
 
 static long calc_load_fold_idle(void)
 {
-        int idx = calc_load_read_idx();
         long delta = 0;
 
-        if (atomic_long_read(&calc_load_idle[idx]))
-                delta = atomic_long_xchg(&calc_load_idle[idx], 0);
+        /*
+         * Its got a race, we don't care...
+         */
+        if (atomic_long_read(&calc_load_tasks_idle))
+                delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
 
         return delta;
 }
@@ -2454,39 +2302,66 @@ static void calc_global_nohz(void)
 {
         long delta, active, n;
 
-        if (!time_before(jiffies, calc_load_update + 10)) {
-                /*
-                 * Catch-up, fold however many we are behind still
-                 */
-                delta = jiffies - calc_load_update - 10;
-                n = 1 + (delta / LOAD_FREQ);
-
-                active = atomic_long_read(&calc_load_tasks);
-                active = active > 0 ? active * FIXED_1 : 0;
-
-                avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+        /*
+         * If we crossed a calc_load_update boundary, make sure to fold
+         * any pending idle changes, the respective CPUs might have
+         * missed the tick driven calc_load_account_active() update
+         * due to NO_HZ.
+         */
+        delta = calc_load_fold_idle();
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks);
 
-                calc_load_update += n * LOAD_FREQ;
-        }
+        /*
+         * It could be the one fold was all it took, we done!
+         */
+        if (time_before(jiffies, calc_load_update + 10))
+                return;
 
         /*
-         * Flip the idle index...
-         *
-         * Make sure we first write the new time then flip the index, so that
-         * calc_load_write_idx() will see the new time when it reads the new
-         * index, this avoids a double flip messing things up.
+         * Catch-up, fold however many we are behind still
          */
-        smp_wmb();
-        calc_load_idx++;
+        delta = jiffies - calc_load_update - 10;
+        n = 1 + (delta / LOAD_FREQ);
+
+        active = atomic_long_read(&calc_load_tasks);
+        active = active > 0 ? active * FIXED_1 : 0;
+
+        avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+        avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+        avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+        calc_load_update += n * LOAD_FREQ;
+}
+#else
+void calc_load_account_idle(struct rq *this_rq)
+{
 }
 
-#else /* !CONFIG_NO_HZ */
-static inline long calc_load_fold_idle(void) { return 0; }
-static inline void calc_global_nohz(void) { }
+static inline long calc_load_fold_idle(void)
+{
+        return 0;
+}
 
-#endif /* CONFIG_NO_HZ */
+static void calc_global_nohz(void)
+{
+}
+#endif
+
+/**
+ * get_avenrun - get the load average array
+ * @loads: pointer to dest load array
+ * @offset: offset to add
+ * @shift: shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+        loads[0] = (avenrun[0] + offset) << shift;
+        loads[1] = (avenrun[1] + offset) << shift;
+        loads[2] = (avenrun[2] + offset) << shift;
+}
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2494,18 +2369,11 @@ static inline void calc_global_nohz(void) { }
  */
 void calc_global_load(unsigned long ticks)
 {
-        long active, delta;
+        long active;
 
         if (time_before(jiffies, calc_load_update + 10))
                 return;
 
-        /*
-         * Fold the 'old' idle-delta to include all NO_HZ cpus.
-         */
-        delta = calc_load_fold_idle();
-        if (delta)
-                atomic_long_add(delta, &calc_load_tasks);
-
         active = atomic_long_read(&calc_load_tasks);
         active = active > 0 ? active * FIXED_1 : 0;
 
@@ -2516,7 +2384,12 @@ void calc_global_load(unsigned long ticks)
         calc_load_update += LOAD_FREQ;
 
         /*
-         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+         * Account one period with whatever state we found before
+         * folding in the nohz state and ageing the entire idle period.
+         *
+         * This avoids loosing a sample when we go idle between
+         * calc_load_account_active() (10 ticks ago) and now and thus
+         * under-accounting.
          */
         calc_global_nohz();
 }
@@ -2533,16 +2406,13 @@ static void calc_load_account_active(struct rq *this_rq)
                 return;
 
         delta = calc_load_fold_active(this_rq);
+        delta += calc_load_fold_idle();
         if (delta)
                 atomic_long_add(delta, &calc_load_tasks);
 
         this_rq->calc_load_update += LOAD_FREQ;
 }
 
-/*
- * End of global load-average stuff
- */
-
 /*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
diff --git a/trunk/kernel/sched/idle_task.c b/trunk/kernel/sched/idle_task.c
index b6baf370cae9..b44d604b35d1 100644
--- a/trunk/kernel/sched/idle_task.c
+++ b/trunk/kernel/sched/idle_task.c
@@ -25,6 +25,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
         schedstat_inc(rq, sched_goidle);
+        calc_load_account_idle(rq);
         return rq->idle;
 }
 
diff --git a/trunk/kernel/sched/sched.h b/trunk/kernel/sched/sched.h
index 55844f24435a..6d52cea7f33d 100644
--- a/trunk/kernel/sched/sched.h
+++ b/trunk/kernel/sched/sched.h
@@ -942,6 +942,8 @@ static inline u64 sched_avg_period(void)
         return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 }
 
+void calc_load_account_idle(struct rq *this_rq);
+
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c
index 4a08472c3ca7..869997833928 100644
--- a/trunk/kernel/time/tick-sched.c
+++ b/trunk/kernel/time/tick-sched.c
@@ -406,7 +406,6 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                  */
                 if (!ts->tick_stopped) {
                         select_nohz_load_balancer(1);
-                        calc_load_enter_idle();
 
                         ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                         ts->tick_stopped = 1;
@@ -598,7 +597,6 @@ void tick_nohz_idle_exit(void)
         account_idle_ticks(ticks);
 #endif
 
-        calc_load_exit_idle();
         touch_softlockup_watchdog();
         /*
          * Cancel the scheduled timer and restore the tick
diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c
index f765465bffe4..1d0f6a8a0e5e 100644
--- a/trunk/kernel/trace/ring_buffer.c
+++ b/trunk/kernel/trace/ring_buffer.c
@@ -1075,7 +1075,6 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
         rb_init_page(bpage->page);
 
         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
-        INIT_LIST_HEAD(&cpu_buffer->new_pages);
 
         ret = rb_allocate_pages(cpu_buffer, nr_pages);
         if (ret < 0)
@@ -1347,9 +1346,10 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
                          * If something was added to this page, it was full
                          * since it is not the tail page. So we deduct the
                          * bytes consumed in ring buffer from here.
-                         * Increment overrun to account for the lost events.
+                         * No need to update overruns, since this page is
+                         * deleted from ring buffer and its entries are
+                         * already accounted for.
                          */
-                        local_add(page_entries, &cpu_buffer->overrun);
                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
                 }
 
diff --git a/trunk/tools/perf/util/map.c b/trunk/tools/perf/util/map.c
index a1f4e3669142..35ae56864e4f 100644
--- a/trunk/tools/perf/util/map.c
+++ b/trunk/tools/perf/util/map.c
@@ -669,26 +669,25 @@ struct machine *machines__find(struct rb_root *self, pid_t pid)
 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
 {
         char path[PATH_MAX];
-        const char *root_dir = "";
+        const char *root_dir;
         struct machine *machine = machines__find(self, pid);
 
-        if (machine && (machine->pid == pid))
-                goto out;
-
-        if ((pid != HOST_KERNEL_ID) &&
-            (pid != DEFAULT_GUEST_KERNEL_ID) &&
-            (symbol_conf.guestmount)) {
-                sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
-                if (access(path, R_OK)) {
-                        pr_err("Can't access file %s\n", path);
-                        machine = NULL;
-                        goto out;
+        if (!machine || machine->pid != pid) {
+                if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
+                        root_dir = "";
+                else {
+                        if (!symbol_conf.guestmount)
+                                goto out;
+                        sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+                        if (access(path, R_OK)) {
+                                pr_err("Can't access file %s\n", path);
+                                goto out;
+                        }
+                        root_dir = path;
                 }
-                root_dir = path;
+                machine = machines__add(self, pid, root_dir);
         }
 
-        machine = machines__add(self, pid, root_dir);
-
 out:
         return machine;
 }
diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c
index 56142d0fb8d7..c3e399bcf18d 100644
--- a/trunk/tools/perf/util/session.c
+++ b/trunk/tools/perf/util/session.c
@@ -926,7 +926,7 @@ static struct machine *
                 else
                         pid = event->ip.pid;
 
-                return perf_session__findnew_machine(session, pid);
+                return perf_session__find_machine(session, pid);
         }
 
         return perf_session__find_host_machine(session);
diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c
index 5dd3b5ec8411..df2fddbf0cd2 100644
--- a/trunk/tools/perf/util/trace-event-parse.c
+++ b/trunk/tools/perf/util/trace-event-parse.c
@@ -198,8 +198,9 @@ void print_trace_event(int cpu, void *data, int size)
         record.data = data;
 
         trace_seq_init(&s);
-        pevent_event_info(&s, event, &record);
+        pevent_print_event(pevent, &s, &record);
         trace_seq_do_printf(&s);
+        printf("\n");
 }
 
 void print_event(int cpu, void *data, int size, unsigned long long nsecs,