From 7c198edc8aac36eed6207e97d5b0d2f03ad1b6e6 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 20 Sep 2012 16:59:47 -0700
Subject: [PATCH] --- yaml ---

r: 338814
b: refs/heads/master
c: abfd6e58aed4f89fd69b9b17bc4b4527efe3a645
h: refs/heads/master
v: v3
---
 [refs]                      |  2 +-
 trunk/include/linux/sched.h |  2 --
 trunk/kernel/rcutree.c      | 44 +++++--------------------------------
 trunk/kernel/sched/core.c   |  6 -----
 trunk/lib/Kconfig.debug     |  2 +-
 5 files changed, 8 insertions(+), 48 deletions(-)

diff --git a/[refs] b/[refs]
index 0f575e43a6e4..89e72b1de00f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c896054f75f9a720ecf2ab3e688f4da79a55fe05
+refs/heads/master: abfd6e58aed4f89fd69b9b17bc4b4527efe3a645
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index ba69b5adea30..0dd42a02df2e 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -109,8 +109,6 @@ extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
-extern void dump_cpu_task(int cpu);
-
 struct seq_file;
 struct cfs_rq;
 struct task_group;
diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c
index 24b21cba2cc8..ac8aed8ee417 100644
--- a/trunk/kernel/rcutree.c
+++ b/trunk/kernel/rcutree.c
@@ -873,29 +873,6 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
 }
 
-/*
- * Dump stacks of all tasks running on stalled CPUs. This is a fallback
- * for architectures that do not implement trigger_all_cpu_backtrace().
- * The NMI-triggered stack traces are more accurate because they are
- * printed by the target CPU.
- */
-static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
-{
-	int cpu;
-	unsigned long flags;
-	struct rcu_node *rnp;
-
-	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		if (rnp->qsmask != 0) {
-			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-				if (rnp->qsmask & (1UL << cpu))
-					dump_cpu_task(rnp->grplo + cpu);
-		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	}
-}
-
 static void print_other_cpu_stall(struct rcu_state *rsp)
 {
 	int cpu;
@@ -903,7 +880,6 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	unsigned long flags;
 	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	long totqlen = 0;
 
 	/* Only let one CPU complain about others per time interval. */
 
@@ -948,15 +924,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	print_cpu_stall_info_end();
-	for_each_possible_cpu(cpu)
-		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
-	pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
-	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
-	       rsp->gpnum, rsp->completed, totqlen);
+	printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
+	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
 	if (ndetected == 0)
 		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
-		rcu_dump_cpu_stacks(rsp);
+		dump_stack();
 
 	/* Complain about tasks blocking the grace period. */
 
@@ -967,10 +940,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 static void print_cpu_stall(struct rcu_state *rsp)
 {
-	int cpu;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	long totqlen = 0;
 
 	/*
 	 * OK, time to rat on ourselves...
@@ -981,10 +952,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	print_cpu_stall_info_begin();
 	print_cpu_stall_info(rsp, smp_processor_id());
 	print_cpu_stall_info_end();
-	for_each_possible_cpu(cpu)
-		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
-	pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
-		jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
+	printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
 	if (!trigger_all_cpu_backtrace())
 		dump_stack();
 
@@ -1613,8 +1581,8 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 {
 	/*
 	 * Orphan the callbacks. First adjust the counts. This is safe
-	 * because ->onofflock excludes _rcu_barrier()'s adoption of
-	 * the callbacks, thus no memory barrier is required.
+	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
+	 * cannot be running now. Thus no memory barrier is required.
 	 */
 	if (rdp->nxtlist != NULL) {
 		rsp->qlen_lazy += rdp->qlen_lazy;
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 59d08fb1a9e3..2d8927fda712 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -8076,9 +8076,3 @@ struct cgroup_subsys cpuacct_subsys = {
 	.base_cftypes = files,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
-
-void dump_cpu_task(int cpu)
-{
-	pr_info("Task dump for CPU %d:\n", cpu);
-	sched_show_task(cpu_curr(cpu));
-}
diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug
index 41faf0b8df1d..28e9d6c98941 100644
--- a/trunk/lib/Kconfig.debug
+++ b/trunk/lib/Kconfig.debug
@@ -972,7 +972,7 @@ config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
 	depends on TREE_RCU || TREE_PREEMPT_RCU
 	range 3 300
-	default 21
+	default 60
 	help
 	  If a given RCU grace period extends more than the specified
 	  number of seconds, a CPU stall warning is printed. If the
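
Note on the removed rcu_dump_cpu_stacks(): it served as a fallback stack
dumper for architectures without trigger_all_cpu_backtrace(), walking each
leaf rcu_node and mapping set bits in ->qsmask to CPU numbers via
rnp->grplo + bit. The following is a minimal standalone userspace sketch of
that bit-to-CPU mapping only; struct leaf_node and report_cpu() are mocked
stand-ins for the kernel's rcu_node and dump_cpu_task(), not kernel code.

#include <stdio.h>

struct leaf_node {		/* mock of the rcu_node fields used here */
	unsigned long qsmask;	/* bit N set => CPU grplo + N has not reported */
	int grplo;		/* lowest-numbered CPU covered by this leaf */
	int grphi;		/* highest-numbered CPU covered by this leaf */
};

static void report_cpu(int cpu)	/* stand-in for dump_cpu_task(cpu) */
{
	printf("Task dump for CPU %d:\n", cpu);
}

static void dump_stalled_cpus(struct leaf_node *nodes, int n)
{
	int i, cpu;

	/* The kernel version holds rnp->lock with IRQs off per node. */
	for (i = 0; i < n; i++) {
		struct leaf_node *rnp = &nodes[i];

		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu))
				report_cpu(rnp->grplo + cpu);
	}
}

int main(void)
{
	/* Two leaves covering CPUs 0-3 and 4-7; CPUs 1, 4 and 6 stalled. */
	struct leaf_node nodes[] = {
		{ .qsmask = 0x2, .grplo = 0, .grphi = 3 },
		{ .qsmask = 0x5, .grplo = 4, .grphi = 7 },
	};

	dump_stalled_cpus(nodes, 2);	/* reports CPUs 1, 4 and 6 */
	return 0;
}

As the removed comment notes, this walk was only a fallback: NMI-triggered
backtraces via trigger_all_cpu_backtrace() are more accurate because each
trace is printed by the target CPU itself.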