Commit 7c198ed
---
r: 338814
b: refs/heads/master
c: abfd6e5
h: refs/heads/master
v: v3
Paul E. McKenney authored and Paul E. McKenney committed Oct 23, 2012
1 parent c46e795 commit 7c198ed
Showing 5 changed files with 8 additions and 48 deletions.
[refs]: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c896054f75f9a720ecf2ab3e688f4da79a55fe05
+refs/heads/master: abfd6e58aed4f89fd69b9b17bc4b4527efe3a645
trunk/include/linux/sched.h: 0 additions & 2 deletions
@@ -109,8 +109,6 @@ extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
-extern void dump_cpu_task(int cpu);
-
 struct seq_file;
 struct cfs_rq;
 struct task_group;
trunk/kernel/rcutree.c: 6 additions & 38 deletions
@@ -873,37 +873,13 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
 }
 
-/*
- * Dump stacks of all tasks running on stalled CPUs. This is a fallback
- * for architectures that do not implement trigger_all_cpu_backtrace().
- * The NMI-triggered stack traces are more accurate because they are
- * printed by the target CPU.
- */
-static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
-{
-	int cpu;
-	unsigned long flags;
-	struct rcu_node *rnp;
-
-	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		if (rnp->qsmask != 0) {
-			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-				if (rnp->qsmask & (1UL << cpu))
-					dump_cpu_task(rnp->grplo + cpu);
-		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	}
-}
-
 static void print_other_cpu_stall(struct rcu_state *rsp)
 {
 	int cpu;
 	long delta;
 	unsigned long flags;
 	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	long totqlen = 0;
 
 	/* Only let one CPU complain about others per time interval. */
 
@@ -948,15 +924,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	print_cpu_stall_info_end();
-	for_each_possible_cpu(cpu)
-		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
-	pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
-	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
-	       rsp->gpnum, rsp->completed, totqlen);
+	printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
+	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
 	if (ndetected == 0)
 		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
-		rcu_dump_cpu_stacks(rsp);
+		dump_stack();
 
 	/* Complain about tasks blocking the grace period. */
 
@@ -967,10 +940,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 static void print_cpu_stall(struct rcu_state *rsp)
 {
-	int cpu;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	long totqlen = 0;
 
 	/*
 	 * OK, time to rat on ourselves...
@@ -981,10 +952,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	print_cpu_stall_info_begin();
 	print_cpu_stall_info(rsp, smp_processor_id());
 	print_cpu_stall_info_end();
-	for_each_possible_cpu(cpu)
-		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
-	pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
-		jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
+	printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
 	if (!trigger_all_cpu_backtrace())
 		dump_stack();
 
@@ -1613,8 +1581,8 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 {
 	/*
 	 * Orphan the callbacks. First adjust the counts. This is safe
-	 * because ->onofflock excludes _rcu_barrier()'s adoption of
-	 * the callbacks, thus no memory barrier is required.
+	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
+	 * cannot be running now. Thus no memory barrier is required.
 	 */
 	if (rdp->nxtlist != NULL) {
 		rsp->qlen_lazy += rdp->qlen_lazy;
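The removed rcu_dump_cpu_stacks() walks each leaf rcu_node and dumps only the CPUs whose bit is still set in ->qsmask, i.e. those that have not yet reported a quiescent state to the current grace period. A minimal user-space sketch of that bit walk follows; the identifiers mirror the diff, but the mask and CPU range are made-up example values and nothing here is kernel code:

#include <stdio.h>

/*
 * Illustrative sketch only: each rcu_node leaf covers CPUs grplo..grphi,
 * and bit (cpu - grplo) of ->qsmask stays set while that CPU still owes
 * the grace period a quiescent state.
 */
int main(void)
{
	unsigned long qsmask = 0x9;	/* hypothetical: bits 0 and 3 set */
	int grplo = 4, grphi = 7;	/* hypothetical leaf covering CPUs 4-7 */
	int cpu;

	for (cpu = 0; cpu <= grphi - grplo; cpu++)
		if (qsmask & (1UL << cpu))
			printf("would dump stack of CPU %d\n", grplo + cpu);
	return 0;
}

For the mask above this reports CPUs 4 and 7, matching how the removed function translated node-relative bit positions back to global CPU numbers.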
trunk/kernel/sched/core.c: 0 additions & 6 deletions
@@ -8076,9 +8076,3 @@ struct cgroup_subsys cpuacct_subsys = {
 	.base_cftypes = files,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
-
-void dump_cpu_task(int cpu)
-{
-	pr_info("Task dump for CPU %d:\n", cpu);
-	sched_show_task(cpu_curr(cpu));
-}
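For context, dump_cpu_task() printed a header and then the stack of whatever task was currently running on the target CPU. Because the detecting CPU reads the remote stack rather than the target CPU printing its own, the removed rcutree.c comment above calls NMI-triggered backtraces "more accurate". A hypothetical caller, assuming the helper this commit removes were still present (dump_all_cpu_tasks() is invented for illustration; dump_cpu_task() and for_each_online_cpu() are real kernel identifiers):

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Hypothetical helper, for illustration only: dump the current task of
 * every online CPU via the interface this commit removes. */
static void dump_all_cpu_tasks(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		dump_cpu_task(cpu);	/* remote read; the stack may change underfoot */
}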
trunk/lib/Kconfig.debug: 1 addition & 1 deletion
@@ -972,7 +972,7 @@ config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
 	depends on TREE_RCU || TREE_PREEMPT_RCU
 	range 3 300
-	default 21
+	default 60
 	help
 	  If a given RCU grace period extends more than the specified
 	  number of seconds, a CPU stall warning is printed. If the
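This timeout feeds the jiffies_till_stall_check() call at the top of the rcutree.c diff, which turns the configured seconds into a jiffies deadline. A simplified sketch of that conversion, modeled on the 3.7-era rcutree.c; the real function also writes the clamped value back to the runtime-writable rcu_cpu_stall_timeout variable and adds a small slack term omitted here:

/* Simplified sketch: the Kconfig range above (3..300 seconds) is
 * enforced at runtime as well, then seconds are scaled by HZ. */
static int jiffies_till_stall_check(void)
{
	int till_stall_check = rcu_cpu_stall_timeout;	/* seconds, default from Kconfig */

	if (till_stall_check < 3)
		till_stall_check = 3;
	else if (till_stall_check > 300)
		till_stall_check = 300;
	return till_stall_check * HZ;	/* jiffies until a stall is reported */
}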
0 comments on commit 7c198ed
