diff --git a/[refs] b/[refs]
index ee6b277be57f..75419c5b9e2b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 391e43da797a96aeb65410281891f6d0b0e9611c
+refs/heads/master: b781a602ac745ee3d5d745276f1e1905a2c101f9
diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h
index 959ff18b63b6..e33ed1bfa113 100644
--- a/trunk/include/trace/events/sched.h
+++ b/trunk/include/trace/events/sched.h
@@ -330,6 +330,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
 
+/*
+ * Tracepoint for accounting blocked time (time the task is in uninterruptible).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay));
+
 /*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index cd3b64219d9f..7c62e2bf234f 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -1030,6 +1030,8 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			trace_sched_stat_iowait(tsk, delta);
 		}
 
+		trace_sched_stat_blocked(tsk, delta);
+
 		/*
 		 * Blocking time is in units of nanosecs, so shift by
 		 * 20 to get a milliseconds-range estimation of the
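
The hunks above define a new event, sched_stat_blocked, from the existing
sched_stat_template class and fire it from enqueue_sleeper() with the blocked
delay in nanoseconds. Below is a minimal, hypothetical probe sketch showing how
kernel code could consume the new tracepoint; it assumes the tracepoint is
reachable from the caller's build context (an out-of-tree module would also
need the tracepoint exported via EXPORT_TRACEPOINT_SYMBOL), and the 10 ms
threshold is an arbitrary example value.

/* Sketch: log tasks that spend more than 10 ms in uninterruptible sleep. */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

static void probe_stat_blocked(void *data, struct task_struct *tsk, u64 delay)
{
	/* delay is in nanoseconds (see the comment in enqueue_sleeper()) */
	if (delay > 10000000ULL)
		pr_info("sched_stat_blocked: comm=%s pid=%d delay=%llu ns\n",
			tsk->comm, tsk->pid, (unsigned long long)delay);
}

static int __init blocked_probe_init(void)
{
	return register_trace_sched_stat_blocked(probe_stat_blocked, NULL);
}

static void __exit blocked_probe_exit(void)
{
	unregister_trace_sched_stat_blocked(probe_stat_blocked, NULL);
	tracepoint_synchronize_unregister();
}

module_init(blocked_probe_init);
module_exit(blocked_probe_exit);
MODULE_LICENSE("GPL");

From userspace the same data should be visible as the sched:sched_stat_blocked
ftrace/perf event; like the other sched_stat_* events, it is only emitted when
the statistics code in enqueue_sleeper() is compiled in (CONFIG_SCHEDSTATS).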