diff --git a/[refs] b/[refs]
index 19b78c6ae59c..124fa779b7b2 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6bc6cf2b61336ed0c55a615eb4c0c8ed5daf3f08
+refs/heads/master: 5ca9880c6f4ba4c84b517bc2fed5366adf63d191
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index de98e2e9d6e1..97682f925ed5 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -738,7 +738,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime += sched_vslice(cfs_rq, se);
 
 	/* sleeps up to a single latency don't count. */
-	if (!initial && sched_feat(FAIR_SLEEPERS)) {
+	if (!initial) {
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
diff --git a/trunk/kernel/sched_features.h b/trunk/kernel/sched_features.h
index 404288354aee..850f9809cf81 100644
--- a/trunk/kernel/sched_features.h
+++ b/trunk/kernel/sched_features.h
@@ -1,10 +1,3 @@
-/*
- * Disregards a certain amount of sleep time (sched_latency_ns) and
- * considers the task to be running during that period. This gives it
- * a service deficit on wakeup, allowing it to run sooner.
- */
-SCHED_FEAT(FAIR_SLEEPERS, 1)
-
 /*
  * Only give sleepers 50% of their service deficit. This allows
  * them to run sooner, but does not allow tons of sleepers to
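
The patch deletes the FAIR_SLEEPERS feature bit entirely: after it, every non-initial (wakeup) placement in place_entity() receives the sleeper vruntime credit unconditionally, and only the halving of that credit remains feature-gated behind GENTLE_FAIR_SLEEPERS. The following is a minimal standalone C sketch of that resulting logic, not the kernel source itself; the constant and the gentle_fair_sleepers flag are illustrative stand-ins for sysctl_sched_latency and sched_feat(GENTLE_FAIR_SLEEPERS).

```c
#include <stdbool.h>

/* Stand-in for sysctl_sched_latency (a common default is 6 ms, in ns). */
#define SYSCTL_SCHED_LATENCY	6000000UL

/* Stand-in for sched_feat(GENTLE_FAIR_SLEEPERS). */
static bool gentle_fair_sleepers = true;

/*
 * Sketch of the post-patch sleeper-credit decision in place_entity():
 * returns the vruntime credit a waking entity is granted. With
 * FAIR_SLEEPERS removed, the only remaining gates are "not an initial
 * placement" and the GENTLE_FAIR_SLEEPERS halving.
 */
static unsigned long sleeper_credit(bool initial)
{
	unsigned long thresh = SYSCTL_SCHED_LATENCY;

	if (initial)
		return 0;	/* newly forked tasks get no sleeper bonus */

	/*
	 * Only give sleepers 50% of their service deficit, so tons of
	 * sleepers cannot monopolize the CPU.
	 */
	if (gentle_fair_sleepers)
		thresh >>= 1;

	return thresh;	/* caller subtracts this from the entity's vruntime */
}
```

In effect, the one behavioral change a user could previously make by clearing FAIR_SLEEPERS (disabling the sleeper credit altogether) disappears; tuning is reduced to toggling GENTLE_FAIR_SLEEPERS between the full and the halved credit.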