From ca58c48d2e777a0ae9b9523ebc580c4f5a4bf62f Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Mon, 1 Sep 2008 15:55:35 -0700
Subject: [PATCH]

--- yaml ---
r: 117609
b: refs/heads/master
c: 90d6e24a3686325edea7748b966e138c9923017d
h: refs/heads/master
i:
  117607: 38136ca91b6baedb5f875cf74be6383f1d97fc46
v: v3
---
 [refs]            |  2 +-
 trunk/fs/select.c | 64 +++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 071bd3a7bd96..be3878526763 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6976675d94042fbd446231d1bd8b7de71a980ada
+refs/heads/master: 90d6e24a3686325edea7748b966e138c9923017d
diff --git a/trunk/fs/select.c b/trunk/fs/select.c
index f6dceb56793f..5e61b43d0766 100644
--- a/trunk/fs/select.c
+++ b/trunk/fs/select.c
@@ -28,6 +28,58 @@
 
 #include <asm/uaccess.h>
 
+
+/*
+ * Estimate expected accuracy in ns from a timeval.
+ *
+ * After quite a bit of churning around, we've settled on
+ * a simple thing of taking 0.1% of the timeout as the
+ * slack, with a cap of 100 msec.
+ * "nice" tasks get a 0.5% slack instead.
+ *
+ * Consider this comment an open invitation to come up with even
+ * better solutions..
+ */
+
+static unsigned long __estimate_accuracy(struct timespec *tv)
+{
+	unsigned long slack;
+	int divfactor = 1000;
+
+	if (task_nice(current))
+		divfactor = divfactor / 5;
+
+	slack = tv->tv_nsec / divfactor;
+	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
+
+	if (slack > 100 * NSEC_PER_MSEC)
+		slack = 100 * NSEC_PER_MSEC;
+	return slack;
+}
+
+static unsigned long estimate_accuracy(struct timespec *tv)
+{
+	unsigned long ret;
+	struct timespec now;
+
+	/*
+	 * Realtime tasks get a slack of 0 for obvious reasons.
+	 */
+
+	if (current->policy == SCHED_FIFO ||
+		current->policy == SCHED_RR)
+		return 0;
+
+	ktime_get_ts(&now);
+	now = timespec_sub(*tv, now);
+	ret = __estimate_accuracy(&now);
+	if (ret < current->timer_slack_ns)
+		return current->timer_slack_ns;
+	return ret;
+}
+
+
+
 struct poll_table_page {
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
@@ -262,6 +314,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 	struct poll_wqueues table;
 	poll_table *wait;
 	int retval, i, timed_out = 0;
+	unsigned long slack = 0;
 
 	rcu_read_lock();
 	retval = max_select_fd(n, fds);
@@ -278,6 +331,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 		timed_out = 1;
 	}
 
+	if (end_time)
+		slack = estimate_accuracy(end_time);
+
 	retval = 0;
 	for (;;) {
 		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
@@ -353,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 			to = &expire;
 		}
 
-		if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
+		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 	}
 	__set_current_state(TASK_RUNNING);
@@ -593,6 +649,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 	poll_table* pt = &wait->pt;
 	ktime_t expire, *to = NULL;
 	int timed_out = 0, count = 0;
+	unsigned long slack = 0;
 
 	/* Optimise the no-wait case */
 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -600,6 +657,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 		timed_out = 1;
 	}
 
+	if (end_time)
+		slack = estimate_accuracy(end_time);
+
 	for (;;) {
 		struct poll_list *walk;
 
@@ -646,7 +706,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
 			to = &expire;
 		}
 
-		if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
+		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 	}
 	__set_current_state(TASK_RUNNING);
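
---

Note (illustration only, not part of the patch): schedule_hrtimeout_range(to,
slack, HRTIMER_MODE_ABS) allows the timer to fire anywhere in the window
[to, to + slack], so nearby expiries from different select()/poll() callers
can be coalesced into shared wakeups instead of each demanding exact expiry.
The slack heuristic is easy to sanity-check from userspace; the sketch below
reproduces only its arithmetic: 0.1% of the remaining timeout, 0.5% for niced
tasks (divfactor drops from 1000 to 200), capped at 100 msec. All names in it
are hypothetical, and it omits the realtime-task and task->timer_slack_ns
handling that estimate_accuracy() adds in the kernel.

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000L
	#define NSEC_PER_MSEC	1000000L

	/* Userspace mirror of the patch's __estimate_accuracy() math:
	 * 0.1% of the timeout (0.5% when niced), capped at 100 msec. */
	static long estimate_slack_ns(long sec, long nsec, int niced)
	{
		int divfactor = niced ? 200 : 1000;
		long slack = nsec / divfactor
			   + sec * (NSEC_PER_SEC / divfactor);

		if (slack > 100 * NSEC_PER_MSEC)
			slack = 100 * NSEC_PER_MSEC;
		return slack;
	}

	int main(void)
	{
		/* A 10 msec timeout tolerates ~10 usec of extra latency... */
		printf("10 msec timeout -> %ld ns slack\n",
		       estimate_slack_ns(0, 10 * NSEC_PER_MSEC, 0));
		/* ...while a 600 sec timeout hits the 100 msec cap. */
		printf("600 sec timeout -> %ld ns slack\n",
		       estimate_slack_ns(600, 0, 0));
		return 0;
	}

The proportional rule keeps the added latency negligible relative to what the
caller asked for, while the cap bounds the worst case for very long timeouts.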