---
yaml
---
r: 223429
b: refs/heads/master
c: 8e92c20
h: refs/heads/master
i:
  223427: ece82f9
v: v3
Peter Zijlstra authored and Ingo Molnar committed Dec 16, 2010
1 parent c4a3956 commit b50b955
Showing 2 changed files with 46 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fe44d62122829959e960bc699318d58966922a69
+refs/heads/master: 8e92c20183ed0579d94501311b81c42b65cb2129
51 changes: 45 additions & 6 deletions trunk/kernel/sched.c
@@ -1920,10 +1920,9 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
* They are read and saved off onto struct rq in update_rq_clock().
* This may result in other CPU reading this CPU's irq time and can
* race with irq/account_system_vtime on this CPU. We would either get old
* or new value (or semi updated value on 32 bit) with a side effect of
* accounting a slice of irq time to wrong task when irq is in progress
* while we read rq->clock. That is a worthy compromise in place of having
* locks on each irq in account_system_time.
* or new value with a side effect of accounting a slice of irq time to wrong
* task when irq is in progress while we read rq->clock. That is a worthy
* compromise in place of having locks on each irq in account_system_time.
*/
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);
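The hunk below replaces the plain irq_time_cpu() accessor with a seqcount-protected reader, so a 32-bit kernel can no longer observe a half-updated 64-bit per-CPU counter while an irq-time update is in flight. As a rough userspace analogue of the same pattern (a sketch only: the struct, the function names, and the C11 atomics are illustrative assumptions, not the kernel API), a reader retries until it sees the same even sequence value before and after sampling the two counters:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-in for the per-CPU { seq, softirq, hardirq } triple. */
struct irqtime_sample {
	atomic_uint seq;        /* odd while an update is in progress      */
	uint64_t softirq_time;  /* could tear on 32-bit without 'seq'      */
	uint64_t hardirq_time;
};

/* Reader: retry until the sequence is even and unchanged, mirroring
 * read_seqcount_begin()/read_seqcount_retry() in the patch.  Simplified:
 * the data words are read with plain loads, as the kernel version does. */
static uint64_t sample_read(struct irqtime_sample *s)
{
	unsigned int start;
	uint64_t sum;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		sum = s->softirq_time + s->hardirq_time;
		atomic_thread_fence(memory_order_acquire);
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_relaxed));

	return sum;
}

/* Writer: bump the sequence around the update, mirroring
 * irq_time_write_begin()/irq_time_write_end(). */
static void sample_write_begin(struct irqtime_sample *s)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);   /* like smp_wmb() */
}

static void sample_write_end(struct irqtime_sample *s)
{
	atomic_thread_fence(memory_order_release);   /* like smp_wmb() */
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);
}

On 64-bit, a single aligned load of each u64 is already atomic, which is why the patch compiles the sequence counter out under CONFIG_64BIT.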
@@ -1941,10 +1940,48 @@ void disable_sched_clock_irqtime(void)
sched_clock_irqtime = 0;
}

static inline u64 irq_time_cpu(int cpu)
#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
__this_cpu_inc(irq_time_seq.sequence);
smp_wmb();
}

static inline void irq_time_write_end(void)
{
smp_wmb();
__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
u64 irq_time;
unsigned seq;

do {
seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
irq_time = per_cpu(cpu_softirq_time, cpu) +
per_cpu(cpu_hardirq_time, cpu);
} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */

/*
* Called before incrementing preempt_count on {soft,}irq_enter
@@ -1965,6 +2002,7 @@ void account_system_vtime(struct task_struct *curr)
delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
__this_cpu_add(irq_start_time, delta);

irq_time_write_begin();
/*
* We do not account for softirq time from ksoftirqd here.
* We want to continue accounting softirq time to ksoftirqd thread
@@ -1976,6 +2014,7 @@ void account_system_vtime(struct task_struct *curr)
else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
__this_cpu_add(cpu_softirq_time, delta);

irq_time_write_end();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
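One point worth noting about the writer side just above: account_system_vtime() updates per-CPU data with interrupts disabled on the local CPU, so irq_time_write_begin()/irq_time_write_end() never have to exclude a concurrent writer; they only publish a consistent snapshot for remote readers. Continuing the userspace sketch from earlier (account_delta() is a hypothetical helper, not kernel code), the bracketed update amounts to:

/* Hypothetical analogue of the bracketed update in account_system_vtime():
 * the sequence bump is the only extra cost on the hot path. */
static void account_delta(struct irqtime_sample *s, uint64_t delta, int hardirq)
{
	sample_write_begin(s);
	if (hardirq)
		s->hardirq_time += delta;
	else
		s->softirq_time += delta;
	sample_write_end(s);
}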
@@ -1984,7 +2023,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
{
s64 irq_delta;

irq_delta = irq_time_cpu(cpu_of(rq)) - rq->prev_irq_time;
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

/*
* Since irq_time is only updated on {soft,}irq_exit, we might run into
