Skip to content

Commit

Permalink
seqlock: Introduce raw_read_seqcount_latch()
Browse files Browse the repository at this point in the history
Because with latches there is a strict data dependency on the seq load,
we can avoid the rmb in favour of a read_barrier_depends.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
  • Loading branch information
Peter Zijlstra authored and Rusty Russell committed May 28, 2015
1 parent 0a04b01 commit 7fc2632
Showing 2 changed files with 8 additions and 3 deletions.
9 changes: 7 additions & 2 deletions include/linux/seqlock.h
Original file line number Diff line number Diff line change
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
@@ -233,6 +234,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
s->sequence++;
}

/**
 * raw_read_seqcount_latch - pick up the latch sequence number
 * @s: pointer to seqcount_t
 *
 * Returns the raw sequence number, for use by latch readers that select
 * the even/odd data copy via (seq & 0x01) before retrying with
 * read_seqcount_retry().
 *
 * Because the latch index is data-dependent on this load, a full
 * smp_rmb() after the read is unnecessary: lockless_dereference()
 * supplies the (weaker, and free on most architectures) dependency
 * barrier instead.
 */
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
return lockless_dereference(s->sequence);
}

/**
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
@@ -284,8 +290,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* unsigned seq, idx;
*
* do {
* seq = latch->seq;
* smp_rmb();
* seq = lockless_dereference(latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
2 changes: 1 addition & 1 deletion kernel/time/timekeeping.c
Original file line number Diff line number Diff line change
@@ -393,7 +393,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
u64 now;

do {
seq = raw_read_seqcount(&tkf->seq);
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
} while (read_seqcount_retry(&tkf->seq, seq));

0 comments on commit 7fc2632

Please sign in to comment.