seqlock: Use seqcount infrastructure
No point in having different implementations for the same
thing. Change the macro mess to inline functions where possible.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner authored and Ingo Molnar committed Feb 19, 2013
1 parent 65c9d1b commit 6617fec
Showing 1 changed file with 92 additions and 82 deletions.
174 changes: 92 additions & 82 deletions include/linux/seqlock.h
@@ -30,81 +30,12 @@
#include <linux/preempt.h>
#include <asm/processor.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
#define __SEQLOCK_UNLOCKED(lockname) \
                 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define seqlock_init(x)                                 \
        do {                                            \
                (x)->sequence = 0;                      \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)

/* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
* Don't need preempt_disable() because that is in the spin_lock already.
*/
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(sl->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();

        return ret;
}

/*
* Test if reader processed invalid data.
*
* If sequence value changed then writer changed data while in section.
*/
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
        smp_rmb();

        return unlikely(sl->sequence != start);
}


/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;
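For illustration only (not part of this patch): a minimal sketch of the seqcount-only scheme the comment above describes, assuming <linux/mutex.h> and <linux/seqlock.h> are included; struct foo_stats, foo_stats_update() and foo_stats_read() are made-up names. The writer is serialized by the caller's own mutex, while readers spin on the sequence count.

struct foo_stats {                      /* hypothetical example structure */
        struct mutex lock;              /* caller-provided writer lock    */
        seqcount_t seq;                 /* initialize with seqcount_init() */
        u64 packets, bytes;
};

static void foo_stats_update(struct foo_stats *st, u64 p, u64 b)
{
        mutex_lock(&st->lock);                  /* external writer serialization */
        write_seqcount_begin(&st->seq);         /* count becomes odd             */
        st->packets += p;
        st->bytes += b;
        write_seqcount_end(&st->seq);           /* count becomes even again      */
        mutex_unlock(&st->lock);
}

static void foo_stats_read(struct foo_stats *st, u64 *p, u64 *b)
{
        unsigned seq;

        do {
                seq = read_seqcount_begin(&st->seq);
                *p = st->packets;
                *b = st->bytes;
        } while (read_seqcount_retry(&st->seq, seq));
}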
@@ -207,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();

        return __read_seqcount_retry(s, start);
}

@@ -241,21 +171,101 @@ static inline void write_seqcount_barrier(seqcount_t *s)
        s->sequence+=2;
}

typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

/*
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
#define __SEQLOCK_UNLOCKED(lockname)                    \
        {                                               \
                .seqcount = SEQCNT_ZERO,                \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }

#define seqlock_init(x)                                 \
        do {                                            \
                seqcount_init(&(x)->seqcount);          \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
* Read side functions for starting and finalizing a read side section.
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        return read_seqcount_retry(&sl->seqcount, start);
}
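As a usage sketch (not part of the patch): a reader keeps re-running its critical section until read_seqretry() reports that no writer interfered; foo_lock, foo_x and foo_y are hypothetical.

static DEFINE_SEQLOCK(foo_lock);        /* hypothetical example lock */
static u64 foo_x, foo_y;                /* data protected by it      */

static u64 foo_read_sum(void)
{
        unsigned seq;
        u64 sum;

        do {
                seq = read_seqbegin(&foo_lock);  /* sample an even sequence count */
                sum = foo_x + foo_y;             /* tentative, possibly torn read */
        } while (read_seqretry(&foo_lock, seq)); /* retry if the count changed    */

        return sum;
}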

/*
* Possible sw/hw IRQ protected versions of the interfaces.
* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
* Don't need preempt_disable() because that is in the spin_lock already.
*/
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}
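The matching writer side of the same hypothetical example: write_seqlock() takes the embedded spinlock and makes the count odd, so concurrent foo_read_sum() readers retry until write_sequnlock() makes it even again.

static void foo_update(u64 x, u64 y)
{
        write_seqlock(&foo_lock);       /* serialize writers, sequence goes odd   */
        foo_x = x;
        foo_y = y;
        write_sequnlock(&foo_lock);     /* sequence goes even, readers can finish */
}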

static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_begin(&sl->seqcount);
        return flags;
}

#define write_seqlock_irqsave(lock, flags) \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
        do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
        do { local_bh_disable(); write_seqlock(lock); } while (0)
        do { flags = __write_seqlock_irqsave(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags) \
        do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
#define write_sequnlock_irq(lock) \
        do { write_sequnlock(lock); local_irq_enable(); } while(0)
#define write_sequnlock_bh(lock) \
        do { write_sequnlock(lock); local_bh_enable(); } while(0)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}
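Finally, a sketch of the irqsave flavour for the same hypothetical data; the write_seqlock_irqsave() macro above stays a macro because it has to assign the saved flags to the caller's variable.

static void foo_update_any_context(u64 x, u64 y)
{
        unsigned long flags;

        write_seqlock_irqsave(&foo_lock, flags);        /* lock, disable and save IRQ state */
        foo_x = x;
        foo_y = y;
        write_sequnlock_irqrestore(&foo_lock, flags);   /* unlock and restore IRQ state     */
}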

#endif /* __LINUX_SEQLOCK_H */
