Thomas Gleixner authored and Ingo Molnar committed Feb 19, 2013
1 parent afabff5 commit ecc434e
Showing 2 changed files with 93 additions and 83 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 65c9d1bbc9f32c568a4f7ad154c9a10b0028df52
+refs/heads/master: 6617feca15c68aad12f4a1edd8d8e2ef8d5b4ae5
174 changes: 92 additions & 82 deletions trunk/include/linux/seqlock.h
@@ -30,81 +30,12 @@
#include <linux/preempt.h>
#include <asm/processor.h>

-typedef struct {
-	unsigned sequence;
-	spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems.  We think these are
- * OK now.  Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-	{ 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)					\
-	do {						\
-		(x)->sequence = 0;			\
-		spin_lock_init(&(x)->lock);		\
-	} while (0)
-
-#define DEFINE_SEQLOCK(x) \
-	seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-	spin_lock(&sl->lock);
-	++sl->sequence;
-	smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-	smp_wmb();
-	sl->sequence++;
-	spin_unlock(&sl->lock);
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-	unsigned ret;
-
-repeat:
-	ret = ACCESS_ONCE(sl->sequence);
-	if (unlikely(ret & 1)) {
-		cpu_relax();
-		goto repeat;
-	}
-	smp_rmb();
-
-	return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-	smp_rmb();
-
-	return unlikely(sl->sequence != start);
-}
-

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
} seqcount_t;
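For context, the bare seqcount_t above pairs with a lock the caller already owns: the writer serializes updates itself and uses the seqcount only so that lockless readers can detect a concurrent update. A minimal sketch of that pattern (stats_lock, stats_seq and stats_bytes are hypothetical names, not part of this file):

#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(stats_lock);		/* caller-owned writer lock */
static seqcount_t stats_seq = SEQCNT_ZERO;
static u64 stats_bytes;

static void stats_add(u64 n)
{
	spin_lock(&stats_lock);			/* our own lock, taken before begin */
	write_seqcount_begin(&stats_seq);
	stats_bytes += n;
	write_seqcount_end(&stats_seq);
	spin_unlock(&stats_lock);		/* released after end */
}

static u64 stats_read_bytes(void)
{
	unsigned seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&stats_seq);
		val = stats_bytes;
	} while (read_seqcount_retry(&stats_seq, seq));

	return val;
}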
@@ -207,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
-
	return __read_seqcount_retry(s, start);
}

@@ -241,21 +171,101 @@ static inline void write_seqcount_barrier(seqcount_t *s)
	s->sequence+=2;
}

+typedef struct {
+	struct seqcount seqcount;
+	spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
+ */
+#define __SEQLOCK_UNLOCKED(lockname)			\
+	{						\
+		.seqcount = SEQCNT_ZERO,		\
+		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
+	}
+
+#define seqlock_init(x)					\
+	do {						\
+		seqcount_init(&(x)->seqcount);		\
+		spin_lock_init(&(x)->lock);		\
+	} while (0)
+
+#define DEFINE_SEQLOCK(x) \
+	seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+	return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return read_seqcount_retry(&sl->seqcount, start);
+}
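The read side keeps the classic lockless retry loop, now delegated to the seqcount helpers. A sketch of typical caller code (foo_lock and foo_time are hypothetical):

#include <linux/seqlock.h>
#include <linux/time.h>

static DEFINE_SEQLOCK(foo_lock);
static struct timespec foo_time;

static struct timespec foo_get_time(void)
{
	struct timespec ts;
	unsigned seq;

	do {
		seq = read_seqbegin(&foo_lock);	/* spins while a write is in progress */
		ts = foo_time;			/* copy out; may race with a writer */
	} while (read_seqretry(&foo_lock, seq));	/* retry if a writer intervened */

	return ts;
}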

-/*
- * Possible sw/hw IRQ protected versions of the interfaces.
- */
+/*
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock(&sl->lock);
+}
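The matching write side for the sketch above: the spinlock serializes writers against each other, while write_seqcount_begin()/write_seqcount_end() bracket the update so readers retry.

static void foo_set_time(struct timespec ts)
{
	write_seqlock(&foo_lock);	/* take lock, sequence becomes odd */
	foo_time = ts;
	write_sequnlock(&foo_lock);	/* sequence becomes even, drop lock */
}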

+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+	spin_lock_bh(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+	spin_lock_irq(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sl->lock, flags);
+	write_seqcount_begin(&sl->seqcount);
+	return flags;
+}

-#define write_seqlock_irqsave(lock, flags)				\
-	do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)						\
-	do { local_irq_disable(); write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)						\
-	do { local_bh_disable(); write_seqlock(lock); } while (0)
+#define write_seqlock_irqsave(lock, flags)				\
+	do { flags = __write_seqlock_irqsave(lock); } while (0)

-#define write_sequnlock_irqrestore(lock, flags)			\
-	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)					\
-	do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)					\
-	do { write_sequnlock(lock); local_bh_enable(); } while(0)
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irqrestore(&sl->lock, flags);
+}
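__write_seqlock_irqsave() returns the saved flags by value because a plain function call cannot assign to the caller's flags variable; the write_seqlock_irqsave() macro above does that assignment, preserving the familiar spin_lock_irqsave() calling convention. A usage sketch, again with the hypothetical foo_lock and foo_time:

static void foo_set_time_irqsafe(struct timespec ts)
{
	unsigned long flags;

	write_seqlock_irqsave(&foo_lock, flags);	/* disables IRQs, saves flags */
	foo_time = ts;
	write_sequnlock_irqrestore(&foo_lock, flags);	/* restores IRQ state */
}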

#endif /* __LINUX_SEQLOCK_H */
