[IA64] Re-implement spinaphores using ticket lock concepts
Bound the wait time for the ptcg_sem by using a similar idea to the
ticket spin locks.  In this case we have only one instance of a
spinaphore, so make it 8 bytes rather than trying to squeeze it into
4 bytes, to keep the code simpler (and shorter).

Signed-off-by: Tony Luck <tony.luck@intel.com>
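As a rough sketch of the idea (not part of the commit), the ticket/serve scheme can be written with portable C11 atomics instead of the IA64 fetchadd intrinsics used in the patch. The names (spinaphore_sketch, sketch_init, sketch_down, sketch_up) are illustrative only, and the plain comparison ignores counter wraparound, which the kernel code handles with time_before():

	#include <stdatomic.h>

	struct spinaphore_sketch {
		atomic_ulong ticket;	/* next ticket number to hand out */
		atomic_ulong serve;	/* tickets below this value may enter */
	};

	static void sketch_init(struct spinaphore_sketch *ss, unsigned long val)
	{
		atomic_init(&ss->ticket, 0);
		atomic_init(&ss->serve, val);	/* val = allowed concurrent holders */
	}

	static void sketch_down(struct spinaphore_sketch *ss)
	{
		/* take a ticket; acquire pairs with the release in sketch_up() */
		unsigned long t = atomic_fetch_add_explicit(&ss->ticket, 1,
							    memory_order_acquire);

		/* FIFO: wait until serve has advanced past our ticket */
		while (t >= atomic_load_explicit(&ss->serve, memory_order_acquire))
			;	/* spin; the kernel would call cpu_relax() here */
	}

	static void sketch_up(struct spinaphore_sketch *ss)
	{
		/* admit exactly one more waiter */
		atomic_fetch_add_explicit(&ss->serve, 1, memory_order_release);
	}

With an initial value of N, tickets 0..N-1 proceed immediately and each up operation admits exactly one more waiter in FIFO order, which is what bounds the wait time.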
Tony Luck committed Oct 9, 2009
1 parent 36a0790 commit 883a3ac
Showing 1 changed file with 18 additions and 6 deletions: arch/ia64/mm/tlb.c
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
  * this primitive it can be moved up to a spinaphore.h header.
  */
 struct spinaphore {
-	atomic_t cur;
+	unsigned long ticket;
+	unsigned long serve;
 };
 
 static inline void spinaphore_init(struct spinaphore *ss, int val)
 {
-	atomic_set(&ss->cur, val);
+	ss->ticket = 0;
+	ss->serve = val;
 }
 
 static inline void down_spin(struct spinaphore *ss)
 {
-	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
-		while (atomic_read(&ss->cur) == 0)
-			cpu_relax();
+	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
+
+	if (time_before(t, ss->serve))
+		return;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+		if (time_before(t, serve))
+			return;
+		cpu_relax();
+	}
 }
 
 static inline void up_spin(struct spinaphore *ss)
 {
-	atomic_add(1, &ss->cur);
+	ia64_fetchadd(1, &ss->serve, rel);
 }
 
 static struct spinaphore ptcg_sem;
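
For context beyond this hunk (an illustrative sketch, not lines from the diff): ptcg_sem is used to limit how many CPUs may issue global ptc.ga TLB purges at the same time, and callers wrap the purge in the usual down/up pattern:

	down_spin(&ptcg_sem);	/* wait for a free purge slot, served in ticket order */
	/* ... issue the ptc.ga purge(s) for the address range ... */
	up_spin(&ptcg_sem);	/* hand the slot to the next ticket holder */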

