Commit

Pull mm-context-fix into release branch
Tony Luck committed Aug 29, 2005
2 parents dcf8296 + badea12 commit 7ee175f
Showing 2 changed files with 37 additions and 25 deletions.
8 changes: 5 additions & 3 deletions include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number. We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
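The comment explains the volatile typedef: mm->context is read outside of ia64_ctx.lock, so every access to the shared field must be a real load. The new nv_mm_context_t gives code that has already taken a snapshot a plain, non-volatile type to work with. A minimal illustration of that split (not part of the patch; read_context_once() is a hypothetical helper, the pattern follows the mmu_context.h hunks below):

static inline nv_mm_context_t
read_context_once (struct mm_struct *mm)
{
        /* exactly one volatile read of the shared field ... */
        nv_mm_context_t context = mm->context;

        /* ... after which "context" is ordinary data the compiler may keep in a register */
        return context;
}

get_mmu_context() below follows this pattern: it snapshots mm->context into an nv_mm_context_t and only re-reads the shared field once it holds the lock.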
54 changes: 32 additions & 22 deletions include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
 delayed_tlb_flush (void)
 {
         extern void local_flush_tlb_all (void);
+        unsigned long flags;
 
         if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-                local_flush_tlb_all();
-                __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+                spin_lock_irqsave(&ia64_ctx.lock, flags);
+                {
+                        if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+                                local_flush_tlb_all();
+                                __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+                        }
+                }
+                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
         }
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
         unsigned long flags;
-        mm_context_t context = mm->context;
-
-        if (context)
-                return context;
-
-        spin_lock_irqsave(&ia64_ctx.lock, flags);
-        {
-                /* re-check, now that we've got the lock: */
-                context = mm->context;
-                if (context == 0) {
-                        cpus_clear(mm->cpu_vm_mask);
-                        if (ia64_ctx.next >= ia64_ctx.limit)
-                                wrap_mmu_context(mm);
-                        mm->context = context = ia64_ctx.next++;
+        nv_mm_context_t context = mm->context;
+
+        if (unlikely(!context)) {
+                spin_lock_irqsave(&ia64_ctx.lock, flags);
+                {
+                        /* re-check, now that we've got the lock: */
+                        context = mm->context;
+                        if (context == 0) {
+                                cpus_clear(mm->cpu_vm_mask);
+                                if (ia64_ctx.next >= ia64_ctx.limit)
+                                        wrap_mmu_context(mm);
+                                mm->context = context = ia64_ctx.next++;
+                        }
                 }
+                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
         }
-        spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+        /*
+         * Ensure we're not starting to use "context" before any old
+         * uses of it are gone from our TLB.
+         */
+        delayed_tlb_flush();
+
         return context;
 }
 
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
         unsigned long rid;
         unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-        mm_context_t context;
+        nv_mm_context_t context;
 
         do {
                 context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-        delayed_tlb_flush();
-
         /*
          * We may get interrupts here, but that's OK because interrupt handlers cannot
          * touch user-space.
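For readability, here is how the two reworked functions read after this merge, assembled from the hunks above. This is a sketch, not a verbatim copy of the tree: ia64_ctx, wrap_mmu_context(), cpus_clear() and the per-CPU ia64_need_tlb_flush flag are defined elsewhere in the header and are taken as given.

static inline void
delayed_tlb_flush (void)
{
        extern void local_flush_tlb_all (void);
        unsigned long flags;

        if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
                spin_lock_irqsave(&ia64_ctx.lock, flags);
                {
                        /* re-check now that ia64_ctx.lock is held */
                        if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
                                local_flush_tlb_all();
                                __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
                        }
                }
                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
        }
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
        unsigned long flags;
        nv_mm_context_t context = mm->context;  /* single volatile read */

        if (unlikely(!context)) {
                spin_lock_irqsave(&ia64_ctx.lock, flags);
                {
                        /* re-check, now that we've got the lock: */
                        context = mm->context;
                        if (context == 0) {
                                cpus_clear(mm->cpu_vm_mask);
                                if (ia64_ctx.next >= ia64_ctx.limit)
                                        wrap_mmu_context(mm);
                                mm->context = context = ia64_ctx.next++;
                        }
                }
                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
        }
        /*
         * Ensure we're not starting to use "context" before any old
         * uses of it are gone from our TLB.
         */
        delayed_tlb_flush();

        return context;
}

The flush is now driven from the get_mmu_context() path rather than from activate_mm(), and it runs under ia64_ctx.lock with a re-check of the per-CPU flag; per the in-code comment, this keeps a freshly allocated context number from being used while stale translations for it may still sit in the local TLB.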
