Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 339557
b: refs/heads/master
c: bf51bb8
h: refs/heads/master
i:
  339555: bcc81df
v: v3
  • Loading branch information
Will Deacon committed Nov 5, 2012
1 parent 487c08b commit c9d681d
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 4b883160835faf38c9356f0885cf491a1e661e88
refs/heads/master: bf51bb82ccd9a74e9702d06107b23e54b27a5707
54 changes: 35 additions & 19 deletions trunk/arch/arm/mm/context.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,14 @@
* should be unique within all running processes.
*/
/*
 * The generation number lives in the bits above ASID_BITS of a context id;
 * the low bits hold the hardware ASID. ASID value 0 is never handed out
 * (it marks "no active ASID" at rollover), so only NUM_USER_ASIDS values
 * are allocatable per generation.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

/* Map between a non-zero hardware ASID and its slot in asid_map. */
#define ASID_TO_IDX(asid)	(((asid) & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	(((idx) + 1) & ~ASID_MASK)

/* Serialises the allocation slow path (new_context()/flush_context()). */
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
/* Current generation, kept in the bits above ASID_BITS. */
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
/* One bit per allocatable ASID; set means in use in this generation. */
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

/* Context id most recently switched to on each CPU (0 after rollover). */
static DEFINE_PER_CPU(atomic64_t, active_asids);
/* Context ids live on each CPU at the last rollover; must not be reused. */
static DEFINE_PER_CPU(u64, reserved_asids);
Expand Down Expand Up @@ -111,12 +116,19 @@ arch_initcall(contextidr_notifier_init);
static void flush_context(unsigned int cpu)
{
int i;

/* Update the list of reserved ASIDs. */
for_each_possible_cpu(i)
per_cpu(reserved_asids, i) =
atomic64_xchg(&per_cpu(active_asids, i), 0);
per_cpu(reserved_asids, cpu) = 0;
u64 asid;

/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
for_each_possible_cpu(i) {
if (i == cpu) {
asid = 0;
} else {
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
__set_bit(ASID_TO_IDX(asid), asid_map);
}
per_cpu(reserved_asids, i) = asid;
}

/* Queue a TLB invalidate and flush the I-cache if necessary. */
if (!tlb_ops_need_broadcast())
Expand All @@ -128,37 +140,41 @@ static void flush_context(unsigned int cpu)
__flush_icache_all();
}

/*
 * is_reserved_asid() - check whether @asid was live on some CPU at the
 * last generation rollover.
 *
 * flush_context() snapshots each CPU's active context id into
 * reserved_asids; such ids may still have TLB entries on that CPU, so a
 * task owning one keeps it across the rollover instead of being given a
 * fresh ASID. Returns 1 if @asid is reserved, 0 otherwise. Caller must
 * hold cpu_asid_lock so the reserved_asids snapshot is stable.
 */
static int is_reserved_asid(u64 asid)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;

	return 0;
}

static void new_context(struct mm_struct *mm, unsigned int cpu)
{
u64 asid = mm->context.id;
u64 generation = atomic64_read(&asid_generation);

if (asid != 0 && is_reserved_asid(asid, ULLONG_MAX)) {
if (asid != 0 && is_reserved_asid(asid)) {
/*
* Our current ASID was active during a rollover, we can
* continue to use it and this was just a false alarm.
*/
asid = (atomic64_read(&cpu_last_asid) & ASID_MASK) | \
(asid & ~ASID_MASK);
asid = generation | (asid & ~ASID_MASK);
} else {
/*
* Allocate a free ASID. If we can't find one, take a
* note of the currently active ASIDs and mark the TLBs
* as requiring flushes.
*/
do {
asid = atomic64_inc_return(&cpu_last_asid);
if ((asid & ~ASID_MASK) == 0)
flush_context(cpu);
} while (is_reserved_asid(asid, ~ASID_MASK));
asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
if (asid == NUM_USER_ASIDS) {
generation = atomic64_add_return(ASID_FIRST_VERSION,
&asid_generation);
flush_context(cpu);
asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
}
__set_bit(asid, asid_map);
asid = generation | IDX_TO_ASID(asid);
cpumask_clear(mm_cpumask(mm));
}

Expand All @@ -179,13 +195,13 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();

if (!((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
&& atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
goto switch_mm_fastpath;

raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
if ((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
new_context(mm, cpu);

atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
Expand Down

0 comments on commit c9d681d

Please sign in to comment.