Skip to content

Commit

Permalink
MIPS: c-r4k: Fix flush_icache_range() for EVA
Browse files Browse the repository at this point in the history
flush_icache_range() flushes icache lines in a protected fashion for
kernel addresses, however this isn't correct with EVA where protected
cache ops only operate on user addresses, making flush_icache_range()
ineffective.

Split the implementations of __flush_icache_user_range() from
flush_icache_range(), changing the normal flush_icache_range() to use
unprotected normal cache ops.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14156/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
  • Loading branch information
James Hogan authored and Ralf Baechle committed Oct 4, 2016
1 parent 24d1a6e commit b2ff717
Showing 1 changed file with 35 additions and 8 deletions.
43 changes: 35 additions & 8 deletions arch/mips/mm/c-r4k.c
Original file line number Diff line number Diff line change
Expand Up @@ -722,19 +722,24 @@ struct flush_icache_range_args {
unsigned long start;
unsigned long end;
unsigned int type;
bool user;
};

static inline void __local_r4k_flush_icache_range(unsigned long start,
unsigned long end,
unsigned int type)
unsigned int type,
bool user)
{
if (!cpu_has_ic_fills_f_dc) {
if (type == R4K_INDEX ||
(type & R4K_INDEX && end - start >= dcache_size)) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
protected_blast_dcache_range(start, end);
if (user)
protected_blast_dcache_range(start, end);
else
blast_dcache_range(start, end);
}
}

Expand All @@ -748,7 +753,10 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
break;

default:
protected_blast_icache_range(start, end);
if (user)
protected_blast_icache_range(start, end);
else
blast_icache_range(start, end);
break;
}
}
Expand All @@ -757,7 +765,13 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
/*
 * Flush the icache for the kernel virtual address range [start, end)
 * on the local CPU only (no SMP call).
 *
 * Passes user=false so __local_r4k_flush_icache_range() uses the
 * unprotected blast_*cache_range() ops, which work on kernel addresses
 * even under EVA, where protected cache ops only operate on user
 * addresses.
 */
static inline void local_r4k_flush_icache_range(unsigned long start,
						unsigned long end)
{
	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
}

/*
 * Flush the icache for the user virtual address range [start, end)
 * on the local CPU only (no SMP call).
 *
 * Passes user=true so __local_r4k_flush_icache_range() uses the
 * protected_blast_*cache_range() ops, which under EVA operate on user
 * addresses in a protected (fault-tolerant) fashion.
 */
static inline void local_r4k_flush_icache_user_range(unsigned long start,
						     unsigned long end)
{
	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
Expand All @@ -766,18 +780,21 @@ static inline void local_r4k_flush_icache_range_ipi(void *args)
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
unsigned int type = fir_args->type;
bool user = fir_args->user;

__local_r4k_flush_icache_range(start, end, type);
__local_r4k_flush_icache_range(start, end, type, user);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
bool user)
{
struct flush_icache_range_args args;
unsigned long size, cache_size;

args.start = start;
args.end = end;
args.type = R4K_HIT | R4K_INDEX;
args.user = user;

/*
* Indexed cache ops require an SMP call.
Expand All @@ -803,6 +820,16 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
instruction_hazard();
}

/*
 * Flush the icache for a kernel virtual address range [start, end),
 * coordinating across CPUs where required.
 *
 * Passes user=false so the flush uses unprotected cache ops, which are
 * effective for kernel addresses even on EVA, where protected cache
 * ops only operate on user addresses.
 *
 * Note: changed from `return __r4k_flush_icache_range(...);` — ISO C
 * (C11 6.8.6.4p1) forbids a return statement with an expression in a
 * function whose return type is void, even if the expression itself
 * has void type.
 */
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	__r4k_flush_icache_range(start, end, false);
}

/*
 * Flush the icache for a user virtual address range [start, end),
 * coordinating across CPUs where required.
 *
 * Passes user=true so the flush uses the protected cache ops, which
 * under EVA operate on user addresses.
 *
 * Note: changed from `return __r4k_flush_icache_range(...);` — ISO C
 * (C11 6.8.6.4p1) forbids a return statement with an expression in a
 * function whose return type is void, even if the expression itself
 * has void type.
 */
static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
{
	__r4k_flush_icache_range(start, end, true);
}

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
Expand Down Expand Up @@ -1904,8 +1931,8 @@ void r4k_cache_init(void)
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
__flush_icache_user_range = r4k_flush_icache_range;
__local_flush_icache_user_range = local_r4k_flush_icache_range;
__flush_icache_user_range = r4k_flush_icache_user_range;
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
if (coherentio) {
Expand Down

0 comments on commit b2ff717

Please sign in to comment.