csky: Add flush_icache_mm to defer flush icache all
Some CPUs don't support the icache.va instruction for maintaining the
icache of all SMP cores. On those, icache.all + IPI broadcast costs a
lot of performance, so add a defer mechanism that reduces the number
of icache_flush_all calls.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Guo Ren committed Feb 21, 2020
1 parent cc1f656 commit 997153b
Showing 7 changed files with 77 additions and 11 deletions.
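
The mechanism in brief: instead of IPI-broadcasting icache.all to every core on each flush, the flushing core marks all other cores stale in a per-mm cpumask, invalidates its own icache immediately (IPI-ing only the cores currently running the mm), and every other core invalidates lazily the next time it switches to that mm. A toy userspace model of the protocol, with a plain bitmask standing in for icache_stale_mask and printf standing in for icache.all (a sketch of the idea only, not the kernel code, which follows below):

#include <stdio.h>

/*
 * Toy model of the deferred flush protocol.  One bit per core:
 * set means "this core's icache is stale for this mm".
 */
static unsigned long stale_mask;

/* Plays flush_icache_mm_range(): mark everyone stale, flush only us. */
static void flush_icache_mm(int this_cpu, int ncpus)
{
        stale_mask = (1UL << ncpus) - 1;
        stale_mask &= ~(1UL << this_cpu);
        printf("cpu%d: icache.all (immediate)\n", this_cpu);
}

/* Plays flush_icache_deferred(), run when a core switches to the mm. */
static void switch_to_mm(int cpu)
{
        if (stale_mask & (1UL << cpu)) {
                stale_mask &= ~(1UL << cpu);
                printf("cpu%d: icache.all (deferred, no IPI)\n", cpu);
        }
}

int main(void)
{
        flush_icache_mm(0, 4);  /* core 0 patched code, flushes itself  */
        switch_to_mm(2);        /* core 2 flushes at its next switch_mm */
        switch_to_mm(2);        /* already clean: nothing to do         */
        return 0;
}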
2 changes: 2 additions & 0 deletions arch/csky/abiv1/inc/abi/cacheflush.h
@@ -48,6 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
 
 #define flush_icache_page(vma, page) do {} while (0);
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
+#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
+#define flush_icache_deferred(mm) do {} while (0);
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
55 changes: 55 additions & 0 deletions arch/csky/abiv2/cacheflush.c
@@ -28,3 +28,58 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
         kunmap_atomic((void *) addr);
 }
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+        unsigned int cpu = smp_processor_id();
+        cpumask_t *mask = &mm->context.icache_stale_mask;
+
+        if (cpumask_test_cpu(cpu, mask)) {
+                cpumask_clear_cpu(cpu, mask);
+                /*
+                 * Ensure the remote hart's writes are visible to this hart.
+                 * This pairs with a barrier in flush_icache_mm.
+                 */
+                smp_mb();
+                local_icache_inv_all(NULL);
+        }
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+                           unsigned long start, unsigned long end)
+{
+        unsigned int cpu;
+        cpumask_t others, *mask;
+
+        preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+        if (mm == current->mm) {
+                icache_inv_range(start, end);
+                preempt_enable();
+                return;
+        }
+#endif
+
+        /* Mark every hart's icache as needing a flush for this MM. */
+        mask = &mm->context.icache_stale_mask;
+        cpumask_setall(mask);
+
+        /* Flush this hart's I$ now, and mark it as flushed. */
+        cpu = smp_processor_id();
+        cpumask_clear_cpu(cpu, mask);
+        local_icache_inv_all(NULL);
+
+        /*
+         * Flush the I$ of other harts concurrently executing, and mark them as
+         * flushed.
+         */
+        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+        if (mm != current->active_mm || !cpumask_empty(&others)) {
+                on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+                cpumask_clear(mask);
+        }
+
+        preempt_enable();
+}
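
A note on the smp_mb() above: clearing one's stale bit is only safe if the instructions written by the remote core are visible before the local invalidate and the fetches that follow it. A self-contained C11 analogue of that pairing, with release/acquire standing in for the kernel barriers (the two-thread scenario and all names here are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int code;   /* stands for the patched instructions   */
static atomic_int stale;  /* stands for our icache_stale_mask bit  */

static void *writer(void *arg)   /* plays flush_icache_mm_range() */
{
        (void)arg;
        atomic_store_explicit(&code, 42, memory_order_relaxed);
        /* Release: publish the code before setting the stale bit. */
        atomic_store_explicit(&stale, 1, memory_order_release);
        return NULL;
}

static void *switcher(void *arg) /* plays flush_icache_deferred() */
{
        (void)arg;
        /* Acquire pairs with the writer's release, as smp_mb() does in
         * the kernel: if we saw the stale bit, we also see the code. */
        if (atomic_exchange_explicit(&stale, 0, memory_order_acquire))
                printf("deferred flush, code=%d\n",
                       atomic_load_explicit(&code, memory_order_relaxed));
        return NULL;
}

int main(void)
{
        pthread_t a, b;
        pthread_create(&a, NULL, writer, NULL);
        pthread_create(&b, NULL, switcher, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}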
14 changes: 11 additions & 3 deletions arch/csky/abiv2/inc/abi/cacheflush.h
@@ -31,15 +31,23 @@ static inline void flush_dcache_page(struct page *page)
 
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
 
+void flush_icache_mm_range(struct mm_struct *mm,
+                           unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
+
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
         memcpy(dst, src, len); \
-        if (vma->vm_flags & VM_EXEC) \
-                cache_wbinv_range((unsigned long)dst, \
-                                  (unsigned long)dst + len); \
+        if (vma->vm_flags & VM_EXEC) { \
+                dcache_wb_range((unsigned long)dst, \
+                                (unsigned long)dst + len); \
+                flush_icache_mm_range(current->mm, \
+                                (unsigned long)dst, \
+                                (unsigned long)dst + len); \
+        } \
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
         memcpy(dst, src, len)
1 change: 1 addition & 0 deletions arch/csky/include/asm/cacheflush.h
@@ -4,6 +4,7 @@
 #ifndef __ASM_CSKY_CACHEFLUSH_H
 #define __ASM_CSKY_CACHEFLUSH_H
 
+#include <linux/mm.h>
 #include <abi/cacheflush.h>
 
 #endif /* __ASM_CSKY_CACHEFLUSH_H */
1 change: 1 addition & 0 deletions arch/csky/include/asm/mmu.h
@@ -7,6 +7,7 @@
 typedef struct {
         atomic64_t asid;
         void *vdso;
+        cpumask_t icache_stale_mask;
 } mm_context_t;
 
 #endif /* __ASM_CSKY_MMU_H */
2 changes: 2 additions & 0 deletions arch/csky/include/asm/mmu_context.h
@@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
         TLBMISS_HANDLER_SETUP_PGD(next->pgd);
         write_mmu_entryhi(next->context.asid.counter);
+
+        flush_icache_deferred(next);
 }
 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
13 changes: 5 additions & 8 deletions arch/csky/mm/syscache.c
@@ -3,7 +3,7 @@
 
 #include <linux/syscalls.h>
 #include <asm/page.h>
-#include <asm/cache.h>
+#include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
 SYSCALL_DEFINE3(cacheflush,
@@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush,
 {
         switch (cache) {
         case ICACHE:
-                icache_inv_range((unsigned long)addr,
-                                 (unsigned long)addr + bytes);
-                break;
+        case BCACHE:
+                flush_icache_mm_range(current->mm,
+                                (unsigned long)addr,
+                                (unsigned long)addr + bytes);
         case DCACHE:
                 dcache_wb_range((unsigned long)addr,
                                 (unsigned long)addr + bytes);
                 break;
-        case BCACHE:
-                cache_wbinv_range((unsigned long)addr,
-                                 (unsigned long)addr + bytes);
-                break;
         default:
                 return -EINVAL;
         }
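
User-visible effect of this change: an ICACHE or BCACHE cacheflush(2) request now goes through flush_icache_mm_range() rather than a raw range invalidate (note that in this version ICACHE also falls through into the DCACHE writeback). A hedged sketch of a userspace caller; the ICACHE/DCACHE/BCACHE values and the (addr, bytes, cache) argument order are assumed to match the csky uapi headers, and __NR_cacheflush must come from the toolchain:

#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Assumed to match arch/csky/include/uapi/asm/cachectl.h. */
#define ICACHE  (1 << 0)
#define DCACHE  (1 << 1)
#define BCACHE  (ICACHE | DCACHE)

/*
 * After writing instructions into buf (e.g. from a JIT), make them
 * fetchable before jumping to them.  Returns 0 or -1 with errno set.
 */
static int sync_code(void *buf, size_t len)
{
        return syscall(__NR_cacheflush, buf, len, BCACHE);
}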
