Merge tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux
Pull csky updates from Guo Ren:
 "This round of csky subsystem just some fixups:

   - Fix mb() synchronization problem

   - Fix dma_alloc_coherent with PAGE_SO attribute

   - Fix cache_op failure when crossing memory ZONEs

   - Optimize arch_sync_dma_for_cpu/device with dma_inv_range

   - Fix ioremap function losing

   - Fix arch_get_unmapped_area() implementation

   - Fix defer cache flush for 610

   - Support kernel non-aligned access

   - Fix 610 vipt cache flush mechanism

   - Add zero_fp fixup to avoid a perf backtrace panic

   - Move static keyword to the front of declaration

   - Fix csky_pmu.max_period assignment

   - Use generic free_initrd_mem()

   - entry: Remove unneeded need_resched() loop"

* tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux:
  csky: Move static keyword to the front of declaration
  csky: entry: Remove unneeded need_resched() loop
  csky: Fixup csky_pmu.max_period assignment
  csky: Fixup add zero_fp fixup perf backtrace panic
  csky: Use generic free_initrd_mem()
  csky: Fixup 610 vipt cache flush mechanism
  csky: Support kernel non-aligned access
  csky: Fixup defer cache flush for 610
  csky: Fixup arch_get_unmapped_area() implementation
  csky: Fixup ioremap function losing
  csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range
  csky/dma: Fixup cache_op failed when cross memory ZONEs
  csky: Fixup dma_alloc_coherent with PAGE_SO attribute
  csky: Fixup mb() synchronization problem
Linus Torvalds committed Sep 30, 2019
2 parents cef0aa0 + 9af032a commit 80b29b6
Showing 17 changed files with 291 additions and 212 deletions.
arch/csky/abiv1/alignment.c (62 changes: 45 additions & 17 deletions)
@@ -5,8 +5,10 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>

static int align_enable = 1;
static int align_count;
static int align_kern_enable = 1;
static int align_usr_enable = 1;
static int align_kern_count = 0;
static int align_usr_count = 0;

static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
{
@@ -32,9 +34,6 @@ static int ldb_asm(uint32_t addr, uint32_t *valp)
uint32_t val;
int err;

if (!access_ok((void *)addr, 1))
return 1;

asm volatile (
"movi %0, 0\n"
"1:\n"
@@ -67,9 +66,6 @@ static int stb_asm(uint32_t addr, uint32_t val)
{
int err;

if (!access_ok((void *)addr, 1))
return 1;

asm volatile (
"movi %0, 0\n"
"1:\n"
@@ -203,8 +199,6 @@ static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
if (stb_asm(addr, byte3))
return 1;

align_count++;

return 0;
}

@@ -226,14 +220,34 @@ void csky_alignment(struct pt_regs *regs)
uint32_t addr = 0;

if (!user_mode(regs))
goto kernel_area;

if (!align_usr_enable) {
pr_err("%s user disabled.\n", __func__);
goto bad_area;
}

align_usr_count++;

ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
if (ret) {
pr_err("%s get_user failed.\n", __func__);
goto bad_area;
}

goto good_area;

kernel_area:
if (!align_kern_enable) {
pr_err("%s kernel disabled.\n", __func__);
goto bad_area;
}

align_kern_count++;

tmp = *(uint16_t *)instruction_pointer(regs);

good_area:
opcode = (uint32_t)tmp;

rx = opcode & 0xf;
@@ -286,18 +300,32 @@ void csky_alignment(struct pt_regs *regs)
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
}

static struct ctl_table alignment_tbl[4] = {
static struct ctl_table alignment_tbl[5] = {
{
.procname = "kernel_enable",
.data = &align_kern_enable,
.maxlen = sizeof(align_kern_enable),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "user_enable",
.data = &align_usr_enable,
.maxlen = sizeof(align_usr_enable),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "enable",
.data = &align_enable,
.maxlen = sizeof(align_enable),
.procname = "kernel_count",
.data = &align_kern_count,
.maxlen = sizeof(align_kern_count),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "count",
.data = &align_count,
.maxlen = sizeof(align_count),
.procname = "user_count",
.data = &align_usr_count,
.maxlen = sizeof(align_usr_count),
.mode = 0666,
.proc_handler = &proc_dointvec
},
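The hunks above split the single alignment sysctl pair into separate kernel/user enable switches and counters, all created with mode 0666 (world-writable). Below is a hypothetical userspace sketch of poking them; the directory /proc/sys/csky/csky_alignment/ is assumed, since the sysctl registration code is outside the hunks shown, so treat the paths as illustrative only.

/* Hypothetical sketch, not part of the commit: toggle the kernel-mode
 * alignment fixup and read the user-mode fixup counter via the assumed
 * /proc/sys/csky/csky_alignment/ entries. */
#include <stdio.h>

int main(void)
{
	FILE *f;
	int count = 0;

	/* Disable kernel-mode fixups; user-mode fixups stay enabled. */
	f = fopen("/proc/sys/csky/csky_alignment/kernel_enable", "w");
	if (f) {
		fputs("0\n", f);
		fclose(f);
	}

	/* Read how many user-space accesses have been fixed up so far. */
	f = fopen("/proc/sys/csky/csky_alignment/user_count", "r");
	if (f) {
		if (fscanf(f, "%d", &count) == 1)
			printf("user alignment fixups: %d\n", count);
		fclose(f);
	}
	return 0;
}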
arch/csky/abiv1/cacheflush.c (70 changes: 47 additions & 23 deletions)
@@ -11,42 +11,66 @@
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

#define PG_dcache_clean PG_arch_1

void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
unsigned long addr;
struct address_space *mapping;

if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_arch_1, &(page)->flags);
if (page == ZERO_PAGE(0))
return;
}

/*
* We could delay the flush for the !page_mapping case too. But that
* case is for exec env/arg pages and those are %99 certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
addr = (unsigned long) page_address(page);
dcache_wb_range(addr, addr + PAGE_SIZE);
mapping = page_mapping_file(page);

if (mapping && !page_mapcount(page))
clear_bit(PG_dcache_clean, &page->flags);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
unsigned long addr;
unsigned long pfn = pte_pfn(*ptep);
struct page *page;
unsigned long pfn;

pfn = pte_pfn(*pte);
if (unlikely(!pfn_valid(pfn)))
if (!pfn_valid(pfn))
return;

page = pfn_to_page(pfn);
addr = (unsigned long) page_address(page);
if (page == ZERO_PAGE(0))
return;

if (!test_and_set_bit(PG_dcache_clean, &page->flags))
dcache_wbinv_all();

if (vma->vm_flags & VM_EXEC ||
pages_do_alias(addr, address & PAGE_MASK))
cache_wbinv_all();
if (page_mapping_file(page)) {
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
}

void flush_kernel_dcache_page(struct page *page)
{
struct address_space *mapping;

mapping = page_mapping_file(page);

if (!mapping || mapping_mapped(mapping))
dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
dcache_wbinv_all();

clear_bit(PG_arch_1, &(page)->flags);
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
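The cacheflush.c changes above implement the deferred flush for the 610's VIPT cache: flush_dcache_page() now only clears PG_dcache_clean when a page-cache page has no user mappings yet, and update_mmu_cache() performs the write-back/invalidate once the page is actually faulted into a user address space. A simplified standalone model of that hand-off, with stubbed cache primitives and the pfn_valid()/ZERO_PAGE guards omitted (a sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool dcache_clean;	/* models the PG_dcache_clean page flag */
	bool has_mapping;	/* models page_mapping_file(page) != NULL */
	int mapcount;		/* models page_mapcount(page) */
};

static void dcache_wbinv_all_stub(void) { puts("dcache write-back + invalidate"); }
static void icache_inv_all_stub(void) { puts("icache invalidate"); }

/* Kernel dirties the page (e.g. a write into the page cache). */
static void model_flush_dcache_page(struct fake_page *p)
{
	if (p->has_mapping && p->mapcount == 0) {
		/* Nobody maps it yet: just mark it not clean and defer the flush. */
		p->dcache_clean = false;
	} else {
		dcache_wbinv_all_stub();
		if (p->has_mapping)
			icache_inv_all_stub();
		p->dcache_clean = true;
	}
}

/* A PTE for the page is installed in a user address space. */
static void model_update_mmu_cache(struct fake_page *p, bool vm_exec)
{
	if (!p->dcache_clean) {
		p->dcache_clean = true;
		dcache_wbinv_all_stub();	/* pay the deferred cost once */
	}
	if (p->has_mapping && vm_exec)
		icache_inv_all_stub();
}

int main(void)
{
	struct fake_page p = { .dcache_clean = true, .has_mapping = true, .mapcount = 0 };

	model_flush_dcache_page(&p);		/* deferred: no cache op printed */
	model_update_mmu_cache(&p, true);	/* flush and icache invalidate happen here */
	return 0;
}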
arch/csky/abiv1/inc/abi/cacheflush.h (45 changes: 31 additions & 14 deletions)
@@ -4,46 +4,63 @@
#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/compiler.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_cache_mm(mm) cache_wbinv_all()
#define flush_cache_mm(mm) dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
#define flush_cache_dup_mm(mm) cache_wbinv_all()

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)

static inline void flush_kernel_vmap_range(void *addr, int size)
{
dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
dcache_wbinv_all();
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vmaddr)
{
if (PageAnon(page))
cache_wbinv_all();
}

/*
* if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
* Use cache_wbinv_all() here and need to be improved in future.
*/
#define flush_cache_range(vma, start, end) cache_wbinv_all()
#define flush_cache_vmap(start, end) cache_wbinv_range(start, end)
#define flush_cache_vunmap(start, end) cache_wbinv_range(start, end)
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) cache_wbinv_all()
#define flush_cache_vunmap(start, end) cache_wbinv_all()

#define flush_icache_page(vma, page) cache_wbinv_all()
#define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end)

#define flush_icache_user_range(vma, pg, adr, len) \
cache_wbinv_range(adr, adr + len)
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
cache_wbinv_all(); \
memcpy(dst, src, len); \
cache_wbinv_all(); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
cache_wbinv_all(); \
memcpy(dst, src, len); \
cache_wbinv_all(); \
} while (0)

#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
arch/csky/abiv1/inc/abi/page.h (5 changes: 3 additions & 2 deletions)
@@ -1,13 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

extern unsigned long shm_align_mask;
#include <asm/shmparam.h>

extern void flush_dcache_page(struct page *page);

static inline unsigned long pages_do_alias(unsigned long addr1,
unsigned long addr2)
{
return (addr1 ^ addr2) & shm_align_mask;
return (addr1 ^ addr2) & (SHMLBA-1);
}

static inline void clear_user_page(void *addr, unsigned long vaddr,
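For reference, pages_do_alias() now derives the cache-colour mask from SHMLBA instead of the removed shm_align_mask variable. A small host-side sketch of the check, assuming SHMLBA is 4 * PAGE_SIZE (16 KiB, typical for a 4-way VIPT data cache; the real value comes from <asm/shmparam.h>):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define SHMLBA (4 * PAGE_SIZE)	/* assumed colouring window for this sketch */

/* Nonzero means the two virtual addresses fall in different cache colours,
 * so mappings of the same physical page at these addresses could alias. */
static unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & (SHMLBA - 1);
}

int main(void)
{
	/* Same colour: prints 0, no aliasing concern. */
	printf("%lu\n", pages_do_alias(0x00401000UL, 0x00405000UL));
	/* Different colour: prints a nonzero value (0x3000 here). */
	printf("%lu\n", pages_do_alias(0x00401000UL, 0x00406000UL));
	return 0;
}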