
Commit f48f8ca
---
yaml
---
r: 123560
b: refs/heads/master
c: 063b0a4
h: refs/heads/master
v: v3
Russell King authored and committed on Nov 27, 2008
1 parent cbfc407 commit f48f8ca
Showing 11 changed files with 162 additions and 84 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: d73e60b7144a86baf0fdfcc9537a70bb4f72e11c
refs/heads/master: 063b0a4207e43acbeff3d4b09f43e750e0212b48
23 changes: 14 additions & 9 deletions trunk/arch/arm/include/asm/page.h
@@ -108,30 +108,35 @@
#error Unknown user operations model
#endif

struct page;

struct cpu_user_fns {
void (*cpu_clear_user_page)(void *p, unsigned long user);
void (*cpu_copy_user_page)(void *to, const void *from,
unsigned long user);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
unsigned long vaddr);
};

#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;

#define __cpu_clear_user_page cpu_user.cpu_clear_user_page
#define __cpu_copy_user_page cpu_user.cpu_copy_user_page
#define __cpu_clear_user_page cpu_user.cpu_clear_user_page
#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage

#else

#define __cpu_clear_user_page __glue(_USER,_clear_user_page)
#define __cpu_copy_user_page __glue(_USER,_copy_user_page)
#define __cpu_clear_user_page __glue(_USER,_clear_user_page)
#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage)

extern void __cpu_clear_user_page(void *p, unsigned long user);
extern void __cpu_copy_user_page(void *to, const void *from,
unsigned long user);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr);
#endif

#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)

#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
__cpu_copy_user_highpage(to, from, vaddr)

#define clear_page(page) memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, const void *from);
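The hunk above replaces the cpu_copy_user_page hook with cpu_copy_user_highpage, which takes struct page pointers instead of already-mapped kernel addresses. The sketch below is illustrative only and is not part of this commit: it shows the shape a processor-specific backend takes against the new interface. The example_* names are hypothetical; the kmap_atomic()/kunmap_atomic() pairing mirrors the wrappers added in the copypage-*.c files further down.

#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/page.h>

static void example_clear_user_page(void *kaddr, unsigned long vaddr)
{
	clear_page(kaddr);		/* memzero() the kernel mapping */
}

static void example_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr)
{
	void *kto, *kfrom;

	/*
	 * Highmem pages have no permanent kernel mapping, so map both
	 * pages just for the duration of the copy.
	 */
	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	copy_page(kto, kfrom);		/* plus any CPU-specific cache work */
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

struct cpu_user_fns example_user_fns __initdata = {
	.cpu_clear_user_page	= example_clear_user_page,
	.cpu_copy_user_highpage	= example_copy_user_highpage,
};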
23 changes: 17 additions & 6 deletions trunk/arch/arm/mm/copypage-feroceon.c
@@ -7,15 +7,14 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This handles copy_user_page and clear_user_page on Feroceon
* This handles copy_user_highpage and clear_user_page on Feroceon
* more optimally than the generic implementations.
*/
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/page.h>

void __attribute__((naked))
feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
static void __attribute__((naked))
feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4-r9, lr} \n\
@@ -68,6 +67,18 @@ feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE));
}

void feroceon_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;

kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
feroceon_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}

void __attribute__((naked))
feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
{
@@ -95,6 +106,6 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr)

struct cpu_user_fns feroceon_user_fns __initdata = {
.cpu_clear_user_page = feroceon_clear_user_page,
.cpu_copy_user_page = feroceon_copy_user_page,
.cpu_copy_user_highpage = feroceon_copy_user_highpage,
};
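
Each copypage implementation now registers a *_copy_user_highpage() wrapper like the one above, which maps the highmem pages with kmap_atomic() before calling the unchanged (and now static) assembly copy routine. For orientation only, here is a hypothetical caller-side sketch, not taken from this commit: with __HAVE_ARCH_COPY_USER_HIGHPAGE defined in asm/page.h, generic code that duplicates a user page reaches the per-CPU hook through the copy_user_highpage() macro. The function and variable names below are made up.

#include <linux/highmem.h>
#include <linux/mm.h>

static void example_duplicate_user_page(struct page *dst, struct page *src,
		unsigned long fault_address, struct vm_area_struct *vma)
{
	/*
	 * On ARM this expands to __cpu_copy_user_highpage(dst, src,
	 * fault_address); with MULTI_USER and feroceon_user_fns
	 * selected, it ends up in feroceon_copy_user_highpage().
	 */
	copy_user_highpage(dst, src, fault_address, vma);
}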

23 changes: 17 additions & 6 deletions trunk/arch/arm/mm/copypage-v3.c
@@ -8,16 +8,15 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>

#include <asm/page.h>
#include <linux/highmem.h>

/*
* ARMv3 optimised copy_user_page
* ARMv3 optimised copy_user_highpage
*
* FIXME: do we need to handle cache stuff...
*/
void __attribute__((naked))
v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
static void __attribute__((naked))
v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
stmfd sp!, {r4, lr} @ 2\n\
@@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
}

void v3_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;

kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
v3_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}

/*
* ARMv3 optimised clear_user_page
*
@@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)

struct cpu_user_fns v3_user_fns __initdata = {
.cpu_clear_user_page = v3_clear_user_page,
.cpu_copy_user_page = v3_copy_user_page,
.cpu_copy_user_highpage = v3_copy_user_highpage,
};
21 changes: 12 additions & 9 deletions trunk/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
static DEFINE_SPINLOCK(minicache_lock);

/*
* ARMv4 mini-dcache optimised copy_user_page
* ARMv4 mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_page that does the right thing.
* own copy_user_highpage that does the right thing.
*/
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
@@ -68,21 +68,24 @@ mc_copy_user_page(void *from, void *to)
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}

void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
void v4_mc_copy_user_highpage(struct page *from, struct page *to,
unsigned long vaddr)
{
struct page *page = virt_to_page(kfrom);
void *kto = kmap_atomic(to, KM_USER1);

if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page_mapping(page), page);
if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
__flush_dcache_page(page_mapping(from), from);

spin_lock(&minicache_lock);

set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(0xffff8000);

mc_copy_user_page((void *)0xffff8000, kto);

spin_unlock(&minicache_lock);

kunmap_atomic(kto, KM_USER1);
}

/*
@@ -113,5 +116,5 @@ v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)

struct cpu_user_fns v4_mc_user_fns __initdata = {
.cpu_clear_user_page = v4_mc_clear_user_page,
.cpu_copy_user_page = v4_mc_copy_user_page,
.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
};
25 changes: 18 additions & 7 deletions trunk/arch/arm/mm/copypage-v4wb.c
@@ -8,11 +8,10 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>

#include <asm/page.h>
#include <linux/highmem.h>

/*
* ARMv4 optimised copy_user_page
* ARMv4 optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -21,10 +20,10 @@
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_page that does the right thing.
* own copy_user_highpage that does the right thing.
*/
void __attribute__((naked))
v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
static void __attribute__((naked))
v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
@@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE / 64));
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;

kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
v4wb_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}

/*
* ARMv4 optimised clear_user_page
*
@@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr)

struct cpu_user_fns v4wb_user_fns __initdata = {
.cpu_clear_user_page = v4wb_clear_user_page,
.cpu_copy_user_page = v4wb_copy_user_page,
.cpu_copy_user_highpage = v4wb_copy_user_highpage,
};
23 changes: 17 additions & 6 deletions trunk/arch/arm/mm/copypage-v4wt.c
@@ -11,18 +11,17 @@
* the only supported cache operation.
*/
#include <linux/init.h>

#include <asm/page.h>
#include <linux/highmem.h>

/*
* ARMv4 optimised copy_user_page
* ARMv4 optimised copy_user_highpage
*
* Since we have writethrough caches, we don't have to worry about
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
void __attribute__((naked))
v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
static void __attribute__((naked))
v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
@@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE / 64));
}

void v4wt_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;

kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
v4wt_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}

/*
* ARMv4 optimised clear_user_page
*
@@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr)

struct cpu_user_fns v4wt_user_fns __initdata = {
.cpu_clear_user_page = v4wt_clear_user_page,
.cpu_copy_user_page = v4wt_copy_user_page,
.cpu_copy_user_highpage = v4wt_copy_user_highpage,
};
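
A closing note on how the names in asm/page.h resolve in a single-CPU build. asm/glue.h is not part of this diff, so the ____glue()/__glue() helpers in the sketch below are an assumption based on their usual token-pasting form, and the _USER value is only an example.

/* Illustrative only -- mimics the usual asm/glue.h helpers. */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define _USER	v4wb		/* e.g. a kernel built for the v4wb copy routines only */

/*
 * With the !MULTI_USER definition from asm/page.h,
 *
 *     #define __cpu_copy_user_highpage  __glue(_USER,_copy_user_highpage)
 *
 * now pastes to v4wb_copy_user_highpage, so copy_user_highpage(to,
 * from, vaddr, vma) calls that function directly.  A MULTI_USER kernel
 * instead calls through cpu_user.cpu_copy_user_highpage, which is
 * filled in at boot from one of the *_user_fns tables above (for
 * example v4wb_user_fns).
 */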
(The remaining 4 changed files are not shown.)
