Skip to content

Commit

Permalink
ARM: Pass VMA to copy_user_highpage() implementations
Browse files Browse the repository at this point in the history
Our copy_user_highpage() implementations may require cache maintenance.
Ensure that implementations have all necessary details to perform this
maintenance.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  • Loading branch information
Russell King committed Oct 5, 2009
1 parent 8a0382f commit f00a75c
Show file tree
Hide file tree
Showing 9 changed files with 13 additions and 12 deletions.
7 changes: 4 additions & 3 deletions arch/arm/include/asm/page.h
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,12 @@
#endif

struct page;
struct vm_area_struct;

struct cpu_user_fns {
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
unsigned long vaddr);
unsigned long vaddr, struct vm_area_struct *vma);
};

#ifdef MULTI_USER
Expand All @@ -137,15 +138,15 @@ extern struct cpu_user_fns cpu_user;

extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr);
unsigned long vaddr, struct vm_area_struct *vma);
#endif

#define clear_user_highpage(page,vaddr) \
__cpu_clear_user_highpage(page, vaddr)

#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
__cpu_copy_user_highpage(to, from, vaddr)
__cpu_copy_user_highpage(to, from, vaddr, vma)

#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-feroceon.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
}

void feroceon_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-v3.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom)
}

void v3_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-v4mc.c
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to)
}

void v4_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);

Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-v4wb.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-v4wt.c
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
}

void v4wt_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

Expand Down
4 changes: 2 additions & 2 deletions arch/arm/mm/copypage-v6.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock);
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_highpage_nonaliasing(struct page *to,
struct page *from, unsigned long vaddr)
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

Expand Down Expand Up @@ -73,7 +73,7 @@ static void discard_old_kernel_data(void *kto)
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
struct page *from, unsigned long vaddr)
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-xsc3.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

Expand Down
2 changes: 1 addition & 1 deletion arch/arm/mm/copypage-xscale.c
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to)
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);

Expand Down

0 comments on commit f00a75c

Please sign in to comment.