Commit 6eae8da

---
r: 218732
b: refs/heads/master
c: 5a226c6
h: refs/heads/master
v: v3

Mark Salter authored and David Howells committed Oct 27, 2010
1 parent f58ddf6 commit 6eae8da
Showing 3 changed files with 25 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 368dd5acd154b09c043cc4392a74da01599b37d5
+refs/heads/master: 5a226c6f5c374a0d565dac609907085b944979b5
3 changes: 3 additions & 0 deletions trunk/arch/mn10300/include/asm/pgtable.h
@@ -182,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
 
+#define __PAGE_USERIO		(__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO		__pgprot(__PAGE_USERIO)
+
 /*
  * Whilst the MN10300 can do page protection for execute (given separate data
  * and insn TLBs), we are not supporting it at the moment. Write permission,
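For readers skimming the diff: __PAGE_USERIO is the raw protection mask (the kernel base bits, a user-accessible protection setting and the no-execute bit), and __pgprot() merely wraps that mask in the pgprot_t type so it can be handed to helpers such as pfn_pte(). A minimal sketch of how a PTE carrying this protection is built and installed, mirroring the init.c hunk further down (install_user_io_page() is a hypothetical wrapper used only for illustration, not part of the commit):

#include <asm/pgtable.h>

/* Minimal sketch, not part of the commit: map the page frame containing
 * phys_addr with the new PAGE_USERIO protection by writing a PTE at *ptep.
 * set_pte() and pfn_pte() are the standard page-table helpers; the wrapper
 * name is made up for this example.
 */
static void install_user_io_page(pte_t *ptep, unsigned long phys_addr)
{
	set_pte(ptep, pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_USERIO));
}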
21 changes: 21 additions & 0 deletions trunk/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 unsigned long highstart_pfn, highend_pfn;
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
 /*
  * set up paging
  */
@@ -73,6 +77,23 @@ void __init paging_init(void)
 	/* pass the memory from the bootmem allocator to the main allocator */
 	free_area_init(zones_size);
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	/* The Atomic Operation Unit registers need to be mapped to userspace
+	 * for all processes.  The following uses vm_area_register_early() to
+	 * reserve the first page of the vmalloc area and sets the pte for that
+	 * page.
+	 *
+	 * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+	 * it from now on.
+	 */
+	user_iomap_vm.flags = VM_USERMAP;
+	user_iomap_vm.size = 1 << PAGE_SHIFT;
+	vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+	ppte = kernel_vmalloc_ptes;
+	set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+			      PAGE_USERIO));
+#endif
+
 	local_flush_tlb_all();
 }

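Because vm_area_register_early() runs before the regular vmalloc allocator is initialised, the reservation made above lands in the first page of the vmalloc area (assuming, as the commit does, that it is the first such registration), which is what allows glibc to hardcode the resulting virtual address. The general pattern is sketched below, outside the context of this commit; the function and variable names are placeholders, and the caller is assumed to install the PTE itself, as paging_init() does above.

#include <linux/init.h>
#include <linux/vmalloc.h>

/* Illustrative sketch only: reserve one page of the vmalloc area very early
 * in boot so that its virtual address is fixed and predictable.  The
 * vm_struct must have static storage because it is registered before the
 * normal vmalloc bookkeeping exists.
 */
static struct vm_struct early_user_page_vm;

static void __init reserve_fixed_user_page(void)
{
	early_user_page_vm.flags = VM_USERMAP;	/* page may be mapped into userspace */
	early_user_page_vm.size = PAGE_SIZE;	/* reserve exactly one page */
	vm_area_register_early(&early_user_page_vm, PAGE_SIZE);
	/* early_user_page_vm.addr now holds the reserved virtual address;
	 * a PTE for it still has to be installed by hand, as the hunk above
	 * does with set_pte()/pfn_pte().
	 */
}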
