Commit
---
r: 360638
b: refs/heads/master
c: f5df8e2
h: refs/heads/master
v: v3
James Hogan committed Mar 2, 2013
1 parent 75dde90 commit e13006c
Showing 11 changed files with 1,834 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 99ef7c2ac1e3b01f532bfdebbe92e9960e95bebc
+refs/heads/master: f5df8e268f749987c32c7eee001f7623fd7be69c
77 changes: 77 additions & 0 deletions trunk/arch/metag/include/asm/mmu.h
@@ -0,0 +1,77 @@
#ifndef __MMU_H
#define __MMU_H

#ifdef CONFIG_METAG_USER_TCM
#include <linux/list.h>
#endif

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#endif

typedef struct {
	/* Software pgd base pointer used for Meta 1.x MMU. */
	unsigned long pgd_base;
#ifdef CONFIG_METAG_USER_TCM
	struct list_head tcm;
#endif
#ifdef CONFIG_HUGETLB_PAGE
#if HPAGE_SHIFT < HUGEPT_SHIFT
	/* last partially filled huge page table address */
	unsigned long part_huge;
#endif
#endif
} mm_context_t;

/* Given a virtual address, return the pte for the top level 4meg entry
* that maps that address.
* Returns 0 (an empty pte) if that range is not mapped.
*/
unsigned long mmu_read_first_level_page(unsigned long vaddr);

/* Given a linear (virtual) address, return the second level 4k pte
* that maps that address. Returns 0 if the address is not mapped.
*/
unsigned long mmu_read_second_level_page(unsigned long vaddr);

/* Get the virtual base address of the MMU */
unsigned long mmu_get_base(void);

/* Initialize the MMU. */
void mmu_init(unsigned long mem_end);

#ifdef CONFIG_METAG_META21_MMU
/*
* For cpu "cpu" calculate and return the address of the
* MMCU_TnLOCAL_TABLE_PHYS0 if running in local-space or
* MMCU_TnGLOBAL_TABLE_PHYS0 if running in global-space.
*/
static inline unsigned long mmu_phys0_addr(unsigned int cpu)
{
	unsigned long phys0;

	phys0 = (MMCU_T0LOCAL_TABLE_PHYS0 +
		 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
		(MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));

	return phys0;
}

/*
* For cpu "cpu" calculate and return the address of the
* MMCU_TnLOCAL_TABLE_PHYS1 if running in local-space or
* MMCU_TnGLOBAL_TABLE_PHYS1 if running in global-space.
*/
static inline unsigned long mmu_phys1_addr(unsigned int cpu)
{
	unsigned long phys1;

	phys1 = (MMCU_T0LOCAL_TABLE_PHYS1 +
		 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
		(MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));

	return phys1;
}
#endif /* CONFIG_METAG_META21_MMU */

#endif
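
The mmu_phys0_addr()/mmu_phys1_addr() helpers above pick a per-thread MMU table register by starting from the thread-0 local-space register, stepping by a per-thread stride, and adding a fixed offset when the kernel runs in global space. The sketch below restates that arithmetic as plain user-space C; the FAKE_* constants and the *_example() names are hypothetical placeholders, not the real MMCU_* register values.

/*
 * Minimal user-space sketch (not kernel code) of the PHYS0/PHYS1 register
 * address arithmetic in mmu_phys0_addr()/mmu_phys1_addr() above. The FAKE_*
 * constants below are hypothetical placeholder values, not the real MMCU_*
 * register addresses.
 */
#include <stdio.h>

#define FAKE_T0LOCAL_TABLE_PHYS0	0x04830700u	/* placeholder base register address */
#define FAKE_TnX_TABLE_PHYSX_STRIDE	0x10u		/* placeholder per-thread stride */
#define FAKE_TXG_TABLE_PHYSX_OFFSET	0x08u		/* placeholder local->global bank offset */

/* mirrors the is_global_space() test from asm/page.h */
static int is_global_space_example(unsigned long addr)
{
	return addr > 0x7fffffff;
}

/* same shape as mmu_phys0_addr(): base + per-thread stride + optional global offset */
static unsigned long phys0_addr_example(unsigned int cpu, unsigned long page_offset)
{
	return FAKE_T0LOCAL_TABLE_PHYS0
		+ FAKE_TnX_TABLE_PHYSX_STRIDE * cpu
		+ FAKE_TXG_TABLE_PHYSX_OFFSET * is_global_space_example(page_offset);
}

int main(void)
{
	/* hardware thread 2, kernel mapped in global space (PAGE_OFFSET 0xC0000000) */
	printf("PHYS0 register address: 0x%lx\n", phys0_addr_example(2, 0xC0000000ul));
	return 0;
}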
113 changes: 113 additions & 0 deletions trunk/arch/metag/include/asm/mmu_context.h
@@ -0,0 +1,113 @@
#ifndef __METAG_MMU_CONTEXT_H
#define __METAG_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include <linux/io.h>

static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
#ifndef CONFIG_METAG_META21_MMU
	/* We use context to store a pointer to the page holding the
	 * pgd of a process while it is running. While a process is not
	 * running the pgd and context fields should be equal.
	 */
	mm->context.pgd_base = (unsigned long) mm->pgd;
#endif
#ifdef CONFIG_METAG_USER_TCM
	INIT_LIST_HEAD(&mm->context.tcm);
#endif
	return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#include <linux/slab.h>
#include <asm/tcm.h>

static inline void destroy_context(struct mm_struct *mm)
{
	struct tcm_allocation *pos, *n;

	list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
		tcm_free(pos->tag, pos->addr, pos->size);
		list_del(&pos->list);
		kfree(pos);
	}
}
#else
#define destroy_context(mm) do { } while (0)
#endif

#ifdef CONFIG_METAG_META21_MMU
static inline void load_pgd(pgd_t *pgd, int thread)
{
	unsigned long phys0 = mmu_phys0_addr(thread);
	unsigned long phys1 = mmu_phys1_addr(thread);

	/*
	 * 0x900 2Gb address space
	 * The permission bits apply to MMU table region which gives a 2MB
	 * window into physical memory. We especially don't want userland to be
	 * able to access this.
	 */
	metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
		    _PAGE_PRESENT, phys0);
	/* Set new MMU base address */
	metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
}
#endif

static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_METAG_META21_MMU
	load_pgd(next->pgd, hard_processor_id());
#else
	unsigned int i;

	/* prev->context == prev->pgd in the case where we are initially
	   switching from the init task to the first process. */
	if (prev->context.pgd_base != (unsigned long) prev->pgd) {
		for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
			((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
	} else
		prev->pgd = (pgd_t *)mmu_get_base();

	next->pgd = prev->pgd;
	prev->pgd = (pgd_t *) prev->context.pgd_base;

	for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
		next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];

	flush_cache_all();
#endif
	flush_tlb_all();
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next)
		switch_mmu(prev, next);
}

static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mmu(prev_mm, next_mm);
}

#define deactivate_mm(tsk, mm) do { } while (0)

#endif
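
On Meta 1.x (the !CONFIG_METAG_META21_MMU path of switch_mmu() above) there is a single pgd the hardware actually walks, obtained via mmu_get_base(), while each mm keeps a software copy at context.pgd_base; a context switch writes the outgoing mm's user entries back to its copy and loads the incoming mm's copy into the live table. The stand-alone model below illustrates that swap under simplifying assumptions (a tiny 4-entry table, the init-task special case glossed over); struct mm_model, live_pgd and switch_mmu_model() are invented names for illustration only.

/*
 * Stand-alone model (not kernel code) of the Meta 1.x pgd swap: live_pgd
 * plays the role of the table the hardware walks (mmu_get_base()) and each
 * mm keeps a software copy standing in for context.pgd_base.
 */
#include <assert.h>
#include <string.h>

#define USER_PTRS	4	/* placeholder for USER_PTRS_PER_PGD */

struct mm_model {
	unsigned long *pgd;			/* table this mm currently points at */
	unsigned long soft_pgd[USER_PTRS];	/* stands in for context.pgd_base */
};

static unsigned long live_pgd[USER_PTRS];	/* stands in for mmu_get_base() */

static void switch_mmu_model(struct mm_model *prev, struct mm_model *next)
{
	if (prev->pgd == live_pgd)
		/* save the outgoing mm's user mappings into its software copy */
		memcpy(prev->soft_pgd, live_pgd, sizeof(live_pgd));

	prev->pgd = prev->soft_pgd;	/* outgoing mm falls back to its copy */
	next->pgd = live_pgd;		/* incoming mm takes over the hardware table */

	/* install the incoming mm's user mappings into the hardware table */
	memcpy(live_pgd, next->soft_pgd, sizeof(live_pgd));
}

int main(void)
{
	struct mm_model a = { .soft_pgd = { 0x100, 0x200, 0x300, 0x400 } };
	struct mm_model b = { .soft_pgd = { 0x500, 0x600, 0x700, 0x800 } };

	a.pgd = live_pgd;				/* pretend 'a' is currently running... */
	memcpy(live_pgd, a.soft_pgd, sizeof(live_pgd));	/* ...so its entries are in the hardware table */
	b.pgd = b.soft_pgd;				/* 'b' only has its copy, as after init_new_context() */

	switch_mmu_model(&a, &b);
	assert(b.pgd == live_pgd && live_pgd[0] == 0x500);
	assert(a.pgd == a.soft_pgd && a.soft_pgd[0] == 0x100);
	return 0;
}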
128 changes: 128 additions & 0 deletions trunk/arch/metag/include/asm/page.h
@@ -0,0 +1,128 @@
#ifndef _METAG_PAGE_H
#define _METAG_PAGE_H

#include <linux/const.h>

#include <asm/metag_mem.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4K)
#define PAGE_SHIFT 12
#elif defined(CONFIG_PAGE_SIZE_8K)
#define PAGE_SHIFT 13
#elif defined(CONFIG_PAGE_SIZE_16K)
#define PAGE_SHIFT 14
#endif

#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define HPAGE_SHIFT 13
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define HPAGE_SHIFT 14
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define HPAGE_SHIFT 15
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define HPAGE_SHIFT 16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define HPAGE_SHIFT 17
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define HPAGE_SHIFT 18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define HPAGE_SHIFT 19
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define HPAGE_SHIFT 20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define HPAGE_SHIFT 21
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define HPAGE_SHIFT 22
#endif

#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_SIZE (1UL << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE-1))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
/*
* We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level
* page tables with normal pages in them.
*/
# define HUGEPT_SHIFT (22)
# define HUGEPT_ALIGN (1 << HUGEPT_SHIFT)
# define HUGEPT_MASK (HUGEPT_ALIGN - 1)
# define ALIGN_HUGEPT(x) ALIGN(x, HUGEPT_ALIGN)
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#ifndef __ASSEMBLY__

/* On the Meta, we would like to know if the address (heap) we have is
* in local or global space.
*/
#define is_global_space(addr) ((addr) > 0x7fffffff)
#define is_local_space(addr) (!is_global_space(addr))

extern void clear_page(void *to);
extern void copy_page(void *to, void *from);

#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })

/* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that
* being either global or local space.
*/
#define PAGE_OFFSET (CONFIG_PAGE_OFFSET)

#if PAGE_OFFSET >= LINGLOBAL_BASE
#define META_MEMORY_BASE LINGLOBAL_BASE
#define META_MEMORY_LIMIT LINGLOBAL_LIMIT
#else
#define META_MEMORY_BASE LINLOCAL_BASE
#define META_MEMORY_LIMIT LINLOCAL_LIMIT
#endif

/* Offset between physical and virtual mapping of kernel memory. */
extern unsigned int meta_memoffset;

#define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset))
#define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset)))

extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET (pfn_base)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
extern unsigned long max_pfn;
extern unsigned long min_low_pfn;
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_pfn)
#endif

#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __ASSEMBLY__ */

#endif /* _METAG_PAGE_H */
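
The __pa()/__va() macros above convert between kernel virtual and physical addresses by subtracting or adding meta_memoffset, and the pfn macros shift by PAGE_SHIFT on top of that. A minimal sketch of the arithmetic follows, assuming a hypothetical offset and the 4K page-size configuration; the real meta_memoffset is set at runtime, so the numbers here are illustrative only.

/*
 * Minimal sketch (plain user-space C) of the __pa()/__va() and pfn arithmetic
 * defined above. EXAMPLE_MEMOFFSET and the page shift are hypothetical values
 * chosen for illustration.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12		/* CONFIG_PAGE_SIZE_4K case */
#define EXAMPLE_MEMOFFSET	0x80000000ul	/* placeholder for meta_memoffset */

static unsigned long example_pa(unsigned long vaddr)
{
	return vaddr - EXAMPLE_MEMOFFSET;	/* mirrors __pa(x) */
}

static unsigned long example_va(unsigned long paddr)
{
	return paddr + EXAMPLE_MEMOFFSET;	/* mirrors __va(x) */
}

int main(void)
{
	unsigned long kaddr = 0xC0123000ul;	/* hypothetical kernel virtual address */
	unsigned long paddr = example_pa(kaddr);
	unsigned long pfn = paddr >> EXAMPLE_PAGE_SHIFT;	/* as in virt_to_page()/pfn_valid() */

	printf("virt 0x%lx -> phys 0x%lx -> pfn 0x%lx -> virt 0x%lx\n",
	       kaddr, paddr, pfn, example_va(pfn << EXAMPLE_PAGE_SHIFT));
	return 0;
}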