Commit 1fc82a8

---
yaml
---
r: 308074
b: refs/heads/master
c: d5d14ed
h: refs/heads/master
v: v3
Chris Metcalf committed May 25, 2012
1 parent dd6c878 commit 1fc82a8
Showing 21 changed files with 346 additions and 198 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 47d632f9f8f3ed62b21f725e98b726d65769b6d7
refs/heads/master: d5d14ed6f2db7287a5088e1350cf422bf72140b3
25 changes: 25 additions & 0 deletions trunk/arch/tile/Kconfig
@@ -139,6 +139,31 @@ config NR_CPUS
smaller kernel memory footprint results from using a smaller
value on chips with fewer tiles.

if TILEGX

choice
prompt "Kernel page size"
default PAGE_SIZE_64KB
help
This lets you select the page size of the kernel. For best
performance on memory-intensive applications, a page size of 64KB
is recommended. For workloads involving many small files, many
connections, etc., it may be better to select 16KB, which uses
memory more efficiently at some cost in TLB performance.

Note that this option is TILE-Gx specific; currently
TILEPro page size is set by rebuilding the hypervisor.

config PAGE_SIZE_16KB
bool "16KB"

config PAGE_SIZE_64KB
bool "64KB"

endchoice

endif

source "kernel/time/Kconfig"

source "kernel/Kconfig.hz"
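The help text above trades TLB reach against per-object waste. A back-of-the-envelope comparison of the two choices (illustrative only — the TLB entry count below is an assumed example figure, not a TILE-Gx hardware parameter):

/* Illustrative comparison of the two selectable page sizes.  The TLB entry
 * count is an assumed example value, not a TILE-Gx hardware figure. */
#include <stdio.h>

int main(void)
{
	const unsigned long page_kb[] = { 16, 64 };
	const unsigned long tlb_entries = 32;	/* hypothetical DTLB size */

	for (int i = 0; i < 2; i++) {
		unsigned long kb = page_kb[i];
		printf("%2luKB pages: %lu TLB entries cover %4luKB; "
		       "a tiny file still takes a whole %luKB page\n",
		       kb, tlb_entries, tlb_entries * kb, kb);
	}
	return 0;
}

Larger pages stretch TLB coverage fourfold at the cost of coarser allocation granularity, which is exactly the tradeoff the help text describes.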
1 change: 0 additions & 1 deletion trunk/arch/tile/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += module.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
2 changes: 1 addition & 1 deletion trunk/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
* Written under the mmap_sem semaphore; read without the
* semaphore but atomically, but it is conservatively set.
*/
unsigned int priority_cached;
unsigned long priority_cached;
};

typedef struct mm_context mm_context_t;
8 changes: 6 additions & 2 deletions trunk/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}

/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
/*
* Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
* also call hv_install_context().
*/
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
/* FIXME: DIRECTIO should not always be set. FIXME. */
int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
int rc = hv_install_context(__pa(pgdir), prot, asid,
HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
if (rc < 0)
panic("hv_install_context failed: %d", rc);
}
40 changes: 40 additions & 0 deletions trunk/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

#ifndef _ASM_TILE_MODULE_H
#define _ASM_TILE_MODULE_H

#include <arch/chip.h>

#include <asm-generic/module.h>

/* We can't use modules built with different page sizes. */
#if defined(CONFIG_PAGE_SIZE_16KB)
# define MODULE_PGSZ " 16KB"
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define MODULE_PGSZ " 64KB"
#else
# define MODULE_PGSZ ""
#endif

/* We don't really support no-SMP so tag if someone tries. */
#ifdef CONFIG_SMP
#define MODULE_NOSMP ""
#else
#define MODULE_NOSMP " nosmp"
#endif

#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP

#endif /* _ASM_TILE_MODULE_H */
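These three macros are concatenated into the architecture part of the module vermagic string, so a module built with a different page size (or a non-SMP build) fails the vermagic check at load time. A standalone sketch of the resulting strings — the "tilegx" chip name is assumed here for illustration; the real value comes from <arch/chip.h>:

/* Standalone illustration (not kernel code): what MODULE_ARCH_VERMAGIC
 * expands to under a few configurations, assuming CHIP_ARCH_NAME is
 * "tilegx". */
#include <stdio.h>

#define CHIP_ARCH_NAME	"tilegx"

int main(void)
{
	puts(CHIP_ARCH_NAME " 64KB" "");	/* CONFIG_PAGE_SIZE_64KB + SMP   */
	puts(CHIP_ARCH_NAME " 16KB" " nosmp");	/* CONFIG_PAGE_SIZE_16KB, no SMP */
	puts(CHIP_ARCH_NAME "" "");		/* hypervisor-default size + SMP */
	return 0;
}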
13 changes: 11 additions & 2 deletions trunk/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
#include <arch/chip.h>

/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#if defined(CONFIG_PAGE_SIZE_16KB)
#define PAGE_SHIFT 14
#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PAGE_SHIFT 16
#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
#else
#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
#define CTX_PAGE_FLAG 0
#endif
#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE

#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
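PAGE_SHIFT is now pinned at build time by the Kconfig choice, and CTX_PAGE_FLAG is the matching flag that __install_page_table() (see the mmu_context.h hunk above) ORs into its hv_install_context() call, so the hypervisor knows which small-page size the kernel was built for. The arithmetic is simply PAGE_SIZE = 1 << PAGE_SHIFT:

/* Illustrative arithmetic only: how each Kconfig choice fixes PAGE_SHIFT,
 * PAGE_SIZE, and the hypervisor context flag together. */
#include <stdio.h>

int main(void)
{
	const struct {
		const char *cfg;
		int shift;
		const char *ctx_flag;
	} opt[] = {
		{ "CONFIG_PAGE_SIZE_16KB", 14, "HV_CTX_PG_SM_16K" },
		{ "CONFIG_PAGE_SIZE_64KB", 16, "HV_CTX_PG_SM_64K" },
	};

	for (int i = 0; i < 2; i++)
		printf("%s: PAGE_SHIFT=%d PAGE_SIZE=%lu CTX_PAGE_FLAG=%s\n",
		       opt[i].cfg, opt[i].shift, 1UL << opt[i].shift,
		       opt[i].ctx_flag);
	return 0;
}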
92 changes: 67 additions & 25 deletions trunk/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT \
(HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *ptep)
{
set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
__pgprot(_PAGE_PRESENT)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t page)
{
set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
__pgprot(_PAGE_PRESENT)));
}

@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pte_free(struct mm_struct *mm, struct page *pte);
extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
int order);
extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
}

#define pmd_pgtable(pmd) pmd_page(pmd)

@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
pte_free(mm, virt_to_page(pte));
}

extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
unsigned long address);
extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
unsigned long address, int order);
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
unsigned long address)
{
__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
}

#define check_pgt_cache() do { } while (0)
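The old extern pte_alloc_one()/pte_free() pair becomes thin wrappers around new order-based helpers, pgtable_alloc_one() and pgtable_free(), so the same allocator can back both L2 tables and (on TILE-Gx, below) L1 tables of a different size. Their definitions live in one of the changed files not shown in this excerpt; a minimal sketch of what an order-based page-table allocator can look like, under the assumption that pgtable_t is a struct page pointer here:

/*
 * Minimal sketch, not this commit's actual implementation: allocate a
 * page-table block of 2^order pages and register it as a page table.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

pgtable_t pgtable_alloc_one_sketch(struct mm_struct *mm,
				   unsigned long address, int order)
{
	struct page *p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (p == NULL)
		return NULL;
	pgtable_page_ctor(p);		/* account it as a page-table page */
	return p;
}

void pgtable_free_sketch(struct mm_struct *mm, struct page *p, int order)
{
	pgtable_page_dtor(p);
	__free_pages(p, order);
}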

@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
void shatter_huge_page(unsigned long addr);

#ifdef __tilegx__
/* We share a single page allocator for both L1 and L2 page tables. */
#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif
#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER

#define pud_populate(mm, pud, pmd) \
pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
#define pmd_alloc_one(mm, addr) \
((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
#define pmd_free(mm, pmdp) \
pte_free((mm), virt_to_page(pmdp))
#define __pmd_free_tlb(tlb, pmdp, address) \
__pte_free_tlb((tlb), virt_to_page(pmdp), (address))

/* Bits for the size of the L1 (intermediate) page table. */
#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)

/* How big is a kernel L1 page table? */
#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)

/* We currently allocate L1 page tables by page. */
#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for an L1 page table? */
#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
return (pmd_t *)page_to_virt(p);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long address)
{
__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
L1_USER_PGTABLE_ORDER);
}

#endif /* __tilegx__ */

#endif /* _ASM_TILE_PGALLOC_H */
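All of the shift/order macros in this file follow one pattern: compute the table size in bytes from the huge-page, small-page, and PTE sizes, then round user allocations up to at least one whole page. Worked numbers for one configuration — the huge-page and PTE sizes below are assumptions for illustration, and _HV_LOG2_L2_SIZE is taken to keep the old (large shift - small shift + log2 PTE size) formula:

/* Illustrative arithmetic only; 16KB small pages, 16MB huge pages, and
 * 8-byte PTEs are assumed example values, not authoritative figures. */
#include <stdio.h>

static int user_order(int table_shift, int page_shift)
{
	/* User tables are allocated in whole pages: at least one page. */
	int user_shift = table_shift < page_shift ? page_shift : table_shift;
	return user_shift - page_shift;
}

int main(void)
{
	int page_shift = 14;	/* CONFIG_PAGE_SIZE_16KB */
	int hpage_shift = 24;	/* assuming 16MB huge pages */
	int log2_pte = 3;	/* assuming 8-byte PTEs */

	int l2_shift = hpage_shift - page_shift + log2_pte;	/* 13 */

	printf("kernel L2 table: %d bytes\n", 1 << l2_shift);	/* 8192 */
	printf("user L2 order: %d\n", user_order(l2_shift, page_shift)); /* 0 */
	return 0;
}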
10 changes: 6 additions & 4 deletions trunk/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;
@@ -162,7 +164,7 @@ extern void set_page_homes(void);
(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
#define pte_pgprot(x) hv_pte_set_pa((x), 0)

/*
* For PTEs and PDEs, we must clear the Present bit first when
@@ -262,7 +264,7 @@ static inline int pte_none(pte_t pte)

static inline unsigned long pte_pfn(pte_t pte)
{
return hv_pte_get_pfn(pte);
return PFN_DOWN(hv_pte_get_pa(pte));
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -271,7 +273,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
return hv_pte_set_pfn(prot, pfn);
return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}

/* Support for priority mappings. */
@@ -471,7 +473,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* OK for pte_lockptr(), since we just end up with potentially one
* lock being used for several pte_t arrays.
*/
#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))

static inline void pmd_clear(pmd_t *pmdp)
{
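The pgtable.h changes switch the hv_pte accessors from page-frame numbers to physical addresses; PFN_PHYS() and PFN_DOWN() fold PAGE_SHIFT back in on the Linux side, so the same hypervisor PTE encoding works whichever small-page size the kernel was built with. A sketch of the round trip (the shift and frame number are example values):

/* Illustrative round trip: pfn -> physical address -> pfn, mirroring what
 * PFN_PHYS() and PFN_DOWN() do for the configured PAGE_SHIFT. */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 16;		/* CONFIG_PAGE_SIZE_64KB */
	unsigned long pfn = 0x1234;		/* arbitrary example frame */

	unsigned long pa = pfn << page_shift;	/* PFN_PHYS(pfn) */
	unsigned long back = pa >> page_shift;	/* PFN_DOWN(pa)  */

	printf("pfn %#lx -> pa %#lx -> pfn %#lx\n", pfn, pa, back);
	return 0;
}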
14 changes: 8 additions & 6 deletions trunk/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
* The level-1 index is defined by the huge page size. A PGD is composed
* of PTRS_PER_PGD pgd_t's and is the top level of the page table.
*/
#define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#define PGDIR_SIZE HV_PAGE_SIZE_LARGE
#define PGDIR_SHIFT HPAGE_SHIFT
#define PGDIR_SIZE HPAGE_SIZE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)

/*
* The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

#ifndef __ASSEMBLY__

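On 32-bit TILEPro the top-level table maps the whole 32-bit address space in huge-page-sized chunks, so its geometry falls out of HPAGE_SHIFT. Worked numbers under assumed values (16MB huge pages and an 8-byte pgd entry are illustrative assumptions, not figures from this commit):

/* Illustrative arithmetic for the 32-bit top-level page table; the
 * huge-page size and entry size are assumed example values. */
#include <stdio.h>

int main(void)
{
	int va_bits = 32;			/* 32-bit virtual addresses */
	int hpage_shift = 24;			/* assuming 16MB huge pages */
	unsigned long entry_bytes = 8;		/* assuming 64-bit HV_PTE entries */

	unsigned long ptrs_per_pgd = 1UL << (va_bits - hpage_shift);	/* 256 */

	printf("PTRS_PER_PGD = %lu, SIZEOF_PGD = %lu bytes\n",
	       ptrs_per_pgd, ptrs_per_pgd * entry_bytes);		/* 2048 */
	return 0;
}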
(Diffs for the remaining 11 changed files are not shown here.)
