From 2eb70244bf31b43bbd55c7569b833636aacf9ee4 Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Wed, 14 Dec 2005 16:08:40 +1100
Subject: [PATCH]

--- yaml ---
r: 17205
b: refs/heads/master
c: 14c89e7fc84ae55354b8bf12fee1b6d14f259c8a
h: refs/heads/master
i:
  17203: eed581a872ffe2259afdd148a4b5a97ca0452bff
v: v3
---
 [refs]                              |  2 +-
 trunk/arch/powerpc/kernel/lparmap.c |  4 ++--
 trunk/arch/powerpc/mm/slb.c         |  6 +++---
 trunk/include/asm-powerpc/page_64.h | 10 ----------
 trunk/include/asm-powerpc/pgtable.h | 11 +++++++++++
 5 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/[refs] b/[refs]
index 3fd6a4f495d7..ede68995ad4f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 56c8eaee65d688b526c12dca54a30276335679e5
+refs/heads/master: 14c89e7fc84ae55354b8bf12fee1b6d14f259c8a
diff --git a/trunk/arch/powerpc/kernel/lparmap.c b/trunk/arch/powerpc/kernel/lparmap.c
index 8a53d436ad9a..92d947447565 100644
--- a/trunk/arch/powerpc/kernel/lparmap.c
+++ b/trunk/arch/powerpc/kernel/lparmap.c
@@ -18,8 +18,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xEsids = {
 		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
 		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
-		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
-		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+		{ .xKernelEsid = GET_ESID(VMALLOC_START),
+		  .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
 	},
 
 	.xRanges = {
diff --git a/trunk/arch/powerpc/mm/slb.c b/trunk/arch/powerpc/mm/slb.c
index cc22570856af..ffc8ed4de62d 100644
--- a/trunk/arch/powerpc/mm/slb.c
+++ b/trunk/arch/powerpc/mm/slb.c
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
-		        "r"(mk_esid_data(VMALLOCBASE, 1)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, 1)),
 		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		        "r"(ksp_esid_data)
 		     : "memory");
@@ -216,7 +216,7 @@ void slb_initialize(void)
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
-	create_slbe(VMALLOCBASE, vflags, 1);
+	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
diff --git a/trunk/include/asm-powerpc/page_64.h b/trunk/include/asm-powerpc/page_64.h
index 6642c0125001..8a07a93b0321 100644
--- a/trunk/include/asm-powerpc/page_64.h
+++ b/trunk/include/asm-powerpc/page_64.h
@@ -25,16 +25,6 @@
  */
 #define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
 
-#define REGION_SIZE		4UL
-#define REGION_SHIFT		60UL
-#define REGION_MASK		(((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-#define VMALLOCBASE		ASM_CONST(0xD000000000000000)
-#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID		(0UL)
-#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
-
 /* Segment size */
 #define SID_SHIFT		28
 #define SID_MASK		0xfffffffffUL
diff --git a/trunk/include/asm-powerpc/pgtable.h b/trunk/include/asm-powerpc/pgtable.h
index 0303f57366c1..3518adb2cc18 100644
--- a/trunk/include/asm-powerpc/pgtable.h
+++ b/trunk/include/asm-powerpc/pgtable.h
@@ -57,6 +57,17 @@ struct mm_struct;
 #define IMALLOC_BASE		(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
 #define IMALLOC_END		(VMALLOC_START + PGTABLE_RANGE)
 
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT		60UL
+#define REGION_MASK		(0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
+#define USER_REGION_ID		(0UL)
+
 /*
  * Common bits in a linux-style PTE.  These match the bits in the
  * (hardware-defined) PowerPC PTE as closely as possible. Additional
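
The REGION_ID() macros added to pgtable.h above derive a region ID from the top four
bits of a 64-bit effective address, which is how the kernel linear mapping, the
vmalloc area and user space are told apart.  The standalone sketch below (ordinary
user-space C, not kernel code) mirrors those macros to show the arithmetic; the
PAGE_OFFSET and VMALLOC_START values are the conventional ppc64 bases,
0xC000000000000000 and 0xD000000000000000, assumed here purely for illustration.

/* Standalone illustration of the region-ID macros moved into pgtable.h.
 * The two base addresses are assumed ppc64 values, used only so the
 * example compiles and runs outside the kernel. */
#include <stdio.h>

#define PAGE_OFFSET	0xC000000000000000UL	/* assumed linear-mapping base */
#define VMALLOC_START	0xD000000000000000UL	/* assumed vmalloc base */

#define REGION_SHIFT	60UL
#define REGION_MASK	(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)	(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID		(0UL)

int main(void)
{
	unsigned long ea = VMALLOC_START + 0x1000;	/* an address in the vmalloc area */

	/* The top 4 bits of an effective address select its region. */
	printf("KERNEL_REGION_ID  = %#lx\n", KERNEL_REGION_ID);	/* 0xc */
	printf("VMALLOC_REGION_ID = %#lx\n", VMALLOC_REGION_ID);	/* 0xd */
	printf("USER_REGION_ID    = %#lx\n", USER_REGION_ID);		/* 0 */

	/* Any address in the vmalloc area maps back to VMALLOC_REGION_ID,
	 * and masking with REGION_MASK recovers the region base. */
	printf("REGION_ID(%#lx) = %#lx\n", ea, REGION_ID(ea));
	printf("region base     = %#lx\n", ea & REGION_MASK);
	return 0;
}

The slb.c hunks above bolt separate SLB entries for the PAGE_OFFSET and VMALLOC_START
segments, which is why both regions need distinct, cheaply computed IDs.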