---
yaml
---
r: 280615
b: refs/heads/master
c: 14045eb
h: refs/heads/master
i:
  280613: b824edf
  280611: d121f84
  280607: 1d5f23a
v: v3
Martin Schwidefsky committed Dec 27, 2011
1 parent 03001a8 commit e1fbe00
Showing 6 changed files with 69 additions and 54 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 4999023aa95a00507d3f100ea75510c5c7270f74
refs/heads/master: 14045ebf1e1156d966a796cacad91028e01797e5
23 changes: 3 additions & 20 deletions trunk/arch/s390/include/asm/pgtable.h
@@ -128,28 +128,11 @@ static inline int is_zero_pfn(unsigned long pfn)
* effect, this also makes sure that 64 bit module code cannot be used
* as system call address.
*/

extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#ifndef __s390x__
#define VMALLOC_SIZE (96UL << 20)
#define VMALLOC_END 0x7e000000UL
#define VMEM_MAP_END 0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE (128UL << 30)
#define VMALLOC_END 0x3e000000000UL
#define VMEM_MAP_END 0x40000000000UL
#endif /* __s390x__ */

/*
* VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
* mapping. This needs to be calculated at compile time since the size of the
* VMEM_MAP is static but the size of struct page can change.
*/
#define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap ((struct page *) VMALLOC_END)
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

/*
* A 31 bit pagetable entry of S390 has following format:
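The pgtable.h hunk above replaces the compile-time VMALLOC_* constants with variables that setup.c fills in at boot, and redefines VMEM_MAX_PHYS as the runtime address of the vmemmap array: the 1:1 mapping now simply ends where the struct page array begins. A minimal sketch of how such a bound would be consumed (the helper is illustrative, not part of the patch; only VMEM_MAX_PHYS and vmemmap come from the hunk):

/* Illustrative only: a range check against the new runtime limit. */
static inline int fits_identity_map(unsigned long start, unsigned long size)
{
	return start + size <= VMEM_MAX_PHYS;	/* i.e. below (unsigned long) vmemmap */
}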
4 changes: 2 additions & 2 deletions trunk/arch/s390/include/asm/sparsemem.h
@@ -4,8 +4,8 @@
#ifdef CONFIG_64BIT

#define SECTION_SIZE_BITS 28
#define MAX_PHYSADDR_BITS 42
#define MAX_PHYSMEM_BITS 42
#define MAX_PHYSADDR_BITS 46
#define MAX_PHYSMEM_BITS 46

#else

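Raising MAX_PHYSADDR_BITS and MAX_PHYSMEM_BITS from 42 to 46 lifts the sparsemem limit from 4 TB to 64 TB of physical address space. With the unchanged 2^28-byte section size, the generic SPARSEMEM code simply ends up with more sections; the derivation below paraphrases the common mm headers (the macro names are assumptions from generic code, not part of this patch):

/* Rough sketch of the generic SPARSEMEM derivation:
 * SECTIONS_SHIFT  = MAX_PHYSMEM_BITS - SECTION_SIZE_BITS = 46 - 28 = 18
 * NR_MEM_SECTIONS = 1UL << 18 = 262144 sections of 256 MB = 64 TB
 * (previously 1UL << 14 = 16384 sections = 4 TB). */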
67 changes: 53 additions & 14 deletions trunk/arch/s390/kernel/setup.c
@@ -94,6 +94,15 @@ struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
int __initdata memory_end_set;
unsigned long __initdata memory_end;

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
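VMALLOC_START, VMALLOC_END and vmemmap become ordinary exported variables because their values are only known once setup_memory_end() has sized the address space; modules and generic code read them at run time. A hypothetical consumer, just to show the intended use of the exports (the helper name is invented):

/* Hypothetical module-side check built only on the exported variables. */
static int addr_is_vmalloc(const void *p)
{
	unsigned long a = (unsigned long) p;

	return a >= VMALLOC_START && a < VMALLOC_END;
}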
@@ -277,6 +286,15 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);

static int __init parse_vmalloc(char *arg)
{
if (!arg)
return -EINVAL;
VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned int user_mode = HOME_SPACE_MODE;
EXPORT_SYMBOL_GPL(user_mode);

@@ -478,8 +496,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);

static void __init setup_memory_end(void)
{
unsigned long memory_size;
unsigned long max_mem;
unsigned long vmax, vmalloc_size, tmp;
int i;


@@ -489,12 +506,9 @@ static void __init setup_memory_end(void)
memory_end_set = 1;
}
#endif
memory_size = 0;
real_memory_size = 0;
memory_end &= PAGE_MASK;

max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
memory_end = min(max_mem, memory_end);

/*
* Make sure all chunks are MAX_ORDER aligned so we don't need the
* extra checks that HOLES_IN_ZONE would require.
@@ -514,23 +528,48 @@
chunk->addr = start;
chunk->size = end - start;
}
real_memory_size = max(real_memory_size,
chunk->addr + chunk->size);
}

/* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: 128UL << 30;
tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
if (tmp <= (1UL << 42))
vmax = 1UL << 42; /* 3-level kernel page table */
else
vmax = 1UL << 53; /* 4-level kernel page table */
#else
vmalloc_size = VMALLOC_END ?: 96UL << 20;
vmax = 1UL << 31; /* 2-level kernel page table */
#endif
/* vmalloc area is at the end of the kernel address space. */
VMALLOC_END = vmax;
VMALLOC_START = vmax - vmalloc_size;

/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
vmemmap = (struct page *) tmp;

/* Take care that memory_end is set and <= vmemmap */
memory_end = min(memory_end ?: real_memory_size, tmp);

/* Fixup memory chunk array to fit into 0..memory_end */
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];

real_memory_size = max(real_memory_size,
chunk->addr + chunk->size);
if (chunk->addr >= max_mem) {
if (chunk->addr >= memory_end) {
memset(chunk, 0, sizeof(*chunk));
continue;
}
if (chunk->addr + chunk->size > max_mem)
chunk->size = max_mem - chunk->addr;
memory_size = max(memory_size, chunk->addr + chunk->size);
if (chunk->addr + chunk->size > memory_end)
chunk->size = memory_end - chunk->addr;
}
if (!memory_end)
memory_end = memory_size;
}

void *restart_stack __attribute__((__section__(".data")));
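The reworked setup_memory_end() now also decides the size of the kernel address space: it estimates how much virtual space the 1:1 mapping, the vmemmap array and the vmalloc area need, picks a 3-level (4 TB) or 4-level (8 PB) kernel page table accordingly, places vmalloc at the top, and puts the vmemmap array directly above the 1:1 mapping. A self-contained, 64-bit-only sketch of that arithmetic follows; PAGE_SIZE, sizeof(struct page) and the memory size are assumed example values, not taken from the patch:

/* Standalone sketch of the layout computation in setup_memory_end(). */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096UL;
	unsigned long page_struct = 64UL;		/* assumed sizeof(struct page) */
	unsigned long real_memory_size = 64UL << 30;	/* example: 64 GB of RAM */
	unsigned long vmalloc_size = 128UL << 30;	/* default when no vmalloc= given */
	unsigned long vmax, tmp, vmalloc_start, vmemmap_addr;

	/* Choose a 3- or 4-level kernel page table (mirrors the diff). */
	tmp = real_memory_size / page_size;
	tmp = tmp * (page_struct + page_size) + vmalloc_size;
	vmax = (tmp <= (1UL << 42)) ? (1UL << 42) : (1UL << 53);

	/* vmalloc area sits at the top of the kernel address space. */
	vmalloc_start = vmax - vmalloc_size;

	/* Split the rest between the 1:1 mapping and the vmemmap array. */
	tmp = vmalloc_start / (page_size + page_struct);
	tmp = vmalloc_start - tmp * page_struct;
	tmp &= ~((vmax >> 11) - 1);			/* align to a page table level */
	if (tmp > (1UL << 46))				/* MAX_PHYSMEM_BITS cap */
		tmp = 1UL << 46;
	vmemmap_addr = tmp;

	printf("vmax          = 0x%lx\n", vmax);
	printf("VMALLOC_START = 0x%lx\n", vmalloc_start);
	printf("vmemmap       = 0x%lx\n", vmemmap_addr);
	return 0;
}

With 64 GB of memory and the default 128 GB vmalloc area the estimate stays well under 4 TB, so the 3-level layout (vmax = 2^42) is chosen and VMALLOC_START lands at 0x3e000000000.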
16 changes: 10 additions & 6 deletions trunk/arch/s390/mm/init.c
@@ -93,18 +93,22 @@ static unsigned long setup_zero_pages(void)
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long pgd_type;
unsigned long pgd_type, asce_bits;

init_mm.pgd = swapper_pg_dir;
S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
/* A three level page table (4TB) is enough for the kernel space. */
S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
if (VMALLOC_END > (1UL << 42)) {
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION2_ENTRY_EMPTY;
} else {
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
#else
S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
asce_bits = _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
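paging_init() now derives both the ASCE bits and the empty pgd entry from the chosen layout: if VMALLOC_END ends up above 2^42 the kernel runs with a region-second table (4 levels), otherwise with a region-third table (3 levels), and the kernel ASCE is composed from the table origin plus those bits in a single assignment. A small sketch of that selection, reusing the macro names from the hunk (the helper itself is hypothetical and not self-contained outside the kernel):

/* Hypothetical helper mirroring the new selection in paging_init(). */
static unsigned long kernel_asce_bits(void)
{
	if (VMALLOC_END > (1UL << 42))
		return _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;	/* 4-level */
	return _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;		/* 3-level */
}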
11 changes: 0 additions & 11 deletions trunk/arch/s390/mm/pgtable.c
@@ -33,17 +33,6 @@
#define FRAG_MASK 0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
if (!arg)
return -EINVAL;
VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
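The old vmalloc= handling in pgtable.c is dropped in favour of the parser added to setup.c above. The parameter keeps its purpose of sizing the vmalloc area, but the value is now only recorded and turned into the final window later by setup_memory_end(). A comment-only sketch of the before/after semantics (the two assignments are quoted from the hunks; the usage note is an assumption based on memparse's K/M/G suffix handling):

/* Before (pgtable.c): the size was subtracted from a fixed VMALLOC_END:
 *	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
 * After (setup.c): the size is stashed in VMALLOC_END, rounded up to a page,
 * until setup_memory_end() lays out the address space:
 *	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
 * Boot usage stays the same, e.g. "vmalloc=1G". */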
