Commit 71e5626
---
yaml
---
r: 333011
b: refs/heads/master
c: c972cc6
h: refs/heads/master
i:
  333009: 235b8e7
  333007: 38b54b8
v: v3
Heiko Carstens authored and Martin Schwidefsky committed Oct 9, 2012
1 parent b95b97b commit 71e5626
Showing 5 changed files with 47 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 021d48be48481821f6e3f53028915c0571874135
+refs/heads/master: c972cc60c23f5a6309292bfcc91a441743ba027e
21 changes: 14 additions & 7 deletions trunk/arch/s390/include/asm/pgtable.h
@@ -119,20 +119,27 @@ static inline int is_zero_pfn(unsigned long pfn)

 #ifndef __ASSEMBLY__
 /*
- * The vmalloc area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
- * which should be enough for any sane case.
- * By putting vmalloc at the top, we maximise the gap between physical
- * memory and vmalloc to catch misplaced memory accesses. As a side
- * effect, this also makes sure that 64 bit module code cannot be used
- * as system call address.
+ * The vmalloc and module area will always be on the topmost area of the kernel
+ * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
+ * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
+ * modules will reside. That makes sure that inter module branches always
+ * happen without trampolines and in addition the placement within a 2GB frame
+ * is branch prediction unit friendly.
  */
 extern unsigned long VMALLOC_START;
 extern unsigned long VMALLOC_END;
 extern struct page *vmemmap;
 
 #define VMEM_MAX_PHYS	((unsigned long) vmemmap)
 
+#ifdef CONFIG_64BIT
+extern unsigned long MODULES_VADDR;
+extern unsigned long MODULES_END;
+#define MODULES_VADDR	MODULES_VADDR
+#define MODULES_END	MODULES_END
+#define MODULES_LEN	(1UL << 31)
+#endif
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  * | PFRA |  | OS |
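Aside on the "#define MODULES_VADDR MODULES_VADDR" pattern in the hunk above: the module area boundaries are runtime variables (they are assigned in setup_memory_end() further down in this commit), and defining a macro with the same name as the variable lets other code keep testing for the symbol with the preprocessor while still reading the runtime value. A minimal stand-alone sketch of the idiom, with a hypothetical value and no kernel dependencies:

/* Illustration only, not kernel code: the boundary is a runtime variable,
 * but code can still test for it with the preprocessor. */
#include <stdio.h>

unsigned long MODULES_VADDR;		/* set at "boot" time */
#define MODULES_VADDR MODULES_VADDR	/* makes #ifdef MODULES_VADDR true */

int main(void)
{
	MODULES_VADDR = (1UL << 42) - (1UL << 31);	/* hypothetical value */
#ifdef MODULES_VADDR
	printf("module area starts at %#lx\n", MODULES_VADDR);
#endif
	return 0;
}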
11 changes: 11 additions & 0 deletions trunk/arch/s390/kernel/module.c
@@ -44,6 +44,17 @@
 #define PLT_ENTRY_SIZE 20
 #endif /* CONFIG_64BIT */
 
+#ifdef CONFIG_64BIT
+void *module_alloc(unsigned long size)
+{
+	if (PAGE_ALIGN(size) > MODULES_LEN)
+		return NULL;
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				    GFP_KERNEL, PAGE_KERNEL, -1,
+				    __builtin_return_address(0));
+}
+#endif
+
 /* Free memory returned from module_alloc */
 void module_free(struct module *mod, void *module_region)
 {
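The new module_alloc() above restricts module memory to the [MODULES_VADDR, MODULES_END) range via __vmalloc_node_range(), after rejecting any request that could not fit in the 2GB module area. A small user-space sketch of that size check only; PAGE_SIZE and PAGE_ALIGN are redefined here for illustration, and module_alloc_would_fit is a made-up helper, not a kernel function:

/* Sketch of the size guard in module_alloc(): round up to whole pages,
 * reject anything larger than the 2GB module area. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MODULES_LEN	(1UL << 31)

static int module_alloc_would_fit(unsigned long size)
{
	return PAGE_ALIGN(size) <= MODULES_LEN;
}

int main(void)
{
	printf("%d %d\n",
	       module_alloc_would_fit(100 * 1024),	/* 1: typical module */
	       module_alloc_would_fit(3UL << 30));	/* 0: larger than 2GB */
	return 0;
}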
13 changes: 11 additions & 2 deletions trunk/arch/s390/kernel/setup.c
@@ -105,6 +105,11 @@ EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmemmap;
 EXPORT_SYMBOL(vmemmap);
 
+#ifdef CONFIG_64BIT
+unsigned long MODULES_VADDR;
+unsigned long MODULES_END;
+#endif
+
 /* An array with a pointer to the lowcore of every CPU. */
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
@@ -544,19 +549,23 @@ static void __init setup_memory_end(void)

 	/* Choose kernel address space layout: 2, 3, or 4 levels. */
 #ifdef CONFIG_64BIT
-	vmalloc_size = VMALLOC_END ?: 128UL << 30;
+	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
 	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
 	if (tmp <= (1UL << 42))
 		vmax = 1UL << 42;	/* 3-level kernel page table */
 	else
 		vmax = 1UL << 53;	/* 4-level kernel page table */
+	/* module area is at the end of the kernel address space. */
+	MODULES_END = vmax;
+	MODULES_VADDR = MODULES_END - MODULES_LEN;
+	VMALLOC_END = MODULES_VADDR;
 #else
 	vmalloc_size = VMALLOC_END ?: 96UL << 20;
 	vmax = 1UL << 31;	/* 2-level kernel page table */
-#endif
 	/* vmalloc area is at the end of the kernel address space. */
 	VMALLOC_END = vmax;
+#endif
 	VMALLOC_START = vmax - vmalloc_size;
 
 	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
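The hunk above chooses between a 3-level (2^42) and a 4-level (2^53) kernel address space and then carves the 2GB module area off the top, with vmalloc directly below it. A stand-alone sketch of that arithmetic for a hypothetical machine, assuming 4KB pages and a sizeof(struct page) of 64 bytes; both values are assumptions for illustration, not taken from the diff:

/* Sketch: pick the address space size the same way setup_memory_end()
 * does (identity mapping + vmemmap array + vmalloc must fit below 2^42
 * for a 3-level layout), then derive the module and vmalloc boundaries. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define STRUCT_PAGE	64UL		/* assumed sizeof(struct page) */
#define MODULES_LEN	(1UL << 31)

int main(void)
{
	unsigned long memory = 64UL << 30;	/* hypothetical 64GB of RAM */
	unsigned long vmalloc_size = (128UL << 30) - MODULES_LEN;
	unsigned long tmp, vmax;

	tmp = memory / PAGE_SIZE;
	tmp = tmp * (STRUCT_PAGE + PAGE_SIZE) + vmalloc_size;
	vmax = (tmp <= (1UL << 42)) ? 1UL << 42 : 1UL << 53;

	printf("vmax          = %#lx (%s-level)\n", vmax,
	       vmax == 1UL << 42 ? "3" : "4");
	printf("MODULES_END   = %#lx\n", vmax);
	printf("MODULES_VADDR = %#lx\n", vmax - MODULES_LEN);
	printf("VMALLOC_END   = %#lx\n", vmax - MODULES_LEN);
	printf("VMALLOC_START = %#lx\n", vmax - vmalloc_size);
	return 0;
}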
13 changes: 10 additions & 3 deletions trunk/arch/s390/mm/dump_pagetables.c
@@ -18,6 +18,9 @@ enum address_markers_idx {
 	KERNEL_END_NR,
 	VMEMMAP_NR,
 	VMALLOC_NR,
+#ifdef CONFIG_64BIT
+	MODULES_NR,
+#endif
 };
 
 static struct addr_marker address_markers[] = {
@@ -26,6 +29,9 @@ static struct addr_marker address_markers[] = {
 	[KERNEL_END_NR]	= {(unsigned long)&_end, "Kernel Image End"},
 	[VMEMMAP_NR]	= {0, "vmemmap Area"},
 	[VMALLOC_NR]	= {0, "vmalloc Area"},
+#ifdef CONFIG_64BIT
+	[MODULES_NR]	= {0, "Modules Area"},
+#endif
 	{ -1, NULL }
 };

@@ -205,11 +211,12 @@ static int pt_dump_init(void)
 	 * kernel ASCE. We need this to keep the page table walker functions
 	 * from accessing non-existent entries.
 	 */
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_32BIT
+	max_addr = 1UL << 31;
+#else
 	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
-#else
-	max_addr = 1UL << 31;
+	address_markers[MODULES_NR].start_address = MODULES_VADDR;
 #endif
 	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
 	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
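The max_addr computation above derives the addressable range from the designation-type bits of the kernel ASCE: each deeper table level adds 11 bits of address space (2048 entries per table) on top of the 31 bits covered by a segment table. A worked example of the formula, assuming the usual DT encoding (0 = segment table, 1 = region-third, 2 = region-second table); illustration only:

/* Worked example of max_addr = 1UL << (dt * 11 + 31). */
#include <stdio.h>

int main(void)
{
	for (unsigned long dt = 0; dt <= 2; dt++)
		printf("dt=%lu -> max_addr = 1UL << %lu = %#lx\n",
		       dt, dt * 11 + 31, 1UL << (dt * 11 + 31));
	return 0;
}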
