[PATCH] s390: put sys_call_table into .rodata section and write protect it

Put s390's syscall tables into the .rodata section and write protect that
section so the tables cannot be modified.  Suggested by Arjan van de Ven
<arjan@infradead.org>.

Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Heiko Carstens authored and Linus Torvalds committed Jul 1, 2006
1 parent a581c2a commit d882b17
Showing 3 changed files with 28 additions and 17 deletions.
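
For context on the mechanism the hunks below implement: the entry.S and entry64.S changes emit sys_call_table into the .rodata section (and load its address through a literal pool entry, since the table no longer sits within base-relative reach of the code), while paging_init() in init.c maps every page frame between __start_rodata and __end_rodata with a read-only PTE (_PAGE_RO). The following standalone C sketch is not kernel code: it re-derives PFN_DOWN/PFN_UP from their definitions in <linux/pfn.h> and uses made-up sample addresses in place of &__start_rodata and &__end_rodata, purely to show which page frames end up write protected.

/* Standalone illustration only (not kernel code): shows how the patch picks
 * the page frames that get a read-only mapping.  PFN_DOWN/PFN_UP follow
 * their kernel definitions; the rodata bounds are made-up sample values
 * standing in for &__start_rodata and &__end_rodata. */
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KB pages, as on s390 */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start_rodata = 0x00300100UL;	/* hypothetical */
	unsigned long end_rodata   = 0x00345678UL;	/* hypothetical */
	unsigned long ro_start_pfn = PFN_DOWN(start_rodata);
	unsigned long ro_end_pfn   = PFN_UP(end_rodata);
	unsigned long pfn;

	/* Same test as in the paging_init() hunks: frames inside the rodata
	 * range get a read-only PTE, everything else stays read-write. */
	for (pfn = ro_start_pfn - 1; pfn <= ro_end_pfn; pfn++)
		printf("pfn 0x%05lx -> %s\n", pfn,
		       (pfn >= ro_start_pfn && pfn < ro_end_pfn)
		       ? "_PAGE_RO (write protected)"
		       : "PAGE_KERNEL (read-write)");
	return 0;
}

Note that PFN_DOWN rounds the start address down and PFN_UP rounds the end address up to the next page boundary, so the last, possibly partial, page of .rodata is included in the write-protected range.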
9 changes: 6 additions & 3 deletions arch/s390/kernel/entry.S
@@ -228,8 +228,9 @@ sysc_do_svc:
 sysc_nr_ok:
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 sysc_do_restart:
+	l	%r8,BASED(.Lsysc_table)
 	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-	l	%r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+	l	%r8,0(%r7,%r8)	  # get system call addr.
 	bnz	BASED(sysc_tracesys)
 	basr	%r14,%r8	  # call sys_xxxx
 	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
@@ -330,9 +331,10 @@ sysc_tracesys:
 	basr	%r14,%r1
 	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
+	l	%r8,BASED(.Lsysc_table)
 	l	%r7,SP_R2(%r15)        # strace might have changed the
 	sll	%r7,2                  # system call
-	l	%r8,sys_call_table-system_call(%r7,%r13)
+	l	%r8,0(%r7,%r8)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
 	l	%r2,SP_ORIG_R2(%r15)
@@ -1009,6 +1011,7 @@ cleanup_io_leave_insn:
 .Ltrace:	.long	syscall_trace
 .Lvfork:	.long	sys_vfork
 .Lschedtail:	.long	schedule_tail
+.Lsysc_table:	.long	sys_call_table

 .Lcritical_start:
 	.long	__critical_start + 0x80000000
@@ -1017,8 +1020,8 @@ cleanup_io_leave_insn:
 .Lcleanup_critical:
 	.long	cleanup_critical

+	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL

1 change: 1 addition & 0 deletions arch/s390/kernel/entry64.S
@@ -991,6 +991,7 @@ cleanup_io_leave_insn:
 .Lcritical_end:
 	.quad	__critical_end

+	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long	esame
 sys_call_table:
 #include "syscalls.S"
35 changes: 21 additions & 14 deletions arch/s390/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>

 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,6 +34,7 @@
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>

 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

@@ -89,17 +91,6 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }

-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
 extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
 	unsigned long pfn = 0;
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
+	unsigned long ro_start_pfn, ro_end_pfn;
+
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

 	/* unmap whole virtual address space */

@@ -143,7 +138,10 @@
 		pg_dir++;

 		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
 			if (pfn >= max_low_pfn)
 				pte_clear(&init_mm, 0, &pte);
 			set_pte(pg_table, pte);
@@ -175,6 +173,7 @@
 }

 #else /* CONFIG_64BIT */
+
 void __init paging_init(void)
 {
 	pgd_t * pg_dir;
@@ -186,13 +185,15 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-
 	unsigned long zones_size[MAX_NR_ZONES];
 	unsigned long dma_pfn, high_pfn;
+	unsigned long ro_start_pfn, ro_end_pfn;

 	memset(zones_size, 0, sizeof(zones_size));
 	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
 	high_pfn = max_low_pfn;
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

 	if (dma_pfn > high_pfn)
 		zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@
 			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);

 			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				pte = pfn_pte(pfn, PAGE_KERNEL);
+				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+				else
+					pte = pfn_pte(pfn, PAGE_KERNEL);
 				if (pfn >= max_low_pfn) {
 					pte_clear(&init_mm, 0, &pte);
 					continue;
@@ -282,6 +286,9 @@
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >>10,
 		initsize >> 10);
+	printk("Write protected kernel read-only data: %#lx - %#lx\n",
+	       (unsigned long)&__start_rodata,
+	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
 }

 void free_initmem(void)
