Commit 7929611

---
r: 305208
b: refs/heads/master
c: 4d6ddb0
h: refs/heads/master
v: v3
Paul Mundt committed Apr 11, 2012
1 parent ed0d2c4 commit 7929611
Showing 27 changed files with 338 additions and 387 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: a4e02f6d83d4fcdb13bcaba76878fc5ea0da9911
refs/heads/master: 4d6ddb08acc48368c5b7ac431f9d00db7227d2ed
2 changes: 1 addition & 1 deletion trunk/MAINTAINERS
@@ -6545,7 +6545,7 @@ M: Paul Mundt <lethal@linux-sh.org>
L: linux-sh@vger.kernel.org
W: http://www.linux-sh.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
T: git git://github.com/pmundt/linux-sh.git sh-latest
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git sh-latest
S: Supported
F: Documentation/sh/
F: arch/sh/
38 changes: 9 additions & 29 deletions trunk/arch/sh/mm/fault_32.c
@@ -129,8 +129,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
int si_code;
int fault;
siginfo_t info;
unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(writeaccess ? FAULT_FLAG_WRITE : 0));

tsk = current;
mm = tsk->mm;
@@ -171,7 +169,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (in_atomic() || !mm)
goto no_context;

retry:
down_read(&mm->mmap_sem);

vma = find_vma(mm, address);
@@ -203,39 +200,22 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);

if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;

fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}

if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;

/*
* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}

up_read(&mm->mmap_sem);
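
For readers comparing the two sides of this hunk: the block removed above is the allow-retry page-fault pattern. A condensed sketch of that pattern follows; the identifiers (handle_mm_fault(), FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_KILLABLE, VM_FAULT_RETRY, fatal_signal_pending()) come from the hunks themselves, while the abbreviated function body is illustrative only and is not the committed code.

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (writeaccess ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the vma covering 'address' ... */

	fault = handle_mm_fault(mm, vma, address, flags);

	/* A killable fault may be interrupted; mmap_sem was already dropped. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (fault & VM_FAULT_RETRY) {
		/*
		 * __lock_page_or_retry() released mmap_sem for us, so no
		 * up_read() here; clear ALLOW_RETRY so we retry only once.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);

Handlers that keep this pattern (as arch/sparc/mm/fault_32.c does later in this commit) deliberately skip up_read() on the retry path because __lock_page_or_retry() in mm/filemap.c has already released mmap_sem.
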
114 changes: 79 additions & 35 deletions trunk/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
* Copyright (C) 2003 - 2012 Paul Mundt
* Copyright (C) 2003 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -28,6 +28,33 @@
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *,struct pt_regs *,long);

#define PFLAG(val,flag) (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)

static inline void print_prots(pgprot_t prot)
{
printk("prot is 0x%016llx\n",pgprot_val(prot));

printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
printk("vma start 0x%08lx\n", vma->vm_start);
printk("vma end 0x%08lx\n", vma->vm_end);

print_prots(vma->vm_page_prot);
printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
printk("Task pid %d\n", task_pid_nr(tsk));
}

static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
pgd_t *dir;
@@ -68,8 +95,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
struct mm_struct *mm;
struct vm_area_struct * vma;
const struct exception_table_entry *fixup;
unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(writeaccess ? FAULT_FLAG_WRITE : 0));
pte_t *pte;
int fault;

@@ -99,20 +124,47 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
if (in_atomic() || !mm)
goto no_context;

retry:
/* TLB misses upon some cache flushes get done under cli() */
down_read(&mm->mmap_sem);

vma = find_vma(mm, address);
if (!vma)

if (!vma) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
#endif
goto bad_area;
if (vma->vm_start <= address)
}
if (vma->vm_start <= address) {
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
}

if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);

print_vma(vma);
#endif
goto bad_area;
if (expand_stack(vma, address))
}
if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
#endif
goto bad_area;

}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
@@ -136,11 +188,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);

if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;

fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -149,27 +197,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
BUG();
}

if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}

if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;

/*
* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}

/* If we get here, the page fault has been handled. Do the TLB refill
@@ -196,6 +231,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
* Fix it, but check if it's kernel or user first..
*/
bad_area:
#ifdef DEBUG_FAULT
printk("fault:bad area\n");
#endif
up_read(&mm->mmap_sem);

if (user_mode(regs)) {
@@ -208,6 +246,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
address, task_pid_nr(current), current->comm,
(unsigned long) regs->pc);
#if 0
show_regs(regs);
#endif
}
if (is_global_init(tsk)) {
panic("INIT had user mode bad_area\n");
@@ -222,6 +263,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
}

no_context:
#ifdef DEBUG_FAULT
printk("fault:No context\n");
#endif
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (fixup) {
13 changes: 0 additions & 13 deletions trunk/arch/sparc/kernel/leon_pci.c
@@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)

void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
{
struct leon_pci_info *info = pbus->sysdata;
struct pci_dev *dev;
int i, has_io, has_mem;
u16 cmd;
@@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pci_enable_resources(dev, mask);
}

struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
/*
* Currently the OpenBoot nodes are not connected with the PCI device,
* this is because the LEON PROM does not create PCI nodes. Eventually
* this will change and the same approach as pcic.c can be used to
* match PROM nodes with pci devices.
*/
return NULL;
}
EXPORT_SYMBOL(pci_device_to_OF_node);

void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
{
#ifdef CONFIG_PCI_DEBUG
37 changes: 30 additions & 7 deletions trunk/arch/sparc/mm/fault_32.c
@@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long g2;
int from_user = !(regs->psr & PSR_PS);
int fault, code;
unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0));

if(text_fault)
address = regs->pc;
@@ -251,6 +253,7 @@

perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
down_read(&mm->mmap_sem);

/*
@@ -289,21 +292,41 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
fault = handle_mm_fault(mm, vma, address, flags);

if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;

if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
1, regs, address);
}
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;

/* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/

goto retry;
}
}

up_read(&mm->mmap_sem);
return;
