Merge branch 'akpm' (patches from Andrew Morton)
Merge more patches from Andrew Morton:
 "The rest of MM.  Plus one misc cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  mm/Kconfig: add MMU dependency for MIGRATION.
  kernel: replace strict_strto*() with kstrto*()
  mm, thp: count thp_fault_fallback anytime thp fault fails
  thp: consolidate code between handle_mm_fault() and do_huge_pmd_anonymous_page()
  thp: do_huge_pmd_anonymous_page() cleanup
  thp: move maybe_pmd_mkwrite() out of mk_huge_pmd()
  mm: cleanup add_to_page_cache_locked()
  thp: account anon transparent huge pages into NR_ANON_PAGES
  truncate: drop 'oldsize' truncate_pagecache() parameter
  mm: make lru_add_drain_all() selective
  memcg: document cgroup dirty/writeback memory statistics
  memcg: add per cgroup writeback pages accounting
  memcg: check for proper lock held in mem_cgroup_update_page_stat
  memcg: remove MEMCG_NR_FILE_MAPPED
  memcg: reduce function dereference
  memcg: avoid overflow caused by PAGE_ALIGN
  memcg: rename RESOURCE_MAX to RES_COUNTER_MAX
  memcg: correct RESOURCE_MAX to ULLONG_MAX
  mm: memcg: do not trap chargers with full callstack on OOM
  mm: memcg: rework and document OOM waiting and wakeup
  ...
Linus Torvalds committed Sep 12, 2013
2 parents 26935fb + de32a81 commit ac4de95
Showing 79 changed files with 973 additions and 919 deletions.
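
Nearly every architecture diff below applies the same refactoring to its do_page_fault(): the write bit is no longer folded into the initial fault-flags word, and faults taken from user mode are now tagged explicitly so generic mm code can tell user faults from kernel faults. In outline (a condensed sketch assembled from the hunks below, not any single architecture verbatim):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (user_mode(regs))		/* fault raised from userspace */
		flags |= FAULT_FLAG_USER;

	/* ... look up the vma, classify the access ... */

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;	/* replaces the old ?: in the initializer */
	}

	fault = handle_mm_fault(mm, vma, address, flags);
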
2 changes: 2 additions & 0 deletions Documentation/cgroups/memory.txt
@@ -490,6 +490,8 @@ pgpgin - # of charging events to the memory cgroup. The charging
 pgpgout - # of uncharging events to the memory cgroup. The uncharging
 	event happens each time a page is unaccounted from the cgroup.
 swap - # of bytes of swap usage
+writeback - # of bytes of file/anon cache that are queued for syncing to
+	disk.
 inactive_anon - # of bytes of anonymous and swap cache memory on inactive
 	LRU list.
 active_anon - # of bytes of anonymous and swap cache memory on active
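
The two added lines document the new per-cgroup writeback counter in memory.stat. As a quick illustration, here is a hypothetical userspace reader (not part of this commit), assuming the v1 memory controller is mounted at /sys/fs/cgroup/memory:

	#include <stdio.h>
	#include <string.h>

	/* Scan a memory.stat file for the "writeback" line added by this
	 * merge; returns the value in bytes, or -1 if the counter is
	 * absent (e.g. an older kernel). */
	static long long read_writeback_bytes(const char *stat_path)
	{
		FILE *f = fopen(stat_path, "r");
		char key[64];
		long long val;

		if (!f)
			return -1;
		while (fscanf(f, "%63s %lld", key, &val) == 2) {
			if (strcmp(key, "writeback") == 0) {
				fclose(f);
				return val;
			}
		}
		fclose(f);
		return -1;
	}

	int main(void)
	{
		long long wb = read_writeback_bytes("/sys/fs/cgroup/memory/memory.stat");

		if (wb < 0)
			fprintf(stderr, "no writeback counter found\n");
		else
			printf("writeback: %lld bytes\n", wb);
		return 0;
	}
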
7 changes: 4 additions & 3 deletions arch/alpha/mm/fault.c
@@ -89,8 +89,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	const struct exception_table_entry *fixup;
 	int fault, si_code = SEGV_MAPERR;
 	siginfo_t info;
-	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (cause > 0 ? FAULT_FLAG_WRITE : 0));
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
 	   (or is suppressed by the PALcode). Support that for older CPUs
@@ -115,7 +114,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	if (address >= TASK_SIZE)
 		goto vmalloc_fault;
 #endif
-
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -142,6 +142,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	} else {
 		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	}
 
 	/* If for any reason at all we couldn't handle the fault,
11 changes: 4 additions & 7 deletions arch/arc/mm/fault.c
@@ -60,8 +60,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 	siginfo_t info;
 	int fault, ret;
 	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				(write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -89,6 +88,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -117,12 +118,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
 
-survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -201,10 +202,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 		die("Oops", regs, address);
 
 out_of_memory:
-	if (is_global_init(tsk)) {
-		yield();
-		goto survive;
-	}
 	up_read(&mm->mmap_sem);
 
 	if (user_mode(regs)) {
23 changes: 13 additions & 10 deletions arch/arm/mm/fault.c
@@ -261,9 +261,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	int write = fsr & FSR_WRITE;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				(write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, fsr))
 		return 0;
@@ -282,6 +280,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (fsr & FSR_WRITE)
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -349,6 +352,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+	/*
+	 * If we are in kernel mode at this point, we
+	 * have no context to handle this fault with.
+	 */
+	if (!user_mode(regs))
+		goto no_context;
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return to
@@ -359,13 +369,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		return 0;
 	}
 
-	/*
-	 * If we are in kernel mode at this point, we
-	 * have no context to handle this fault with.
-	 */
-	if (!user_mode(regs))
-		goto no_context;
-
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
31 changes: 17 additions & 14 deletions arch/arm64/mm/fault.c
@@ -199,13 +199,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	if (esr & ESR_LNX_EXEC) {
-		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
-		vm_flags = VM_WRITE;
-		mm_flags |= FAULT_FLAG_WRITE;
-	}
-
 	tsk = current;
 	mm = tsk->mm;
 
@@ -220,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		mm_flags |= FAULT_FLAG_USER;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -288,6 +291,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 					      VM_FAULT_BADACCESS))))
 		return 0;
 
+	/*
+	 * If we are in kernel mode at this point, we have no context to
+	 * handle this fault with.
+	 */
+	if (!user_mode(regs))
+		goto no_context;
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return to
@@ -298,13 +308,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		return 0;
 	}
 
-	/*
-	 * If we are in kernel mode at this point, we have no context to
-	 * handle this fault with.
-	 */
-	if (!user_mode(regs))
-		goto no_context;
-
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to successfully fix up
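
Besides the flag rework, the arm and arm64 hunks above also reorder the tail of the fault handler: the kernel-mode bailout now comes before the OOM handling, so a kernel fault whose handling returns VM_FAULT_OOM goes to no_context (exception fixup or oops) and never enters the userspace OOM path; this matters now that the reworked memcg OOM handling in this merge can hand VM_FAULT_OOM back to the fault handler. The avr32 hunk below moves pagefault_out_of_memory() after the same check for the same reason. The resulting error path, in outline (simplified; arm also folds VM_FAULT_BADMAP/VM_FAULT_BADACCESS into the first test):

	if (likely(!(fault & VM_FAULT_ERROR)))
		return 0;

	if (!user_mode(regs))
		goto no_context;	/* kernel fault: fix up or oops, no OOM killing */

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();	/* user fault that could not get memory */
		return 0;
	}

	/* ... VM_FAULT_SIGBUS / SEGV delivery follows ... */
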
4 changes: 3 additions & 1 deletion arch/avr32/mm/fault.c
@@ -86,6 +86,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
 	local_irq_enable();
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 
@@ -228,9 +230,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	 */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	pagefault_out_of_memory();
 	if (!user_mode(regs))
 		goto no_context;
+	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
6 changes: 4 additions & 2 deletions arch/cris/mm/fault.c
@@ -58,8 +58,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	siginfo_t info;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	D(printk(KERN_DEBUG
 	  "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -117,6 +116,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -155,6 +156,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	} else if (writeaccess == 1) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
10 changes: 6 additions & 4 deletions arch/frv/mm/fault.c
@@ -34,11 +34,11 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
 	pud_t *pue;
 	pte_t *pte;
-	int write;
 	int fault;
 
 #if 0
@@ -81,6 +81,9 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(__frame))
+		flags |= FAULT_FLAG_USER;
+
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 */
 good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (esr0 & ESR0_ATXC) {
 	default:
 		/* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 
 		/* handle read from protected page */
@@ -162,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
6 changes: 4 additions & 2 deletions arch/hexagon/mm/vm_fault.c
@@ -53,8 +53,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	int si_code = SEGV_MAPERR;
 	int fault;
 	const struct exception_table_entry *fixup;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				 (cause > 0 ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/*
 	 * If we're in an interrupt or have no user context,
@@ -65,6 +64,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
 	local_irq_enable();
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -96,6 +97,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	case FLT_STORE:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 	}
 
6 changes: 4 additions & 2 deletions arch/ia64/mm/fault.c
@@ -90,8 +90,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
 		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
-	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
-
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
 
@@ -119,6 +117,10 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (mask & VM_WRITE)
+		flags |= FAULT_FLAG_WRITE;
 retry:
 	down_read(&mm->mmap_sem);
 
10 changes: 6 additions & 4 deletions arch/m32r/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page, addr;
-	int write;
+	unsigned long flags = 0;
 	int fault;
 	siginfo_t info;
 
@@ -117,6 +117,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+	if (error_code & ACE_USERMODE)
+		flags |= FAULT_FLAG_USER;
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -166,14 +169,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 */
 good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
 		default:	/* 3: write, present */
 			/* fall through */
 		case ACE_WRITE:	/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto bad_area;
-			write++;
+			flags |= FAULT_FLAG_WRITE;
 			break;
 		case ACE_PROTECTION:	/* read, present */
 		case 0:		/* read, not present */
@@ -194,7 +196,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
2 changes: 2 additions & 0 deletions arch/m68k/mm/fault.c
@@ -88,6 +88,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 
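
On the consuming side, the new flag gates the memcg OOM machinery in mm/memory.c elsewhere in this series: memcg OOM handling is armed only for faults that carry FAULT_FLAG_USER. Roughly, as a simplified sketch rather than the verbatim merged code:

	if (flags & FAULT_FLAG_USER)
		mem_cgroup_oom_enable();	/* allow memcg OOM handling for this fault */

	ret = __handle_mm_fault(mm, vma, address, flags);

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_oom_disable();
		/* if the fault got by without VM_FAULT_OOM, quietly clear
		 * any memcg OOM state instead of killing anything */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);
	}
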