Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "Rather a lot of fixes, almost all affecting mm/"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (26 commits)
  scripts/gdb: fix debugging modules on s390
  kernel/events/uprobes.c: only do FOLL_SPLIT_PMD for uprobe register
  mm/thp: allow dropping THP from page cache
  mm/vmscan.c: support removing arbitrary sized pages from mapping
  mm/thp: fix node page state in split_huge_page_to_list()
  proc/meminfo: fix output alignment
  mm/init-mm.c: include <linux/mman.h> for vm_committed_as_batch
  mm/filemap.c: include <linux/ramfs.h> for generic_file_vm_ops definition
  mm: include <linux/huge_mm.h> for is_vma_temporary_stack
  zram: fix race between backing_dev_show and backing_dev_store
  mm/memcontrol: update lruvec counters in mem_cgroup_move_account
  ocfs2: fix panic due to ocfs2_wq is null
  hugetlbfs: don't access uninitialized memmaps in pfn_range_valid_gigantic()
  mm: memblock: do not enforce current limit for memblock_phys* family
  mm: memcg: get number of pages on the LRU list in memcgroup base on lru_zone_size
  mm/gup: fix a misnamed "write" argument, and a related bug
  mm/gup_benchmark: add a missing "w" to getopt string
  ocfs2: fix error handling in ocfs2_setattr()
  mm: memcg/slab: fix panic in __free_slab() caused by premature memcg pointer release
  mm/memunmap: don't access uninitialized memmap in memunmap_pages()
  ...
Linus Torvalds committed Oct 19, 2019
commit 998d755 (2 parents: d418d07 + 585d730)
Showing 27 changed files with 165 additions and 139 deletions.
3 changes: 3 additions & 0 deletions drivers/base/memory.c
@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
         pfn >>= PAGE_SHIFT;
         if (!pfn_valid(pfn))
                 return -ENXIO;
+        /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+        if (!pfn_to_online_page(pfn))
+                return -EIO;
         ret = soft_offline_page(pfn_to_page(pfn), 0);
         return ret == 0 ? count : ret;
 }
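Note on the recurring pattern: this fix, like the fs/proc/page.c, mm/hugetlb.c and mm/memory-failure.c hunks further down, replaces a bare pfn_valid() test with pfn_to_online_page(). pfn_valid() only says a memmap entry exists for the pfn; for ZONE_DEVICE ranges that entry may not have been initialized yet, so dereferencing the struct page can read garbage. pfn_to_online_page() returns the page only when the backing section is online. A minimal sketch of the safer lookup, using kernel-internal APIs (the helper name is illustrative, not from this commit):

#include <linux/memory_hotplug.h>       /* pfn_to_online_page() */
#include <linux/mmzone.h>               /* pfn_valid(), struct page */

/* NULL means a hole, an offline section, or ZONE_DEVICE memory whose
 * memmap may be uninitialized; callers must not touch the page then. */
static struct page *lookup_online_page(unsigned long pfn)
{
        if (!pfn_valid(pfn))                    /* no memmap entry at all */
                return NULL;
        return pfn_to_online_page(pfn);         /* NULL unless the section is online */
}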
5 changes: 3 additions & 2 deletions drivers/block/zram/zram_drv.c
@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
 static ssize_t backing_dev_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
+        struct file *file;
         struct zram *zram = dev_to_zram(dev);
-        struct file *file = zram->backing_dev;
         char *p;
         ssize_t ret;
 
         down_read(&zram->init_lock);
-        if (!zram->backing_dev) {
+        file = zram->backing_dev;
+        if (!file) {
                 memcpy(buf, "none\n", 5);
                 up_read(&zram->init_lock);
                 return 5;
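The race being closed: backing_dev_show() used to capture zram->backing_dev before taking init_lock, while backing_dev_store() can reset and release that file under the same lock, so the show path could follow a stale pointer. Reading the pointer only after down_read() removes the window. A rough userspace analogue of the same discipline (illustrative names only; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;
static char *backing_dev;               /* shared; store() may free/replace it */

static void show(char *buf, size_t len)
{
        pthread_rwlock_rdlock(&init_lock);
        const char *p = backing_dev;    /* snapshot only while holding the lock */
        snprintf(buf, len, "%s", p ? p : "none");
        pthread_rwlock_unlock(&init_lock);
}

static void store(const char *path)
{
        pthread_rwlock_wrlock(&init_lock);
        free(backing_dev);                      /* the old value dies here ... */
        backing_dev = path ? strdup(path) : NULL;
        pthread_rwlock_unlock(&init_lock);      /* ... so readers must not cache it */
}

int main(void)
{
        char buf[64];

        store("/dev/loop0");
        show(buf, sizeof(buf));
        puts(buf);                              /* prints "/dev/loop0" */
        store(NULL);
        show(buf, sizeof(buf));
        puts(buf);                              /* prints "none" */
        return 0;
}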
2 changes: 2 additions & 0 deletions fs/ocfs2/file.c
@@ -1230,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                 transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
                 if (IS_ERR(transfer_to[USRQUOTA])) {
                         status = PTR_ERR(transfer_to[USRQUOTA]);
+                        transfer_to[USRQUOTA] = NULL;
                         goto bail_unlock;
                 }
         }
@@ -1239,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                 transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
                 if (IS_ERR(transfer_to[GRPQUOTA])) {
                         status = PTR_ERR(transfer_to[GRPQUOTA]);
+                        transfer_to[GRPQUOTA] = NULL;
                         goto bail_unlock;
                 }
         }
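Why the added NULL assignments matter: dqget() reports failure with an ERR_PTR()-encoded pointer, and the function's shared exit path releases every transfer_to[] slot with dqput(), which tolerates NULL but not an encoded error value. Clearing the slot before the goto keeps that cleanup safe. A self-contained userspace sketch of the same pattern (hypothetical names; malloc/free stand in for dqget/dqput):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAXQUOTAS 2

/* Stand-ins for the kernel's ERR_PTR()/IS_ERR() convention. */
static void *err_ptr(long err)     { return (void *)err; }
static int is_err(const void *p)   { return (uintptr_t)p >= (uintptr_t)-4095; }

/* Stand-in for dqget(): the second quota type fails. */
static void *get_quota(int type)
{
        return type == 1 ? err_ptr(-3) : strdup("dquot");
}

static int setattr_like(void)
{
        void *transfer_to[MAXQUOTAS] = { NULL, NULL };
        int status = 0, i;

        for (i = 0; i < MAXQUOTAS; i++) {
                transfer_to[i] = get_quota(i);
                if (is_err(transfer_to[i])) {
                        status = (int)(intptr_t)transfer_to[i];
                        transfer_to[i] = NULL;  /* the fix: cleanup must never see ERR_PTR */
                        goto bail;
                }
        }
bail:
        for (i = 0; i < MAXQUOTAS; i++)
                free(transfer_to[i]);           /* free(NULL) is fine; free(ERR_PTR) is not */
        return status;
}

int main(void)
{
        printf("status = %d\n", setattr_like());
        return 0;
}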
3 changes: 2 additions & 1 deletion fs/ocfs2/journal.c
@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
         /* At this point, we know that no more recovery threads can be
          * launched, so wait for any recovery completion work to
          * complete. */
-        flush_workqueue(osb->ocfs2_wq);
+        if (osb->ocfs2_wq)
+                flush_workqueue(osb->ocfs2_wq);
 
         /*
          * Now that recovery is shut down, and the osb is about to be
3 changes: 2 additions & 1 deletion fs/ocfs2/localalloc.c
@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
         struct ocfs2_dinode *alloc = NULL;
 
         cancel_delayed_work(&osb->la_enable_wq);
-        flush_workqueue(osb->ocfs2_wq);
+        if (osb->ocfs2_wq)
+                flush_workqueue(osb->ocfs2_wq);
 
         if (osb->local_alloc_state == OCFS2_LA_UNUSED)
                 goto out;
4 changes: 2 additions & 2 deletions fs/proc/meminfo.c
@@ -132,9 +132,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                     global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
         show_val_kb(m, "ShmemPmdMapped: ",
                     global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
-        show_val_kb(m, "FileHugePages: ",
+        show_val_kb(m, "FileHugePages:  ",
                     global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
-        show_val_kb(m, "FilePmdMapped: ",
+        show_val_kb(m, "FilePmdMapped:  ",
                     global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
 #endif
 
28 changes: 16 additions & 12 deletions fs/proc/page.c
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
                 return -EINVAL;
 
         while (count > 0) {
-                if (pfn_valid(pfn))
-                        ppage = pfn_to_page(pfn);
-                else
-                        ppage = NULL;
+                /*
+                 * TODO: ZONE_DEVICE support requires to identify
+                 * memmaps that were actually initialized.
+                 */
+                ppage = pfn_to_online_page(pfn);
+
                 if (!ppage || PageSlab(ppage) || page_has_type(ppage))
                         pcount = 0;
                 else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
                 return -EINVAL;
 
         while (count > 0) {
-                if (pfn_valid(pfn))
-                        ppage = pfn_to_page(pfn);
-                else
-                        ppage = NULL;
+                /*
+                 * TODO: ZONE_DEVICE support requires to identify
+                 * memmaps that were actually initialized.
+                 */
+                ppage = pfn_to_online_page(pfn);
 
                 if (put_user(stable_page_flags(ppage), out)) {
                         ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                 return -EINVAL;
 
         while (count > 0) {
-                if (pfn_valid(pfn))
-                        ppage = pfn_to_page(pfn);
-                else
-                        ppage = NULL;
+                /*
+                 * TODO: ZONE_DEVICE support requires to identify
+                 * memmaps that were actually initialized.
+                 */
+                ppage = pfn_to_online_page(pfn);
 
                 if (ppage)
                         ino = page_cgroup_ino(ppage);
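For context on what these handlers serve: /proc/kpagecount, /proc/kpageflags and /proc/kpagecgroup are flat arrays of 64-bit values indexed by pfn (this is how tools/vm/page-types.c consumes them), and with this fix offline or device pfns are treated like holes (kpagecount reports 0, kpageflags reports KPF_NOPAGE) instead of risking a read through an uninitialized memmap. A minimal userspace reader sketch (needs root; the pfn comes from the command line purely for illustration):

#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        uint64_t pfn, count;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
                return 1;
        }
        pfn = strtoull(argv[1], NULL, 0);

        fd = open("/proc/kpagecount", O_RDONLY);
        if (fd < 0) {
                perror("open /proc/kpagecount");
                return 1;
        }
        /* One 8-byte entry per pfn. */
        if (pread(fd, &count, sizeof(count), pfn * sizeof(count)) != (ssize_t)sizeof(count)) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("pfn %" PRIu64 ": mapcount %" PRIu64 "\n", pfn, count);
        close(fd);
        return 0;
}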
13 changes: 11 additions & 2 deletions kernel/events/uprobes.c
@@ -474,21 +474,30 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
         struct vm_area_struct *vma;
         int ret, is_register, ref_ctr_updated = 0;
         bool orig_page_huge = false;
+        unsigned int gup_flags = FOLL_FORCE;
 
         is_register = is_swbp_insn(&opcode);
         uprobe = container_of(auprobe, struct uprobe, arch);
 
 retry:
+        if (is_register)
+                gup_flags |= FOLL_SPLIT_PMD;
         /* Read the page with vaddr into memory */
-        ret = get_user_pages_remote(NULL, mm, vaddr, 1,
-                        FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
+        ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+                        &old_page, &vma, NULL);
         if (ret <= 0)
                 return ret;
 
         ret = verify_opcode(old_page, vaddr, &opcode);
         if (ret <= 0)
                 goto put_old;
 
+        if (WARN(!is_register && PageCompound(old_page),
+                        "uprobe unregister should never work on compound page\n")) {
+                ret = -EINVAL;
+                goto put_old;
+        }
+
         /* We are going to replace instruction, update ref_ctr. */
         if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
                 ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
1 change: 1 addition & 0 deletions mm/filemap.c
@@ -40,6 +40,7 @@
 #include <linux/rmap.h>
 #include <linux/delayacct.h>
 #include <linux/psi.h>
+#include <linux/ramfs.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
14 changes: 8 additions & 6 deletions mm/gup.c
@@ -1973,7 +1973,8 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 }
 
 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-                       unsigned long end, int write, struct page **pages, int *nr)
+                       unsigned long end, unsigned int flags,
+                       struct page **pages, int *nr)
 {
         unsigned long pte_end;
         struct page *head, *page;
@@ -1986,7 +1987,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 
         pte = READ_ONCE(*ptep);
 
-        if (!pte_access_permitted(pte, write))
+        if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                 return 0;
 
         /* hugepages are never "special" */
@@ -2023,7 +2024,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 }
 
 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-                unsigned int pdshift, unsigned long end, int write,
+                unsigned int pdshift, unsigned long end, unsigned int flags,
                 struct page **pages, int *nr)
 {
         pte_t *ptep;
@@ -2033,23 +2034,24 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
         ptep = hugepte_offset(hugepd, addr, pdshift);
         do {
                 next = hugepte_addr_end(addr, end, sz);
-                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+                if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
                         return 0;
         } while (ptep++, addr = next, addr != end);
 
         return 1;
 }
 #else
 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-                unsigned pdshift, unsigned long end, int write,
+                unsigned int pdshift, unsigned long end, unsigned int flags,
                 struct page **pages, int *nr)
 {
         return 0;
 }
 #endif /* CONFIG_ARCH_HAS_HUGEPD */
 
 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
-                unsigned long end, unsigned int flags, struct page **pages, int *nr)
+                unsigned long end, unsigned int flags,
+                struct page **pages, int *nr)
 {
         struct page *head, *page;
         int refs;
9 changes: 7 additions & 2 deletions mm/huge_memory.c
@@ -2789,8 +2789,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                         ds_queue->split_queue_len--;
                         list_del(page_deferred_list(head));
                 }
-                if (mapping)
-                        __dec_node_page_state(page, NR_SHMEM_THPS);
+                if (mapping) {
+                        if (PageSwapBacked(page))
+                                __dec_node_page_state(page, NR_SHMEM_THPS);
+                        else
+                                __dec_node_page_state(page, NR_FILE_THPS);
+                }
+
                 spin_unlock(&ds_queue->split_queue_lock);
                 __split_huge_page(page, list, end, flags);
                 if (PageSwapCache(head)) {
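The NR_SHMEM_THPS / NR_FILE_THPS node counters touched here are what /proc/meminfo reports as ShmemHugePages: and FileHugePages: (see the fs/proc/meminfo.c hunk above), so decrementing the wrong one when a page-cache THP is split leaves those lines permanently skewed. A small, purely illustrative userspace check:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "ShmemHugePages:", 15) ||
                    !strncmp(line, "FileHugePages:", 14))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}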
5 changes: 2 additions & 3 deletions mm/hugetlb.c
@@ -1084,11 +1084,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
         struct page *page;
 
         for (i = start_pfn; i < end_pfn; i++) {
-                if (!pfn_valid(i))
+                page = pfn_to_online_page(i);
+                if (!page)
                         return false;
 
-                page = pfn_to_page(i);
-
                 if (page_zone(page) != z)
                         return false;
 
1 change: 1 addition & 0 deletions mm/init-mm.c
@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
+#include <linux/mman.h>
 
 #include <linux/atomic.h>
 #include <linux/user_namespace.h>
6 changes: 3 additions & 3 deletions mm/memblock.c
@@ -1356,9 +1356,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                 align = SMP_CACHE_BYTES;
         }
 
-        if (end > memblock.current_limit)
-                end = memblock.current_limit;
-
 again:
         found = memblock_find_in_range_node(size, align, start, end, nid,
                                             flags);
@@ -1469,6 +1466,9 @@ static void * __init memblock_alloc_internal(
         if (WARN_ON_ONCE(slab_is_available()))
                 return kzalloc_node(size, GFP_NOWAIT, nid);
 
+        if (max_addr > memblock.current_limit)
+                max_addr = memblock.current_limit;
+
         alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
 
         /* retry allocation without lower limit */
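The point of moving the clamp: memblock_phys_alloc*() hands back a physical address and does not require the range to be mapped yet, so it should not be capped at memblock.current_limit; memblock_alloc*() hands back a virtual address, so the cap now lives in memblock_alloc_internal(), which those callers funnel through. A hedged sketch contrasting the two call styles (kernel-internal API, not code from this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static int __init early_alloc_example(void)
{
        /* Physical address: may now legitimately land above current_limit. */
        phys_addr_t pa = memblock_phys_alloc(SZ_4K, SZ_4K);

        /* Virtual address: memblock_alloc_internal() applies the
         * current_limit clamp, so the buffer is reachable through the
         * early mapping. */
        void *buf = memblock_alloc(SZ_4K, SZ_4K);

        return (pa && buf) ? 0 : -ENOMEM;
}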
18 changes: 12 additions & 6 deletions mm/memcontrol.c
@@ -5420,6 +5420,8 @@ static int mem_cgroup_move_account(struct page *page,
                                    struct mem_cgroup *from,
                                    struct mem_cgroup *to)
 {
+        struct lruvec *from_vec, *to_vec;
+        struct pglist_data *pgdat;
         unsigned long flags;
         unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
         int ret;
@@ -5443,11 +5445,15 @@ static int mem_cgroup_move_account(struct page *page,
 
         anon = PageAnon(page);
 
+        pgdat = page_pgdat(page);
+        from_vec = mem_cgroup_lruvec(pgdat, from);
+        to_vec = mem_cgroup_lruvec(pgdat, to);
+
         spin_lock_irqsave(&from->move_lock, flags);
 
         if (!anon && page_mapped(page)) {
-                __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
-                __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
+                __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+                __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
         }
 
         /*
@@ -5459,14 +5465,14 @@ static int mem_cgroup_move_account(struct page *page,
                 struct address_space *mapping = page_mapping(page);
 
                 if (mapping_cap_account_dirty(mapping)) {
-                        __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
-                        __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
+                        __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
+                        __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
                 }
         }
 
         if (PageWriteback(page)) {
-                __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
-                __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
+                __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
+                __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
         }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
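Background for this change: __mod_memcg_state() adjusts only the cgroup-wide counter, so moving a page between cgroups left the per-node (lruvec) breakdown stale. __mod_lruvec_state() updates the node, memcg and lruvec counters together; since the page stays on its node during the move, the paired -nr_pages/+nr_pages node updates cancel and only the cgroup-side statistics migrate. A condensed sketch of the fixed step (same kernel calls as the hunk above, shown for one counter only):

        pgdat = page_pgdat(page);                       /* node the page lives on */
        from_vec = mem_cgroup_lruvec(pgdat, from);      /* memcg x node statistics */
        to_vec = mem_cgroup_lruvec(pgdat, to);

        /* The node halves of these two updates cancel out; the memcg and
         * lruvec halves move the accounting from one cgroup to the other. */
        __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
        __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);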
14 changes: 8 additions & 6 deletions mm/memory-failure.c
@@ -1257,17 +1257,19 @@ int memory_failure(unsigned long pfn, int flags)
         if (!sysctl_memory_failure_recovery)
                 panic("Memory failure on page %lx", pfn);
 
-        if (!pfn_valid(pfn)) {
+        p = pfn_to_online_page(pfn);
+        if (!p) {
+                if (pfn_valid(pfn)) {
+                        pgmap = get_dev_pagemap(pfn, NULL);
+                        if (pgmap)
+                                return memory_failure_dev_pagemap(pfn, flags,
+                                                                  pgmap);
+                }
                 pr_err("Memory failure: %#lx: memory outside kernel control\n",
                         pfn);
                 return -ENXIO;
         }
 
-        pgmap = get_dev_pagemap(pfn, NULL);
-        if (pgmap)
-                return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
-        p = pfn_to_page(pfn);
         if (PageHuge(p))
                 return memory_failure_hugetlb(pfn, flags);
         if (TestSetPageHWPoison(p)) {
(Diffs for the remaining changed files are not shown.)
