Merge tag 'mm-hotfixes-stable-2023-09-23-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "13 hotfixes, 10 of which pertain to post-6.5 issues. The other three
  are cc:stable"

* tag 'mm-hotfixes-stable-2023-09-23-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  proc: nommu: fix empty /proc/<pid>/maps
  filemap: add filemap_map_order0_folio() to handle order0 folio
  proc: nommu: /proc/<pid>/maps: release mmap read lock
  mm: memcontrol: fix GFP_NOFS recursion in memory.high enforcement
  pidfd: prevent a kernel-doc warning
  argv_split: fix kernel-doc warnings
  scatterlist: add missing function params to kernel-doc
  selftests/proc: fixup proc-empty-vm test after KSM changes
  revert "scripts/gdb/symbols: add specific ko module load command"
  selftests: link libasan statically for tests with -fsanitize=address
  task_work: add kerneldoc annotation for 'data' argument
  mm: page_alloc: fix CMA and HIGHATOMIC landing on the wrong buddy list
  sh: mm: re-add lost __ref to ioremap_prot() to fix modpost warning
Linus Torvalds committed Sep 23, 2023
2 parents 8565bdf + fe44198 commit 85eba5f
Showing 16 changed files with 111 additions and 91 deletions.
4 changes: 2 additions & 2 deletions arch/sh/mm/ioremap.c
@@ -72,8 +72,8 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #define __ioremap_29bit(offset, size, prot)    NULL
 #endif /* CONFIG_29BIT */
 
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
-                           unsigned long prot)
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+                                 unsigned long prot)
 {
         void __iomem *mapped;
         pgprot_t pgprot = __pgprot(prot);
2 changes: 0 additions & 2 deletions fs/proc/internal.h
@@ -289,9 +289,7 @@ struct proc_maps_private {
         struct inode *inode;
         struct task_struct *task;
         struct mm_struct *mm;
-#ifdef CONFIG_MMU
         struct vma_iterator iter;
-#endif
 #ifdef CONFIG_NUMA
         struct mempolicy *task_mempolicy;
 #endif
64 changes: 37 additions & 27 deletions fs/proc/task_nommu.c
@@ -175,15 +175,28 @@ static int show_map(struct seq_file *m, void *_p)
         return nommu_vma_show(m, _p);
 }
 
-static void *m_start(struct seq_file *m, loff_t *pos)
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+                                           loff_t *ppos)
+{
+        struct vm_area_struct *vma = vma_next(&priv->iter);
+
+        if (vma) {
+                *ppos = vma->vm_start;
+        } else {
+                *ppos = -1UL;
+        }
+
+        return vma;
+}
+
+static void *m_start(struct seq_file *m, loff_t *ppos)
 {
         struct proc_maps_private *priv = m->private;
+        unsigned long last_addr = *ppos;
         struct mm_struct *mm;
-        struct vm_area_struct *vma;
-        unsigned long addr = *pos;
 
-        /* See m_next(). Zero at the start or after lseek. */
-        if (addr == -1UL)
+        /* See proc_get_vma(). Zero at the start or after lseek. */
+        if (last_addr == -1UL)
                 return NULL;
 
         /* pin the task and mm whilst we play with them */
@@ -192,44 +205,41 @@ static void *m_start(struct seq_file *m, loff_t *pos)
                 return ERR_PTR(-ESRCH);
 
         mm = priv->mm;
-        if (!mm || !mmget_not_zero(mm))
+        if (!mm || !mmget_not_zero(mm)) {
+                put_task_struct(priv->task);
+                priv->task = NULL;
                 return NULL;
+        }
 
         if (mmap_read_lock_killable(mm)) {
                 mmput(mm);
+                put_task_struct(priv->task);
+                priv->task = NULL;
                 return ERR_PTR(-EINTR);
         }
 
-        /* start the next element from addr */
-        vma = find_vma(mm, addr);
-        if (vma)
-                return vma;
+        vma_iter_init(&priv->iter, mm, last_addr);
 
-        mmap_read_unlock(mm);
-        mmput(mm);
-        return NULL;
+        return proc_get_vma(priv, ppos);
 }
 
-static void m_stop(struct seq_file *m, void *_vml)
+static void m_stop(struct seq_file *m, void *v)
 {
         struct proc_maps_private *priv = m->private;
+        struct mm_struct *mm = priv->mm;
 
-        if (!IS_ERR_OR_NULL(_vml)) {
-                mmap_read_unlock(priv->mm);
-                mmput(priv->mm);
-        }
-        if (priv->task) {
-                put_task_struct(priv->task);
-                priv->task = NULL;
-        }
+        if (!priv->task)
+                return;
+
+        mmap_read_unlock(mm);
+        mmput(mm);
+        put_task_struct(priv->task);
+        priv->task = NULL;
 }
 
-static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
 {
-        struct vm_area_struct *vma = _p;
-
-        *pos = vma->vm_end;
-        return find_vma(vma->vm_mm, vma->vm_end);
+        return proc_get_vma(m->private, ppos);
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
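
The two nommu fixes above restore the seq_file contract for /proc/<pid>/maps: *ppos carries the start address of the next VMA, -1UL marks end-of-sequence, and m_stop() drops the mmap read lock and the mm/task references whenever m_start() succeeded, keyed off priv->task rather than the iterator value passed in. Below is a minimal user-space model of that contract (hypothetical names and types; a sketch, not kernel code):

/*
 * User-space model of the fixed seq_file iteration. *ppos holds the
 * start of the next range, -1UL means nothing left, and stop()
 * releases the "lock" whenever start() took it.
 */
#include <stdio.h>
#include <stdbool.h>

struct range { unsigned long start, end; };

static struct range maps[] = { {0x1000, 0x2000}, {0x8000, 0x9000} };
static const int nr_maps = 2;
static bool locked;             /* stands in for the mmap read lock */
static int iter;                /* stands in for priv->iter */

static struct range *get_range(unsigned long *ppos)
{
        struct range *r = (iter < nr_maps) ? &maps[iter++] : NULL;

        *ppos = r ? r->start : -1UL;    /* -1UL: end of sequence */
        return r;
}

static struct range *start(unsigned long *ppos)
{
        unsigned long last_addr = *ppos;

        if (last_addr == -1UL)          /* previous pass hit the end */
                return NULL;

        locked = true;                  /* mmap_read_lock_killable() */
        for (iter = 0; iter < nr_maps && maps[iter].start < last_addr; iter++)
                ;                       /* vma_iter_init(.., last_addr) */
        return get_range(ppos);
}

static struct range *next(unsigned long *ppos)
{
        return get_range(ppos);
}

static void stop(void)
{
        if (!locked)                    /* models the priv->task check */
                return;
        locked = false;                 /* mmap_read_unlock() + mmput() */
}

int main(void)
{
        unsigned long pos = 0;

        for (struct range *r = start(&pos); r; r = next(&pos))
                printf("%08lx-%08lx\n", r->start, r->end);
        stop();         /* safe whether start() locked anything or not */
        return 0;
}

Keying the cleanup off whether start() actually pinned anything means stop() is safe to call after a NULL or error return from start(), which is exactly the case the lock-release fix addresses.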
4 changes: 2 additions & 2 deletions include/linux/memcontrol.h
@@ -920,7 +920,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
         return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
 }
 
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
@@ -1458,7 +1458,7 @@ static inline void mem_cgroup_unlock_pages(void)
         rcu_read_unlock();
 }
 
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 {
 }
 
2 changes: 1 addition & 1 deletion include/linux/resume_user_mode.h
@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
         }
 #endif
 
-        mem_cgroup_handle_over_high();
+        mem_cgroup_handle_over_high(GFP_KERNEL);
         blkcg_maybe_throttle_current();
 
         rseq_handle_notify_resume(NULL, regs);
2 changes: 1 addition & 1 deletion kernel/pid.c
@@ -609,7 +609,7 @@ int pidfd_create(struct pid *pid, unsigned int flags)
 }
 
 /**
- * pidfd_open() - Open new pid file descriptor.
+ * sys_pidfd_open() - Open new pid file descriptor.
  *
  * @pid:   pid for which to retrieve a pidfd
  * @flags: flags to pass
1 change: 1 addition & 0 deletions kernel/task_work.c
@@ -78,6 +78,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
  * task_work_cancel_match - cancel a pending work added by task_work_add()
  * @task: the task which should execute the work
  * @match: match function to call
+ * @data: data to be passed in to match function
  *
  * RETURNS:
  * The found work or NULL if not found.
4 changes: 2 additions & 2 deletions lib/argv_split.c
@@ -28,7 +28,7 @@ static int count_argc(const char *str)
 
 /**
  * argv_free - free an argv
- * @argv - the argument vector to be freed
+ * @argv: the argument vector to be freed
  *
  * Frees an argv and the strings it points to.
  */
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(argv_free);
  * @str: the string to be split
  * @argcp: returned argument count
  *
- * Returns an array of pointers to strings which are split out from
+ * Returns: an array of pointers to strings which are split out from
  * @str. This is performed by strictly splitting on white-space; no
  * quote processing is performed. Multiple whitespace characters are
  * considered to be a single argument separator. The returned array
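
Four of the hotfixes (pidfd, argv_split, scatterlist, task_work) are kernel-doc repairs of the same shape: parameters take the `@name:` form rather than `@name -`, every parameter gets an entry, the return section is spelled `Returns:`, and the comment names the symbol actually being documented. For reference, a well-formed comment in that style, paraphrasing the fixed argv_split() doc (the typedef is only a stand-in so the snippet is self-contained):

typedef unsigned int gfp_t;     /* stand-in; the kernel defines this */

/**
 * argv_split - split a string at whitespace, returning an argv
 * @gfp: the GFP mask used to allocate the array and the strings
 * @str: the string to be split
 * @argcp: returned argument count
 *
 * Returns: an array of pointers to strings which are split out from
 * @str, or NULL on allocation failure. Free the result with argv_free().
 */
char **argv_split(gfp_t gfp, const char *str, int *argcp);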
4 changes: 3 additions & 1 deletion lib/scatterlist.c
@@ -265,7 +265,8 @@ EXPORT_SYMBOL(sg_free_table);
  * @table: The sg table header to use
  * @nents: Number of entries in sg list
  * @max_ents: The maximum number of entries the allocator returns per call
- * @nents_first_chunk: Number of entries int the (preallocated) first
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
  *    scatterlist chunk, 0 means no such preallocated chunk provided by user
  * @gfp_mask: GFP allocation mask
  * @alloc_fn: Allocator to use
@@ -788,6 +789,7 @@ EXPORT_SYMBOL(__sg_page_iter_dma_next);
  * @miter: sg mapping iter to be started
  * @sgl: sg list to iterate over
  * @nents: number of sg entries
+ * @flags: sg iterator flags
  *
  * Description:
  *   Starts mapping iterator @miter.
69 changes: 48 additions & 21 deletions mm/filemap.c
@@ -3475,22 +3475,19 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
  */
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                         struct folio *folio, unsigned long start,
-                        unsigned long addr, unsigned int nr_pages)
+                        unsigned long addr, unsigned int nr_pages,
+                        unsigned int *mmap_miss)
 {
         vm_fault_t ret = 0;
-        struct vm_area_struct *vma = vmf->vma;
-        struct file *file = vma->vm_file;
         struct page *page = folio_page(folio, start);
-        unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
         unsigned int count = 0;
         pte_t *old_ptep = vmf->pte;
 
         do {
                 if (PageHWPoison(page + count))
                         goto skip;
 
-                if (mmap_miss > 0)
-                        mmap_miss--;
+                (*mmap_miss)++;
 
                 /*
                  * NOTE: If there're PTE markers, we'll leave them to be
@@ -3525,7 +3522,35 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
         }
 
         vmf->pte = old_ptep;
-        WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 
         return ret;
 }
 
+static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
+                struct folio *folio, unsigned long addr,
+                unsigned int *mmap_miss)
+{
+        vm_fault_t ret = 0;
+        struct page *page = &folio->page;
+
+        if (PageHWPoison(page))
+                return ret;
+
+        (*mmap_miss)++;
+
+        /*
+         * NOTE: If there're PTE markers, we'll leave them to be
+         * handled in the specific fault path, and it'll prohibit
+         * the fault-around logic.
+         */
+        if (!pte_none(ptep_get(vmf->pte)))
+                return ret;
+
+        if (vmf->address == addr)
+                ret = VM_FAULT_NOPAGE;
+
+        set_pte_range(vmf, folio, page, 1, addr);
+        folio_ref_inc(folio);
+
+        return ret;
+}
@@ -3541,7 +3566,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
         XA_STATE(xas, &mapping->i_pages, start_pgoff);
         struct folio *folio;
         vm_fault_t ret = 0;
-        int nr_pages = 0;
+        unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
 
         rcu_read_lock();
         folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3569,25 +3594,27 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                 end = folio->index + folio_nr_pages(folio) - 1;
                 nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
-                /*
-                 * NOTE: If there're PTE markers, we'll leave them to be
-                 * handled in the specific fault path, and it'll prohibit the
-                 * fault-around logic.
-                 */
-                if (!pte_none(ptep_get(vmf->pte)))
-                        goto unlock;
-
-                ret |= filemap_map_folio_range(vmf, folio,
-                                xas.xa_index - folio->index, addr, nr_pages);
+                if (!folio_test_large(folio))
+                        ret |= filemap_map_order0_folio(vmf,
+                                        folio, addr, &mmap_miss);
+                else
+                        ret |= filemap_map_folio_range(vmf, folio,
+                                        xas.xa_index - folio->index, addr,
+                                        nr_pages, &mmap_miss);
 
-unlock:
                 folio_unlock(folio);
                 folio_put(folio);
-                folio = next_uptodate_folio(&xas, mapping, end_pgoff);
-        } while (folio);
+        } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
         pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
         rcu_read_unlock();
 
+        mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
+        if (mmap_miss >= mmap_miss_saved)
+                WRITE_ONCE(file->f_ra.mmap_miss, 0);
+        else
+                WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
+
         return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);
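
Besides adding the order-0 fast path, the change above replaces a per-page read-modify-write of the shared file->f_ra.mmap_miss counter with a local count that is folded in once per fault-around pass, saturating at zero instead of wrapping the unsigned value. A small user-space model of that final update (hypothetical struct; the kernel wraps the accesses in READ_ONCE/WRITE_ONCE):

/*
 * User-space model of the batched, saturating mmap_miss update.
 */
#include <stdio.h>

struct file_ra_state { unsigned int mmap_miss; };

/* Fold @mapped pages worth of "hits" into the miss counter at once. */
static void fold_mmap_miss(struct file_ra_state *ra, unsigned int mapped)
{
        unsigned int saved = ra->mmap_miss;     /* READ_ONCE() in the kernel */

        /* saturate at zero rather than wrapping the unsigned counter */
        ra->mmap_miss = (mapped >= saved) ? 0 : saved - mapped;
}

int main(void)
{
        struct file_ra_state ra = { .mmap_miss = 3 };

        fold_mmap_miss(&ra, 8);         /* mapped more pages than missed */
        printf("%u\n", ra.mmap_miss);   /* 0, not a huge wrapped value */
        return 0;
}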
6 changes: 3 additions & 3 deletions mm/memcontrol.c
@@ -2555,7 +2555,7 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
  * Scheduled by try_charge() to be executed from the userland return path
  * and reclaims memory over the high limit.
  */
-void mem_cgroup_handle_over_high(void)
+void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 {
         unsigned long penalty_jiffies;
         unsigned long pflags;
@@ -2583,7 +2583,7 @@ void mem_cgroup_handle_over_high(void)
          */
         nr_reclaimed = reclaim_high(memcg,
                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
-                                    GFP_KERNEL);
+                                    gfp_mask);
 
         /*
          * memory.high is breached and reclaim is unable to keep up. Throttle
@@ -2819,7 +2819,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
         if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
             !(current->flags & PF_MEMALLOC) &&
             gfpflags_allow_blocking(gfp_mask)) {
-                mem_cgroup_handle_over_high();
+                mem_cgroup_handle_over_high(gfp_mask);
         }
         return 0;
 }
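
The point of threading gfp_mask through mem_cgroup_handle_over_high() is that high-limit reclaim triggered from try_charge_memcg() now inherits the caller's allocation context, so a GFP_NOFS allocation can no longer recurse into filesystem reclaim while filesystem locks are held; the return-to-userspace path keeps passing GFP_KERNEL. A user-space sketch of the idea (hypothetical flag values and helpers, not the real GFP encoding):

/*
 * User-space model of GFP context propagation.
 */
#include <stdio.h>

#define __GFP_FS        0x1u    /* caller may re-enter filesystem code */
#define GFP_KERNEL      (__GFP_FS)
#define GFP_NOFS        (0x0u)

typedef unsigned int gfp_t;

static void shrink_fs_caches(void)
{
        /* the path a GFP_NOFS caller must never recurse into */
        puts("reclaiming through the filesystem");
}

static void reclaim_high(gfp_t gfp_mask)
{
        if (gfp_mask & __GFP_FS)
                shrink_fs_caches();
        else
                puts("skipping fs reclaim: caller may hold fs locks");
}

/* was reclaim_high(GFP_KERNEL): reclaim ignored the caller's context */
static void handle_over_high(gfp_t gfp_mask)
{
        reclaim_high(gfp_mask);         /* now honors the caller's mask */
}

int main(void)
{
        handle_over_high(GFP_NOFS);     /* e.g. charge from an fs write path */
        handle_over_high(GFP_KERNEL);   /* e.g. return-to-userspace path */
        return 0;
}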
12 changes: 6 additions & 6 deletions mm/page_alloc.c
@@ -2400,32 +2400,32 @@ void free_unref_page(struct page *page, unsigned int order)
         struct per_cpu_pages *pcp;
         struct zone *zone;
         unsigned long pfn = page_to_pfn(page);
-        int migratetype;
+        int migratetype, pcpmigratetype;
 
         if (!free_unref_page_prepare(page, pfn, order))
                 return;
 
         /*
          * We only track unmovable, reclaimable and movable on pcp lists.
          * Place ISOLATE pages on the isolated list because they are being
-         * offlined but treat HIGHATOMIC as movable pages so we can get those
-         * areas back if necessary. Otherwise, we may have to free
+         * offlined but treat HIGHATOMIC and CMA as movable pages so we can
+         * get those areas back if necessary. Otherwise, we may have to free
          * excessively into the page allocator
          */
-        migratetype = get_pcppage_migratetype(page);
+        migratetype = pcpmigratetype = get_pcppage_migratetype(page);
         if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
                 if (unlikely(is_migrate_isolate(migratetype))) {
                         free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
                         return;
                 }
-                migratetype = MIGRATE_MOVABLE;
+                pcpmigratetype = MIGRATE_MOVABLE;
         }
 
         zone = page_zone(page);
         pcp_trylock_prepare(UP_flags);
         pcp = pcp_spin_trylock(zone->per_cpu_pageset);
         if (pcp) {
-                free_unref_page_commit(zone, pcp, page, migratetype, order);
+                free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
                 pcp_spin_unlock(pcp);
         } else {
                 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
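
The bug this fixes: when the pcp spinlock trylock failed, the fallback free_one_page() call used the already-overridden migratetype, so HIGHATOMIC and CMA pages landed on the MOVABLE buddy freelist and those reserves quietly leaked away. Keeping the original type in migratetype and using pcpmigratetype only for pcp-list placement preserves the fallback path. A compact user-space model (hypothetical enum and helpers modeled on mm/page_alloc.c):

/*
 * User-space model of the migratetype vs. pcpmigratetype split.
 */
#include <stdio.h>
#include <stdbool.h>

enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_PCPTYPES,       /* first type not tracked on pcp lists */
        MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
        MIGRATE_CMA,
};

static void free_to_pcp(int mt)   { printf("pcp list %d\n", mt); }
static void free_to_buddy(int mt) { printf("buddy list %d\n", mt); }

static void free_page_model(int page_mt, bool pcp_trylock_ok)
{
        int migratetype = page_mt, pcpmigratetype = page_mt;

        if (migratetype >= MIGRATE_PCPTYPES)
                pcpmigratetype = MIGRATE_MOVABLE;   /* pcp placement only */

        if (pcp_trylock_ok)
                free_to_pcp(pcpmigratetype);
        else
                free_to_buddy(migratetype); /* keeps HIGHATOMIC/CMA intact */
}

int main(void)
{
        free_page_model(MIGRATE_CMA, true);     /* pcp list: MOVABLE */
        free_page_model(MIGRATE_CMA, false);    /* buddy list: CMA */
        return 0;
}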