Skip to content

Commit

Permalink
Merge tag 'mm-hotfixes-stable-2024-05-25-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Browse files Browse the repository at this point in the history

Pull misc fixes from Andrew Morton:
 "16 hotfixes, 11 of which are cc:stable.

  A few nilfs2 fixes, the remainder are for MM: a couple of selftests
  fixes, various singletons fixing various issues in various parts"

* tag 'mm-hotfixes-stable-2024-05-25-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/ksm: fix possible UAF of stable_node
  mm/memory-failure: fix handling of dissolved but not taken off from buddy pages
  mm: /proc/pid/smaps_rollup: avoid skipping vma after getting mmap_lock again
  nilfs2: fix potential hang in nilfs_detach_log_writer()
  nilfs2: fix unexpected freezing of nilfs_segctor_sync()
  nilfs2: fix use-after-free of timer for log writer thread
  selftests/mm: fix build warnings on ppc64
  arm64: patching: fix handling of execmem addresses
  selftests/mm: compaction_test: fix bogus test success and reduce probability of OOM-killer invocation
  selftests/mm: compaction_test: fix incorrect write of zero to nr_hugepages
  selftests/mm: compaction_test: fix bogus test success on Aarch64
  mailmap: update email address for Satya Priya
  mm/huge_memory: don't unpoison huge_zero_folio
  kasan, fortify: properly rename memintrinsics
  lib: add version into /proc/allocinfo output
  mm/vmalloc: fix vmalloc which may return null if called with __GFP_NOFAIL
  • Loading branch information
Linus Torvalds committed May 25, 2024
2 parents a0db36e + 90e8234 commit 9b62e02
Show file tree
Hide file tree
Showing 13 changed files with 187 additions and 69 deletions.
2 changes: 1 addition & 1 deletion .mailmap
Original file line number Diff line number Diff line change
Expand Up @@ -572,7 +572,7 @@ Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
Sahitya Tummala <quic_stummala@quicinc.com> <stummala@codeaurora.org>
Sathishkumar Muruganandam <quic_murugana@quicinc.com> <murugana@codeaurora.org>
Satya Priya <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
Satya Priya <quic_skakitap@quicinc.com> <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
S.Çağlar Onur <caglar@pardus.org.tr>
Sayali Lokhande <quic_sayalil@quicinc.com> <sayalil@codeaurora.org>
Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
Expand Down
5 changes: 3 additions & 2 deletions Documentation/filesystems/proc.rst
Original file line number Diff line number Diff line change
Expand Up @@ -961,13 +961,14 @@ Provides information about memory allocations at all locations in the code
base. Each allocation in the code is identified by its source file, line
number, module (if originates from a loadable module) and the function calling
the allocation. The number of bytes allocated and number of calls at each
location are reported.
location are reported. The first line indicates the version of the file, the
second line is the header listing fields in the file.

Example output.

::

> sort -rn /proc/allocinfo
> tail -n +3 /proc/allocinfo | sort -rn
127664128 31168 mm/page_ext.c:270 func:alloc_page_ext
56373248 4737 mm/slub.c:2259 func:alloc_slab_page
14880768 3633 mm/readahead.c:247 func:page_cache_ra_unbounded
Expand Down
2 changes: 1 addition & 1 deletion arch/arm64/kernel/patching.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)

if (image)
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
else if (IS_ENABLED(CONFIG_EXECMEM))
page = vmalloc_to_page(addr);
else
return addr;
Expand Down
63 changes: 50 additions & 13 deletions fs/nilfs2/segment.c
Original file line number Diff line number Diff line change
Expand Up @@ -2118,8 +2118,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
spin_lock(&sci->sc_state_lock);
if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
sci->sc_timer.expires = jiffies + sci->sc_interval;
add_timer(&sci->sc_timer);
if (sci->sc_task) {
sci->sc_timer.expires = jiffies + sci->sc_interval;
add_timer(&sci->sc_timer);
}
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
}
spin_unlock(&sci->sc_state_lock);
Expand Down Expand Up @@ -2166,19 +2168,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
struct nilfs_segctor_wait_request wait_req;
int err = 0;

spin_lock(&sci->sc_state_lock);
init_wait(&wait_req.wq);
wait_req.err = 0;
atomic_set(&wait_req.done, 0);
init_waitqueue_entry(&wait_req.wq, current);

/*
* To prevent a race issue where completion notifications from the
* log writer thread are missed, increment the request sequence count
* "sc_seq_request" and insert a wait queue entry using the current
* sequence number into the "sc_wait_request" queue at the same time
* within the lock section of "sc_state_lock".
*/
spin_lock(&sci->sc_state_lock);
wait_req.seq = ++sci->sc_seq_request;
add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
spin_unlock(&sci->sc_state_lock);

init_waitqueue_entry(&wait_req.wq, current);
add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
set_current_state(TASK_INTERRUPTIBLE);
wake_up(&sci->sc_wait_daemon);

for (;;) {
set_current_state(TASK_INTERRUPTIBLE);

/*
* Synchronize only while the log writer thread is alive.
* Leave flushing out after the log writer thread exits to
* the cleanup work in nilfs_segctor_destroy().
*/
if (!sci->sc_task)
break;

if (atomic_read(&wait_req.done)) {
err = wait_req.err;
break;
Expand All @@ -2194,15 +2213,15 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
return err;
}

static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
struct nilfs_segctor_wait_request *wrq, *n;
unsigned long flags;

spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
if (!atomic_read(&wrq->done) &&
nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
(force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
wrq->err = err;
atomic_set(&wrq->done, 1);
}
Expand Down Expand Up @@ -2320,10 +2339,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
*/
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
bool thread_is_alive;

spin_lock(&sci->sc_state_lock);
sci->sc_seq_accepted = sci->sc_seq_request;
thread_is_alive = (bool)sci->sc_task;
spin_unlock(&sci->sc_state_lock);
del_timer_sync(&sci->sc_timer);

/*
* This function does not race with the log writer thread's
* termination. Therefore, deleting sc_timer, which should not be
* done after the log writer thread exits, can be done safely outside
* the area protected by sc_state_lock.
*/
if (thread_is_alive)
del_timer_sync(&sci->sc_timer);
}

/**
Expand All @@ -2340,7 +2370,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
sci->sc_seq_done = sci->sc_seq_accepted;
nilfs_segctor_wakeup(sci, err);
nilfs_segctor_wakeup(sci, err, false);
sci->sc_flush_request = 0;
} else {
if (mode == SC_FLUSH_FILE)
Expand All @@ -2349,7 +2379,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;

/* re-enable timer if checkpoint creation was not done */
if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
time_before(jiffies, sci->sc_timer.expires))
add_timer(&sci->sc_timer);
}
Expand Down Expand Up @@ -2539,6 +2569,7 @@ static int nilfs_segctor_thread(void *arg)
int timeout = 0;

sci->sc_timer_task = current;
timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

/* start sync. */
sci->sc_task = current;
Expand Down Expand Up @@ -2606,6 +2637,7 @@ static int nilfs_segctor_thread(void *arg)
end_thread:
/* end sync. */
sci->sc_task = NULL;
timer_shutdown_sync(&sci->sc_timer);
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
Expand Down Expand Up @@ -2669,7 +2701,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_iput_queue);
INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
Expand Down Expand Up @@ -2723,6 +2754,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);

/*
* Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
* be called from delayed iput() via nilfs_evict_inode() and can race
* with the above log writer thread termination.
*/
nilfs_segctor_wakeup(sci, 0, true);

if (flush_work(&sci->sc_iput_work))
flag = true;

Expand All @@ -2748,7 +2786,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)

down_write(&nilfs->ns_segctor_sem);

timer_shutdown_sync(&sci->sc_timer);
kfree(sci);
}

Expand Down
9 changes: 7 additions & 2 deletions fs/proc/task_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -970,12 +970,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
break;

/* Case 1 and 2 above */
if (vma->vm_start >= last_vma_end)
if (vma->vm_start >= last_vma_end) {
smap_gather_stats(vma, &mss, 0);
last_vma_end = vma->vm_end;
continue;
}

/* Case 4 above */
if (vma->vm_end > last_vma_end)
if (vma->vm_end > last_vma_end) {
smap_gather_stats(vma, &mss, last_vma_end);
last_vma_end = vma->vm_end;
}
}
} for_each_vma(vmi, vma);

Expand Down
22 changes: 18 additions & 4 deletions include/linux/fortify-string.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,17 +75,30 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
__ret; \
})

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#if defined(__SANITIZE_ADDRESS__)

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
#elif defined(CONFIG_KASAN_GENERIC)
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
#else /* CONFIG_KASAN_SW_TAGS */
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
#endif

extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);

#else

#if defined(__SANITIZE_MEMORY__)
Expand All @@ -110,6 +123,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#define __underlying_strlen __builtin_strlen
#define __underlying_strncat __builtin_strncat
#define __underlying_strncpy __builtin_strncpy

#endif

/**
Expand Down
47 changes: 32 additions & 15 deletions lib/alloc_tag.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,47 +16,60 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);

struct allocinfo_private {
struct codetag_iterator iter;
bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
struct codetag_iterator *iter;
struct allocinfo_private *priv;
struct codetag *ct;
loff_t node = *pos;

iter = kzalloc(sizeof(*iter), GFP_KERNEL);
m->private = iter;
if (!iter)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
m->private = priv;
if (!priv)
return NULL;

priv->print_header = (node == 0);
codetag_lock_module_list(alloc_tag_cttype, true);
*iter = codetag_get_ct_iter(alloc_tag_cttype);
while ((ct = codetag_next_ct(iter)) != NULL && node)
priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
node--;

return ct ? iter : NULL;
return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
struct codetag_iterator *iter = (struct codetag_iterator *)arg;
struct codetag *ct = codetag_next_ct(iter);
struct allocinfo_private *priv = (struct allocinfo_private *)arg;
struct codetag *ct = codetag_next_ct(&priv->iter);

(*pos)++;
if (!ct)
return NULL;

return iter;
return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
struct codetag_iterator *iter = (struct codetag_iterator *)m->private;
struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

if (iter) {
if (priv) {
codetag_lock_module_list(alloc_tag_cttype, false);
kfree(iter);
kfree(priv);
}
}

/*
 * print_allocinfo_header - emit the preamble lines of /proc/allocinfo
 * @buf: seq_buf to append the header text to
 *
 * Writes two lines: first a format-version line (so userspace readers can
 * detect future format changes), then a '#'-prefixed line naming the
 * per-entry fields that follow. Consumers that only want the data entries
 * can skip these first two lines (e.g. `tail -n +3`).
 */
static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
struct alloc_tag *tag = ct_to_alloc_tag(ct);
Expand All @@ -71,13 +84,17 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)

static int allocinfo_show(struct seq_file *m, void *arg)
{
struct codetag_iterator *iter = (struct codetag_iterator *)arg;
struct allocinfo_private *priv = (struct allocinfo_private *)arg;
char *bufp;
size_t n = seq_get_buf(m, &bufp);
struct seq_buf buf;

seq_buf_init(&buf, bufp, n);
alloc_tag_to_text(&buf, iter->ct);
if (priv->print_header) {
print_allocinfo_header(&buf);
priv->print_header = false;
}
alloc_tag_to_text(&buf, priv->iter.ct);
seq_commit(m, seq_buf_used(&buf));
return 0;
}
Expand Down
3 changes: 2 additions & 1 deletion mm/ksm.c
Original file line number Diff line number Diff line change
Expand Up @@ -2153,7 +2153,6 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)

INIT_HLIST_HEAD(&stable_node_dup->hlist);
stable_node_dup->kpfn = kpfn;
folio_set_stable_node(kfolio, stable_node_dup);
stable_node_dup->rmap_hlist_len = 0;
DO_NUMA(stable_node_dup->nid = nid);
if (!need_chain) {
Expand All @@ -2172,6 +2171,8 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
stable_node_chain_add_dup(stable_node_dup, stable_node);
}

folio_set_stable_node(kfolio, stable_node_dup);

return stable_node_dup;
}

Expand Down
11 changes: 9 additions & 2 deletions mm/memory-failure.c
Original file line number Diff line number Diff line change
Expand Up @@ -1221,7 +1221,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
* subpages.
*/
folio_put(folio);
if (__page_handle_poison(p) >= 0) {
if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
res = MF_RECOVERED;
} else {
Expand Down Expand Up @@ -2091,7 +2091,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
*/
if (res == 0) {
folio_unlock(folio);
if (__page_handle_poison(p) >= 0) {
if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
res = MF_RECOVERED;
} else {
Expand Down Expand Up @@ -2546,6 +2546,13 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}

if (is_huge_zero_folio(folio)) {
unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
pfn, &unpoison_rs);
ret = -EOPNOTSUPP;
goto unlock_mutex;
}

if (!PageHWPoison(p)) {
unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
pfn, &unpoison_rs);
Expand Down
Loading

0 comments on commit 9b62e02

Please sign in to comment.