Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "23 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (23 commits)
  mm, memory_hotplug: fix off-by-one in is_pageblock_removable
  mm: don't let userspace spam allocations warnings
  slub: fix a crash with SLUB_DEBUG + KASAN_SW_TAGS
  kasan, slab: remove redundant kasan_slab_alloc hooks
  kasan, slab: make freelist stored without tags
  kasan, slab: fix conflicts with CONFIG_HARDENED_USERCOPY
  kasan: prevent tracing of tags.c
  kasan: fix random seed generation for tag-based mode
  tmpfs: fix link accounting when a tmpfile is linked in
  psi: avoid divide-by-zero crash inside virtual machines
  mm: handle lru_add_drain_all for UP properly
  mm, page_alloc: fix a division by zero error when boosting watermarks v2
  mm/debug.c: fix __dump_page() for poisoned pages
  proc, oom: do not report alien mms when setting oom_score_adj
  slub: fix SLAB_CONSISTENCY_CHECKS + KASAN_SW_TAGS
  kasan, slub: fix more conflicts with CONFIG_SLAB_FREELIST_HARDENED
  kasan, slub: fix conflicts with CONFIG_SLAB_FREELIST_HARDENED
  kasan, slub: move kasan_poison_slab hook before page_address
  kmemleak: account for tagged pointers when calculating pointer range
  kasan, kmemleak: pass tagged pointers to kmemleak
  ...
Linus Torvalds committed Feb 21, 2019
2 parents f6163d6 + 891cb2a commit 7c90325
Showing 20 changed files with 140 additions and 82 deletions.
3 changes: 3 additions & 0 deletions arch/arm64/kernel/setup.c
@@ -339,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
         smp_init_cpus();
         smp_build_mpidr_hash();
 
+        /* Init percpu seeds for random tags after cpus are set up. */
+        kasan_init_tags();
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
         /*
          * Make sure init_thread_info.ttbr0 always generates translation
2 changes: 0 additions & 2 deletions arch/arm64/mm/kasan_init.c
@@ -252,8 +252,6 @@ void __init kasan_init(void)
         memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
         cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
-        kasan_init_tags();
-
         /* At this point kasan is fully initialized. Enable error messages */
         init_task.kasan_depth = 0;
         pr_info("KernelAddressSanitizer initialized\n");
4 changes: 0 additions & 4 deletions fs/proc/base.c
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 
                 task_lock(p);
                 if (!p->vfork_done && process_shares_mm(p, mm)) {
-                        pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
-                                task_pid_nr(p), p->comm,
-                                p->signal->oom_score_adj, oom_adj,
-                                task_pid_nr(task), task->comm);
                         p->signal->oom_score_adj = oom_adj;
                         if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
                                 p->signal->oom_score_adj_min = (short)oom_adj;
6 changes: 3 additions & 3 deletions init/initramfs.c
@@ -550,6 +550,7 @@ static void __init free_initrd(void)
         initrd_end = 0;
 }
 
+#ifdef CONFIG_BLK_DEV_RAM
 #define BUF_SIZE 1024
 static void __init clean_rootfs(void)
 {
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
         ksys_close(fd);
         kfree(buf);
 }
+#endif
 
 static int __init populate_rootfs(void)
 {
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
                 printk(KERN_INFO "Unpacking initramfs...\n");
                 err = unpack_to_rootfs((char *)initrd_start,
                         initrd_end - initrd_start);
-                if (err) {
+                if (err)
                         printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
-                        clean_rootfs();
-                }
                 free_initrd();
 #endif
         }
2 changes: 1 addition & 1 deletion kernel/sched/psi.c
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
         expires = group->next_update;
         if (now < expires)
                 goto out;
-        if (now - expires > psi_period)
+        if (now - expires >= psi_period)
                 missed_periods = div_u64(now - expires, psi_period);
 
         /*
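For illustration, a minimal userspace sketch of the boundary this one-character fix addresses: when exactly one period has elapsed (now - expires == psi_period), the old strict comparison skipped the missed_periods calculation even though the division would have returned 1; per the commit subject, that exact-boundary case could end in a divide-by-zero on the coarse clocks seen inside virtual machines. The period constant below is a stand-in, not necessarily the kernel's value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t psi_period = 2000000000ULL;    /* stand-in: 2s in ns */
        uint64_t delta = psi_period;            /* exactly one period elapsed */

        /* Old test: strict '>' misses the exact-boundary case. */
        uint64_t old_missed = (delta > psi_period) ? delta / psi_period : 0;
        /* New test: '>=' counts the period that really did elapse. */
        uint64_t new_missed = (delta >= psi_period) ? delta / psi_period : 0;

        printf("old missed_periods=%llu, new missed_periods=%llu\n",
               (unsigned long long)old_missed,
               (unsigned long long)new_missed);
        return 0;
}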
4 changes: 3 additions & 1 deletion mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
-        struct address_space *mapping = page_mapping(page);
+        struct address_space *mapping;
         bool page_poisoned = PagePoisoned(page);
         int mapcount;
 
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
                 goto hex_only;
         }
 
+        mapping = page_mapping(page);
+
         /*
          * Avoid VM_BUG_ON() in page_mapcount().
          * page->_mapcount space in struct page is used by sl[aou]b pages to
2 changes: 2 additions & 0 deletions mm/kasan/Makefile
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_tags.o = -pg
+
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 
29 changes: 17 additions & 12 deletions mm/kasan/common.c
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
  * get different tags.
  */
 static u8 assign_tag(struct kmem_cache *cache, const void *object,
-                        bool init, bool krealloc)
+                        bool init, bool keep_tag)
 {
-        /* Reuse the same tag for krealloc'ed objects. */
-        if (krealloc)
+        /*
+         * 1. When an object is kmalloc()'ed, two hooks are called:
+         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
+         *    tag only in the first one.
+         * 2. We reuse the same tag for krealloc'ed objects.
+         */
+        if (keep_tag)
                 return get_tag(object);
 
         /*
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
         return (void *)object;
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-                                        gfp_t flags)
-{
-        return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 }
 
 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
-                        size_t size, gfp_t flags, bool krealloc)
+                        size_t size, gfp_t flags, bool keep_tag)
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                 KASAN_SHADOW_SCALE_SIZE);
 
         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-                tag = assign_tag(cache, object, false, krealloc);
+                tag = assign_tag(cache, object, false, keep_tag);
 
         /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
         kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
         return set_tag(object, tag);
 }
 
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+                                        gfp_t flags)
+{
+        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                 size_t size, gfp_t flags)
 {
-        return __kasan_kmalloc(cache, object, size, flags, false);
+        return __kasan_kmalloc(cache, object, size, flags, true);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
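The keep_tag plumbing above makes kasan_slab_alloc() the single place a fresh tag is chosen, while kasan_kmalloc() preserves whatever tag the object already carries, so the two back-to-back hooks on a kmalloc() no longer disagree. A standalone sketch of the top-byte tag arithmetic this relies on (the 56-bit shift mirrors arm64 Top Byte Ignore; treat the helpers as illustrative, not the kernel's exact definitions):

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56
#define TAG_MASK  (0xffULL << TAG_SHIFT)

static void *set_tag(void *ptr, uint8_t tag)
{
        return (void *)(((uintptr_t)ptr & ~TAG_MASK) |
                        ((uintptr_t)tag << TAG_SHIFT));
}

static uint8_t get_tag(const void *ptr)
{
        return (uintptr_t)ptr >> TAG_SHIFT;
}

static void *reset_tag(const void *ptr)
{
        return (void *)((uintptr_t)ptr & ~TAG_MASK);
}

int main(void)
{
        int x;
        void *p = set_tag(&x, 0xab);

        /* The alloc hook assigns a fresh tag; the kmalloc hook now
         * passes keep_tag=true, so get_tag() returns it unchanged.
         * Only print the tagged pointer; dereferencing it would fault
         * without hardware top-byte-ignore. */
        printf("tag=%#04x untagged=%p\n", get_tag(p), reset_tag(p));
        return 0;
}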
2 changes: 1 addition & 1 deletion mm/kasan/tags.c
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
         int cpu;
 
         for_each_possible_cpu(cpu)
-                per_cpu(prng_state, cpu) = get_random_u32();
+                per_cpu(prng_state, cpu) = (u32)get_cycles();
 }
 
 /*
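Two things motivate this pair of changes: for_each_possible_cpu() only covers every CPU once the cpu masks are set up, which is why the call site moved from kasan_init() into setup_arch() after smp_init_cpus(); and get_cycles() works that early in boot, whereas the entropy pool behind get_random_u32() is not yet initialized, so all CPUs previously could receive identical, predictable seeds. A userspace model of the per-cpu tag PRNG follows; the LCG constants are the common Numerical Recipes pair and are assumed to match the kernel's, clock() merely stands in for get_cycles(), and the tag range is an assumption as well.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NR_CPUS 4
#define TAG_MAX 0xfd    /* assumption: 0xfe/0xff reserved in sw-tags mode */

static uint32_t prng_state[NR_CPUS];

static void init_tags(void)
{
        /* One independent state word per CPU, as in the hunk above. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                prng_state[cpu] = (uint32_t)clock() + cpu;
}

static uint8_t random_tag(int cpu)
{
        /* Linear congruential step, then fold into the tag range. */
        prng_state[cpu] = 1664525 * prng_state[cpu] + 1013904223;
        return (uint8_t)(prng_state[cpu] % (TAG_MAX + 1));
}

int main(void)
{
        init_tags();
        for (int i = 0; i < 4; i++)
                printf("cpu0 tag: %#04x\n", random_tag(0));
        return 0;
}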
10 changes: 7 additions & 3 deletions mm/kmemleak.c
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
         unsigned long flags;
         struct kmemleak_object *object, *parent;
         struct rb_node **link, *rb_parent;
+        unsigned long untagged_ptr;
 
         object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
         if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 
         write_lock_irqsave(&kmemleak_lock, flags);
 
-        min_addr = min(min_addr, ptr);
-        max_addr = max(max_addr, ptr + size);
+        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+        min_addr = min(min_addr, untagged_ptr);
+        max_addr = max(max_addr, untagged_ptr + size);
         link = &object_tree_root.rb_node;
         rb_parent = NULL;
         while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
         unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
         unsigned long *end = _end - (BYTES_PER_POINTER - 1);
         unsigned long flags;
+        unsigned long untagged_ptr;
 
         read_lock_irqsave(&kmemleak_lock, flags);
         for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
                 pointer = *ptr;
                 kasan_enable_current();
 
-                if (pointer < min_addr || pointer >= max_addr)
+                untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+                if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
                         continue;
 
                 /*
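kmemleak tracks a single [min_addr, max_addr) window and rejects scanned words outside it; with software tags, allocations from the same region can carry arbitrary top bytes, so comparing tagged values either inflates the window toward the whole address space or wrongly rejects valid pointers. A small sketch of the effect (addresses invented; the OR mimics how resetting a tag restores a kernel address's all-ones top byte):

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56
#define TAG_MASK  (0xffULL << TAG_SHIFT)

static uint64_t reset_tag(uint64_t ptr)
{
        /* Restore the untagged kernel form: top byte all ones. */
        return ptr | TAG_MASK;
}

int main(void)
{
        uint64_t a = 0x12ff800000001000ULL;     /* tag 0x12 */
        uint64_t b = 0xfeff800000002000ULL;     /* tag 0xfe, same region */

        printf("tagged span:   %#llx\n", (unsigned long long)(b - a));
        printf("untagged span: %#llx\n",
               (unsigned long long)(reset_tag(b) - reset_tag(a)));
        return 0;
}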
27 changes: 15 additions & 12 deletions mm/memory_hotplug.c
@@ -1188,28 +1188,30 @@ static inline int pageblock_free(struct page *page)
         return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+        struct page *page = pfn_to_page(pfn);
+
         /* Ensure the starting page is pageblock-aligned */
-        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+        BUG_ON(pfn & (pageblock_nr_pages - 1));
 
         /* If the entire pageblock is free, move to the end of free page */
         if (pageblock_free(page)) {
                 int order;
                 /* be careful. we don't have locks, page_order can be changed.*/
                 order = page_order(page);
                 if ((order < MAX_ORDER) && (order >= pageblock_order))
-                        return page + (1 << order);
+                        return pfn + (1 << order);
         }
 
-        return page + pageblock_nr_pages;
+        return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+        struct page *page = pfn_to_page(pfn);
         struct zone *zone;
-        unsigned long pfn;
 
         /*
          * We have to be careful here because we are iterating over memory
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-        struct page *page = pfn_to_page(start_pfn);
-        unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
-        struct page *end_page = pfn_to_page(end_pfn);
+        unsigned long end_pfn, pfn;
+
+        end_pfn = min(start_pfn + nr_pages,
+                        zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
         /* Check the starting page of each pageblock within the range */
-        for (; page < end_page; page = next_active_pageblock(page)) {
-                if (!is_pageblock_removable_nolock(page))
+        for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+                if (!is_pageblock_removable_nolock(pfn))
                         return false;
                 cond_resched();
         }
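The underlying issue: with a sparse memmap, the struct page arrays backing different memory sections need not be contiguous, and end_pfn can point one past the zone, so both the `page + N` pointer arithmetic and the pfn_to_page(end_pfn) translation were unsafe, while plain pfn arithmetic is always well-defined. A toy userspace model of the distinction (two deliberately separate arrays stand in for two sections; illustrative only):

#include <stdio.h>

struct page { int dummy; };

#define SECTION_PAGES 4
static struct page section0[SECTION_PAGES];
static struct page section1[SECTION_PAGES];     /* not adjacent to section0 */

static struct page *pfn_to_page(unsigned long pfn)
{
        /* Each "section" has its own array, as with SPARSEMEM. */
        return (pfn < SECTION_PAGES) ? &section0[pfn]
                                     : &section1[pfn - SECTION_PAGES];
}

int main(void)
{
        unsigned long pfn = SECTION_PAGES - 1;
        struct page *bad = &section0[pfn] + 1;          /* walks off the array */
        struct page *good = pfn_to_page(pfn + 1);       /* remapped correctly */

        printf("pointer arithmetic: %p, pfn arithmetic: %p\n",
               (void *)bad, (void *)good);
        return 0;
}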
6 changes: 3 additions & 3 deletions mm/mempolicy.c
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                               nodemask_t *nodes)
 {
         unsigned long copy = ALIGN(maxnode-1, 64) / 8;
-        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+        unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
 
         if (copy > nbytes) {
                 if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
         int uninitialized_var(pval);
         nodemask_t nodes;
 
-        if (nmask != NULL && maxnode < MAX_NUMNODES)
+        if (nmask != NULL && maxnode < nr_node_ids)
                 return -EINVAL;
 
         err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
         unsigned long nr_bits, alloc_size;
         DECLARE_BITMAP(bm, MAX_NUMNODES);
 
-        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+        nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
         alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
         if (nmask)
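With nr_node_ids in place of MAX_NUMNODES, get_mempolicy() accepts nodemask buffers sized for the nodes the machine actually has, instead of demanding room for the compile-time maximum (typically 1024 bits). A sketch of the newly-valid call, assuming the numaif.h wrapper from libnuma and a machine with at most 64 nodes:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        unsigned long nodemask[1] = { 0 };      /* room for 64 nodes */
        int mode;

        /* Before the fix, maxnode < MAX_NUMNODES returned -EINVAL;
         * now maxnode only needs to cover nr_node_ids. */
        if (get_mempolicy(&mode, nodemask, 64, NULL, 0) != 0)
                perror("get_mempolicy");
        else
                printf("mode=%d nodemask[0]=%#lx\n", mode, nodemask[0]);
        return 0;
}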
12 changes: 12 additions & 0 deletions mm/page_alloc.c
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
 
         max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
                         watermark_boost_factor, 10000);
+
+        /*
+         * high watermark may be uninitialised if fragmentation occurs
+         * very early in boot so do not boost. We do not fall
+         * through and boost by pageblock_nr_pages as failing
+         * allocations that early means that reclaim is not going
+         * to help and it may even be impossible to reclaim the
+         * boosted watermark resulting in a hang.
+         */
+        if (!max_boost)
+                return;
+
         max_boost = max(pageblock_nr_pages, max_boost);
 
         zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
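For reference, mult_frac(x, numer, denom) scales x by numer/denom without letting the intermediate product overflow, so a zero high watermark propagates straight through to max_boost == 0, which the new early return treats as "not initialised yet, do not boost". A userspace rendering of the macro's arithmetic (the 15000 factor is an assumption matching the sysctl's usual default):

#include <stdio.h>

static unsigned long mult_frac(unsigned long x, unsigned long numer,
                               unsigned long denom)
{
        /* Split into quotient and remainder so x * numer never forms. */
        unsigned long quot = x / denom;
        unsigned long rem = x % denom;

        return quot * numer + (rem * numer) / denom;
}

int main(void)
{
        /* A 4096-page high watermark boosted by 150%. */
        printf("%lu\n", mult_frac(4096, 15000, 10000));        /* 6144 */
        /* An uninitialised high watermark of 0 yields max_boost == 0. */
        printf("%lu\n", mult_frac(0, 15000, 10000));           /* 0 */
        return 0;
}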
10 changes: 7 additions & 3 deletions mm/shmem.c
@@ -2854,10 +2854,14 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
         * No ordinary (disk based) filesystem counts links as inodes;
         * but each new link needs a new dentry, pinning lowmem, and
         * tmpfs dentries cannot be pruned until they are unlinked.
+        * But if an O_TMPFILE file is linked into the tmpfs, the
+        * first link must skip that, to get the accounting right.
         */
-        ret = shmem_reserve_inode(inode->i_sb);
-        if (ret)
-                goto out;
+        if (inode->i_nlink) {
+                ret = shmem_reserve_inode(inode->i_sb);
+                if (ret)
+                        goto out;
+        }
 
         dir->i_size += BOGO_DIRENT_SIZE;
         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
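The scenario being fixed, from userspace: a file created with O_TMPFILE on tmpfs starts with i_nlink == 0 and already consumed its inode reservation at open() time, so giving it its first name via linkat() must not reserve a second inode. A minimal sketch (assumes /tmp is a tmpfs mount and uses the /proc/self/fd pattern documented in open(2)):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char path[64];
        /* Anonymous tmpfile: no name yet, i_nlink == 0. */
        int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
        /* Give the tmpfile its first link; this path used to be
         * double-counted against the tmpfs inode limit. */
        if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/linked", AT_SYMLINK_FOLLOW))
                perror("linkat");
        close(fd);
        return 0;
}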
15 changes: 11 additions & 4 deletions mm/slab.c
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
         void *freelist;
         void *addr = page_address(page);
 
-        page->s_mem = kasan_reset_tag(addr) + colour_off;
+        page->s_mem = addr + colour_off;
         page->active = 0;
 
         if (OBJFREELIST_SLAB(cachep))
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
                 /* Slab management obj is off-slab. */
                 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
                                                 local_flags, nodeid);
+                freelist = kasan_reset_tag(freelist);
                 if (!freelist)
                         return NULL;
         } else {
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
         offset *= cachep->colour_off;
 
+        /*
+         * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
+         * page_address() in the latter returns a non-tagged pointer,
+         * as it should be for slab pages.
+         */
+        kasan_poison_slab(page);
+
         /* Get slab management. */
         freelist = alloc_slabmgmt(cachep, page, offset,
                         local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
         slab_map_pages(cachep, page, freelist);
 
-        kasan_poison_slab(page);
         cache_init_objs(cachep, page);
 
         if (gfpflags_allow_blocking(local_flags))
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-        ret = kasan_slab_alloc(cachep, ret, flags);
         trace_kmem_cache_alloc(_RET_IP_, ret,
                                cachep->object_size, cachep->size, flags);
 
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
         void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-        ret = kasan_slab_alloc(cachep, ret, flags);
         trace_kmem_cache_alloc_node(_RET_IP_, ret,
                                     cachep->object_size, cachep->size,
                                     flags, nodeid);
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
         unsigned int objnr;
         unsigned long offset;
 
+        ptr = kasan_reset_tag(ptr);
+
         /* Find and validate object. */
         cachep = page->slab_cache;
         objnr = obj_to_index(cachep, page, (void *)ptr);
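The __check_heap_object() hunk matters because hardened usercopy computes an object's offset by pointer arithmetic against the slab page's untagged base address; if the checked pointer still carries its KASAN tag, the subtraction mixes top bytes and produces a nonsense offset. A standalone illustration (addresses invented, same top-byte convention as the sketches above):

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56
#define TAG_MASK  (0xffULL << TAG_SHIFT)

int main(void)
{
        uint64_t base = 0xffff800000100000ULL;  /* page_address(): untagged */
        /* Object at offset 0x40, top byte replaced by tag 0xab. */
        uint64_t obj  = ((base + 0x40) & ~TAG_MASK) | (0xabULL << TAG_SHIFT);

        printf("offset with tag:    %#llx\n", (unsigned long long)(obj - base));
        printf("offset without tag: %#llx\n",
               (unsigned long long)((obj | TAG_MASK) - base));
        return 0;
}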
[Diffs for the remaining changed files are not shown.]
