mm: vmalloc: support more granular vrealloc() sizing
Introduce struct vm_struct::requested_size so that the requested
(re)allocation size is retained separately from the allocated area
size.  This means that KASAN will poison exactly the spans of bytes
that were requested.  It also means vrealloc() can grow the usable
portion of an allocation in place whenever the existing area's
allocation is already large enough to cover the new size.

Link: https://lkml.kernel.org/r/20250426001105.it.679-kees@kernel.org
Fixes: 3ddc2fe ("mm: vmalloc: implement vrealloc()")
Signed-off-by: Kees Cook <kees@kernel.org>
Reported-by: Erhard Furtner <erhard_f@mailbox.org>
Closes: https://lore.kernel.org/all/20250408192503.6149a816@outsider.home/
Reviewed-by: Danilo Krummrich <dakr@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Kees Cook authored and Andrew Morton committed May 8, 2025
1 parent a8efadd commit a0309fa
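
To make the requested-size vs. area-size distinction concrete, here is a
minimal caller-side sketch (illustration only, not part of this commit;
example() and the byte counts are hypothetical, and PAGE_SIZE is assumed
to be 4096):

/* Illustration only -- not from this commit. Assumes PAGE_SIZE == 4096. */
#include <linux/vmalloc.h>

static int example(void)
{
	char *buf, *tmp;

	/*
	 * Request 100 bytes: the backing area is page-granular, so
	 * get_vm_area_size() reports 4096 while requested_size stays 100,
	 * and KASAN poisons bytes [100, 4096).
	 */
	buf = vmalloc(100);
	if (!buf)
		return -ENOMEM;

	/*
	 * Growing to 200 bytes still fits the 4096-byte area, so the new
	 * "size <= alloced_size" path in this patch can unpoison bytes
	 * [100, 200), zero them under want_init_on_alloc(), and record
	 * the new requested_size without mapping a fresh area.
	 */
	tmp = vrealloc(buf, 200, GFP_KERNEL);
	if (!tmp) {
		vfree(buf);
		return -ENOMEM;
	}
	buf = tmp;

	vfree(buf);
	return 0;
}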
Showing 2 changed files with 25 additions and 7 deletions.
1 change: 1 addition & 0 deletions include/linux/vmalloc.h
@@ -61,6 +61,7 @@ struct vm_struct {
 	unsigned int		nr_pages;
 	phys_addr_t		phys_addr;
 	const void		*caller;
+	unsigned long		requested_size;
 };
 
 struct vmap_area {
31 changes: 24 additions & 7 deletions mm/vmalloc.c
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
-	vm->size = va_size(va);
+	vm->size = vm->requested_size = va_size(va);
 	vm->caller = caller;
 	va->vm = vm;
 }
@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
 
 	area->flags = flags;
 	area->caller = caller;
+	area->requested_size = requested_size;
 
 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {
@@ -4063,6 +4064,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
  */
 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 {
+	struct vm_struct *vm = NULL;
+	size_t alloced_size = 0;
 	size_t old_size = 0;
 	void *n;
 
@@ -4072,30 +4075,44 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	}
 
 	if (p) {
-		struct vm_struct *vm;
-
 		vm = find_vm_area(p);
 		if (unlikely(!vm)) {
 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
 			return NULL;
 		}
 
-		old_size = get_vm_area_size(vm);
+		alloced_size = get_vm_area_size(vm);
+		old_size = vm->requested_size;
+		if (WARN(alloced_size < old_size,
+			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+			return NULL;
 	}
 
 	/*
 	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
 	 * would be a good heuristic for when to shrink the vm_area?
 	 */
 	if (size <= old_size) {
-		/* Zero out spare memory. */
-		if (want_init_on_alloc(flags))
+		/* Zero out "freed" memory. */
+		if (want_init_on_free())
 			memset((void *)p + size, 0, old_size - size);
+		vm->requested_size = size;
 		kasan_poison_vmalloc(p + size, old_size - size);
-		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
 		return (void *)p;
 	}
 
+	/*
+	 * We already have the bytes available in the allocation; use them.
+	 */
+	if (size <= alloced_size) {
+		kasan_unpoison_vmalloc(p + old_size, size - old_size,
+				       KASAN_VMALLOC_PROT_NORMAL);
+		/* Zero out "alloced" memory. */
+		if (want_init_on_alloc(flags))
+			memset((void *)p + old_size, 0, size - old_size);
+		vm->requested_size = size;
+	}
+
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
 	n = __vmalloc_noprof(size, flags);
 	if (!n)
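
The rewritten vrealloc_noprof() above distinguishes three size regimes.
A small userspace model of just that branch structure (illustration
only; ALLOCED_SIZE and the sample sizes here are assumptions):

/* Userspace model of vrealloc_noprof()'s three size regimes (illustration only). */
#include <stdio.h>
#include <stddef.h>

#define ALLOCED_SIZE 4096	/* assumed page-granular area size */

static const char *regime(size_t old_size, size_t size)
{
	if (size <= old_size)
		return "shrink in place: poison [size, old_size)";
	if (size <= ALLOCED_SIZE)
		return "grow within existing area: unpoison [old_size, size)";
	return "grow beyond area: fall back to a new allocation";
}

int main(void)
{
	printf("100 -> 50:   %s\n", regime(100, 50));
	printf("100 -> 200:  %s\n", regime(100, 200));
	printf("100 -> 8192: %s\n", regime(100, 8192));
	return 0;
}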
