Commit f67dd6c

Merge tag 'slab-for-6.1-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:
 "Most are small fixups as described below.

  The !CONFIG_TRACING fix is a bit bigger and would normally be done in
  the next merge window as part of upcoming hardening changes. But we
  realized it can make the kmalloc waste tracking introduced in this
  window inaccurate, so decided to go with it now.

  Summary:

   - Remove !CONFIG_TRACING kmalloc() wrappers intended to save a
     function call, due to incompatibility with the recently introduced
     wasted space tracking and planned hardening changes (a sketch of
     the incompatibility follows the patch list below).

   - A tracing parameter regression fix, by Kees Cook.

   - Two kernel-doc warning fixups, by Lukas Bulwahn and myself."

* tag 'slab-for-6.1-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm, slab: remove duplicate kernel-doc comment for ksize()
  mm/slab_common: Restore passing "caller" for tracing
  mm/slab: remove !CONFIG_TRACING variants of kmalloc_[node_]trace()
  mm/slab_common: repair kernel-doc for __ksize()
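
For orientation, here is why an inline wrapper can skew the new waste accounting, as far as the code in this diff shows (an editorial sketch, not part of the commit). With CONFIG_TRACING=n, the old inline kmalloc_trace() allocated through kmem_cache_alloc(), which never sees the caller's requested size, so per-allocation waste (bucket size minus requested size) could not be recorded:

/* Removed CONFIG_TRACING=n wrapper (see the include/linux/slab.h hunk
 * below); the requested 'size' reaches only KASAN, never the slab
 * allocator itself. */
static __always_inline void *kmalloc_trace(struct kmem_cache *s,
					   gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);	/* 'size' not forwarded */

	return kasan_kmalloc(s, ret, size, flags);
}

The always-out-of-line definition kept in mm/slab_common.c allocates via __kmem_cache_alloc_node(), which takes the requested size as a parameter, so the allocator can account it.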
Linus Torvalds committed Nov 9, 2022
2 parents f141df3 + c18c20f commit f67dd6c
Showing 2 changed files with 4 additions and 43 deletions.
23 changes: 0 additions & 23 deletions include/linux/slab.h
@@ -470,35 +470,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									  __malloc;
 
-#ifdef CONFIG_TRACING
 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
		    __assume_kmalloc_alignment __alloc_size(3);
 
 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size) __assume_kmalloc_alignment
						__alloc_size(4);
-#else /* CONFIG_TRACING */
-/* Save a function call when CONFIG_TRACING=n */
-static __always_inline __alloc_size(3)
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-{
-	void *ret = kmem_cache_alloc(s, flags);
-
-	ret = kasan_kmalloc(s, ret, size, flags);
-	return ret;
-}
-
-static __always_inline __alloc_size(4)
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-			 int node, size_t size)
-{
-	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
-
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-#endif /* CONFIG_TRACING */
 
 void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
					      __alloc_size(1);
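What the removal costs is one real function call on the constant-size kmalloc() fast path when tracing is off. For context, the inline dispatch in include/linux/slab.h looks roughly like this in the 6.1 sources (a slightly abridged sketch, not part of this diff):

static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		index = kmalloc_index(size);
		if (!index)
			return ZERO_SIZE_PTR;

		/* after this series, always an out-of-line call */
		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
	}
	return __kmalloc(size, flags);
}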
24 changes: 4 additions & 20 deletions mm/slab_common.c
@@ -941,7 +941,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = __kmalloc_large_node(size, flags, node);
-		trace_kmalloc(_RET_IP_, ret, size,
+		trace_kmalloc(caller, ret, size,
			      PAGE_SIZE << get_order(size), flags, node);
 		return ret;
 	}
@@ -953,7 +953,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 
 	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
 	ret = kasan_kmalloc(s, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret, size, s->size, flags, node);
+	trace_kmalloc(caller, ret, size, s->size, flags, node);
 	return ret;
 }
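
The two hunks above are the tracing parameter fix: inside __do_kmalloc_node(), _RET_IP_ evaluates to the helper's own return address, i.e. its immediate wrapper, not the user's call site. The exported wrappers capture the real call site and pass it down as 'caller', so the tracepoint must use that argument. One such wrapper from the same file, roughly as in the 6.1 sources (sketch):

void *__kmalloc(size_t size, gfp_t flags)
{
	/* here _RET_IP_ still is the user's call site; hand it down */
	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}

With the fix, the kmalloc trace event's call_site field again names the code that called kmalloc() rather than the wrapper.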

@@ -1010,15 +1010,15 @@ EXPORT_SYMBOL(kfree);
 
 /**
  * __ksize -- Report full size of underlying allocation
- * @objp: pointer to the object
+ * @object: pointer to the object
  *
  * This should only be used internally to query the true size of allocations.
  * It is not meant to be a way to discover the usable size of an allocation
  * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
  * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
  * and/or FORTIFY_SOURCE.
  *
- * Return: size of the actual memory used by @objp in bytes
+ * Return: size of the actual memory used by @object in bytes
  */
 size_t __ksize(const void *object)
 {
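
The kernel-doc above points to kmalloc_size_roundup() as the supported way to use slack space: round the request up before allocating instead of probing afterwards. A minimal usage sketch (variable names are illustrative):

size_t want = 100;
size_t len = kmalloc_size_roundup(want);	/* whole bucket, e.g. 128 */
char *buf = kmalloc(len, GFP_KERNEL);		/* all 'len' bytes usable */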
@@ -1040,7 +1040,6 @@ size_t __ksize(const void *object)
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 
-#ifdef CONFIG_TRACING
 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
@@ -1064,7 +1063,6 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_node_trace);
-#endif /* !CONFIG_TRACING */
 #endif /* !CONFIG_SLOB */
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
@@ -1411,20 +1409,6 @@ void kfree_sensitive(const void *p)
 }
 EXPORT_SYMBOL(kfree_sensitive);
 
-/**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
- *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
- *
- * Return: size of the actual memory used by @objp in bytes
- */
 size_t ksize(const void *objp)
 {
 	size_t size;
