mm/kasan: Convert to struct folio and struct slab
KASAN accesses some slab-related struct page fields, so convert those
accesses to struct slab. Some places become a bit simpler thanks to
kasan_addr_to_slab() encapsulating the PageSlab flag check through
virt_to_slab(). When resolving an object address to either a real slab
or a large kmalloc allocation, use struct folio as the intermediate
type for testing the slab flag, to avoid an unnecessary implicit
compound_head().

[ vbabka@suse.cz: use struct folio, adjust to differences in previous
  patches ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
Matthew Wilcox (Oracle) authored and Vlastimil Babka committed Jan 6, 2022
1 parent 5075701 commit 6e48a96
Showing 9 changed files with 42 additions and 28 deletions.
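
The core of the conversion is the lookup pattern the changelog describes: resolve the object address to a struct folio first, and only then decide whether it backs a slab or a large kmalloc allocation. The following is a condensed sketch of that pattern, not part of the commit itself; example_object_cache() is a hypothetical helper and the in-kernel folio/slab APIs are assumed:

    /*
     * Sketch: resolve an object pointer to either its kmem_cache (slab-backed)
     * or NULL (large kmalloc backed directly by pages). virt_to_folio() does
     * the compound_head() lookup once; folio_test_slab(), folio_size() and
     * folio_slab() then work on the folio with no further implicit conversion.
     */
    static struct kmem_cache *example_object_cache(const void *ptr)
    {
            struct folio *folio = virt_to_folio(ptr);

            if (!folio_test_slab(folio))
                    return NULL;    /* large kmalloc; size is folio_size(folio) */

            return folio_slab(folio)->slab_cache;
    }

The changes below apply this directly in __kasan_slab_free_mempool() and, via virt_to_slab()/kasan_addr_to_slab(), in the quarantine and reporting paths.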
9 changes: 5 additions & 4 deletions include/linux/kasan.h
@@ -9,6 +9,7 @@
 
 struct kmem_cache;
 struct page;
+struct slab;
 struct vm_struct;
 struct task_struct;
 
@@ -193,11 +194,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 	return 0;
 }
 
-void __kasan_poison_slab(struct page *page);
-static __always_inline void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
 {
 	if (kasan_enabled())
-		__kasan_poison_slab(page);
+		__kasan_poison_slab(slab);
 }
 
 void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -322,7 +323,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
23 changes: 13 additions & 10 deletions mm/kasan/common.c
@@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 }
 #endif
 
-void __kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	unsigned long i;
 
 	for (i = 0; i < compound_nr(page); i++)
@@ -401,22 +402,24 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-	struct page *page;
+	struct folio *folio;
 
-	page = virt_to_head_page(ptr);
+	folio = virt_to_folio(ptr);
 
 	/*
 	 * Even though this function is only called for kmem_cache_alloc and
 	 * kmalloc backed mempool allocations, those allocations can still be
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(page))) {
+	if (unlikely(!folio_test_slab(folio))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
-		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
 	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 	}
 }
 
@@ -560,7 +563,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
@@ -572,13 +575,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	 */
 	kasan_unpoison(object, size, false);
 
-	page = virt_to_head_page(object);
+	slab = virt_to_slab(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
+		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
8 changes: 4 additions & 4 deletions mm/kasan/generic.c
@@ -330,16 +330,16 @@ DEFINE_ASAN_SET_SHADOW(f8);
 
 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(page)))
+	if (is_kfence_address(addr) || !slab)
 		return;
 
-	cache = page->slab_cache;
-	object = nearest_obj(cache, page_slab(page), addr);
+	cache = slab->slab_cache;
+	object = nearest_obj(cache, slab, addr);
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (!alloc_meta)
 		return;
1 change: 1 addition & 0 deletions mm/kasan/kasan.h
@@ -265,6 +265,7 @@ bool kasan_report(unsigned long addr, size_t size,
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
 struct page *kasan_addr_to_page(const void *addr);
+struct slab *kasan_addr_to_slab(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
2 changes: 1 addition & 1 deletion mm/kasan/quarantine.c
@@ -117,7 +117,7 @@ static unsigned long quarantine_batch_size;
 
 static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
 {
-	return virt_to_head_page(qlink)->slab_cache;
+	return virt_to_slab(qlink)->slab_cache;
 }
 
 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
13 changes: 11 additions & 2 deletions mm/kasan/report.c
@@ -150,6 +150,14 @@ struct page *kasan_addr_to_page(const void *addr)
 	return NULL;
 }
 
+struct slab *kasan_addr_to_slab(const void *addr)
+{
+	if ((addr >= (void *)PAGE_OFFSET) &&
+			(addr < high_memory))
+		return virt_to_slab(addr);
+	return NULL;
+}
+
 static void describe_object_addr(struct kmem_cache *cache, void *object,
 				 const void *addr)
 {
@@ -248,8 +256,9 @@ static void print_address_description(void *addr, u8 tag)
 	pr_err("\n");
 
 	if (page && PageSlab(page)) {
-		struct kmem_cache *cache = page->slab_cache;
-		void *object = nearest_obj(cache, page_slab(page), addr);
+		struct slab *slab = page_slab(page);
+		struct kmem_cache *cache = slab->slab_cache;
+		void *object = nearest_obj(cache, slab, addr);
 
 		describe_object(cache, object, addr, tag);
 	}
10 changes: 5 additions & 5 deletions mm/kasan/report_tags.c
@@ -12,18 +12,18 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 #ifdef CONFIG_KASAN_TAGS_IDENTIFY
 	struct kasan_alloc_meta *alloc_meta;
 	struct kmem_cache *cache;
-	struct page *page;
+	struct slab *slab;
 	const void *addr;
 	void *object;
 	u8 tag;
 	int i;
 
 	tag = get_tag(info->access_addr);
 	addr = kasan_reset_tag(info->access_addr);
-	page = kasan_addr_to_page(addr);
-	if (page && PageSlab(page)) {
-		cache = page->slab_cache;
-		object = nearest_obj(cache, page_slab(page), (void *)addr);
+	slab = kasan_addr_to_slab(addr);
+	if (slab) {
+		cache = slab->slab_cache;
+		object = nearest_obj(cache, slab, (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);
 
 		if (alloc_meta) {
2 changes: 1 addition & 1 deletion mm/slab.c
@@ -2604,7 +2604,7 @@ static struct slab *cache_grow_begin(struct kmem_cache *cachep,
 	 * page_address() in the latter returns a non-tagged pointer,
 	 * as it should be for slab pages.
 	 */
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, slab, offset,
2 changes: 1 addition & 1 deletion mm/slub.c
@@ -1961,7 +1961,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	slab->slab_cache = s;
 
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	start = slab_address(slab);
 
