zram: factor out ZRAM_HUGE write
zram_write_page() currently handles three cases: ZRAM_SAME page stores
(already factored out), regular page stores, and ZRAM_HUGE page stores.

ZRAM_HUGE handling adds a significant amount of complexity, so handle it in
a separate function instead.  This simplifies the zs_handle allocation
slow path, which no longer needs to deal with the ZRAM_HUGE case.  The
ZRAM_HUGE zs_handle allocation, on the other hand, can now drop
__GFP_KSWAPD_RECLAIM, because ZRAM_HUGE is handled in preemptible context
(outside of the local-lock scope).
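
As a sketch (simplified from the diff below, not verbatim kernel code), the
allocation split after this patch looks like this: compressible pages keep
the optimistic fast path with a preemptible GFP_NOIO fallback, while
ZRAM_HUGE pages go straight to a single preemptible allocation:

	/* Compressible pages: optimistic attempt without direct reclaim,
	 * because the per-CPU compression stream (local lock) is held. */
	handle = zs_malloc(zram->mem_pool, comp_len,
			   __GFP_KSWAPD_RECLAIM | __GFP_NOWARN |
			   __GFP_HIGHMEM | __GFP_MOVABLE);
	if (IS_ERR_VALUE(handle)) {
		/* Slow path: put the stream, allocate preemptibly, then
		 * recompress with the handle already in hand. */
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		handle = zs_malloc(zram->mem_pool, comp_len,
				   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
		/* ... goto compress_again ... */
	}

	/* ZRAM_HUGE pages: write_incompressible_page() runs preemptibly
	 * (the stream was already put), so one allocation suffices and
	 * __GFP_KSWAPD_RECLAIM is not needed. */
	handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
			   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);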

Link: https://lkml.kernel.org/r/20241218063513.297475-5-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Sergey Senozhatsky authored and Andrew Morton committed Jan 26, 2025
1 parent a5cd78a commit ef932cd
Showing 1 changed file with 83 additions and 53 deletions.
drivers/block/zram/zram_drv.c
@@ -132,6 +132,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
 			zram_test_flag(zram, index, ZRAM_WB);
 }
 
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+	do {
+		if (cur_max >= pages)
+			return;
+	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+					  &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+	unsigned long alloced_pages;
+
+	alloced_pages = zs_get_total_pages(zram->mem_pool);
+	update_used_max(zram, alloced_pages);
+
+	return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
 #if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
@@ -266,18 +287,6 @@ static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
 }
 #endif
 
-static inline void update_used_max(struct zram *zram,
-				   const unsigned long pages)
-{
-	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
-
-	do {
-		if (cur_max >= pages)
-			return;
-	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
-					  &cur_max, pages));
-}
-
 static inline void zram_fill_page(void *ptr, unsigned long len,
 				  unsigned long value)
 {
@@ -1639,13 +1648,54 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
 	return 0;
 }
 
+static int write_incompressible_page(struct zram *zram, struct page *page,
+				     u32 index)
+{
+	unsigned long handle;
+	void *src, *dst;
+
+	/*
+	 * This function is called from preemptible context so we don't need
+	 * to do optimistic and fallback to pessimistic handle allocation,
+	 * like we do for compressible pages.
+	 */
+	handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+			   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+	if (IS_ERR_VALUE(handle))
+		return PTR_ERR((void *)handle);
+
+	if (!zram_can_store_page(zram)) {
+		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+		zs_free(zram->mem_pool, handle);
+		return -ENOMEM;
+	}
+
+	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+	src = kmap_local_page(page);
+	memcpy(dst, src, PAGE_SIZE);
+	kunmap_local(src);
+	zs_unmap_object(zram->mem_pool, handle);
+
+	zram_slot_lock(zram, index);
+	zram_set_flag(zram, index, ZRAM_HUGE);
+	zram_set_handle(zram, index, handle);
+	zram_set_obj_size(zram, index, PAGE_SIZE);
+	zram_slot_unlock(zram, index);
+
+	atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+	atomic64_inc(&zram->stats.huge_pages);
+	atomic64_inc(&zram->stats.huge_pages_since);
+	atomic64_inc(&zram->stats.pages_stored);
+
+	return 0;
+}
+
 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 {
 	int ret = 0;
-	unsigned long alloced_pages;
 	unsigned long handle = -ENOMEM;
 	unsigned int comp_len = 0;
-	void *src, *dst, *mem;
+	void *dst, *mem;
 	struct zcomp_strm *zstrm;
 	unsigned long element = 0;
 	bool same_filled;
@@ -1663,10 +1713,10 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 
 compress_again:
 	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
-	src = kmap_local_page(page);
+	mem = kmap_local_page(page);
 	ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
-			     src, &comp_len);
-	kunmap_local(src);
+			     mem, &comp_len);
+	kunmap_local(mem);
 
 	if (unlikely(ret)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1675,8 +1725,11 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 		return ret;
 	}
 
-	if (comp_len >= huge_class_size)
-		comp_len = PAGE_SIZE;
+	if (comp_len >= huge_class_size) {
+		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+		return write_incompressible_page(zram, page, index);
+	}
+
 	/*
 	 * handle allocation has 2 paths:
 	 * a) fast path is executed with preemption disabled (for
@@ -1692,66 +1745,43 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	 */
 	if (IS_ERR_VALUE(handle))
 		handle = zs_malloc(zram->mem_pool, comp_len,
-				__GFP_KSWAPD_RECLAIM |
-				__GFP_NOWARN |
-				__GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				   __GFP_KSWAPD_RECLAIM |
+				   __GFP_NOWARN |
+				   __GFP_HIGHMEM |
+				   __GFP_MOVABLE);
 	if (IS_ERR_VALUE(handle)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 		atomic64_inc(&zram->stats.writestall);
 		handle = zs_malloc(zram->mem_pool, comp_len,
-				GFP_NOIO | __GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				   GFP_NOIO | __GFP_HIGHMEM |
+				   __GFP_MOVABLE);
 		if (IS_ERR_VALUE(handle))
 			return PTR_ERR((void *)handle);
 
-		if (comp_len != PAGE_SIZE)
-			goto compress_again;
-		/*
-		 * If the page is not compressible, you need to acquire the
-		 * lock and execute the code below. The zcomp_stream_get()
-		 * call is needed to disable the cpu hotplug and grab the
-		 * zstrm buffer back. It is necessary that the dereferencing
-		 * of the zstrm variable below occurs correctly.
-		 */
 		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+		goto compress_again;
 	}
 
-	alloced_pages = zs_get_total_pages(zram->mem_pool);
-	update_used_max(zram, alloced_pages);
-
-	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+	if (!zram_can_store_page(zram)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 		zs_free(zram->mem_pool, handle);
 		return -ENOMEM;
 	}
 
 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
-
-	src = zstrm->buffer;
-	if (comp_len == PAGE_SIZE)
-		src = kmap_local_page(page);
-	memcpy(dst, src, comp_len);
-	if (comp_len == PAGE_SIZE)
-		kunmap_local(src);
-
+	memcpy(dst, zstrm->buffer, comp_len);
+
 	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 	zs_unmap_object(zram->mem_pool, handle);
-	atomic64_add(comp_len, &zram->stats.compr_data_size);
 
 	zram_slot_lock(zram, index);
-	if (comp_len == PAGE_SIZE) {
-		zram_set_flag(zram, index, ZRAM_HUGE);
-		atomic64_inc(&zram->stats.huge_pages);
-		atomic64_inc(&zram->stats.huge_pages_since);
-	}
-
 	zram_set_handle(zram, index, handle);
 	zram_set_obj_size(zram, index, comp_len);
 	zram_slot_unlock(zram, index);
 
 	/* Update stats */
 	atomic64_inc(&zram->stats.pages_stored);
+	atomic64_add(comp_len, &zram->stats.compr_data_size);
 
 	return ret;
 }

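Side note: the relocated update_used_max() is a lock-free running-maximum
update.  A standalone C11 analogue (hypothetical userspace names, for
illustration only; the kernel uses atomic_long_try_cmpxchg()) behaves the
same way:

	#include <stdatomic.h>

	/* Raise *max to pages if pages exceeds the current maximum. */
	static void update_max(_Atomic long *max, long pages)
	{
		long cur = atomic_load(max);

		do {
			/* A concurrent update already recorded a larger value. */
			if (cur >= pages)
				return;
			/* On failure, cur is refreshed with the latest value. */
		} while (!atomic_compare_exchange_weak(max, &cur, pages));
	}

The compare-exchange only succeeds while the stored maximum still equals the
value last read, so concurrent writers can never lower the recorded peak.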
