staging: zsmalloc: add mapping modes
This patch improves mapping performance in zsmalloc by getting
usage information from the user in the form of a "mapping mode"
and using it to avoid unnecessary copying for objects that span
pages.

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Seth Jennings authored and Greg Kroah-Hartman committed Jul 9, 2012
1 parent 166cfda commit b741851
Showing 5 changed files with 39 additions and 18 deletions.
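For orientation, here is a minimal sketch of how callers use the new third argument; it mirrors the zcache/zram call sites updated below. This is illustrative only and not part of the patch: pool, handle, src, dst and len stand in for caller-provided values, and the mode only changes behaviour when the mapped object spans pages.

/* Illustrative usage of the new mapping-mode argument (not part of the patch). */

/* Read path: ZS_MM_RO, so no copy-out back to the zspage at unmap time. */
void *buf = zs_map_object(pool, handle, ZS_MM_RO);
memcpy(dst, buf, len);
zs_unmap_object(pool, handle);

/* Write path: ZS_MM_WO, so no copy-in from the zspage at map time. */
buf = zs_map_object(pool, handle, ZS_MM_WO);
memcpy(buf, src, len);
zs_unmap_object(pool, handle);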
drivers/staging/zcache/zcache-main.c (3 additions, 3 deletions)
@@ -716,7 +716,7 @@ static unsigned long zv_create(struct zs_pool *pool, uint32_t pool_id,
 		goto out;
 	atomic_inc(&zv_curr_dist_counts[chunks]);
 	atomic_inc(&zv_cumul_dist_counts[chunks]);
-	zv = zs_map_object(pool, handle);
+	zv = zs_map_object(pool, handle, ZS_MM_WO);
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool_id;
@@ -735,7 +735,7 @@ static void zv_free(struct zs_pool *pool, unsigned long handle)
 	uint16_t size;
 	int chunks;
 
-	zv = zs_map_object(pool, handle);
+	zv = zs_map_object(pool, handle, ZS_MM_RW);
 	ASSERT_SENTINEL(zv, ZVH);
 	size = zv->size + sizeof(struct zv_hdr);
 	INVERT_SENTINEL(zv, ZVH);
@@ -757,7 +757,7 @@ static void zv_decompress(struct page *page, unsigned long handle)
 	int ret;
 	struct zv_hdr *zv;
 
-	zv = zs_map_object(zcache_host.zspool, handle);
+	zv = zs_map_object(zcache_host.zspool, handle, ZS_MM_RO);
 	BUG_ON(zv->size == 0);
 	ASSERT_SENTINEL(zv, ZVH);
 	to_va = kmap_atomic(page);
drivers/staging/zram/zram_drv.c (4 additions, 3 deletions)
@@ -220,7 +220,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	uncmem = user_mem;
 	clen = PAGE_SIZE;
 
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
+	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
+				ZS_MM_RO);
 
 	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
 				uncmem, &clen);
@@ -258,7 +259,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle);
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
 				mem, &clen);
 	zs_unmap_object(zram->mem_pool, handle);
@@ -351,7 +352,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		ret = -ENOMEM;
 		goto out;
 	}
-	cmem = zs_map_object(zram->mem_pool, handle);
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
 	memcpy(cmem, src, clen);
 
drivers/staging/zsmalloc/zsmalloc-main.c (18 additions, 11 deletions)
@@ -484,9 +484,6 @@ static void zs_copy_map_object(char *buf, struct page *firstpage,
 	sizes[0] = PAGE_SIZE - off;
 	sizes[1] = size - sizes[0];
 
-	/* disable page faults to match kmap_atomic() return conditions */
-	pagefault_disable();
-
 	/* copy object to per-cpu buffer */
 	addr = kmap_atomic(pages[0]);
 	memcpy(buf, addr + off, sizes[0]);
@@ -517,9 +514,6 @@ static void zs_copy_unmap_object(char *buf, struct page *firstpage,
 	addr = kmap_atomic(pages[1]);
 	memcpy(addr, buf + sizes[0], sizes[1]);
 	kunmap_atomic(addr);
-
-	/* enable page faults to match kunmap_atomic() return conditions */
-	pagefault_enable();
 }
 
 static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
@@ -754,7 +748,8 @@ EXPORT_SYMBOL_GPL(zs_free);
  *
  * This function returns with preemption and page faults disabled.
  */
-void *zs_map_object(struct zs_pool *pool, unsigned long handle)
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+			enum zs_mapmode mm)
 {
 	struct page *page;
 	unsigned long obj_idx, off;
@@ -778,7 +773,11 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle)
 		return area->vm_addr + off;
 	}
 
-	zs_copy_map_object(area->vm_buf, page, off, class->size);
+	/* disable page faults to match kmap_atomic() return conditions */
+	pagefault_disable();
+
+	if (mm != ZS_MM_WO)
+		zs_copy_map_object(area->vm_buf, page, off, class->size);
 	area->vm_addr = NULL;
 	return area->vm_buf;
 }
@@ -795,13 +794,16 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	struct mapping_area *area;
 
 	area = &__get_cpu_var(zs_map_area);
+	/* single-page object fastpath */
 	if (area->vm_addr) {
-		/* single-page object fastpath */
 		kunmap_atomic(area->vm_addr);
-		put_cpu_var(zs_map_area);
-		return;
+		goto out;
 	}
 
+	/* no write fastpath */
+	if (area->vm_mm == ZS_MM_RO)
+		goto pfenable;
+
 	BUG_ON(!handle);
 
 	obj_handle_to_location(handle, &page, &obj_idx);
@@ -810,6 +812,11 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	off = obj_idx_to_offset(page, obj_idx, class->size);
 
 	zs_copy_unmap_object(area->vm_buf, page, off, class->size);
+
+pfenable:
+	/* enable page faults to match kunmap_atomic() return conditions */
+	pagefault_enable();
+out:
 	put_cpu_var(zs_map_area);
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
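Condensing the zsmalloc-main.c hunks above: for an object that spans two pages, the copy into and out of the per-cpu buffer is now conditional on the mapping mode. The sketch below is a paraphrase of the patched zs_map_object()/zs_unmap_object() slow path, not the literal source; map_slow()/unmap_slow() are illustrative names, and locking, the single-page fastpath and the handle-to-location lookup are elided.

/* Paraphrase of the patched slow path for objects that span two pages. */
static void *map_slow(struct mapping_area *area, struct page *page,
		      unsigned long off, int size, enum zs_mapmode mm)
{
	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* copy-in only if the caller may read the object */
	if (mm != ZS_MM_WO)
		zs_copy_map_object(area->vm_buf, page, off, size);
	area->vm_addr = NULL;
	return area->vm_buf;
}

static void unmap_slow(struct mapping_area *area, struct page *page,
		       unsigned long off, int size)
{
	/*
	 * copy-out only if the caller may have written the object;
	 * area->vm_mm holds the mode the object was mapped with
	 */
	if (area->vm_mm != ZS_MM_RO)
		zs_copy_unmap_object(area->vm_buf, page, off, size);

	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}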
drivers/staging/zsmalloc/zsmalloc.h (13 additions, 1 deletion)
@@ -15,6 +15,17 @@
 
 #include <linux/types.h>
 
+/*
+ * zsmalloc mapping modes
+ *
+ * NOTE: These only make a difference when a mapped object spans pages
+ */
+enum zs_mapmode {
+	ZS_MM_RW, /* normal read-write mapping */
+	ZS_MM_RO, /* read-only (no copy-out at unmap time) */
+	ZS_MM_WO /* write-only (no copy-in at map time) */
+};
+
 struct zs_pool;
 
 struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
@@ -23,7 +34,8 @@ void zs_destroy_pool(struct zs_pool *pool);
 unsigned long zs_malloc(struct zs_pool *pool, size_t size);
 void zs_free(struct zs_pool *pool, unsigned long obj);
 
-void *zs_map_object(struct zs_pool *pool, unsigned long handle);
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+			enum zs_mapmode mm);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
 
 u64 zs_get_total_size_bytes(struct zs_pool *pool);
drivers/staging/zsmalloc/zsmalloc_int.h (1 addition, 0 deletions)
@@ -112,6 +112,7 @@ static const int fullness_threshold_frac = 4;
 struct mapping_area {
 	char *vm_buf; /* copy buffer for objects that span pages */
 	char *vm_addr; /* address of kmap_atomic()'ed pages */
+	enum zs_mapmode vm_mm; /* mapping mode */
 };
 
 struct size_class {