btrfs: add helper methods for workspace manager init and cleanup
Workspace manager init and cleanup code is open-coded inside a for loop
over the compression types, which forces every compression type to rely
on the same workspace manager implementation. This patch factors that
code out into helper methods that will serve as the generic
implementation for btrfs workspace management.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Dennis Zhou authored and David Sterba committed Feb 25, 2019
1 parent 10b94a5 commit 1666eda
1 changed file: fs/btrfs/compression.c (43 additions, 39 deletions)
@@ -796,31 +796,42 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 	&btrfs_zstd_compress,
 };
 
-void __init btrfs_init_compress(void)
+static void btrfs_init_workspace_manager(int type)
 {
+	struct workspace_manager *wsman = &wsm[type];
 	struct list_head *workspace;
-	int i;
 
-	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
-		wsm[i].ops = btrfs_compress_op[i];
+	wsman->ops = btrfs_compress_op[type];
 
-		INIT_LIST_HEAD(&wsm[i].idle_ws);
-		spin_lock_init(&wsm[i].ws_lock);
-		atomic_set(&wsm[i].total_ws, 0);
-		init_waitqueue_head(&wsm[i].ws_wait);
+	INIT_LIST_HEAD(&wsman->idle_ws);
+	spin_lock_init(&wsman->ws_lock);
+	atomic_set(&wsman->total_ws, 0);
+	init_waitqueue_head(&wsman->ws_wait);
 
-		/*
-		 * Preallocate one workspace for each compression type so
-		 * we can guarantee forward progress in the worst case
-		 */
-		workspace = wsm[i].ops->alloc_workspace();
-		if (IS_ERR(workspace)) {
-			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
-		} else {
-			atomic_set(&wsm[i].total_ws, 1);
-			wsm[i].free_ws = 1;
-			list_add(workspace, &wsm[i].idle_ws);
-		}
+	/*
+	 * Preallocate one workspace for each compression type so we can
+	 * guarantee forward progress in the worst case
+	 */
+	workspace = wsman->ops->alloc_workspace();
+	if (IS_ERR(workspace)) {
+		pr_warn(
+	"BTRFS: cannot preallocate compression workspace, will try later\n");
+	} else {
+		atomic_set(&wsman->total_ws, 1);
+		wsman->free_ws = 1;
+		list_add(workspace, &wsman->idle_ws);
+	}
+}
+
+static void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
+{
+	struct list_head *ws;
+
+	while (!list_empty(&wsman->idle_ws)) {
+		ws = wsman->idle_ws.next;
+		list_del(ws);
+		wsman->ops->free_workspace(ws);
+		atomic_dec(&wsman->total_ws);
 	}
 }
 

@@ -940,24 +951,6 @@ static void free_workspace(int type, struct list_head *workspace)
 	cond_wake_up(ws_wait);
 }
 
-/*
- * cleanup function for module exit
- */
-static void free_workspaces(void)
-{
-	struct list_head *workspace;
-	int i;
-
-	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
-		while (!list_empty(&wsm[i].idle_ws)) {
-			workspace = wsm[i].idle_ws.next;
-			list_del(workspace);
-			wsm[i].ops->free_workspace(workspace);
-			atomic_dec(&wsm[i].total_ws);
-		}
-	}
-}
-
 /*
  * Given an address space and start and length, compress the bytes into @pages
  * that are allocated on demand.
@@ -1050,9 +1043,20 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 	return ret;
 }
 
+void __init btrfs_init_compress(void)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
+		btrfs_init_workspace_manager(i);
+}
+
 void __cold btrfs_exit_compress(void)
 {
-	free_workspaces();
+	int i;
+
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
+		btrfs_cleanup_workspace_manager(&wsm[i]);
 }
 
 /*
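
For readers who want to try the pattern outside the kernel tree, here is a
minimal user-space sketch of what the two new helpers encapsulate: init sets
up a manager and preallocates one workspace on an idle list so the first user
can always make progress, and cleanup drains that list. All names, the buffer
size, and the singly linked list below are illustrative simplifications; the
sketch drops the kernel's spinlock, wait queue, atomics, and list_head
machinery, so it is a model of the pattern, not the btrfs implementation.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel structures. */
struct workspace {
	struct workspace *next;	/* simplified substitute for list_head */
	void *buf;		/* per-type scratch memory */
};

struct workspace_manager {
	struct workspace *idle_ws;	/* idle list (singly linked here) */
	int total_ws;			/* count of live workspaces */
};

static struct workspace *alloc_workspace(void)
{
	struct workspace *ws = calloc(1, sizeof(*ws));

	if (!ws)
		return NULL;
	ws->buf = malloc(4096);	/* arbitrary scratch size for the sketch */
	if (!ws->buf) {
		free(ws);
		return NULL;
	}
	return ws;
}

static void free_workspace(struct workspace *ws)
{
	free(ws->buf);
	free(ws);
}

/*
 * Mirrors btrfs_init_workspace_manager(): set up the manager and
 * preallocate one workspace so the worst case can still make progress.
 */
static void init_workspace_manager(struct workspace_manager *wsman)
{
	struct workspace *ws;

	wsman->idle_ws = NULL;
	wsman->total_ws = 0;

	ws = alloc_workspace();
	if (!ws) {
		fprintf(stderr, "cannot preallocate workspace, will try later\n");
	} else {
		ws->next = wsman->idle_ws;
		wsman->idle_ws = ws;
		wsman->total_ws = 1;
	}
}

/* Mirrors btrfs_cleanup_workspace_manager(): drain and free the idle list. */
static void cleanup_workspace_manager(struct workspace_manager *wsman)
{
	while (wsman->idle_ws) {
		struct workspace *ws = wsman->idle_ws;

		wsman->idle_ws = ws->next;
		free_workspace(ws);
		wsman->total_ws--;
	}
}

int main(void)
{
	struct workspace_manager wsman;

	init_workspace_manager(&wsman);
	printf("preallocated %d workspace(s)\n", wsman.total_ws);
	cleanup_workspace_manager(&wsman);
	return 0;
}

The design point the patch is after: once init and cleanup live in helpers
keyed off the manager rather than in one loop, the loops in
btrfs_init_compress() and btrfs_exit_compress() are just plumbing, leaving
room for a compression type to plug in its own workspace-management strategy
later, as the commit message's "generic implementation" wording suggests.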
