nbd: cleanup ioctl handling
Break the ioctl handling out into helper functions; some of these
handlers are getting pretty big and unwieldy.
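
For context, these ioctls are driven from userspace in a fixed sequence by
tools like nbd-client. The sketch below is illustrative only and not part of
the patch: a hypothetical run_nbd_client() showing the call order against
/dev/nbd0, assuming sock_fd is a socket already connected to an NBD server
with protocol negotiation done; the block size and device size are made-up
values and error checking of the ioctls is elided.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nbd.h>

	/*
	 * Hypothetical client sketch; each ioctl lands in one of the
	 * helpers this patch introduces.
	 */
	static int run_nbd_client(int sock_fd)
	{
		int dev_fd = open("/dev/nbd0", O_RDWR);

		if (dev_fd < 0)
			return -1;

		ioctl(dev_fd, NBD_SET_BLKSIZE, 4096UL);   /* nbd_size_set() */
		ioctl(dev_fd, NBD_SET_SIZE, 1UL << 30);   /* nbd_size_set() */
		ioctl(dev_fd, NBD_SET_SOCK, sock_fd);     /* nbd_add_socket() */

		/* Blocks in nbd_start_device() until disconnect or error. */
		ioctl(dev_fd, NBD_DO_IT);

		ioctl(dev_fd, NBD_CLEAR_QUE);             /* compat no-op */
		ioctl(dev_fd, NBD_CLEAR_SOCK);            /* nbd_clear_sock() */
		close(dev_fd);
		return 0;
	}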

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Josef Bacik authored and Jens Axboe committed Feb 21, 2017
1 parent ee52423 commit 9442b73
Showing 1 changed file with 132 additions and 137 deletions.
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -96,6 +96,10 @@ static int max_part;
 static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
+static int nbd_dev_dbg_init(struct nbd_device *nbd);
+static void nbd_dev_dbg_close(struct nbd_device *nbd);
+
+
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
 	return disk_to_dev(nbd->disk);
@@ -571,10 +575,17 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
+static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
+			  unsigned long arg)
 {
+	struct socket *sock;
 	struct nbd_sock **socks;
 	struct nbd_sock *nsock;
+	int err;
+
+	sock = sockfd_lookup(arg, &err);
+	if (!sock)
+		return err;
 
 	if (!nbd->task_setup)
 		nbd->task_setup = current;
@@ -598,26 +609,20 @@ static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
 	nsock->sock = sock;
 	socks[nbd->num_connections++] = nsock;
 
+	if (max_part)
+		bdev->bd_invalidated = 1;
 	return 0;
 }
 
 /* Reset all properties of an NBD device */
 static void nbd_reset(struct nbd_device *nbd)
 {
-	int i;
-
-	for (i = 0; i < nbd->num_connections; i++)
-		kfree(nbd->socks[i]);
-	kfree(nbd->socks);
-	nbd->socks = NULL;
 	nbd->runtime_flags = 0;
 	nbd->blksize = 1024;
 	nbd->bytesize = 0;
 	set_capacity(nbd->disk, 0);
 	nbd->flags = 0;
 	nbd->tag_set.timeout = 0;
-	nbd->num_connections = 0;
-	nbd->task_setup = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 }
 
@@ -659,74 +664,135 @@ static void send_disconnects(struct nbd_device *nbd)
 	}
 }
 
-static int nbd_dev_dbg_init(struct nbd_device *nbd);
-static void nbd_dev_dbg_close(struct nbd_device *nbd);
-
-/* Must be called with config_lock held */
-static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
-		       unsigned int cmd, unsigned long arg)
-{
-	switch (cmd) {
-	case NBD_DISCONNECT: {
-		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
-		if (!nbd->socks)
-			return -EINVAL;
-
-		mutex_unlock(&nbd->config_lock);
-		fsync_bdev(bdev);
-		mutex_lock(&nbd->config_lock);
-
-		/* Check again after getting mutex back. */
-		if (!nbd->socks)
-			return -EINVAL;
-
-		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
-				      &nbd->runtime_flags))
-			send_disconnects(nbd);
-		return 0;
-	}
-
-	case NBD_CLEAR_SOCK:
-		sock_shutdown(nbd);
-		nbd_clear_que(nbd);
-		kill_bdev(bdev);
-		nbd_bdev_reset(bdev);
-		/*
-		 * We want to give the run thread a chance to wait for everybody
-		 * to clean up and then do it's own cleanup.
-		 */
-		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
-			int i;
-
-			for (i = 0; i < nbd->num_connections; i++)
-				kfree(nbd->socks[i]);
-			kfree(nbd->socks);
-			nbd->socks = NULL;
-			nbd->num_connections = 0;
-			nbd->task_setup = NULL;
-		}
-		return 0;
-
-	case NBD_SET_SOCK: {
-		int err;
-		struct socket *sock = sockfd_lookup(arg, &err);
-
-		if (!sock)
-			return err;
-
-		err = nbd_add_socket(nbd, sock);
-		if (!err && max_part)
-			bdev->bd_invalidated = 1;
-
-		return err;
-	}
-
-	case NBD_SET_BLKSIZE: {
-		loff_t bsize = div_s64(nbd->bytesize, arg);
-
-		return nbd_size_set(nbd, bdev, arg, bsize);
-	}
-
+static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
+{
+	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
+	if (!nbd->socks)
+		return -EINVAL;
+
+	mutex_unlock(&nbd->config_lock);
+	fsync_bdev(bdev);
+	mutex_lock(&nbd->config_lock);
+
+	/* Check again after getting mutex back. */
+	if (!nbd->socks)
+		return -EINVAL;
+
+	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
+			      &nbd->runtime_flags))
+		send_disconnects(nbd);
+	return 0;
+}
+
+static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
+{
+	sock_shutdown(nbd);
+	nbd_clear_que(nbd);
+	kill_bdev(bdev);
+	nbd_bdev_reset(bdev);
+	/*
+	 * We want to give the run thread a chance to wait for everybody
+	 * to clean up and then do it's own cleanup.
+	 */
+	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
+	    nbd->num_connections) {
+		int i;
+
+		for (i = 0; i < nbd->num_connections; i++)
+			kfree(nbd->socks[i]);
+		kfree(nbd->socks);
+		nbd->socks = NULL;
+		nbd->num_connections = 0;
+	}
+	nbd->task_setup = NULL;
+
+	return 0;
+}
+
+static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
+{
+	struct recv_thread_args *args;
+	int num_connections = nbd->num_connections;
+	int error = 0, i;
+
+	if (nbd->task_recv)
+		return -EBUSY;
+	if (!nbd->socks)
+		return -EINVAL;
+	if (num_connections > 1 &&
+	    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
+		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
+		error = -EINVAL;
+		goto out_err;
+	}
+
+	set_bit(NBD_RUNNING, &nbd->runtime_flags);
+	blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
+	args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
+	if (!args) {
+		error = -ENOMEM;
+		goto out_err;
+	}
+	nbd->task_recv = current;
+	mutex_unlock(&nbd->config_lock);
+
+	nbd_parse_flags(nbd, bdev);
+
+	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
+	if (error) {
+		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+		goto out_recv;
+	}
+
+	nbd_size_update(nbd, bdev);
+
+	nbd_dev_dbg_init(nbd);
+	for (i = 0; i < num_connections; i++) {
+		sk_set_memalloc(nbd->socks[i]->sock->sk);
+		atomic_inc(&nbd->recv_threads);
+		INIT_WORK(&args[i].work, recv_work);
+		args[i].nbd = nbd;
+		args[i].index = i;
+		queue_work(recv_workqueue, &args[i].work);
+	}
+	wait_event_interruptible(nbd->recv_wq,
+				 atomic_read(&nbd->recv_threads) == 0);
+	for (i = 0; i < num_connections; i++)
+		flush_work(&args[i].work);
+	nbd_dev_dbg_close(nbd);
+	nbd_size_clear(nbd, bdev);
+	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+out_recv:
+	mutex_lock(&nbd->config_lock);
+	nbd->task_recv = NULL;
+out_err:
+	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
+	nbd_clear_sock(nbd, bdev);
+
+	/* user requested, ignore socket errors */
+	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+		error = 0;
+	if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
+		error = -ETIMEDOUT;
+
+	nbd_reset(nbd);
+	return error;
+}
+
+/* Must be called with config_lock held */
+static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+		       unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case NBD_DISCONNECT:
+		return nbd_disconnect(nbd, bdev);
+	case NBD_CLEAR_SOCK:
+		return nbd_clear_sock(nbd, bdev);
+	case NBD_SET_SOCK:
+		return nbd_add_socket(nbd, bdev, arg);
+	case NBD_SET_BLKSIZE:
+		return nbd_size_set(nbd, bdev, arg,
+				    div_s64(nbd->bytesize, arg));
 	case NBD_SET_SIZE:
 		return nbd_size_set(nbd, bdev, nbd->blksize,
 				    div_s64(arg, nbd->blksize));
@@ -741,85 +807,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	case NBD_SET_FLAGS:
 		nbd->flags = arg;
 		return 0;
-
-	case NBD_DO_IT: {
-		struct recv_thread_args *args;
-		int num_connections = nbd->num_connections;
-		int error = 0, i;
-
-		if (nbd->task_recv)
-			return -EBUSY;
-		if (!nbd->socks)
-			return -EINVAL;
-		if (num_connections > 1 &&
-		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
-			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
-			error = -EINVAL;
-			goto out_err;
-		}
-
-		set_bit(NBD_RUNNING, &nbd->runtime_flags);
-		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
-		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
-		if (!args) {
-			error = -ENOMEM;
-			goto out_err;
-		}
-		nbd->task_recv = current;
-		mutex_unlock(&nbd->config_lock);
-
-		nbd_parse_flags(nbd, bdev);
-
-		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
-		if (error) {
-			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-			goto out_recv;
-		}
-
-		nbd_size_update(nbd, bdev);
-
-		nbd_dev_dbg_init(nbd);
-		for (i = 0; i < num_connections; i++) {
-			sk_set_memalloc(nbd->socks[i]->sock->sk);
-			atomic_inc(&nbd->recv_threads);
-			INIT_WORK(&args[i].work, recv_work);
-			args[i].nbd = nbd;
-			args[i].index = i;
-			queue_work(recv_workqueue, &args[i].work);
-		}
-		wait_event_interruptible(nbd->recv_wq,
-					 atomic_read(&nbd->recv_threads) == 0);
-		for (i = 0; i < num_connections; i++)
-			flush_work(&args[i].work);
-		nbd_dev_dbg_close(nbd);
-		nbd_size_clear(nbd, bdev);
-		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-out_recv:
-		mutex_lock(&nbd->config_lock);
-		nbd->task_recv = NULL;
-out_err:
-		sock_shutdown(nbd);
-		nbd_clear_que(nbd);
-		kill_bdev(bdev);
-		nbd_bdev_reset(bdev);
-
-		/* user requested, ignore socket errors */
-		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
-			error = 0;
-		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
-			error = -ETIMEDOUT;
-
-		nbd_reset(nbd);
-		return error;
-	}
-
+	case NBD_DO_IT:
+		return nbd_start_device(nbd, bdev);
 	case NBD_CLEAR_QUE:
 		/*
 		 * This is for compatibility only. The queue is always cleared
 		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
 		 */
 		return 0;
-
 	case NBD_PRINT_DEBUG:
 		/*
 		 * For compatibility only, we no longer keep a list of
