Commit 4c6bbba

---
r: 370903
b: refs/heads/master
c: 3ab2e42
h: refs/heads/master
i:
  370901: 1dacb8e
  370899: beb534c
  370895: 1561a64
v: v3
Asias He authored and Michael S. Tsirkin committed May 1, 2013
1 parent f41c938 commit 4c6bbba
Showing 5 changed files with 125 additions and 88 deletions.
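The core move, repeated in the two driver diffs below, is the kernel's embedding pattern: place the generic struct vhost_virtqueue as the first member of a device-specific wrapper (vhost_net_virtqueue, vhost_scsi_virtqueue), hand the core an array of pointers to the embedded members, and recover the wrapper from a generic pointer with container_of(). Here is a minimal userspace sketch of that pattern; every name in it is an illustrative stand-in, not code from this commit.

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the generic core's per-queue state. */
struct virtqueue {
	int num;			/* e.g. ring size */
};

/* Device-specific wrapper; the generic part is embedded first. */
struct net_virtqueue {
	struct virtqueue vq;
	int tx_packets;			/* hypothetical per-vq field */
};

/* Same recovery the patch performs with the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The core hands device code only a struct virtqueue *. */
static void on_kick(struct virtqueue *vq)
{
	struct net_virtqueue *nvq =
		container_of(vq, struct net_virtqueue, vq);
	nvq->tx_packets++;
}

int main(void)
{
	struct net_virtqueue nvqs[2] = { { { 256 }, 0 }, { { 256 }, 0 } };

	on_kick(&nvqs[1].vq);
	/* The wrapper's array index falls out of pointer subtraction,
	 * as in the patch's (nvq - n->vqs) and (q - vs->vqs). */
	printf("vq %td: tx_packets=%d\n",
	       &nvqs[1] - nvqs, nvqs[1].tx_packets);
	return 0;
}
```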
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: bc7562355fda8075793bf66094cda573206ec693
+refs/heads/master: 3ab2e420ec1caf4ead233f3161ac7d86fe5d2a9f
64 changes: 41 additions & 23 deletions trunk/drivers/vhost/net.c
@@ -64,9 +64,13 @@ enum {
 	VHOST_NET_VQ_MAX = 2,
 };
 
+struct vhost_net_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_net {
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
 	/* Number of TX recently submitted.
 	 * Protected by tx vq lock. */
@@ -198,7 +202,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_TX].vq;
 	unsigned out, in, s;
 	int head;
 	struct msghdr msg = {
@@ -417,7 +421,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
  * read-size critical section for our kind of RCU. */
 static void handle_rx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_RX].vq;
 	unsigned uninitialized_var(in), log;
 	struct vhost_log *vq_log;
 	struct msghdr msg = {
@@ -559,17 +563,26 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
+	struct vhost_virtqueue **vqs;
 	int r;
 
 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
 
 	dev = &n->dev;
-	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
-	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
+	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
+		kfree(vqs);
 		return r;
 	}
 
@@ -584,7 +597,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 static void vhost_net_disable_vq(struct vhost_net *n,
 				 struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	if (!vq->private_data)
 		return;
 	vhost_poll_stop(poll);
@@ -593,7 +608,9 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 static int vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
 
 	sock = rcu_dereference_protected(vq->private_data,
@@ -621,30 +638,30 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
 			   struct socket **rx_sock)
 {
-	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
-	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
+	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
 }
 
 static void vhost_net_flush_vq(struct vhost_net *n, int index)
 {
 	vhost_poll_flush(n->poll + index);
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].vq.poll);
 }
 
 static void vhost_net_flush(struct vhost_net *n)
 {
 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
-	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+	if (n->vqs[VHOST_NET_VQ_TX].vq.ubufs) {
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = true;
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		/* Wait for all lower device DMAs done. */
-		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].vq.ubufs);
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
-		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		kref_init(&n->vqs[VHOST_NET_VQ_TX].vq.ubufs->kref);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 	}
 }

@@ -665,6 +682,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }
@@ -750,7 +768,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		r = -ENOBUFS;
 		goto err;
 	}
-	vq = n->vqs + index;
+	vq = &n->vqs[index].vq;
 	mutex_lock(&vq->mutex);
 
 	/* Verify that ring has been setup correctly. */
@@ -870,10 +888,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 	n->dev.acked_features = features;
 	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
-		mutex_lock(&n->vqs[i].mutex);
-		n->vqs[i].vhost_hlen = vhost_hlen;
-		n->vqs[i].sock_hlen = sock_hlen;
-		mutex_unlock(&n->vqs[i].mutex);
+		mutex_lock(&n->vqs[i].vq.mutex);
+		n->vqs[i].vq.vhost_hlen = vhost_hlen;
+		n->vqs[i].vq.sock_hlen = sock_hlen;
+		mutex_unlock(&n->vqs[i].vq.mutex);
 	}
 	vhost_net_flush(n);
 	mutex_unlock(&n->dev.mutex);
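Note the ownership change running through vhost_net_open() and vhost_net_release() above, mirrored in vhost_scsi below: vhost_dev_init() now takes a kmalloc'ed array of vhost_virtqueue pointers rather than a flat vhost_virtqueue array, the core keeps that array as dev->vqs, and the device frees it on release with kfree(n->dev.vqs). A condensed userspace sketch of that open/release pairing, with illustrative names and the error paths trimmed to the essentials:

```c
#include <stdlib.h>

#define NVQS 2

struct virtqueue { int num; };
struct net_virtqueue { struct virtqueue vq; };

struct dev {
	struct virtqueue **vqs;		/* pointer array, owned by the dev */
	int nvqs;
};

struct net {
	struct dev dev;
	struct net_virtqueue vqs[NVQS];	/* queues embedded in the device */
};

static int dev_init(struct dev *d, struct virtqueue **vqs, int nvqs)
{
	d->vqs = vqs;			/* the core keeps the array... */
	d->nvqs = nvqs;
	return 0;
}

static struct net *net_open(void)
{
	struct net *n = calloc(1, sizeof(*n));
	struct virtqueue **vqs;
	int i;

	if (!n)
		return NULL;
	vqs = malloc(NVQS * sizeof(*vqs));
	if (!vqs) {
		free(n);
		return NULL;
	}
	for (i = 0; i < NVQS; i++)
		vqs[i] = &n->vqs[i].vq;	/* point slots at embedded vqs */
	if (dev_init(&n->dev, vqs, NVQS) < 0) {
		free(vqs);
		free(n);
		return NULL;
	}
	return n;
}

static void net_release(struct net *n)
{
	free(n->dev.vqs);		/* ...so release frees it via the dev */
	free(n);
}

int main(void)
{
	struct net *n = net_open();

	if (n)
		net_release(n);
	return 0;
}
```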
55 changes: 37 additions & 18 deletions trunk/drivers/vhost/tcm_vhost.c
@@ -74,13 +74,17 @@ enum {
 #define VHOST_SCSI_MAX_VQ	128
 #define VHOST_SCSI_MAX_EVENT	128
 
+struct vhost_scsi_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
 	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
@@ -366,7 +370,7 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 	u32 event, u32 reason)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct tcm_vhost_evt *evt;
 
 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
@@ -409,7 +413,7 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
 	struct tcm_vhost_evt *evt)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct virtio_scsi_event *event = &evt->event;
 	struct virtio_scsi_event __user *eventp;
 	unsigned out, in;
@@ -460,7 +464,7 @@ static void tcm_vhost_evt_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_event_work);
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct tcm_vhost_evt *evt;
 	struct llist_node *llnode;
 
@@ -511,8 +515,10 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 					v_rsp.sense_len);
 		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
 		if (likely(ret == 0)) {
+			struct vhost_scsi_virtqueue *q;
 			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
-			vq = tv_cmd->tvc_vq - vs->vqs;
+			q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+			vq = q - vs->vqs;
 			__set_bit(vq, signal);
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -523,7 +529,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 	vq = -1;
 	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 		< VHOST_SCSI_MAX_VQ)
-		vhost_signal(&vs->dev, &vs->vqs[vq]);
+		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
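The completion path above batches guest notifications: each finished command sets a bit for its queue in the on-stack signal bitmap, and a single sweep at the end signals every marked queue once. This hunk only changes how the bit index is derived (container_of() plus pointer subtraction on the wrapper array). Below is a rough userspace approximation of the set-then-sweep idiom, with hand-rolled helpers standing in for the kernel's __set_bit() and find_next_bit():

```c
#include <stdio.h>
#include <string.h>

#define MAX_VQ 128
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS ((MAX_VQ + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_(int nr, const unsigned long *map)
{
	return (int)((map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1);
}

int main(void)
{
	unsigned long signal[BITMAP_LONGS];
	int vq;

	memset(signal, 0, sizeof(signal));

	/* Completion loop: mark queues that consumed a used buffer. */
	set_bit_(3, signal);
	set_bit_(97, signal);

	/* Sweep: notify each marked queue exactly once. */
	for (vq = 0; vq < MAX_VQ; vq++)
		if (test_bit_(vq, signal))
			printf("signal vq %d\n", vq);
	return 0;
}
```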
@@ -938,7 +944,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 
 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
 {
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
+	vhost_poll_flush(&vs->vqs[index].vq.poll);
 }
 
 static void vhost_scsi_flush(struct vhost_scsi *vs)
@@ -975,7 +981,7 @@ static int vhost_scsi_set_endpoint(
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
-		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
 			ret = -EFAULT;
 			goto out;
 		}
@@ -1022,7 +1028,7 @@ static int vhost_scsi_set_endpoint(
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
-			vq = &vs->vqs[i];
+			vq = &vs->vqs[i].vq;
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, vs_tpg);
@@ -1063,7 +1069,7 @@ static int vhost_scsi_clear_endpoint(
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
-		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
 			ret = -EFAULT;
 			goto err_dev;
 		}
@@ -1103,7 +1109,7 @@ static int vhost_scsi_clear_endpoint(
 	}
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
-			vq = &vs->vqs[i];
+			vq = &vs->vqs[i].vq;
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, NULL);
@@ -1151,24 +1157,36 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
+	struct vhost_virtqueue **vqs;
 	int r, i;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
+	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(s);
+		return -ENOMEM;
+	}
+
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
 	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
 
 	s->vs_events_nr = 0;
 	s->vs_events_missed = false;
 
-	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
-	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
-	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
-		s->vqs[i].handle_kick = vhost_scsi_handle_kick;
-	r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
+	vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
+	vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
+	s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+	s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+		vqs[i] = &s->vqs[i].vq;
+		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+	}
+	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
 	if (r < 0) {
+		kfree(vqs);
 		kfree(s);
 		return r;
 	}
@@ -1190,6 +1208,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_dev_cleanup(&s->dev, false);
 	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
 	vhost_scsi_flush(s);
+	kfree(s->dev.vqs);
 	kfree(s);
 	return 0;
 }
@@ -1205,7 +1224,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 	u32 events_missed;
 	u64 features;
 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 
 	switch (ioctl) {
 	case VHOST_SCSI_SET_ENDPOINT:
@@ -1333,7 +1352,7 @@ static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 	else
 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
 
-	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	mutex_lock(&vq->mutex);
 	tcm_vhost_send_evt(vs, tpg, lun,
 			   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);