---
yaml
---
r: 210595
b: refs/heads/master
c: 87d6a41
h: refs/heads/master
i:
  210593: db61564
  210591: b8f99db
v: v3
Michael S. Tsirkin committed Sep 6, 2010
1 parent 648764f commit 49bc6a0
Showing 2 changed files with 58 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 73457f0f836956747e0394320be2163c050e96ef
+refs/heads/master: 87d6a412bd1ed82c14cabd4b408003b23bbd2880
79 changes: 57 additions & 22 deletions trunk/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,10 +289,11 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
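
Read together, the vhost.c hunks above split the poll-independent work plumbing (vhost_work_init(), vhost_work_queue(), vhost_work_flush()) out of the vhost_poll wrappers, then use it so that the worker thread is attached to the owner's cgroups from the worker's own context: the new vhost_attach_cgroups() queues a work item whose callback runs cgroup_attach_task_all(s->owner, current) on the worker and flushes it, replacing the old cgroup_attach_task_current_cg(worker) call made directly by the owner. The sketch below is a minimal userspace analogue of that work-queue machinery, not the kernel code: pthreads stand in for the kthread, dev->work_lock and the wait queues, and every name (vwork, vdev, vwork_queue, vworker, ...) is hypothetical. It does copy the queue_seq/done_seq/flushing bookkeeping of vhost_work_flush(), so the flush only waits for submissions made before the flush started.

/*
 * Hypothetical userspace analogue of the vhost work queue above.
 * Illustration only, not the kernel implementation.
 */
#include <pthread.h>
#include <stdbool.h>

struct vwork;
typedef void (*vwork_fn_t)(struct vwork *);

struct vwork {
	struct vwork *next;
	bool queued;			/* mirrors list_empty(&work->node) */
	vwork_fn_t fn;
	unsigned queue_seq;		/* bumped each time the item is queued */
	unsigned done_seq;		/* last submission the worker completed */
	int flushing;			/* how many flushers are waiting */
};

struct vdev {
	pthread_mutex_t lock;		/* plays the role of dev->work_lock */
	pthread_cond_t kick;		/* wakes the worker */
	pthread_cond_t done;		/* wakes flushers, like work->done */
	struct vwork *head, *tail;	/* pending work, FIFO */
	bool stop;
	pthread_t worker;
};

static void vwork_init(struct vwork *w, vwork_fn_t fn)
{
	w->next = NULL;
	w->queued = false;
	w->fn = fn;
	w->queue_seq = w->done_seq = 0;
	w->flushing = 0;
}

static void vwork_queue(struct vdev *d, struct vwork *w)
{
	pthread_mutex_lock(&d->lock);
	if (!w->queued) {
		w->queued = true;
		w->next = NULL;
		if (d->tail)
			d->tail->next = w;
		else
			d->head = w;
		d->tail = w;
		w->queue_seq++;
		pthread_cond_signal(&d->kick);
	}
	pthread_mutex_unlock(&d->lock);
}

/* Wait until everything queued on @w before this call has finished. */
static void vwork_flush(struct vdev *d, struct vwork *w)
{
	unsigned seq;

	pthread_mutex_lock(&d->lock);
	seq = w->queue_seq;
	w->flushing++;
	while ((int)(seq - w->done_seq) > 0)
		pthread_cond_wait(&d->done, &d->lock);
	w->flushing--;
	pthread_mutex_unlock(&d->lock);
}

static void *vworker(void *arg)
{
	struct vdev *d = arg;
	struct vwork *w;
	unsigned seq;

	pthread_mutex_lock(&d->lock);
	while (!d->stop) {
		w = d->head;
		if (!w) {
			pthread_cond_wait(&d->kick, &d->lock);
			continue;
		}
		d->head = w->next;		/* dequeue */
		if (!d->head)
			d->tail = NULL;
		w->queued = false;
		seq = w->queue_seq;
		pthread_mutex_unlock(&d->lock);
		w->fn(w);			/* run the callback unlocked */
		pthread_mutex_lock(&d->lock);
		w->done_seq = seq;		/* this submission is done */
		if (w->flushing)
			pthread_cond_broadcast(&d->done);
	}
	pthread_mutex_unlock(&d->lock);
	return NULL;
}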
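
Continuing that sketch, the usage below mirrors the shape of the new vhost_attach_cgroups()/vhost_attach_cgroups_work() pair: an on-stack struct wraps the work item together with its argument and return value, the callback recovers the wrapper from the embedded work pointer with container_of(), and the caller queues the item and flushes it to wait for the result. The struct sync_call name and the doubling callback are made up for illustration; in the diff the real callback is cgroup_attach_task_all() and the result travels back in attach.ret. This block builds on the definitions in the sketch above.

#include <stddef.h>	/* offsetof */
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sync_call {
	struct vwork work;	/* embedded, like vhost_attach_cgroups_struct */
	int arg;
	int ret;
};

static void sync_call_work(struct vwork *work)
{
	struct sync_call *c = container_of(work, struct sync_call, work);

	c->ret = c->arg * 2;	/* stand-in for cgroup_attach_task_all() */
}

int main(void)
{
	static struct vdev dev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.kick = PTHREAD_COND_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	struct sync_call call = { .arg = 21 };

	pthread_create(&dev.worker, NULL, vworker, &dev);

	vwork_init(&call.work, sync_call_work);
	vwork_queue(&dev, &call.work);	/* like vhost_work_queue() */
	vwork_flush(&dev, &call.work);	/* like vhost_work_flush() */
	printf("worker computed %d\n", call.ret);

	pthread_mutex_lock(&dev.lock);	/* tell the worker to exit */
	dev.stop = true;
	pthread_cond_signal(&dev.kick);
	pthread_mutex_unlock(&dev.lock);
	pthread_join(dev.worker, NULL);
	return 0;
}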

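One detail in the last hunk is the reordering: vhost_attach_cgroups() flushes its work item on the freshly created worker, and a kthread created with kthread_create() stays asleep until it is woken, so the worker has to be running before the attach work is queued and flushed. That is presumably why wake_up_process(worker) now comes before the cgroup attach instead of after it. Pieced together from the hunk (abridged, with added comments), the resulting order in vhost_dev_set_owner() is:

	dev->worker = worker;
	/* the worker must be running before work can be flushed on it */
	wake_up_process(worker);	/* avoid contributing to loadavg */

	/* runs cgroup_attach_task_all() on the worker and waits for it */
	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;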