Commit

---
r: 338775
b: refs/heads/master
c: e225042
h: refs/heads/master
i:
  338773: 2717621
  338771: b7c1321
  338767: 4e27648
v: v3
Rafael Aquini authored and Linus Torvalds committed Dec 12, 2012
1 parent 2724190 commit c502693
Showing 2 changed files with 133 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: bf6bddf1924eaebf2beb85e4249a89dd16d4eed6
refs/heads/master: e22504296d4f64fbbbd741602ab47ee874649c18
151 changes: 132 additions & 19 deletions trunk/drivers/virtio/virtio_balloon.c
@@ -27,13 +27,15 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>

/*
* Balloon device works in 4K page units. So each page is pointed to by
* multiple balloon pages. All memory counters in this driver are in balloon
* page units.
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256

struct virtio_balloon
{
@@ -52,15 +54,19 @@ struct virtio_balloon
/* Number of balloon pages we've told the Host we're not using. */
unsigned int num_pages;
/*
* The pages we've told the Host we're not using.
* The pages we've told the Host we're not using are enqueued
* on the vb_dev_info->pages list.
* Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
* to num_pages above.
*/
struct list_head pages;
struct balloon_dev_info *vb_dev_info;

/* Synchronize access/update to the elements of this struct virtio_balloon */
struct mutex balloon_lock;

/* The array of pfns we tell the Host about. */
unsigned int num_pfns;
u32 pfns[256];
u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

/* Memory statistics */
int need_stats_update;
@@ -122,33 +128,34 @@ static void set_page_pfns(u32 pfns[], struct page *page)

static void fill_balloon(struct virtio_balloon *vb, size_t num)
{
struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;

/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));

mutex_lock(&vb->balloon_lock);
for (vb->num_pfns = 0; vb->num_pfns < num;
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
__GFP_NOMEMALLOC | __GFP_NOWARN);
struct page *page = balloon_page_enqueue(vb_dev_info);

if (!page) {
if (printk_ratelimit())
dev_printk(KERN_INFO, &vb->vdev->dev,
"Out of puff! Can't get %zu pages\n",
num);
"Out of puff! Can't get %u pages\n",
VIRTIO_BALLOON_PAGES_PER_PAGE);
/* Sleep for at least 1/5 of a second before retry. */
msleep(200);
break;
}
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
totalram_pages--;
list_add(&page->lru, &vb->pages);
}

/* Didn't get any? Oh well. */
if (vb->num_pfns == 0)
return;

tell_host(vb, vb->inflate_vq);
/* Did we get any? */
if (vb->num_pfns != 0)
tell_host(vb, vb->inflate_vq);
mutex_unlock(&vb->balloon_lock);
}

static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
@@ -157,22 +164,25 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)

/* Find pfns pointing at start of each page, get pages and free them. */
for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
__free_page(balloon_pfn_to_page(pfns[i]));
balloon_page_free(balloon_pfn_to_page(pfns[i]));
totalram_pages++;
}
}

static void leak_balloon(struct virtio_balloon *vb, size_t num)
{
struct page *page;
struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;

/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));

mutex_lock(&vb->balloon_lock);
for (vb->num_pfns = 0; vb->num_pfns < num;
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
page = list_first_entry(&vb->pages, struct page, lru);
list_del(&page->lru);
page = balloon_page_dequeue(vb_dev_info);
if (!page)
break;
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
@@ -183,6 +193,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
* is true, we *have* to do it in this order
*/
tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
release_pages_by_pfn(vb->pfns, vb->num_pfns);
}

@@ -339,9 +350,84 @@ static int init_vqs(struct virtio_balloon *vb)
return 0;
}

static const struct address_space_operations virtio_balloon_aops;
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
* a compaction thread. (called under page lock)
* @mapping: the page->mapping which will be assigned to the new migrated page.
* @newpage: page that will replace the isolated page after migration finishes.
* @page: the isolated (old) page that is about to be migrated to newpage.
* @mode: compaction mode -- not used for balloon page migration.
*
* After a ballooned page gets isolated by compaction procedures, this is the
* function that performs the page migration on behalf of a compaction thread.
* The page migration for virtio balloon is done in a simple swap fashion which
* follows these two macro steps:
* 1) insert newpage into vb->pages list and update the host about it;
* 2) update the host about the old page removed from vb->pages list;
*
* This function performs the balloon page migration task.
* Called through balloon_mapping->a_ops->migratepage.
*/
int virtballoon_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
struct virtio_balloon *vb;
unsigned long flags;

BUG_ON(!vb_dev_info);

vb = vb_dev_info->balloon_device;

/*
* In order to avoid lock contention while migrating pages concurrently
* with leak_balloon() or fill_balloon(), we just give up the balloon_lock
* this turn, as it is easier to retry the page migration later.
* This also prevents fill_balloon() from getting stuck in a mutex
* recursion if it ends up triggering memory compaction while it is
* attempting to inflate the balloon.
*/
if (!mutex_trylock(&vb->balloon_lock))
return -EAGAIN;

/* balloon's page migration 1st step -- inflate "newpage" */
spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
balloon_page_insert(newpage, mapping, &vb_dev_info->pages);
vb_dev_info->isolated_pages--;
spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb->pfns, newpage);
tell_host(vb, vb->inflate_vq);

/*
* balloon's page migration 2nd step -- deflate "page"
*
* It's safe to delete page->lru here because this page is on an
* isolated migration list, and this step is expected to happen here.
*/
balloon_page_delete(page);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb->pfns, page);
tell_host(vb, vb->deflate_vq);

mutex_unlock(&vb->balloon_lock);

return MIGRATEPAGE_BALLOON_SUCCESS;
}

/* define the balloon_mapping->a_ops callback to allow balloon page migration */
static const struct address_space_operations virtio_balloon_aops = {
.migratepage = virtballoon_migratepage,
};
#endif /* CONFIG_BALLOON_COMPACTION */

static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
struct address_space *vb_mapping;
struct balloon_dev_info *vb_devinfo;
int err;

vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -350,16 +436,37 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out;
}

INIT_LIST_HEAD(&vb->pages);
vb->num_pages = 0;
mutex_init(&vb->balloon_lock);
init_waitqueue_head(&vb->config_change);
init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
vb->need_stats_update = 0;

vb_devinfo = balloon_devinfo_alloc(vb);
if (IS_ERR(vb_devinfo)) {
err = PTR_ERR(vb_devinfo);
goto out_free_vb;
}

vb_mapping = balloon_mapping_alloc(vb_devinfo,
(balloon_compaction_check()) ?
&virtio_balloon_aops : NULL);
if (IS_ERR(vb_mapping)) {
/*
* IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP means
* !CONFIG_BALLOON_COMPACTION; any other error makes us bail out.
*/
err = PTR_ERR(vb_mapping);
if (err != -EOPNOTSUPP)
goto out_free_vb_devinfo;
}

vb->vb_dev_info = vb_devinfo;

err = init_vqs(vb);
if (err)
goto out_free_vb;
goto out_free_vb_mapping;

vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
@@ -371,6 +478,10 @@ static int virtballoon_probe(struct virtio_device *vdev)

out_del_vqs:
vdev->config->del_vqs(vdev);
out_free_vb_mapping:
balloon_mapping_free(vb_mapping);
out_free_vb_devinfo:
balloon_devinfo_free(vb_devinfo);
out_free_vb:
kfree(vb);
out:
@@ -396,6 +507,8 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)

kthread_stop(vb->thread);
remove_common(vb);
balloon_mapping_free(vb->vb_dev_info->mapping);
balloon_devinfo_free(vb->vb_dev_info);
kfree(vb);
}

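For context on the "balloon page units" arithmetic used throughout the hunks above: set_page_pfns(), which fill_balloon(), leak_balloon() and virtballoon_migratepage() all call and which this patch leaves untouched (so it does not appear in the diff), expands one struct page into VIRTIO_BALLOON_PAGES_PER_PAGE consecutive 4K balloon pfn entries. A minimal sketch, assuming the shape the helper has elsewhere in this file:

/*
 * Sketch only -- not part of this diff.  With 4K kernel pages each page
 * contributes a single pfn entry; with 64K kernel pages it contributes 16,
 * since VIRTIO_BALLOON_PFN_SHIFT is 12 (the balloon always talks to the
 * host in 4K units).
 */
static void set_page_pfns(u32 pfns[], struct page *page)
{
	unsigned int i;

	/* Set balloon pfns pointing at this page; the first pfn points at
	 * the start of the page. */
	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
		pfns[i] = page_to_balloon_pfn(page) + i;
}

This is why fill_balloon() and leak_balloon() advance vb->num_pfns in steps of VIRTIO_BALLOON_PAGES_PER_PAGE, and why virtballoon_migratepage() sets vb->num_pfns to exactly that value before each tell_host() call.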

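The virtio_balloon_aops hook registered above is how compaction actually reaches the driver. The real call path lives in the companion mm/balloon_compaction.c patch rather than in this diff; roughly, once a ballooned page has been isolated, the compaction core dispatches through the mapping installed by balloon_mapping_alloc(). An illustrative sketch with simplified checks (the function name here is made up for illustration):

/* Illustrative only -- simplified, hypothetical stand-in for the
 * balloon-compaction core's dispatch step. */
static int example_migrate_isolated_balloon_page(struct page *newpage,
						 struct page *page,
						 enum migrate_mode mode)
{
	struct address_space *mapping = page->mapping;

	/*
	 * Ends up in virtballoon_migratepage(), which inflates "newpage",
	 * deflates "page", and notifies the host on both virtqueues.
	 */
	return mapping->a_ops->migratepage(mapping, newpage, page, mode);
}

On success virtballoon_migratepage() returns MIGRATEPAGE_BALLOON_SUCCESS; if it cannot take balloon_lock it returns -EAGAIN so compaction can simply retry the page later.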