vhost-vdpa: fix page pinning leakage in error path (rework)
Pinned pages are not properly accounted, particularly when a
mapping error occurs on IOTLB update. Clean up dangling pinned
pages in the error path.

The memory usage for bookkeeping pinned pages is reverted to
what it was before: only a single free page is needed. This
helps reduce the host memory demand for a VM with a large
amount of memory, or when the host is running short of free
memory.

Fixes: 4c8cf31 ("vhost: introduce vDPA-based backend")
Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
Link: https://lore.kernel.org/r/1604618793-4681-1-git-send-email-si-wei.liu@oracle.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
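
[Editor's illustration] As a rough sketch of the accounting order the patch adopts (not the author's code): the pinned-page counter is checked against the memlock limit up front, but charged only after the map succeeds, so a failed update leaves nothing to unwind. This is a minimal user-space C analogue; all names here (pinned_vm, do_map, try_map_and_account) are illustrative stand-ins, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long pinned_vm;            /* pages currently pinned */

/* Hypothetical mapping step: 0 on success, nonzero on failure. */
static int do_map(unsigned long npages)
{
        return npages == 0;              /* "succeeds" for any npages > 0 */
}

static int try_map_and_account(unsigned long npages, unsigned long lock_limit)
{
        /* Check the limit up front without charging the counter. */
        if (npages + (unsigned long)atomic_load(&pinned_vm) > lock_limit)
                return -1;               /* would exceed the memlock limit */

        if (do_map(npages))
                return -1;               /* map failed: counter untouched */

        /* Charge the counter only after the map succeeded. */
        atomic_fetch_add(&pinned_vm, (long)npages);
        return 0;
}

int main(void)
{
        if (try_map_and_account(16, 1024) == 0)
                printf("pinned_vm is now %ld pages\n",
                       atomic_load(&pinned_vm));
        return 0;
}

The old scheme charged the counter before mapping and subtracted it again on failure; charging only on success removes that unwind step entirely.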
Si-Wei Liu authored and Michael S. Tsirkin committed Nov 25, 2020
1 parent 8009b0f commit ad89653
Showing 1 changed file with 62 additions and 18 deletions.
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -577,6 +577,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
 
         if (r)
                 vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+        else
+                atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
 
         return r;
 }
@@ -608,8 +610,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
         unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
         unsigned int gup_flags = FOLL_LONGTERM;
         unsigned long npages, cur_base, map_pfn, last_pfn = 0;
-        unsigned long locked, lock_limit, pinned, i;
+        unsigned long lock_limit, sz2pin, nchunks, i;
         u64 iova = msg->iova;
+        long pinned;
         int ret = 0;
 
         if (msg->iova < v->range.first ||
@@ -620,6 +623,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                                     msg->iova + msg->size - 1))
                 return -EEXIST;
 
+        /* Limit the use of memory for bookkeeping */
         page_list = (struct page **) __get_free_page(GFP_KERNEL);
         if (!page_list)
                 return -ENOMEM;
@@ -628,63 +632,103 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                 gup_flags |= FOLL_WRITE;
 
         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
-        if (!npages)
-                return -EINVAL;
+        if (!npages) {
+                ret = -EINVAL;
+                goto free;
+        }
 
         mmap_read_lock(dev->mm);
 
-        locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-        if (locked > lock_limit) {
+        if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
                 ret = -ENOMEM;
-                goto out;
+                goto unlock;
         }
 
         cur_base = msg->uaddr & PAGE_MASK;
         iova &= PAGE_MASK;
+        nchunks = 0;
 
         while (npages) {
-                pinned = min_t(unsigned long, npages, list_size);
-                ret = pin_user_pages(cur_base, pinned,
-                                     gup_flags, page_list, NULL);
-                if (ret != pinned)
+                sz2pin = min_t(unsigned long, npages, list_size);
+                pinned = pin_user_pages(cur_base, sz2pin,
+                                        gup_flags, page_list, NULL);
+                if (sz2pin != pinned) {
+                        if (pinned < 0) {
+                                ret = pinned;
+                        } else {
+                                unpin_user_pages(page_list, pinned);
+                                ret = -ENOMEM;
+                        }
                         goto out;
+                }
+                nchunks++;
 
                 if (!last_pfn)
                         map_pfn = page_to_pfn(page_list[0]);
 
-                for (i = 0; i < ret; i++) {
+                for (i = 0; i < pinned; i++) {
                         unsigned long this_pfn = page_to_pfn(page_list[i]);
                         u64 csize;
 
                         if (last_pfn && (this_pfn != last_pfn + 1)) {
                                 /* Pin a contiguous chunk of memory */
                                 csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
-                                if (vhost_vdpa_map(v, iova, csize,
-                                                   map_pfn << PAGE_SHIFT,
-                                                   msg->perm))
+                                ret = vhost_vdpa_map(v, iova, csize,
+                                                     map_pfn << PAGE_SHIFT,
+                                                     msg->perm);
+                                if (ret) {
+                                        /*
+                                         * Unpin the pages that are left unmapped
+                                         * from this point on in the current
+                                         * page_list. The remaining outstanding
+                                         * ones which may stride across several
+                                         * chunks will be covered in the common
+                                         * error path subsequently.
+                                         */
+                                        unpin_user_pages(&page_list[i],
+                                                         pinned - i);
                                         goto out;
+                                }
 
                                 map_pfn = this_pfn;
                                 iova += csize;
+                                nchunks = 0;
                         }
 
                         last_pfn = this_pfn;
                 }
 
-                cur_base += ret << PAGE_SHIFT;
-                npages -= ret;
+                cur_base += pinned << PAGE_SHIFT;
+                npages -= pinned;
         }
 
         /* Pin the rest chunk */
         ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
                              map_pfn << PAGE_SHIFT, msg->perm);
 out:
         if (ret) {
+                if (nchunks) {
+                        unsigned long pfn;
+
+                        /*
+                         * Unpin the outstanding pages which are yet to be
+                         * mapped but haven't due to vdpa_map() or
+                         * pin_user_pages() failure.
+                         *
+                         * Mapped pages are accounted in vdpa_map(), hence
+                         * the corresponding unpinning will be handled by
+                         * vdpa_unmap().
+                         */
+                        WARN_ON(!last_pfn);
+                        for (pfn = map_pfn; pfn <= last_pfn; pfn++)
+                                unpin_user_page(pfn_to_page(pfn));
+                }
                 vhost_vdpa_unmap(v, msg->iova, msg->size);
-                atomic64_sub(npages, &dev->mm->pinned_vm);
         }
+unlock:
         mmap_read_unlock(dev->mm);
+free:
         free_page((unsigned long)page_list);
         return ret;
 }
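
[Editor's illustration] As a companion to the diff above, here is a compact user-space C sketch of the single-page bookkeeping loop the patch restores, with the error-path split it relies on: pages pinned but not yet mapped are unwound on the spot, while already-mapped chunks are torn down by the caller's unmap path. Every helper below (pin_chunk, map_chunk, unpin, update_region) is a hypothetical stand-in, and the sketch omits the real code's coalescing of physically contiguous pages into larger chunks before mapping.

#include <stdlib.h>

#define PAGE_SIZE 4096UL
/* One free page of pointer slots bounds the per-call bookkeeping. */
#define LIST_SIZE (PAGE_SIZE / sizeof(void *))

/* Hypothetical stand-ins for pin_user_pages()/vhost_vdpa_map()/unpin. */
static long pin_chunk(unsigned long base, unsigned long n, void **list)
{
        (void)base; (void)list;
        return (long)n;                  /* pretend every page was pinned */
}

static int map_chunk(unsigned long iova, unsigned long npages)
{
        (void)iova; (void)npages;
        return 0;                        /* pretend the mapping succeeded */
}

static void unpin(void **list, unsigned long n)
{
        (void)list; (void)n;             /* release the given pins */
}

static int update_region(unsigned long base, unsigned long iova,
                         unsigned long npages)
{
        void **list = malloc(PAGE_SIZE); /* the single bookkeeping page */
        int ret = 0;

        if (!list)
                return -1;

        while (npages) {
                unsigned long sz2pin = npages < LIST_SIZE ? npages : LIST_SIZE;
                long pinned = pin_chunk(base, sz2pin, list);

                if (pinned < (long)sz2pin) {
                        /* Partial pin: nothing in this chunk is mapped yet. */
                        if (pinned > 0)
                                unpin(list, (unsigned long)pinned);
                        ret = -1;
                        break;
                }
                if (map_chunk(iova, (unsigned long)pinned)) {
                        /*
                         * Pinned-but-unmapped pages are released here;
                         * chunks already mapped are expected to be torn
                         * down by the caller's unmap path.
                         */
                        unpin(list, (unsigned long)pinned);
                        ret = -1;
                        break;
                }
                base += (unsigned long)pinned * PAGE_SIZE;
                iova += (unsigned long)pinned * PAGE_SIZE;
                npages -= (unsigned long)pinned;
        }

        free(list);
        return ret;
}

int main(void)
{
        return update_region(0x1000, 0x4000, 3 * LIST_SIZE + 5);
}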
