Skip to content

Commit

Permalink
xen: add helpers for caching grant mapping pages
Browse files Browse the repository at this point in the history
Instead of having similar helpers in multiple backend drivers use
common helpers for caching pages allocated via gnttab_alloc_pages().

Make use of those helpers in blkback and scsiback.

Cc: <stable@vger.kernel.org> # 5.9
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovksy@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
  • Loading branch information
Juergen Gross committed Dec 9, 2020
1 parent a68a026 commit ca33479
Show file tree
Hide file tree
Showing 6 changed files with 116 additions and 128 deletions.
89 changes: 17 additions & 72 deletions drivers/block/xen-blkback/blkback.c
Original file line number Diff line number Diff line change
Expand Up @@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
HZ * pgrant_timeout);
}

/*
 * Take one page from the ring's free-page pool, or allocate a fresh
 * grant page when the pool is empty.  Returns 0 on success or the
 * error from gnttab_alloc_pages().
 */
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;
	struct page *pg;

	spin_lock_irqsave(&ring->free_pages_lock, flags);

	if (list_empty(&ring->free_pages)) {
		/* Counter must agree with the empty list before we bail out. */
		BUG_ON(ring->free_pages_num != 0);
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	BUG_ON(ring->free_pages_num == 0);
	pg = list_first_entry(&ring->free_pages, struct page, lru);
	list_del(&pg->lru);
	ring->free_pages_num--;

	spin_unlock_irqrestore(&ring->free_pages_lock, flags);

	*page = pg;
	return 0;
}

/*
 * Return @num pages to the ring's free-page pool for later reuse by
 * get_free_page().
 */
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
				  int num)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	ring->free_pages_num += num;
	for (idx = 0; idx < num; idx++)
		list_add(&page[idx]->lru, &ring->free_pages);
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

/*
 * Trim the ring's free-page pool down to at most @num pages, handing
 * the excess back via gnttab_free_pages().  Pages are collected into a
 * small local batch so the lock is dropped while actually freeing them.
 */
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	struct page *batch[NUM_BATCH_FREE_PAGES];
	unsigned int cnt = 0;
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		/* Counter says pages remain, so the list cannot be empty. */
		BUG_ON(list_empty(&ring->free_pages));
		batch[cnt] = list_first_entry(&ring->free_pages,
					      struct page, lru);
		list_del(&batch[cnt]->lru);
		ring->free_pages_num--;
		if (++cnt == NUM_BATCH_FREE_PAGES) {
			/* Batch full: free outside the lock, then resume. */
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
			gnttab_free_pages(cnt, batch);
			spin_lock_irqsave(&ring->free_pages_lock, flags);
			cnt = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
	if (cnt != 0)
		gnttab_free_pages(cnt, batch);
}

/* Kernel virtual address of @page (direct-mapped: pfn -> kaddr). */
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
Expand Down Expand Up @@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

put_free_pages(ring, pages, segs_to_unmap);
gnttab_page_cache_put(&ring->free_pages, pages,
segs_to_unmap);
segs_to_unmap = 0;
}

Expand Down Expand Up @@ -371,15 +311,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(ring, pages, segs_to_unmap);
gnttab_page_cache_put(&ring->free_pages, pages,
segs_to_unmap);
segs_to_unmap = 0;
}
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(ring, pages, segs_to_unmap);
gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
}
}

Expand Down Expand Up @@ -664,9 +605,10 @@ int xen_blkif_schedule(void *arg)

/* Shrink the free pages pool if it is too large. */
if (time_before(jiffies, blkif->buffer_squeeze_end))
shrink_free_pagepool(ring, 0);
gnttab_page_cache_shrink(&ring->free_pages, 0);
else
shrink_free_pagepool(ring, max_buffer_pages);
gnttab_page_cache_shrink(&ring->free_pages,
max_buffer_pages);

if (log_stats && time_after(jiffies, ring->st_print))
print_stats(ring);
Expand Down Expand Up @@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
ring->persistent_gnt_c = 0;

/* Since we are shutting down remove all pages from the buffer */
shrink_free_pagepool(ring, 0 /* All */);
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
Expand Down Expand Up @@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
but is this the best way to deal with this? */
BUG_ON(result);

put_free_pages(ring, data->pages, data->count);
gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
make_response(ring, pending_req->id,
pending_req->operation, pending_req->status);
free_req(ring, pending_req);
Expand Down Expand Up @@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
if (invcount) {
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
put_free_pages(ring, unmap_pages, invcount);
gnttab_page_cache_put(&ring->free_pages, unmap_pages,
invcount);
}
pages += batch;
num -= batch;
Expand Down Expand Up @@ -850,7 +793,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
pages[i]->page = persistent_gnt->page;
pages[i]->persistent_gnt = persistent_gnt;
} else {
if (get_free_page(ring, &pages[i]->page))
if (gnttab_page_cache_get(&ring->free_pages,
&pages[i]->page))
goto out_of_memory;
addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page;
Expand Down Expand Up @@ -883,7 +827,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) {
pr_debug("invalid buffer -- could not remap it\n");
put_free_pages(ring, &pages[seg_idx]->page, 1);
gnttab_page_cache_put(&ring->free_pages,
&pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= 1;
goto next;
Expand Down Expand Up @@ -944,7 +889,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,

out_of_memory:
pr_alert("%s: out of memory\n", __func__);
put_free_pages(ring, pages_to_gnt, segs_to_map);
gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
for (i = last_map; i < num; i++)
pages[i]->handle = BLKBACK_INVALID_HANDLE;
return -ENOMEM;
Expand Down
4 changes: 1 addition & 3 deletions drivers/block/xen-blkback/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -288,9 +288,7 @@ struct xen_blkif_ring {
struct work_struct persistent_purge_work;

/* Buffer of free pages to map grant refs. */
spinlock_t free_pages_lock;
int free_pages_num;
struct list_head free_pages;
struct gnttab_page_cache free_pages;

struct work_struct free_work;
/* Thread shutdown wait queue. */
Expand Down
6 changes: 2 additions & 4 deletions drivers/block/xen-blkback/xenbus.c
Original file line number Diff line number Diff line change
Expand Up @@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
INIT_LIST_HEAD(&ring->pending_free);
INIT_LIST_HEAD(&ring->persistent_purge_list);
INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
spin_lock_init(&ring->free_pages_lock);
INIT_LIST_HEAD(&ring->free_pages);
gnttab_page_cache_init(&ring->free_pages);

spin_lock_init(&ring->pending_free_lock);
init_waitqueue_head(&ring->pending_free_wq);
Expand Down Expand Up @@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
BUG_ON(!list_empty(&ring->persistent_purge_list));
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
BUG_ON(!list_empty(&ring->free_pages));
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->free_pages.num_pages != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
Expand Down
72 changes: 72 additions & 0 deletions drivers/xen/grant-table.c
Original file line number Diff line number Diff line change
Expand Up @@ -813,6 +813,78 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

/*
 * Initialize a grant page cache to the empty state.  Must be called
 * before any other gnttab_page_cache_*() operation on @cache.
 */
void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	cache->num_pages = 0;
	INIT_LIST_HEAD(&cache->pages);
	spin_lock_init(&cache->lock);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

/*
 * Hand out one page from the cache, falling back to a fresh
 * gnttab_alloc_pages() allocation when the cache is empty.
 * Returns 0 on success or the allocation error.
 */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	struct page *pg;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (list_empty(&cache->pages)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	pg = list_first_entry(&cache->pages, struct page, lru);
	list_del(&pg->lru);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	page[0] = pg;
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

/*
 * Return @num pages to the cache for reuse by a later
 * gnttab_page_cache_get().
 */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int idx;

	spin_lock_irqsave(&cache->lock, flags);

	cache->num_pages += num;
	for (idx = 0; idx < num; idx++)
		list_add(&page[idx]->lru, &cache->pages);

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

/*
 * Shrink the cache down to at most @num pages, freeing the surplus via
 * gnttab_free_pages().  Surplus pages are drained into a small local
 * batch so the cache lock is not held across the actual free.
 */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *batch[10];
	unsigned int cnt = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		batch[cnt] = list_first_entry(&cache->pages, struct page, lru);
		list_del(&batch[cnt]->lru);
		cache->num_pages--;
		if (++cnt == ARRAY_SIZE(batch)) {
			/* Batch full: release the lock while freeing. */
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(cnt, batch);
			spin_lock_irqsave(&cache->lock, flags);
			cnt = 0;
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (cnt != 0)
		gnttab_free_pages(cnt, batch);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
int i;
Expand Down
Loading

0 comments on commit ca33479

Please sign in to comment.