Merge branch 'for-next/iommu/iova' into for-next/iommu/core
IOVA allocator updates for 5.11, including removal of unused symbols and
functions as well as some optimisations to improve allocation behaviour
in the face of fragmentation.

* for-next/iommu/iova:
  iommu: Stop exporting free_iova_mem()
  iommu: Stop exporting alloc_iova_mem()
  iommu: Delete split_and_remove_iova()
  iommu: avoid taking iova_rbtree_lock twice
  iommu/iova: Free global iova rcache on iova alloc failure
  iommu/iova: Retry from last rb tree node if iova search fails
Will Deacon committed Dec 8, 2020
2 parents 33f974d + 176cfc1 commit 1ab2bf5
Showing 2 changed files with 47 additions and 74 deletions.
drivers/iommu/iova.c: 100 changes (47 additions & 53 deletions)
@@ -25,6 +25,7 @@ static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
+static void free_global_cached_iovas(struct iova_domain *iovad);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -184,8 +185,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
         struct rb_node *curr, *prev;
         struct iova *curr_iova;
         unsigned long flags;
-        unsigned long new_pfn;
+        unsigned long new_pfn, retry_pfn;
         unsigned long align_mask = ~0UL;
+        unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
         if (size_aligned)
                 align_mask <<= fls_long(size - 1);
@@ -198,15 +200,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
         curr = __get_cached_rbnode(iovad, limit_pfn);
         curr_iova = rb_entry(curr, struct iova, node);
+        retry_pfn = curr_iova->pfn_hi + 1;
+
+retry:
         do {
-                limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-                new_pfn = (limit_pfn - size) & align_mask;
+                high_pfn = min(high_pfn, curr_iova->pfn_lo);
+                new_pfn = (high_pfn - size) & align_mask;
                 prev = curr;
                 curr = rb_prev(curr);
                 curr_iova = rb_entry(curr, struct iova, node);
-        } while (curr && new_pfn <= curr_iova->pfn_hi);
-
-        if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+        } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
+
+        if (high_pfn < size || new_pfn < low_pfn) {
+                if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+                        high_pfn = limit_pfn;
+                        low_pfn = retry_pfn;
+                        curr = &iovad->anchor.node;
+                        curr_iova = rb_entry(curr, struct iova, node);
+                        goto retry;
+                }
                 iovad->max32_alloc_size = size;
                 goto iova32_full;
         }
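The hunk above turns __alloc_and_insert_iova_range()'s single top-down walk into a two-pass search. The first pass starts at the cached rbtree node and can only see the space below it, so when it fails the code now restarts from the anchor node and scans just the window between the cached node (retry_pfn) and limit_pfn that the first pass skipped, before declaring the 32-bit space full. A minimal sketch of that control flow, under the assumption that the rbtree walk can be collapsed into a single helper; none of the identifiers below are real iova.c symbols:

/*
 * Hedged sketch, not the kernel code: the two-pass search added above, with
 * the rbtree walk collapsed into search_down().  Every identifier here is an
 * illustrative stand-in rather than an iova.c symbol.
 */
struct node;                                            /* an allocated range */

extern struct node *cached_node(void);                  /* ~ __get_cached_rbnode() */
extern struct node *anchor_node(void);                  /* ~ &iovad->anchor.node */
extern unsigned long node_top(struct node *n);          /* ~ iova->pfn_hi */
extern unsigned long search_down(struct node *from, unsigned long low_pfn,
                                 unsigned long high_pfn, unsigned long size);

static unsigned long two_pass_alloc(unsigned long start_pfn,
                                    unsigned long limit_pfn,
                                    unsigned long size)
{
        unsigned long retry_pfn = node_top(cached_node()) + 1;
        unsigned long pfn;

        /* Pass 1: walk down from the cached node, i.e. the space below it. */
        pfn = search_down(cached_node(), start_pfn, limit_pfn, size);
        if (!pfn && retry_pfn < limit_pfn)
                /* Pass 2: rescan only the window pass 1 skipped, from the
                 * topmost (anchor) node down to just above the cached one. */
                pfn = search_down(anchor_node(), retry_pfn, limit_pfn, size);

        return pfn;                     /* 0: genuinely full or too fragmented */
}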
@@ -231,18 +243,16 @@ static struct kmem_cache *iova_cache;
 static unsigned int iova_cache_users;
 static DEFINE_MUTEX(iova_cache_mutex);
 
-struct iova *alloc_iova_mem(void)
+static struct iova *alloc_iova_mem(void)
 {
         return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
 }
-EXPORT_SYMBOL(alloc_iova_mem);
 
-void free_iova_mem(struct iova *iova)
+static void free_iova_mem(struct iova *iova)
 {
         if (iova->pfn_lo != IOVA_ANCHOR)
                 kmem_cache_free(iova_cache, iova);
 }
-EXPORT_SYMBOL(free_iova_mem);
 
 int iova_cache_get(void)
 {
@@ -390,10 +400,14 @@ EXPORT_SYMBOL_GPL(__free_iova);
 void
 free_iova(struct iova_domain *iovad, unsigned long pfn)
 {
-        struct iova *iova = find_iova(iovad, pfn);
+        unsigned long flags;
+        struct iova *iova;
 
+        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+        iova = private_find_iova(iovad, pfn);
         if (iova)
-                __free_iova(iovad, iova);
+                private_free_iova(iovad, iova);
+        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 
 }
 EXPORT_SYMBOL_GPL(free_iova);
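The free_iova() rewrite above takes iova_rbtree_lock once and does the lookup and the removal inside the same critical section, replacing the old find_iova() plus __free_iova() sequence that acquired the lock twice. A generic illustration of that pattern, using pthread primitives and made-up helpers rather than the kernel's spinlock and rbtree APIs:

/*
 * Hedged illustration, not the kernel code: "lock once, look up and free
 * under the same critical section", with pthreads standing in for the
 * iova.c spinlock and invented helpers for the rbtree routines.
 */
#include <pthread.h>

struct entry;                                           /* opaque tree node */

extern pthread_mutex_t tree_lock;                       /* ~ iovad->iova_rbtree_lock */
extern struct entry *lookup_locked(unsigned long key);  /* caller holds tree_lock */
extern void erase_locked(struct entry *e);              /* caller holds tree_lock */

void free_entry(unsigned long key)
{
        struct entry *e;

        /*
         * One critical section covers both steps: cheaper than the old
         * locked-lookup-then-locked-free sequence, and the entry cannot be
         * freed by someone else between the two.
         */
        pthread_mutex_lock(&tree_lock);
        e = lookup_locked(key);
        if (e)
                erase_locked(e);
        pthread_mutex_unlock(&tree_lock);
}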
@@ -431,6 +445,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
                 flush_rcache = false;
                 for_each_online_cpu(cpu)
                         free_cpu_cached_iovas(cpu, iovad);
+                free_global_cached_iovas(iovad);
                 goto retry;
         }
 
@@ -725,47 +740,6 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 }
 EXPORT_SYMBOL_GPL(copy_reserved_iova);
 
-struct iova *
-split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
-        unsigned long pfn_lo, unsigned long pfn_hi)
-{
-        unsigned long flags;
-        struct iova *prev = NULL, *next = NULL;
-
-        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-        if (iova->pfn_lo < pfn_lo) {
-                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
-                if (prev == NULL)
-                        goto error;
-        }
-        if (iova->pfn_hi > pfn_hi) {
-                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
-                if (next == NULL)
-                        goto error;
-        }
-
-        __cached_rbnode_delete_update(iovad, iova);
-        rb_erase(&iova->node, &iovad->rbroot);
-
-        if (prev) {
-                iova_insert_rbtree(&iovad->rbroot, prev, NULL);
-                iova->pfn_lo = pfn_lo;
-        }
-        if (next) {
-                iova_insert_rbtree(&iovad->rbroot, next, NULL);
-                iova->pfn_hi = pfn_hi;
-        }
-        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-        return iova;
-
-error:
-        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-        if (prev)
-                free_iova_mem(prev);
-        return NULL;
-}
-
 /*
  * Magazine caches for IOVA ranges. For an introduction to magazines,
  * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
@@ -1046,5 +1020,25 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
         }
 }
 
+/*
+ * free all the IOVA ranges of global cache
+ */
+static void free_global_cached_iovas(struct iova_domain *iovad)
+{
+        struct iova_rcache *rcache;
+        unsigned long flags;
+        int i, j;
+
+        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+                rcache = &iovad->rcaches[i];
+                spin_lock_irqsave(&rcache->lock, flags);
+                for (j = 0; j < rcache->depot_size; ++j) {
+                        iova_magazine_free_pfns(rcache->depot[j], iovad);
+                        iova_magazine_free(rcache->depot[j]);
+                }
+                rcache->depot_size = 0;
+                spin_unlock_irqrestore(&rcache->lock, flags);
+        }
+}
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
include/linux/iova.h: 21 changes (0 additions & 21 deletions)
@@ -136,8 +136,6 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
 int iova_cache_get(void);
 void iova_cache_put(void);
 
-struct iova *alloc_iova_mem(void);
-void free_iova_mem(struct iova *iova);
 void free_iova(struct iova_domain *iovad, unsigned long pfn);
 void __free_iova(struct iova_domain *iovad, struct iova *iova);
 struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
@@ -160,8 +158,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
                           iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
-struct iova *split_and_remove_iova(struct iova_domain *iovad,
-        struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 #else
 static inline int iova_cache_get(void)
@@ -173,15 +169,6 @@ static inline void iova_cache_put(void)
 {
 }
 
-static inline struct iova *alloc_iova_mem(void)
-{
-        return NULL;
-}
-
-static inline void free_iova_mem(struct iova *iova)
-{
-}
-
 static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
 {
 }
@@ -258,14 +245,6 @@ static inline void put_iova_domain(struct iova_domain *iovad)
 {
 }
 
-static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
-                                                 struct iova *iova,
-                                                 unsigned long pfn_lo,
-                                                 unsigned long pfn_hi)
-{
-        return NULL;
-}
-
 static inline void free_cpu_cached_iovas(unsigned int cpu,
                                          struct iova_domain *iovad)
 {
