Skip to content

Commit

Permalink
intel-iommu: Remove superfluous iova_alloc_lock from IOVA code
Browse files Browse the repository at this point in the history
We only ever take this lock immediately before acquiring iova_rbtree_lock,
and release it immediately after releasing iova_rbtree_lock. It therefore
provides no additional exclusion; remove it and rely on iova_rbtree_lock
alone (held with interrupts disabled, as before).

[v2: Remove the lockdep bits this time too]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
  • Loading branch information
David Woodhouse authored and David Woodhouse committed Jul 15, 2009
1 parent 147202a commit 3d39cec
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 16 deletions.
3 changes: 0 additions & 3 deletions drivers/pci/intel-iommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1309,7 +1309,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
Expand All @@ -1320,8 +1319,6 @@ static void dmar_init_reserved_ranges(void)

init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
&reserved_alloc_key);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);

Expand Down
16 changes: 4 additions & 12 deletions drivers/pci/iova.c
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
spin_lock_init(&iovad->iova_alloc_lock);
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
Expand Down Expand Up @@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn,
bool size_aligned)
{
unsigned long flags;
struct iova *new_iova;
int ret;

Expand All @@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
if (size_aligned)
size = __roundup_pow_of_two(size);

spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
new_iova, size_aligned);

spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
if (ret) {
free_iova_mem(new_iova);
return NULL;
Expand Down Expand Up @@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
struct iova *iova;
unsigned int overlap = 0;

spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
spin_lock(&iovad->iova_rbtree_lock);
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
iova = container_of(node, struct iova, node);
Expand All @@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

spin_unlock(&iovad->iova_rbtree_lock);
spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return iova;
}

Expand All @@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
unsigned long flags;
struct rb_node *node;

spin_lock_irqsave(&from->iova_alloc_lock, flags);
spin_lock(&from->iova_rbtree_lock);
spin_lock_irqsave(&from->iova_rbtree_lock, flags);
for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
struct iova *iova = container_of(node, struct iova, node);
struct iova *new_iova;
Expand All @@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
iova->pfn_lo, iova->pfn_lo);
}
spin_unlock(&from->iova_rbtree_lock);
spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
1 change: 0 additions & 1 deletion include/linux/iova.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ struct iova {

/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
Expand Down

0 comments on commit 3d39cec

Please sign in to comment.