Commit bb8c6e3

---
r: 186492
b: refs/heads/master
c: a1042aa
h: refs/heads/master
v: v3
Paul Mundt committed Mar 3, 2010
1 parent 37dd864 commit bb8c6e3
Showing 2 changed files with 97 additions and 45 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6eb3c735d29e799810ce82118f9260d0044327b7
+refs/heads/master: a1042aa248e4ea7f39d5ce13f080cbf3b6c42618
140 changes: 96 additions & 44 deletions trunk/arch/sh/mm/pmb.c
@@ -128,13 +128,67 @@ static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
         return pmb_flags;
 }
 
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
         return (b->vpn == (a->vpn + a->size)) &&
                (b->ppn == (a->ppn + a->size)) &&
                (b->flags == a->flags);
 }
 
+static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
+                               unsigned long size)
+{
+        int i;
+
+        read_lock(&pmb_rwlock);
+
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+                struct pmb_entry *pmbe, *iter;
+                unsigned long span;
+
+                if (!test_bit(i, pmb_map))
+                        continue;
+
+                pmbe = &pmb_entry_list[i];
+
+                /*
+                 * See if VPN and PPN are bounded by an existing mapping.
+                 */
+                if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
+                        continue;
+                if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
+                        continue;
+
+                /*
+                 * Now see if we're in range of a simple mapping.
+                 */
+                if (size <= pmbe->size) {
+                        read_unlock(&pmb_rwlock);
+                        return true;
+                }
+
+                span = pmbe->size;
+
+                /*
+                 * Finally for sizes that involve compound mappings, walk
+                 * the chain.
+                 */
+                for (iter = pmbe->link; iter; iter = iter->link)
+                        span += iter->size;
+
+                /*
+                 * Nothing else to do if the range requirements are met.
+                 */
+                if (size <= span) {
+                        read_unlock(&pmb_rwlock);
+                        return true;
+                }
+        }
+
+        read_unlock(&pmb_rwlock);
+        return false;
+}
+
 static bool pmb_size_valid(unsigned long size)
 {
         int i;
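The new pmb_mapping_exists() helper above is what lets pmb_bolt_mapping() bail out early on duplicate requests. As a reading aid, here is a minimal user-space sketch of its coverage check; the struct entry type, the covers() helper, and the addresses in main() are hypothetical stand-ins for the kernel's struct pmb_entry and PMB state, illustrating only the simple-versus-compound span logic:

/*
 * Illustrative only -- not kernel code. The hypothetical "entry" mirrors
 * the vpn/ppn/size/link fields of struct pmb_entry.
 */
#include <stdbool.h>
#include <stdio.h>

struct entry {
        unsigned long vpn, ppn, size;
        struct entry *link;             /* next piece of a compound mapping */
};

static bool covers(struct entry *e, unsigned long vaddr,
                   unsigned long phys, unsigned long size)
{
        struct entry *iter;
        unsigned long span;

        /* The request must start inside this entry, virtually... */
        if (vaddr < e->vpn || vaddr >= e->vpn + e->size)
                return false;
        /* ...and physically. */
        if (phys < e->ppn || phys >= e->ppn + e->size)
                return false;

        /* Simple mapping: a single entry suffices. */
        if (size <= e->size)
                return true;

        /* Compound mapping: walk the chain and accumulate the span. */
        span = e->size;
        for (iter = e->link; iter; iter = iter->link)
                span += iter->size;

        return size <= span;
}

int main(void)
{
        /* Two linked 16MB entries forming a 32MB compound mapping. */
        struct entry tail = { 0xa1000000UL, 0x09000000UL, 0x1000000UL, NULL };
        struct entry head = { 0xa0000000UL, 0x08000000UL, 0x1000000UL, &tail };

        printf("%d\n", covers(&head, 0xa0000000UL, 0x08000000UL, 0x2000000UL)); /* 1 */
        printf("%d\n", covers(&head, 0xa0000000UL, 0x08000000UL, 0x4000000UL)); /* 0 */
        return 0;
}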
@@ -272,64 +326,62 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                      unsigned long size, pgprot_t prot)
 {
         struct pmb_entry *pmbp, *pmbe;
-        unsigned long pmb_flags;
+        unsigned long flags, pmb_flags;
         int i, mapped;
 
         if (!pmb_addr_valid(vaddr, size))
                 return -EFAULT;
+        if (pmb_mapping_exists(vaddr, phys, size))
+                return 0;
 
         pmb_flags = pgprot_to_pmb_flags(prot);
         pmbp = NULL;
 
-again:
-        for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-                unsigned long flags;
-
-                if (size < pmb_sizes[i].size)
-                        continue;
-
-                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
-                                 PMB_NO_ENTRY);
-                if (IS_ERR(pmbe)) {
-                        pmb_unmap_entry(pmbp, mapped);
-                        return PTR_ERR(pmbe);
-                }
-
-                spin_lock_irqsave(&pmbe->lock, flags);
-
-                pmbe->size = pmb_sizes[i].size;
-
-                __set_pmb_entry(pmbe);
-
-                phys += pmbe->size;
-                vaddr += pmbe->size;
-                size -= pmbe->size;
-
-                /*
-                 * Link adjacent entries that span multiple PMB entries
-                 * for easier tear-down.
-                 */
-                if (likely(pmbp)) {
-                        spin_lock(&pmbp->lock);
-                        pmbp->link = pmbe;
-                        spin_unlock(&pmbp->lock);
-                }
-
-                pmbp = pmbe;
-
-                /*
-                 * Instead of trying smaller sizes on every iteration
-                 * (even if we succeed in allocating space), try using
-                 * pmb_sizes[i].size again.
-                 */
-                i--;
-                mapped++;
-
-                spin_unlock_irqrestore(&pmbe->lock, flags);
-        }
-
-        if (size >= SZ_16M)
-                goto again;
+        do {
+                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+                        if (size < pmb_sizes[i].size)
+                                continue;
+
+                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
+                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
+                        if (IS_ERR(pmbe)) {
+                                pmb_unmap_entry(pmbp, mapped);
+                                return PTR_ERR(pmbe);
+                        }
+
+                        spin_lock_irqsave(&pmbe->lock, flags);
+
+                        pmbe->size = pmb_sizes[i].size;
+
+                        __set_pmb_entry(pmbe);
+
+                        phys += pmbe->size;
+                        vaddr += pmbe->size;
+                        size -= pmbe->size;
+
+                        /*
+                         * Link adjacent entries that span multiple PMB
+                         * entries for easier tear-down.
+                         */
+                        if (likely(pmbp)) {
+                                spin_lock(&pmbp->lock);
+                                pmbp->link = pmbe;
+                                spin_unlock(&pmbp->lock);
+                        }
+
+                        pmbp = pmbe;
+
+                        /*
+                         * Instead of trying smaller sizes on every
+                         * iteration (even if we succeed in allocating
+                         * space), try using pmb_sizes[i].size again.
+                         */
+                        i--;
+                        mapped++;
+
+                        spin_unlock_irqrestore(&pmbe->lock, flags);
+                }
+        } while (size >= SZ_16M);
 
         return 0;
 }
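The restructured loop drops the again: label for a do/while, and the i-- keeps retrying the current (largest fitting) PMB size before falling through to smaller ones, matching what the old goto path did. Below is a standalone sketch of just that selection policy, assuming the four section sizes in the driver's pmb_sizes table (16MB/64MB/128MB/512MB) and using printf in place of pmb_alloc(); the request size in main() is arbitrary:

/* Illustrative only -- not kernel code. */
#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

#define SZ_16M  0x01000000UL
#define SZ_64M  0x04000000UL
#define SZ_128M 0x08000000UL
#define SZ_512M 0x20000000UL

static const unsigned long pmb_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

int main(void)
{
        unsigned long size = SZ_512M + SZ_128M + 3 * SZ_16M;    /* 688MB */
        int i, mapped;

        do {
                for (i = mapped = 0; i < (int)ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i])
                                continue;

                        /* "Map" one entry of this size. */
                        printf("entry: %#lx\n", pmb_sizes[i]);
                        size -= pmb_sizes[i];

                        /* Retry the same size before falling through. */
                        i--;
                        mapped++;
                }
        } while (size >= SZ_16M);

        printf("entries used: %d, left over: %#lx\n", mapped, size);
        return 0;
}

Running this maps one 512MB entry, one 128MB entry, and three 16MB entries: the greedy largest-first policy keeps the entry count low, which matters with only a handful of PMB slots.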
@@ -374,7 +426,7 @@ void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
         orig_addr = vaddr = (unsigned long)area->addr;
 
         ret = pmb_bolt_mapping(vaddr, phys, size, prot);
-        if (ret != 0)
+        if (unlikely(ret != 0))
                 return ERR_PTR(ret);
 
         return (void __iomem *)(offset + (char *)orig_addr);
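The only functional change in this last hunk is wrapping the error check in unlikely(), a hint that the branch is rarely taken. For reference, the kernel's branch-prediction macros boil down to __builtin_expect; a minimal rendering, essentially as defined in include/linux/compiler.h:

/* !!(x) normalizes any nonzero value to 1 before the hint. */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)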
