Commit 6aa806b
---
r: 15093
b: refs/heads/master
c: bf5421c
h: refs/heads/master
i:
  15091: dcd9490
v: v3
Andi Kleen authored and Linus Torvalds committed Dec 13, 2005
1 parent 440591a commit 6aa806b
Showing 3 changed files with 61 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5e9ef02ec00c70840661d174dc2f4862db471bb6
+refs/heads/master: bf5421c309bb89e5106452bc840983b1b4754d61
37 changes: 29 additions & 8 deletions trunk/arch/i386/mm/ioremap.c
@@ -223,9 +223,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p;
+	struct vm_struct *p, *o;
 
 	if ((void __force *)addr <= high_memory)
 		return;
@@ -239,22 +245,37 @@ void iounmap(volatile void __iomem *addr)
 		addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	write_lock(&vmlist_lock);
-	p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
-	if (!p) {
-		printk(KERN_WARNING "iounmap: bad address %p\n", addr);
+	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
+		printk("iounmap: bad address %p\n", addr);
 		dump_stack();
-		goto out_unlock;
+		return;
 	}
 
+	/* Reset the direct mapping. Can block */
 	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
 		global_flush_tlb();
 	}
-out_unlock:
-	write_unlock(&vmlist_lock);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
 EXPORT_SYMBOL(iounmap);
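The comment added in this hunk carries the rationale for the whole patch: change_page_attr() can sleep, so it must not be called with vmlist_lock held. The new iounmap() therefore looks the vm area up under a short read lock, uses it unlocked (reuse of the virtual address is prevented by leaving the entry on the global list), and only detaches it at the very end. Below is a minimal userspace sketch of that ordering, with hypothetical names (area_list, area_lock, release_area, reset_mapping, detach_area) and a pthreads rwlock standing in for the kernel's vmlist machinery — an illustration of the locking pattern, not the kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical userspace stand-ins for vmlist/vmlist_lock. */
struct area {
	void *addr;
	struct area *next;
};

static struct area *area_list;
static pthread_rwlock_t area_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stands in for change_page_attr(): work that may sleep, and so
 * must never run while area_lock is held. */
static void reset_mapping(struct area *p)
{
	(void)p;
	usleep(1000);
}

/* Counterpart of remove_vm_area(): detach under the write lock,
 * but only after the blocking work has finished. */
static struct area *detach_area(void *addr)
{
	struct area **pp, *p = NULL;

	pthread_rwlock_wrlock(&area_lock);
	for (pp = &area_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->addr == addr) {
			p = *pp;
			*pp = p->next;
			break;
		}
	}
	pthread_rwlock_unlock(&area_lock);
	return p;
}

/* Mirrors the new iounmap(): look up under a short read lock,
 * drop it, do the blocking work unlocked, detach last. The entry
 * stays on the list meanwhile, so its address cannot be reused,
 * and the caller guarantees a single release per pointer. */
void release_area(void *addr)
{
	struct area *p, *o;

	pthread_rwlock_rdlock(&area_lock);
	for (p = area_list; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	pthread_rwlock_unlock(&area_lock);

	if (!p) {
		fprintf(stderr, "release_area: bad address %p\n", addr);
		return;
	}

	reset_mapping(p);	/* can block; no lock held */

	o = detach_area(addr);	/* finally remove it */
	if (o != p)
		abort();	/* analogue of BUG_ON(p != o || o == NULL) */
	free(p);
}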
37 changes: 31 additions & 6 deletions trunk/arch/x86_64/mm/ioremap.c
@@ -247,22 +247,47 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 	return __ioremap(phys_addr, size, _PAGE_PCD);
 }
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p;
+	struct vm_struct *p, *o;
 
 	if (addr <= high_memory)
 		return;
 	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
 		addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	write_lock(&vmlist_lock);
-	p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
-	if (!p)
+	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
 		printk("iounmap: bad address %p\n", addr);
-	else if (p->flags >> 20)
+		dump_stack();
+		return;
+	}
+
+	/* Reset the direct mapping. Can block */
+	if (p->flags >> 20)
 		ioremap_change_attr(p->phys_addr, p->size, 0);
-	write_unlock(&vmlist_lock);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
