diff --git a/[refs] b/[refs]
index a10499362839..c695608db2ea 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0536bdf33faff4d940ac094c77998cfac368cfff
+refs/heads/master: 6ee723a6570a897208b76ab3e9a495e9106b2f8c
diff --git a/trunk/arch/arm/mm/ioremap.c b/trunk/arch/arm/mm/ioremap.c
index bdb248c4f55c..bc7d9bd766d1 100644
--- a/trunk/arch/arm/mm/ioremap.c
+++ b/trunk/arch/arm/mm/ioremap.c
@@ -314,26 +314,24 @@ void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
+	 * such a beast.
 	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
+			if (vm->flags & VM_ARM_SECTION_MAPPING) {
+				unmap_area_sections((unsigned long)vm->addr,
+						    vm->size);
 			}
 			break;
 		}
 	}
-	write_unlock(&vmlist_lock);
+	read_unlock(&vmlist_lock);
 #endif
 
 	vunmap(addr);
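
The patch narrows the lock from write_lock() to read_lock() because the loop no longer needs exclusive access: it only reads each entry's flags, addr, and size to tear down the section mappings, and the actual unlinking of the vm_struct from vmlist is left to vunmap(), which takes the write side itself. A minimal userspace sketch of the same reader/writer split, using POSIX rwlocks as a stand-in for the kernel's rwlock_t; every name here (struct area, area_list, list_lock, teardown_area, remove_area, insert_area) is hypothetical and for illustration only:

/* Userspace analogue of the locking pattern above. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct area {
	void *addr;
	unsigned long size;
	struct area *next;
};

static struct area *area_list;
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Write side: linking a new entry mutates the list, so it needs
 * exclusive access — this is the role vunmap()/the VM core plays. */
static void insert_area(void *addr, unsigned long size)
{
	struct area *a = malloc(sizeof(*a));

	a->addr = addr;
	a->size = size;
	pthread_rwlock_wrlock(&list_lock);
	a->next = area_list;
	area_list = a;
	pthread_rwlock_unlock(&list_lock);
}

/* Read side: mirrors the patched __iounmap() loop — walk the list,
 * act on the matching entry's fields, never modify any links, so a
 * shared (read) lock is sufficient. */
static void teardown_area(void *addr)
{
	struct area *a;

	pthread_rwlock_rdlock(&list_lock);
	for (a = area_list; a; a = a->next) {
		if (a->addr == addr) {
			printf("tearing down %lu bytes at %p\n",
			       a->size, a->addr);
			break;
		}
	}
	pthread_rwlock_unlock(&list_lock);
}

/* Write side: the unlink uses the double-pointer walk the old code
 * kept around (struct area **p), and takes the lock exclusively. */
static void remove_area(void *addr)
{
	struct area **p, *tmp;

	pthread_rwlock_wrlock(&list_lock);
	for (p = &area_list; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			free(tmp);
			break;
		}
	}
	pthread_rwlock_unlock(&list_lock);
}

int main(void)
{
	char buf[64];

	insert_area(buf, sizeof(buf));
	teardown_area(buf);	/* read lock: lookup only */
	remove_area(buf);	/* write lock: unlink */
	return 0;
}

Note the split this makes explicit: once the removed "clear all the mappings before the area can be reclaimed" requirement no longer applies, the walker is a pure reader, so concurrent lookups may proceed in parallel and only the unlink path excludes them. In the kernel case this is safe because the caller of __iounmap() still owns the mapping, so the entry cannot be freed out from under the read-locked walk.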