diff --git a/[refs] b/[refs]
index 281c768fe3e1..d354b60dda7f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 088406bcf66d6c7fd8a5c04c00aa410ae9077403
+refs/heads/master: 2b4ac44e7c7e16cf9411b81693ff3e604f332bf1
diff --git a/trunk/include/linux/vmalloc.h b/trunk/include/linux/vmalloc.h
index dc9a29d84abc..924e502905d4 100644
--- a/trunk/include/linux/vmalloc.h
+++ b/trunk/include/linux/vmalloc.h
@@ -23,13 +23,14 @@ struct vm_area_struct;
 #endif
 
 struct vm_struct {
+	/* keep next,addr,size together to speedup lookups */
+	struct vm_struct	*next;
 	void			*addr;
 	unsigned long		size;
 	unsigned long		flags;
 	struct page		**pages;
 	unsigned int		nr_pages;
 	unsigned long		phys_addr;
-	struct vm_struct	*next;
 };
 
 /*
diff --git a/trunk/mm/vmalloc.c b/trunk/mm/vmalloc.c
index 46606c133e82..7dc6aa745166 100644
--- a/trunk/mm/vmalloc.c
+++ b/trunk/mm/vmalloc.c
@@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
+	if (unlikely(!size))
 		return NULL;
-	}
 
 	/*
 	 * We always allocate a guard page.
@@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
@@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
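
The two mm/vmalloc.c hunks are the same bugfix pattern: __vmalloc() can return NULL, so the VM_USERMAP tagging under vmlist_lock must only run when the allocation succeeded, otherwise __find_vm_area(NULL) leads to a NULL dereference. The vmalloc.h hunk is a layout optimization: next, addr and size are the members walked by the vmlist lookups, so grouping them at the front keeps a lookup within one cache line. Below is a minimal user-space sketch of both ideas, not kernel code: struct area, area_list, find_area, alloc_area, alloc_user, area_lock and AREA_USERMAP are hypothetical stand-ins, the pthread rwlock stands in for vmlist_lock, and calloc() stands in for __vmalloc().

/*
 * User-space sketch: hot list-walk fields grouped at the front of the
 * struct, and the tracking entry tagged only when allocation succeeded.
 * Hypothetical names; not the kernel vmalloc API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define AREA_USERMAP	0x1		/* stand-in for VM_USERMAP */

struct area {
	/* keep next, addr, size together: a lookup that walks the list
	 * and compares addr touches only this part of the struct */
	struct area *next;
	void *addr;
	unsigned long size;
	unsigned long flags;
};

static struct area *area_list;
static pthread_rwlock_t area_lock = PTHREAD_RWLOCK_INITIALIZER;

/* walk the list; caller must hold area_lock */
static struct area *find_area(void *addr)
{
	struct area *a;

	for (a = area_list; a; a = a->next)
		if (a->addr == addr)
			return a;
	return NULL;
}

/* allocate a buffer and a tracking entry; NULL on any failure */
static void *alloc_area(unsigned long size)
{
	struct area *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	a->addr = calloc(1, size);
	if (!a->addr) {
		free(a);
		return NULL;
	}
	a->size = size;
	pthread_rwlock_wrlock(&area_lock);
	a->next = area_list;
	area_list = a;
	pthread_rwlock_unlock(&area_lock);
	return a->addr;
}

/* mirrors the fixed vmalloc_user(): tag the area only on success,
 * otherwise the lookup on a NULL address would be bogus */
static void *alloc_user(unsigned long size)
{
	void *ret = alloc_area(size);

	if (ret) {
		pthread_rwlock_wrlock(&area_lock);
		find_area(ret)->flags |= AREA_USERMAP;
		pthread_rwlock_unlock(&area_lock);
	}
	return ret;
}

int main(void)
{
	void *p = alloc_user(4096);

	if (p) {
		pthread_rwlock_rdlock(&area_lock);
		printf("allocated %p, flags %#lx\n", p, find_area(p)->flags);
		pthread_rwlock_unlock(&area_lock);
	}
	return 0;
}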