Skip to content

Commit

Permalink
powerpc: implement vmemmap_list_free()
Browse files Browse the repository at this point in the history
This patch implements vmemmap_list_free() for vmemmap_free().

The freed entries will be removed from vmemmap_list, and form a freed list,
with next as the header. The next position in the last allocated page is kept
at the list tail.

When allocating, if there are freed entries left, take one from the freed list;
if no freed entries are left, allocate from the last allocated page as before.

With this change, realmode_pfn_to_page() also needs to be changed to walk
all the entries in the vmemmap_list, as the virt_addr of the entries might not
be stored in order anymore.

This helps to reuse the memory when continuously doing memory hot-plug/remove
operations, but it does not reclaim the pages already allocated, so the memory usage
will only increase, though it won't exceed the value needed for the largest memory
configuration.

Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Acked-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
  • Loading branch information
Li Zhong authored and Benjamin Herrenschmidt committed Aug 5, 2014
1 parent eeb03a6 commit bd8cb03
Showing 1 changed file with 52 additions and 10 deletions.
62 changes: 52 additions & 10 deletions arch/powerpc/mm/init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -226,14 +226,24 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
#endif /* CONFIG_PPC_BOOK3E */

/* Head of the list tracking every vmemmap backing entry ever handed out
 * (non-static: walked by realmode_pfn_to_page() as well). */
struct vmemmap_backing *vmemmap_list;
/* When num_freed > 0, head of the stack of freed entries available for
 * reuse; otherwise the next unused slot in the last allocated page. */
static struct vmemmap_backing *next;
static int num_left;	/* slots remaining in the current backing page */
static int num_freed;	/* entries available on the freed stack */

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
static struct vmemmap_backing *next;
static int num_left;
struct vmemmap_backing *vmem_back;
/* get from freed entries first */
if (num_freed) {
num_freed--;
vmem_back = next;
next = next->list;

return vmem_back;
}

/* allocate a page when required and hand out chunks */
if (!next || !num_left) {
if (!num_left) {
next = vmemmap_alloc_block(PAGE_SIZE, node);
if (unlikely(!next)) {
WARN_ON(1);
Expand Down Expand Up @@ -266,6 +276,38 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmemmap_list = vmem_back;
}

/*
 * Remove the vmemmap_list entry whose virt_addr matches @start and push
 * it on the freed stack (headed by @next) for later reuse by
 * vmemmap_list_alloc().
 *
 * Returns the physical address recorded in the entry, or 0 (with a
 * WARN_ON) if no entry for @start exists.
 */
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *prev = vmemmap_list;
	struct vmemmap_backing *cur = vmemmap_list;

	/* walk the list, keeping the predecessor of cur for unlinking */
	while (cur && cur->virt_addr != start) {
		prev = cur;
		cur = cur->list;
	}

	if (unlikely(!cur)) {
		WARN_ON(1);
		return 0;
	}

	/* unlink: a head match updates vmemmap_list, otherwise splice prev */
	if (cur == vmemmap_list)
		vmemmap_list = cur->list;
	else
		prev->list = cur->list;

	/* push the entry onto the freed stack */
	cur->list = next;
	next = cur;
	num_freed++;

	return cur->phys;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
Expand Down Expand Up @@ -331,16 +373,16 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
if (pg_va < vmem_back->virt_addr)
continue;

/* Check that page struct is not split between real pages */
if ((pg_va + sizeof(struct page)) >
(vmem_back->virt_addr + page_size))
return NULL;

page = (struct page *) (vmem_back->phys + pg_va -
/* After vmemmap_list entry free is possible, need check all */
if ((pg_va + sizeof(struct page)) <=
(vmem_back->virt_addr + page_size)) {
page = (struct page *) (vmem_back->phys + pg_va -
vmem_back->virt_addr);
return page;
return page;
}
}

/* Probably that page struct is split between real pages */
return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
Expand Down

0 comments on commit bd8cb03

Please sign in to comment.