powerpc/vmemmap: Add altmap support
Adds support to powerpc for the altmap feature of ZONE_DEVICE memory. An
altmap is a driver-provided region used as the backing storage for the
struct pages of ZONE_DEVICE memory. In situations where a large amount of
ZONE_DEVICE memory is being added to the system, the altmap reduces
pressure on main system memory by allowing the mm/ metadata to be stored
on the device itself rather than in main memory.

Reviewed-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
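
For a sense of scale: with struct page at roughly 64 bytes and a 64K base page size, 1TB of ZONE_DEVICE memory needs about 16M struct pages, i.e. roughly 1GB of memmap that would otherwise live in RAM. The snippet below is a minimal userspace model of the altmap bookkeeping, assuming the field layout of struct vmem_altmap in include/linux/memremap.h at the time of this commit; the helper name and the example numbers are illustrative only, not kernel code.

#include <stdio.h>

/* Userspace model; fields mirror struct vmem_altmap (include/linux/memremap.h). */
struct altmap_model {
	unsigned long base_pfn;   /* first pfn of the device memory range */
	unsigned long reserve;    /* pfns at the start kept aside for the driver */
	unsigned long free;       /* pfns available to back the struct page array */
	unsigned long align;      /* pfns lost to alignment padding */
	unsigned long alloc;      /* pfns handed out so far */
};

/* Analogue of vmem_altmap_offset(): number of pfns from base_pfn before the
 * first pfn whose struct page can be used normally. */
static unsigned long altmap_offset(const struct altmap_model *a)
{
	return a->reserve + a->free;
}

int main(void)
{
	/* Hypothetical 1TB device in 64K pages: 16M pfns total, of which
	 * 16384 pfns (1GB) are set aside to hold the memmap itself. */
	struct altmap_model a = { .base_pfn = 0x1000000, .reserve = 0, .free = 16384 };

	printf("pfns backing the memmap: %lu\n", a.free);
	printf("first normally usable pfn: 0x%lx\n", a.base_pfn + altmap_offset(&a));
	return 0;
}

In practice the reserve window is what lets a driver keep its own metadata (an info block, for example) at the start of the device without the kernel's memmap landing on top of it.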
Oliver O'Halloran authored and Michael Ellerman committed Jul 2, 2017
1 parent d7d9b61 commit b584c25
Showing 2 changed files with 26 additions and 5 deletions.
15 changes: 13 additions & 2 deletions arch/powerpc/mm/init_64.c
@@ -44,6 +44,7 @@
 #include <linux/slab.h>
 #include <linux/of_fdt.h>
 #include <linux/libfdt.h>
+#include <linux/memremap.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -192,13 +193,17 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
 	for (; start < end; start += page_size) {
+		struct vmem_altmap *altmap;
 		void *p;
 		int rc;
 
 		if (vmemmap_populated(start, page_size))
 			continue;
 
-		p = vmemmap_alloc_block(page_size, node);
+		/* altmap lookups only work at section boundaries */
+		altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));
+
+		p = __vmemmap_alloc_block_buf(page_size, node, altmap);
 		if (!p)
 			return -ENOMEM;
 
@@ -263,6 +268,8 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 
 	for (; start < end; start += page_size) {
 		unsigned long nr_pages, addr;
+		struct vmem_altmap *altmap;
+		struct page *section_base;
 		struct page *page;
 
 		/*
@@ -278,9 +285,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 			continue;
 
 		page = pfn_to_page(addr >> PAGE_SHIFT);
+		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
 
-		if (PageReserved(page)) {
+		altmap = to_vmem_altmap((unsigned long) section_base);
+		if (altmap) {
+			vmem_altmap_free(altmap, nr_pages);
+		} else if (PageReserved(page)) {
 			/* allocated from bootmem */
 			if (page_size < PAGE_SIZE) {
 				/*
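
In the init_64.c hunks above, vmemmap_populate() looks the altmap up at each section-aligned boundary and, when one exists, __vmemmap_alloc_block_buf() draws the vmemmap block from the device instead of from node memory; vmemmap_free() mirrors this by handing the block back via vmem_altmap_free() rather than treating it as bootmem- or buddy-allocated. Below is a rough model of that carve-and-return bookkeeping; the model_ helpers are hypothetical stand-ins for the allocator in mm/sparse-vmemmap.c, not its actual code.

#include <stdio.h>

struct altmap_model {
	unsigned long base_pfn, reserve, free, align, alloc;
};

/* Carve nr_pfns out of the altmap's free area; returns the starting pfn,
 * or 0 on exhaustion. Real allocations also worry about alignment. */
static unsigned long model_altmap_alloc(struct altmap_model *a, unsigned long nr_pfns)
{
	if (a->alloc + a->align + nr_pfns > a->free)
		return 0;
	a->alloc += nr_pfns;
	return a->base_pfn + a->reserve + a->align + a->alloc - nr_pfns;
}

/* Give pfns back; like vmem_altmap_free(), this only drops the count. */
static void model_altmap_free(struct altmap_model *a, unsigned long nr_pfns)
{
	a->alloc -= nr_pfns;
}

int main(void)
{
	struct altmap_model a = { .base_pfn = 0x1000000, .reserve = 2, .free = 4096 };
	unsigned long pfn;

	pfn = model_altmap_alloc(&a, 256);	/* e.g. one 16MB vmemmap block in 64K pages */
	printf("block at pfn 0x%lx, %lu pfns still free\n",
	       pfn, a.free - a.align - a.alloc);

	model_altmap_free(&a, 256);		/* teardown path, as in vmemmap_free() */
	printf("after free: %lu pfns allocated\n", a.alloc);
	return 0;
}
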
16 changes: 13 additions & 3 deletions arch/powerpc/mm/mem.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/memremap.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -159,11 +160,20 @@ int arch_remove_memory(u64 start, u64 size)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
+	struct vmem_altmap *altmap;
+	struct page *page;
 	int ret;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	ret = __remove_pages(zone, start_pfn, nr_pages);
+	/*
+	 * If we have an altmap then we need to skip over any reserved PFNs
+	 * when querying the zone.
+	 */
+	page = pfn_to_page(start_pfn);
+	altmap = to_vmem_altmap((unsigned long) page);
+	if (altmap)
+		page += vmem_altmap_offset(altmap);
+
+	ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
 	if (ret)
 		return ret;
 
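
The mem.c hunk relies on the same offset arithmetic: when an altmap is present, the struct pages for the first reserve + free pfns of the range are the ones the commit's comment calls reserved, so the zone has to be read from the first pfn past that window rather than from start_pfn itself. A tiny worked example with assumed numbers, not taken from the commit:

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned long start_pfn = 0x1000000;     /* first pfn of the removed range */
	unsigned long reserve = 0, free = 16384; /* altmap bookkeeping as modelled above */

	/* Equivalent of: page = pfn_to_page(start_pfn); page += vmem_altmap_offset(altmap); */
	printf("query page_zone() at pfn 0x%lx, not 0x%lx\n",
	       start_pfn + reserve + free, start_pfn);
	return 0;
}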
