Skip to content

Commit

Permalink
xen: Delay remapping memory of pv-domain
Browse files Browse the repository at this point in the history
Early in the boot process the memory layout of a pv-domain is changed
to match the E820 map (either the host one for Dom0 or the Xen one)
regarding placement of RAM and PCI holes. This requires removing memory
pages initially located at positions not suitable for RAM and adding
them later at higher addresses where no restrictions apply.

To be able to operate on the hypervisor-supported p2m list until a
virtually mapped linear p2m list can be constructed, remapping must
be delayed until virtual memory management is initialized, as the
initial p2m list can't be extended without limit at physical memory
initialization time due to its fixed structure.

A further advantage is the reduction in complexity and code volume as
we don't have to be careful regarding memory restrictions during p2m
updates.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
  • Loading branch information
Juergen Gross authored and David Vrabel committed Dec 4, 2014
1 parent 7108c9c commit 1f3ac86
Show file tree
Hide file tree
Showing 5 changed files with 172 additions and 297 deletions.
1 change: 0 additions & 1 deletion arch/x86/include/asm/xen/page.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,6 @@ extern unsigned long machine_to_phys_nr;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
Expand Down
4 changes: 4 additions & 0 deletions arch/x86/xen/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1225,6 +1225,10 @@ static void __init xen_pagetable_init(void)
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();

/* Remap memory freed due to conflicts with E820 map */
if (!xen_feature(XENFEAT_auto_translated_physmap))
xen_remap_memory();

xen_setup_shared_info();
xen_post_allocator_init();
}
Expand Down
94 changes: 0 additions & 94 deletions arch/x86/xen/p2m.c
Original file line number Diff line number Diff line change
Expand Up @@ -662,100 +662,6 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
return true;
}

/*
 * Skim over the P2M tree looking at pages that are either filled with
 * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
 * replace the P2M leaf with a p2m_missing or p2m_identity.
 * Stick the old page in the new P2M tree location.
 *
 * Returns true if a reusable leaf page was found and installed at
 * set_pfn's slot, false otherwise.
 */
static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn)
{
	unsigned topidx;
	unsigned mididx;
	unsigned ident_pfns;
	unsigned inv_pfns;
	unsigned long *p2m;
	unsigned idx;
	unsigned long pfn;

	/* We only look when this entails a P2M middle layer */
	if (p2m_index(set_pfn))
		return false;

	/* Scan the whole tree one leaf page (P2M_PER_PAGE pfns) at a time. */
	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);

		/* No top-level entry: nothing to reclaim here. */
		if (!p2m_top[topidx])
			continue;

		/* Shared "missing" middle page: not an allocated leaf. */
		if (p2m_top[topidx] == p2m_mid_missing)
			continue;

		mididx = p2m_mid_index(pfn);
		p2m = p2m_top[topidx][mididx];
		if (!p2m)
			continue;

		/* Shared placeholder leaves must not be reclaimed. */
		if ((p2m == p2m_missing) || (p2m == p2m_identity))
			continue;

		if ((unsigned long)p2m == INVALID_P2M_ENTRY)
			continue;

		/*
		 * Classify the leaf: it is reusable only if ALL of its
		 * entries are identity mappings, or ALL are invalid.
		 * The first mixed entry aborts the count early.
		 */
		ident_pfns = 0;
		inv_pfns = 0;
		for (idx = 0; idx < P2M_PER_PAGE; idx++) {
			/* IDENTITY_PFNs are 1:1 */
			if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
				ident_pfns++;
			else if (p2m[idx] == INVALID_P2M_ENTRY)
				inv_pfns++;
			else
				break;
		}
		if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
			goto found;
	}
	return false;
found:
	/* Found one, replace old with p2m_identity or p2m_missing */
	p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);

	/* Reset where we want to stick the old page in. */
	topidx = p2m_top_index(set_pfn);
	mididx = p2m_mid_index(set_pfn);

	/* This shouldn't happen */
	if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
		early_alloc_p2m_middle(set_pfn);

	/* Target slot must still be the shared missing leaf. */
	if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
		return false;

	/* Re-initialise the reclaimed page and install it at set_pfn's slot. */
	p2m_init(p2m);
	p2m_top[topidx][mididx] = p2m;

	return true;
}
/*
 * Set the p2m entry for pfn during early boot, allocating any missing
 * p2m tree levels on demand.
 *
 * Returns true on success, false if the required p2m pages could not
 * be allocated or the entry could not be stored.
 */
bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	/* Fast path: the leaf page for this pfn already exists. */
	if (likely(__set_phys_to_machine(pfn, mfn)))
		return true;

	/* Slow path: build out the missing middle level first. */
	if (!early_alloc_p2m_middle(pfn))
		return false;

	/* Try to reclaim an existing uniform leaf page before allocating. */
	if (early_can_reuse_p2m_middle(pfn))
		return __set_phys_to_machine(pfn, mfn);

	if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/))
		return false;

	return __set_phys_to_machine(pfn, mfn);
}

static void __init early_split_p2m(unsigned long pfn)
{
unsigned long mididx, idx;
Expand Down
Loading

0 comments on commit 1f3ac86

Please sign in to comment.