Skip to content

Commit

Permalink
xen/x86: Use memblock_reserve for sensitive areas.
Browse files Browse the repository at this point in the history
instead of a big memblock_reserve. This way we can be more
selective in freeing regions (and it also makes it easier
to understand what is where).

[v1: Move the auto_translated_physmap to proper line]
[v2: Per Stefano suggestion add more comments]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  • Loading branch information
Konrad Rzeszutek Wilk committed Aug 21, 2012
1 parent a3118be commit 59b2944
Show file tree
Hide file tree
Showing 3 changed files with 53 additions and 9 deletions.
48 changes: 48 additions & 0 deletions arch/x86/xen/enlighten.c
Original file line number Diff line number Diff line change
Expand Up @@ -998,7 +998,54 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)

return ret;
}
/*
* If the MFN is not in the m2p (provided to us by the hypervisor) this
* function won't do anything. In practice this means that the XenBus
* MFN won't be available for the initial domain. */
/*
 * Reserve the page backing @mfn with memblock, provided the M2P table
 * (supplied by the hypervisor) knows about it.  MFNs absent from the
 * M2P -- in practice the XenBus MFN in the initial domain -- are
 * silently skipped.
 */
static void __init xen_reserve_mfn(unsigned long mfn)
{
	unsigned long pfn;

	if (!mfn)
		return;

	pfn = mfn_to_pfn(mfn);
	if (!phys_to_machine_mapping_valid(pfn))
		return;

	memblock_reserve(PFN_PHYS(pfn), PAGE_SIZE);
}
/*
 * Reserve the memory regions Xen hands a PV guest at boot -- the
 * start_info page, shared_info, the xenstore ring, the domU console
 * ring, and the initial P2M (mfn_list) -- so the memblock allocator
 * never gives them out as free memory.
 */
static void __init xen_reserve_internals(void)
{
	unsigned long size;

	/* Only PV guests receive these Xen-provided pages. */
	if (!xen_pv_domain())
		return;

	/* xen_start_info does not exist in the M2P, hence can't use
	 * xen_reserve_mfn. */
	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);

	/* NOTE(review): presumably start_info->shared_info holds a machine
	 * address, so PFN_DOWN of it yields the MFN xen_reserve_mfn wants
	 * -- confirm against xen/interface/xen.h. */
	xen_reserve_mfn(PFN_DOWN(xen_start_info->shared_info));
	xen_reserve_mfn(xen_start_info->store_mfn);

	/* Only unprivileged domains carry a console.domU ring. */
	if (!xen_initial_domain())
		xen_reserve_mfn(xen_start_info->console.domU.mfn);

	/* Auto-translated guests keep no P2M list to protect. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	/*
	 * ALIGN up to compensate for the p2m_page pointing to an array that
	 * can be partially filled (look in xen_build_dynamic_phys_to_machine).
	 */

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

	/* We could use xen_reserve_mfn here, but would end up looping quite
	 * a lot (and call memblock_reserve for each PAGE), so lets just use
	 * the easy way and reserve it wholesale. */
	memblock_reserve(__pa(xen_start_info->mfn_list), size);

	/* The pagetables are reserved in mmu.c */
}
void xen_setup_shared_info(void)
{
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
Expand Down Expand Up @@ -1362,6 +1409,7 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

xen_reserve_internals();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();

Expand Down
5 changes: 5 additions & 0 deletions arch/x86/xen/p2m.c
Original file line number Diff line number Diff line change
Expand Up @@ -388,6 +388,11 @@ void __init xen_build_dynamic_phys_to_machine(void)
}

m2p_override_init();

/* NOTE: We cannot call memblock_reserve here for the mfn_list as there
* isn't enough pieces to make it work (for one - we are still using the
* Xen provided pagetable). Do it later in xen_reserve_internals.
*/
}

unsigned long get_phys_to_machine(unsigned long pfn)
Expand Down
9 changes: 0 additions & 9 deletions arch/x86/xen/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -424,15 +424,6 @@ char * __init xen_memory_setup(void)
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);

/*
* Reserve Xen bits:
* - mfn_list
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
*/
memblock_reserve(__pa(xen_start_info->mfn_list),
xen_start_info->pt_base - xen_start_info->mfn_list);

sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

return "Xen";
Expand Down

0 comments on commit 59b2944

Please sign in to comment.