Revert "xen/x86: Workaround 64-bit hypervisor and 32-bit initial doma…
Browse files Browse the repository at this point in the history
…in." and "xen/x86: Use memblock_reserve for sensitive areas."

This reverts commit 806c312 and
commit 59b2944.

It also documents in setup.c why we want to do it this way:
we tried to make the memblock_reserve more selective so that
it would be clear which region is reserved. Sadly we ran into
the problem wherein, on a 64-bit hypervisor with a 32-bit
initial domain, pt_base holds the cr3 value, which is not
necessarily where the pagetable starts! As Jan put it: "
Actually, the adjustment turns out to be correct: The page
tables for a 32-on-64 dom0 get allocated in the order "first L1",
"first L2", "first L3", so the offset to the page table base is
indeed 2. When reading xen/include/public/xen.h's comment
very strictly, this is not a violation (since nothing there
says that the first thing in the page table space is pointed
to by pt_base; I admit that this seems to be implied though,
namely I do think it is implied that the page table space is
the range [pt_base, pt_base + nr_pt_frames), whereas the
range here is indeed [pt_base - 2, pt_base - 2 + nr_pt_frames),
which - without a priori knowledge - the kernel would have
difficulty figuring out)." - so let's just fall back to the
easy way and reserve the whole region.
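
For illustration only (an editorial sketch, not part of this commit): a selective reservation keyed off pt_base would have to know about those two hidden frames. The check below is a hypothetical placeholder, precisely because the kernel has no a-priori way to make it:

    /* Hypothetical sketch, not kernel code: what a selective reservation
     * of the bootstrap page tables would have to do.
     */
    unsigned long pt_start = __pa(xen_start_info->pt_base);
    unsigned long pt_size  = xen_start_info->nr_pt_frames * PAGE_SIZE;

    /* On a 64-bit hypervisor with a 32-bit dom0, the first L1 and L2 frames
     * precede the L3 frame that pt_base (cr3) points at, so the real range
     * is [pt_base - 2 pages, pt_base - 2 pages + nr_pt_frames pages).
     */
    if (is_32on64_initial_domain)   /* placeholder: no reliable test exists */
            pt_start -= 2 * PAGE_SIZE;

    memblock_reserve(pt_start, pt_size);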

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Konrad Rzeszutek Wilk committed Aug 23, 2012
1 parent 806c312 commit 51faaf2
Showing 3 changed files with 27 additions and 65 deletions.
60 changes: 0 additions & 60 deletions arch/x86/xen/enlighten.c
@@ -998,66 +998,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)

return ret;
}
/*
* If the MFN is not in the m2p (provided to us by the hypervisor) this
* function won't do anything. In practice this means that the XenBus
* MFN won't be available for the initial domain. */
static unsigned long __init xen_reserve_mfn(unsigned long mfn)
{
unsigned long pfn, end_pfn = 0;

if (!mfn)
return end_pfn;

pfn = mfn_to_pfn(mfn);
if (phys_to_machine_mapping_valid(pfn)) {
end_pfn = PFN_PHYS(pfn) + PAGE_SIZE;
memblock_reserve(PFN_PHYS(pfn), end_pfn);
}
return end_pfn;
}
static void __init xen_reserve_internals(void)
{
unsigned long size;
unsigned long last_phys = 0;

if (!xen_pv_domain())
return;

/* xen_start_info does not exist in the M2P, hence can't use
* xen_reserve_mfn. */
memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
last_phys = __pa(xen_start_info) + PAGE_SIZE;

last_phys = max(xen_reserve_mfn(PFN_DOWN(xen_start_info->shared_info)), last_phys);
last_phys = max(xen_reserve_mfn(xen_start_info->store_mfn), last_phys);

if (!xen_initial_domain())
last_phys = max(xen_reserve_mfn(xen_start_info->console.domU.mfn), last_phys);

if (xen_feature(XENFEAT_auto_translated_physmap))
return;

/*
* ALIGN up to compensate for the p2m_page pointing to an array that
* can partially filled (look in xen_build_dynamic_phys_to_machine).
*/

size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

/* We could use xen_reserve_mfn here, but would end up looping quite
* a lot (and call memblock_reserve for each PAGE), so lets just use
* the easy way and reserve it wholesale. */
memblock_reserve(__pa(xen_start_info->mfn_list), size);
last_phys = max(__pa(xen_start_info->mfn_list) + size, last_phys);
/* The pagetables are reserved in mmu.c */

/* Under 64-bit hypervisor with a 32-bit domain, the hypervisor
* offsets the pt_base by two pages. Hence the reservation that is done
* in mmu.c misses two pages. We correct it here if we detect this. */
if (last_phys < __pa(xen_start_info->pt_base))
memblock_reserve(last_phys, __pa(xen_start_info->pt_base) - last_phys);
}
void xen_setup_shared_info(void)
{
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1421,7 +1362,6 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

xen_reserve_internals();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();

5 changes: 0 additions & 5 deletions arch/x86/xen/p2m.c
@@ -388,11 +388,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
}

m2p_override_init();

/* NOTE: We cannot call memblock_reserve here for the mfn_list as there
* isn't enough pieces to make it work (for one - we are still using the
* Xen provided pagetable). Do it later in xen_reserve_internals.
*/
}

unsigned long get_phys_to_machine(unsigned long pfn)
27 changes: 27 additions & 0 deletions arch/x86/xen/setup.c
@@ -424,6 +424,33 @@ char * __init xen_memory_setup(void)
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);

/*
* Reserve Xen bits:
* - mfn_list
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
* We tried to make the memblock_reserve more selective so
* that it would be clear which region is reserved. Sadly we ran
* into the problem wherein, on a 64-bit hypervisor with a 32-bit
* initial domain, pt_base holds the cr3 value, which is not
* necessarily where the pagetable starts! As Jan put it: "
* Actually, the adjustment turns out to be correct: The page
* tables for a 32-on-64 dom0 get allocated in the order "first L1",
* "first L2", "first L3", so the offset to the page table base is
* indeed 2. When reading xen/include/public/xen.h's comment
* very strictly, this is not a violation (since nothing there
* says that the first thing in the page table space is pointed
* to by pt_base; I admit that this seems to be implied though,
* namely I do think it is implied that the page table space is
* the range [pt_base, pt_base + nr_pt_frames), whereas the
* range here is indeed [pt_base - 2, pt_base - 2 + nr_pt_frames),
* which - without a priori knowledge - the kernel would have
* difficulty figuring out)." - so let's just fall back to the
* easy way and reserve the whole region.
*/
memblock_reserve(__pa(xen_start_info->mfn_list),
xen_start_info->pt_base - xen_start_info->mfn_list);

sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

return "Xen";
