xen: clean up x86-64 warnings
A couple of Xen features rely on directly accessing per-cpu data via
a segment register, which is not yet available on x86-64.  In the
meantime, just disable direct access to the vcpu_info structure; this
leaves some of the code dead for now, but it will come back to life
in time, and the warnings are suppressed.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Jeremy Fitzhardinge authored and Ingo Molnar committed Oct 3, 2008
1 parent 08115ab commit db053b8
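For context on what the message calls "direct access": with vcpu_info
placement, the hypervisor keeps each CPU's vcpu_info inside a per-cpu
variable chosen by the guest, so the *_direct pv-ops can reach fields
such as the event mask with a single segment-override instruction.
The sketch below shows only the gating idea in plain userspace C; it
is not kernel code, and DEMO_X86_32 plus every *_demo name is invented
for illustration.

#include <stdio.h>

#define NR_CPUS_DEMO 4

struct vcpu_info_demo {
        unsigned char evtchn_upcall_mask;       /* 1 = events masked */
};

/* Stand-ins for the hypervisor-provided shared page, and for the
   per-cpu copy that successful "placement" would give us. */
static struct vcpu_info_demo shared_page_demo[NR_CPUS_DEMO];
static struct vcpu_info_demo percpu_copy_demo[NR_CPUS_DEMO];

/* Same pattern as the patch: an arch-dependent initializer instead
   of #ifdefs at every use site. */
static int have_vcpu_info_placement =
#ifdef DEMO_X86_32
        1
#else
        0
#endif
        ;

static struct vcpu_info_demo *vcpu_info_demo(int cpu)
{
        /* With placement off (as on x86-64 here), every access goes
           through the shared page; the fast-path code stays compiled
           but dead, which is what silences the warnings. */
        return have_vcpu_info_placement ? &percpu_copy_demo[cpu]
                                        : &shared_page_demo[cpu];
}

int main(void)
{
        vcpu_info_demo(0)->evtchn_upcall_mask = 1;      /* "irq disable" */
        printf("placement=%d, cpu0 mask=%d\n", have_vcpu_info_placement,
               vcpu_info_demo(0)->evtchn_upcall_mask);
        return 0;
}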
Showing 2 changed files with 27 additions and 54 deletions.
61 changes: 10 additions & 51 deletions arch/x86/xen/enlighten.c
@@ -112,7 +112,14 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement = 1;
+static int have_vcpu_info_placement =
+#ifdef CONFIG_X86_32
+        1
+#else
+        0
+#endif
+        ;
+
 
 static void xen_vcpu_setup(int cpu)
 {
@@ -941,6 +948,7 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
         /* If there's an existing pte, then don't allow _PAGE_RW to be set */
@@ -959,6 +967,7 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
         xen_set_pte(ptep, pte);
 }
+#endif
 
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
@@ -1025,7 +1034,6 @@
 
         /* xen_vcpu_setup managed to place the vcpu_info within the
            percpu area for all cpus, so make use of it */
-#ifdef CONFIG_X86_32
         if (have_vcpu_info_placement) {
                 printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
@@ -1035,7 +1043,6 @@
                 pv_irq_ops.irq_enable = xen_irq_enable_direct;
                 pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
         }
-#endif
 }
 
 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
@@ -1056,12 +1063,10 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
         goto patch_site
 
         switch (type) {
-#ifdef CONFIG_X86_32
         SITE(pv_irq_ops, irq_enable);
         SITE(pv_irq_ops, irq_disable);
         SITE(pv_irq_ops, save_fl);
         SITE(pv_irq_ops, restore_fl);
-#endif /* CONFIG_X86_32 */
 #undef SITE
 
         patch_site:
@@ -1399,48 +1404,11 @@ static void *m2v(phys_addr_t maddr)
         return __ka(m2p(maddr));
 }
 
-#ifdef CONFIG_X86_64
-static void walk(pgd_t *pgd, unsigned long addr)
-{
-        unsigned l4idx = pgd_index(addr);
-        unsigned l3idx = pud_index(addr);
-        unsigned l2idx = pmd_index(addr);
-        unsigned l1idx = pte_index(addr);
-        pgd_t l4;
-        pud_t l3;
-        pmd_t l2;
-        pte_t l1;
-
-        xen_raw_printk("walk %p, %lx -> %d %d %d %d\n",
-                       pgd, addr, l4idx, l3idx, l2idx, l1idx);
-
-        l4 = pgd[l4idx];
-        xen_raw_printk("  l4: %016lx\n", l4.pgd);
-        xen_raw_printk("      %016lx\n", pgd_val(l4));
-
-        l3 = ((pud_t *)(m2v(l4.pgd)))[l3idx];
-        xen_raw_printk("  l3: %016lx\n", l3.pud);
-        xen_raw_printk("      %016lx\n", pud_val(l3));
-
-        l2 = ((pmd_t *)(m2v(l3.pud)))[l2idx];
-        xen_raw_printk("  l2: %016lx\n", l2.pmd);
-        xen_raw_printk("      %016lx\n", pmd_val(l2));
-
-        l1 = ((pte_t *)(m2v(l2.pmd)))[l1idx];
-        xen_raw_printk("  l1: %016lx\n", l1.pte);
-        xen_raw_printk("      %016lx\n", pte_val(l1));
-}
-#endif
-
 static void set_page_prot(void *addr, pgprot_t prot)
 {
         unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
         pte_t pte = pfn_pte(pfn, prot);
 
-        xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016llx pte=%016llx\n",
-                       addr, pfn, get_phys_to_machine(pfn),
-                       pgprot_val(prot), pte.pte);
-
         if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
                 BUG();
 }
@@ -1698,15 +1666,6 @@ asmlinkage void __init xen_start_kernel(void)
 
         xen_raw_console_write("about to get started...\n");
 
-#if 0
-        xen_raw_printk("&boot_params=%p __pa(&boot_params)=%lx __va(__pa(&boot_params))=%lx\n",
-                       &boot_params, __pa_symbol(&boot_params),
-                       __va(__pa_symbol(&boot_params)));
-
-        walk(pgd, &boot_params);
-        walk(pgd, __va(__pa(&boot_params)));
-#endif
-
         /* Start the world */
 #ifdef CONFIG_X86_32
         i386_start_kernel();
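The enlighten.c side works because the direct variants are only ever
installed at runtime, behind the have_vcpu_info_placement check that
this diff un-#ifdefs.  Here is a small self-contained sketch of that
dispatch shape; the *_demo names are invented and nothing here is the
real pv-ops machinery:

#include <stdio.h>

static void irq_enable_generic_demo(void)
{
        puts("slow path: hypercall to unmask events");
}

static void irq_enable_direct_demo(void)
{
        puts("fast path: one segment-override movb");
}

/* pv-ops style function pointer, preloaded with the safe default */
static void (*pv_irq_enable_demo)(void) = irq_enable_generic_demo;

static int have_vcpu_info_placement = 0;        /* as on x86-64 here */

static void setup_vcpu_info_placement_demo(void)
{
        /* Never true on x86-64 for now, so the direct variant is
           referenced (no unused-function warning) but never run. */
        if (have_vcpu_info_placement)
                pv_irq_enable_demo = irq_enable_direct_demo;
}

int main(void)
{
        setup_vcpu_info_placement_demo();
        pv_irq_enable_demo();           /* -> slow path on x86-64 */
        return 0;
}

Because the selection happens through this runtime check, the #ifdef
CONFIG_X86_32 around it and around the SITE() patch entries could
simply be deleted.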
20 changes: 17 additions & 3 deletions arch/x86/xen/xen-asm_64.S
@@ -26,15 +26,24 @@
 /* Pseudo-flag used for virtual NMI, which we don't implement yet */
 #define XEN_EFLAGS_NMI 0x80000000
 
-#if 0
-#include <asm/percpu.h>
+#if 1
+/*
+   x86-64 does not yet support direct access to percpu variables
+   via a segment override, so we just need to make sure this code
+   never gets used
+ */
+#define BUG                     ud2a
+#define PER_CPU_VAR(var, off)   0xdeadbeef
+#endif
 
 /*
         Enable events.  This clears the event mask and tests the pending
         event status with one and operation.  If there are pending
         events, then enter the hypervisor to get them handled.
 */
 ENTRY(xen_irq_enable_direct)
+        BUG
+
         /* Unmask events */
         movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 
@@ -58,6 +67,8 @@ ENDPATCH(xen_irq_enable_direct)
         non-zero.
 */
 ENTRY(xen_irq_disable_direct)
+        BUG
+
         movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 ENDPATCH(xen_irq_disable_direct)
         ret
@@ -74,6 +85,8 @@ ENDPATCH(xen_irq_disable_direct)
         Xen and x86 use opposite senses (mask vs enable).
 */
 ENTRY(xen_save_fl_direct)
+        BUG
+
         testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
         setz %ah
         addb %ah,%ah
@@ -91,6 +104,8 @@ ENDPATCH(xen_save_fl_direct)
         if so.
 */
 ENTRY(xen_restore_fl_direct)
+        BUG
+
         testb $X86_EFLAGS_IF>>8, %ah
         setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
         /* Preempt here doesn't matter because that will deal with
@@ -133,7 +148,6 @@ check_events:
         pop %rcx
         pop %rax
         ret
-#endif
 
 ENTRY(xen_adjust_exception_frame)
         mov 8+0(%rsp),%rcx
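The two stub macros are what make it safe to keep assembling these
routines on x86-64: BUG becomes ud2a, which raises an invalid-opcode
trap, and PER_CPU_VAR() collapses to a dummy absolute address.  For
illustration only, after the preprocessor runs, the start of
xen_irq_enable_direct assembles roughly as:

ENTRY(xen_irq_enable_direct)
        ud2a                    /* BUG: #UD trap if ever reached */
        movb $0, 0xdeadbeef     /* PER_CPU_VAR(...) placeholder; never
                                   executed because ud2a traps first */

So even if one of these entry points were reached before the x86-64
per-cpu support lands, the guest dies loudly at the ud2a rather than
scribbling on a bogus address.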
