Skip to content

Commit

Permalink
xen: lazy-mmu operations
Browse files Browse the repository at this point in the history
This patch uses the lazy-mmu hooks to batch mmu operations where
possible.  This is primarily useful for batching operations applied to
active pagetables, which happens during mprotect, munmap, mremap and
the like (mmap does not do bulk pagetable operations, so it isn't
helped).

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
  • Loading branch information
Jeremy Fitzhardinge authored and Jeremy Fitzhardinge committed Jul 18, 2007
1 parent f120f13 commit d66bf8f
Show file tree
Hide file tree
Showing 3 changed files with 72 additions and 32 deletions.
48 changes: 31 additions & 17 deletions arch/i386/xen/enlighten.c
Original file line number Diff line number Diff line change
Expand Up @@ -472,28 +472,38 @@ static void xen_apic_write(unsigned long reg, unsigned long val)

/*
 * Flush the local CPU's TLB via a batched multicall rather than an
 * immediate hypercall, so the flush can be coalesced with other
 * pending mmu operations when we are inside a lazy-MMU region.
 */
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* Issues immediately unless we're batching under PARAVIRT_LAZY_MMU. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/*
 * Invalidate a single page's TLB entry on the local CPU.  Like
 * xen_flush_tlb(), the operation goes through the multicall buffer so
 * it can be batched with other mmu work in lazy-MMU mode.
 */
static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;	/* page-align the target */
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* Issues immediately unless we're batching under PARAVIRT_LAZY_MMU. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
unsigned long va)
{
struct mmuext_op op;
struct {
struct mmuext_op op;
cpumask_t mask;
} *args;
cpumask_t cpumask = *cpus;
struct multicall_space mcs;

/*
* A couple of (to be removed) sanity checks:
Expand All @@ -510,17 +520,21 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
if (cpus_empty(cpumask))
return;

mcs = xen_mc_entry(sizeof(*args));
args = mcs.args;
args->mask = cpumask;
args->op.arg2.vcpumask = &args->mask;

if (va == TLB_FLUSH_ALL) {
op.cmd = MMUEXT_TLB_FLUSH_MULTI;
op.arg2.vcpumask = (void *)cpus;
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
} else {
op.cmd = MMUEXT_INVLPG_MULTI;
op.arg1.linear_addr = va;
op.arg2.vcpumask = (void *)cpus;
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = va;
}

if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
BUG();
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr2(void)
Expand Down
52 changes: 39 additions & 13 deletions arch/i386/xen/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -98,12 +98,20 @@ void make_lowmem_page_readwrite(void *vaddr)

/*
 * Set a pmd entry via a batched MMU-update hypercall.  The update is
 * staged in the per-CPU multicall buffer; preemption is disabled around
 * the xen_mc_entry()/xen_mc_issue() pair so we stay on the same CPU's
 * buffer throughout.
 */
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;	/* machine address of the slot */
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	/* Issues immediately unless we're batching under PARAVIRT_LAZY_MMU. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

/*
Expand Down Expand Up @@ -146,20 +154,38 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
/*
 * Set a pte in a (potentially active) pagetable.
 *
 * Fast paths apply only when the pagetable belongs to the current or
 * init mm, where a va-based update is valid:
 *  - in lazy-MMU mode, queue an update_va_mapping multicall;
 *  - otherwise try a direct HYPERVISOR_update_va_mapping hypercall.
 * If neither applies (foreign mm, or the hypercall fails), fall back to
 * the generic xen_set_pte() on the pte pointer itself.
 */
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (mm == current->mm || mm == &init_mm) {
		if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			return;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				return;
	}
	xen_set_pte(ptep, pteval);
}

#ifdef CONFIG_X86_PAE
/*
 * Set a pud entry via a batched MMU-update hypercall.  Mirrors
 * xen_set_pmd(): the update is staged in the per-CPU multicall buffer
 * with preemption disabled so we stay on one CPU's buffer.
 */
void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;	/* machine address of the slot */
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	/* Issues immediately unless we're batching under PARAVIRT_LAZY_MMU. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pte(pte_t *ptep, pte_t pte)
Expand Down
4 changes: 2 additions & 2 deletions arch/i386/xen/multicalls.c
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@

#include "multicalls.h"

/*
 * Multicall batching limits: up to MC_BATCH queued calls, sharing an
 * argument area of MC_ARGS u64 slots (16 bytes of argument space per
 * batched call on average).
 */
#define MC_BATCH	32
#define MC_ARGS		(MC_BATCH * 16 / sizeof(u64))

struct mc_buffer {
struct multicall_entry entries[MC_BATCH];
Expand Down

0 comments on commit d66bf8f

Please sign in to comment.