Commit e15e2b2

---
r: 257957
b: refs/heads/master
c: bc7fe1d
h: refs/heads/master
i:
  257955: 4a23862
v: v3
Jeremy Fitzhardinge committed Jul 18, 2011
1 parent 49a234a commit e15e2b2
Showing 2 changed files with 45 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dcf7435cfe617c9d9f3dd501d3f5ed4d0b30c218
+refs/heads/master: bc7fe1d977734efe6059d6d537b439135ad4a34c
53 changes: 44 additions & 9 deletions trunk/arch/x86/xen/mmu.c
@@ -1497,22 +1497,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
         make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+        struct multicall_space mcs;
+        struct mmuext_op *op;
+
+        mcs = __xen_mc_entry(sizeof(*op));
+        op = mcs.args;
+        op->cmd = cmd;
+        op->arg1.mfn = pfn_to_mfn(pfn);
+
+        MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+        struct multicall_space mcs;
+        unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+        mcs = __xen_mc_entry(0);
+        MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+                                pfn_pte(pfn, prot), 0);
+}
+
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+                                    unsigned level)
 {
-        struct page *page = pfn_to_page(pfn);
-        int pinned = PagePinned(virt_to_page(mm->pgd));
+        bool pinned = PagePinned(virt_to_page(mm->pgd));
 
         trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
 
         if (pinned) {
+                struct page *page = pfn_to_page(pfn);
+
                 SetPagePinned(page);
 
                 if (!PageHighMem(page)) {
-                        make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+                        xen_mc_batch();
+
+                        __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
                         if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                                pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+                                __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                 } else {
                         /* make sure there are no stray mappings of
                            this page */
@@ -1532,7 +1562,7 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 {
         struct page *page = pfn_to_page(pfn);
         bool pinned = PagePinned(page);
@@ -1541,9 +1571,14 @@ static void xen_release_ptpage(unsigned long pfn, unsigned level)
 
         if (pinned) {
                 if (!PageHighMem(page)) {
+                        xen_mc_batch();
+
                         if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                                pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-                        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+                                __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+                        __set_pfn_prot(pfn, PAGE_KERNEL);
+
+                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                 }
                 ClearPagePinned(page);
         }
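
The change follows the standard Xen multicall pattern: open a batch with xen_mc_batch(), queue the page-permission change and the (un)pin through the two new helpers, then flush with xen_mc_issue(PARAVIRT_LAZY_MMU) so the hypercalls can be coalesced with any adjacent lazy-MMU updates. A minimal sketch of that sequence for the alloc path, not part of the commit; the function name example_pin_pte_page is made up for illustration:

/* Sketch only: mirrors the !PageHighMem branch of xen_alloc_ptpage() above. */
static void example_pin_pte_page(unsigned long pfn)
{
        xen_mc_batch();                                 /* open a multicall batch */

        __set_pfn_prot(pfn, PAGE_KERNEL_RO);            /* queue: remap the pte page read-only */
        __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);  /* queue: pin it as an L1 page table */

        xen_mc_issue(PARAVIRT_LAZY_MMU);                /* flush, or defer if already in lazy MMU mode */
}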
