From a3810a3238ab77f2a7422be718f128e6a7625430 Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Tue, 25 Nov 2008 15:58:07 +0100
Subject: [PATCH]

--- yaml ---
r: 125256
b: refs/heads/master
c: ecc5589f19a52e7e6501fe449047b19087ae11bb
h: refs/heads/master
v: v3
---
 [refs]                   |    2 +-
 trunk/arch/x86/kvm/mmu.c |    9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 963bfd39b5d9..6c9e0e37a835 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5319c662522db8995ff9276ba9d80549c64b294a
+refs/heads/master: ecc5589f19a52e7e6501fe449047b19087ae11bb
diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c
index fa3486d64078..dd20b199a7c0 100644
--- a/trunk/arch/x86/kvm/mmu.c
+++ b/trunk/arch/x86/kvm/mmu.c
@@ -1593,6 +1593,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
 		spte |= PT_WRITABLE_MASK;
 
+		/*
+		 * Optimization: for pte sync, if spte was writable the hash
+		 * lookup is unnecessary (and expensive). Write protection
+		 * is responsibility of mmu_get_page / kvm_sync_page.
+		 * Same reasoning can be applied to dirty page accounting.
+		 */
+		if (!can_unsync && is_writeble_pte(*shadow_pte))
+			goto set_pte;
+
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
 				 __func__, gfn);
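
The fast path added above can be modeled as a small standalone program. The
sketch below is illustrative only: set_spte_model(), spte_was_writable() and
need_write_protect() are invented stand-ins for the kernel's set_spte(),
is_writeble_pte() and mmu_need_write_protect(), and the shadow-page hash walk
is reduced to a printf() so the skipped work is visible. The only fact it
borrows from the real code is that bit 1 of an x86 PTE is the R/W bit.

/*
 * Standalone sketch of the fast path this patch adds to set_spte().
 * All helpers here are simplified stand-ins, not kernel code: in the
 * kernel, the expensive step being skipped is mmu_need_write_protect(),
 * which walks the shadow-page hash for the gfn.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)	/* x86 PTE bit 1 (R/W) */

/* Cheap local check: was the existing spte already writable? */
static bool spte_was_writable(uint64_t old_spte)
{
	return old_spte & PT_WRITABLE_MASK;
}

/* Stand-in for the expensive hash lookup (mmu_need_write_protect). */
static bool need_write_protect(uint64_t gfn, bool can_unsync)
{
	printf("hash lookup for gfn %llx\n", (unsigned long long)gfn);
	return false;
}

static uint64_t set_spte_model(uint64_t old_spte, uint64_t gfn,
			       bool can_unsync)
{
	uint64_t spte = PT_WRITABLE_MASK;

	/*
	 * Fast path: during a sync (!can_unsync), a previously-writable
	 * spte already passed the write-protection check when it was
	 * first mapped, so the hash lookup would only rediscover that.
	 */
	if (!can_unsync && spte_was_writable(old_spte))
		goto set_pte;

	if (need_write_protect(gfn, can_unsync))
		spte &= ~PT_WRITABLE_MASK;	/* mark read-only */

set_pte:
	return spte;
}

int main(void)
{
	/* Resync of an already-writable spte: no hash lookup printed. */
	set_spte_model(PT_WRITABLE_MASK, 0x1234, false);
	/* Fresh mapping: the expensive lookup runs. */
	set_spte_model(0, 0x1234, true);
	return 0;
}

The invariant that makes the shortcut safe is the one stated in the patch's
comment: write protection is the responsibility of mmu_get_page() /
kvm_sync_page(), so an spte that is already writable cannot need the hash
lookup again on the sync path, and the same reasoning covers dirty page
accounting.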