From a589f4aa1cf0f90e9858362ac00f55b86bdc8813 Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Mon, 5 Nov 2007 21:55:57 +1100
Subject: [PATCH] --- yaml ---

r: 73128
b: refs/heads/master
c: 633872b980f55f40a5e7de374f26970e41e2137b
h: refs/heads/master
v: v3
---
 [refs]                       |  2 +-
 trunk/arch/x86/lguest/boot.c | 43 ++++++++++++++++++------------------
 2 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/[refs] b/[refs]
index 305666000b0a..b32ac42097fa 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fad23fc78b959dae89768e523c3a6f5edb83bbe9
+refs/heads/master: 633872b980f55f40a5e7de374f26970e41e2137b
diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c
index e6023b86f31d..92c56117eae5 100644
--- a/trunk/arch/x86/lguest/boot.c
+++ b/trunk/arch/x86/lguest/boot.c
@@ -93,27 +93,7 @@ struct lguest_data lguest_data = {
 };
 static cycle_t clock_base;
 
-/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
- * real optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off. Because hypercalls
- * are reasonably expensive, batching them up makes sense. For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hypercall() to store the call for
- * future processing. When lazy mode is turned off we issue a hypercall to
- * flush the stored calls.
- */
-static void lguest_leave_lazy_mode(void)
-{
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
-	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-}
-
-/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
+/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
  * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -151,6 +131,18 @@ static void async_hcall(unsigned long call, unsigned long arg1,
 	local_irq_restore(flags);
 }
 
+/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off. Because hypercalls
+ * are reasonably expensive, batching them up makes sense. For example, a
+ * large munmap might update dozens of page table entries: that code calls
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
+ *
+ * So, when we're in lazy mode, we call async_hcall() to store the call for
+ * future processing. */
 static void lazy_hcall(unsigned long call,
 		       unsigned long arg1,
 		       unsigned long arg2,
@@ -161,7 +153,14 @@ static void lazy_hcall(unsigned long call,
 	else
 		async_hcall(call, arg1, arg2, arg3);
 }
-/*:*/
+
+/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue a hypercall to flush any stored calls. */
+static void lguest_leave_lazy_mode(void)
+{
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+}
 
 /*G:033
  * After that diversion we return to our first native-instruction
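
The comments being shuffled above describe the whole trick: while lazy mode is
on, hypercalls are parked in a small ring (each entry has 4 argument slots plus
an "hcall_status" byte, 0 when the call is ready and 255 once the Host has
consumed it), and leaving lazy mode issues one LHCALL_FLUSH_ASYNC so the Host
runs the backlog. For readers who want to poke at the pattern outside the
kernel, here is a rough stand-alone C sketch of the same idea. It is only an
illustration, not lguest code: ring_entry, queue_call, flush_ring, lazy_call
and do_call are names invented for this sketch, and the real async_hcall()
also copes with a full ring, disabled interrupts and per-cpu lazy state, which
the sketch ignores.

/* Illustrative only, not from the patch: a user-space model of batching
 * deferred calls in a ring while "lazy mode" is on, then flushing them when
 * it is turned off. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64

struct ring_entry {
	unsigned long args[4];	/* 4 slots for the call's arguments */
	unsigned char status;	/* 0 = ready to run, 0xFF = already consumed */
};

static struct ring_entry ring[RING_SIZE];
static unsigned int next_slot;
static bool lazy_mode;

/* Stand-in for the expensive real hypercall: here we just print it. */
static void do_call(unsigned long call, unsigned long a1,
		    unsigned long a2, unsigned long a3)
{
	printf("call %lu(%lu, %lu, %lu)\n", call, a1, a2, a3);
}

/* Store a call in the ring for later, in the spirit of async_hcall(). */
static void queue_call(unsigned long call, unsigned long a1,
		       unsigned long a2, unsigned long a3)
{
	struct ring_entry *e = &ring[next_slot % RING_SIZE];

	e->args[0] = call;
	e->args[1] = a1;
	e->args[2] = a2;
	e->args[3] = a3;
	e->status = 0;		/* mark the entry ready for the "host" */
	next_slot++;
}

/* Run every queued call once and mark it consumed; in lguest the flush is a
 * single hypercall (LHCALL_FLUSH_ASYNC) and the Host walks the ring itself. */
static void flush_ring(void)
{
	for (unsigned int i = 0; i < next_slot && i < RING_SIZE; i++) {
		struct ring_entry *e = &ring[i];

		if (e->status == 0) {
			do_call(e->args[0], e->args[1], e->args[2], e->args[3]);
			e->status = 0xFF;	/* consumed */
		}
	}
	next_slot = 0;
}

/* Mirror of lazy_hcall(): batch while lazy, otherwise call immediately. */
static void lazy_call(unsigned long call, unsigned long a1,
		      unsigned long a2, unsigned long a3)
{
	if (!lazy_mode)
		do_call(call, a1, a2, a3);
	else
		queue_call(call, a1, a2, a3);
}

int main(void)
{
	lazy_call(1, 10, 0, 0);		/* not lazy yet: runs immediately */

	lazy_mode = true;		/* think paravirt_enter_lazy_mmu() */
	lazy_call(2, 20, 21, 22);	/* deferred into the ring */
	lazy_call(3, 30, 31, 32);	/* deferred into the ring */

	lazy_mode = false;		/* like lguest_leave_lazy_mode()... */
	flush_ring();			/* ...which flushes the stored calls */
	return 0;
}

The payoff is the one the comment gives: a large munmap touching dozens of
page table entries pays for a single flush instead of one expensive call per
entry.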