From 96d6a1a71ce02e1bfac5610b98668c90d0357796 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Tue, 16 Oct 2007 11:51:31 -0700
Subject: [PATCH]

--- yaml ---
r: 70934
b: refs/heads/master
c: ace2e92e193126711cb3a83a3752b2c5b8396950
h: refs/heads/master
v: v3
---
 [refs]                           |  2 +-
 trunk/fs/xfs/linux-2.6/xfs_buf.c | 13 +++++++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 56734b85d849..f634048c01c3 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a122d6230e8d8ac7cffdf0bc9cc4b256b928fe49
+refs/heads/master: ace2e92e193126711cb3a83a3752b2c5b8396950
diff --git a/trunk/fs/xfs/linux-2.6/xfs_buf.c b/trunk/fs/xfs/linux-2.6/xfs_buf.c
index 39f44ee572e8..455e042e0c10 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_buf.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,6 +187,19 @@ free_address(
 {
 	a_list_t	*aentry;
 
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable.  If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail.  This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
 	if (likely(aentry)) {
 		spin_lock(&as_lock);
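
The hunk above short-circuits XFS's deferred-vunmap path when the kernel is built for Xen.
The sketch below is a rough illustration only, not code from the tree: it shows the eager
and deferred paths side by side.  The a_list_t, as_free_head and as_lock declarations are
simplified stand-ins for the ones defined earlier in xfs_buf.c, and the list-length and
wakeup bookkeeping of the real deferred path is omitted.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/* Simplified stand-ins for structures defined near the top of xfs_buf.c;
 * field names here are illustrative. */
typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

static a_list_t		*as_free_head;
static DEFINE_SPINLOCK(as_lock);

static void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

#ifdef CONFIG_XEN
	/*
	 * Eager path added by the patch: drop the vmap() alias right
	 * away so Xen can later take an exclusive RO mapping of the
	 * page when it is turned into a pagetable.
	 */
	vunmap(addr);
	return;
#endif

	/*
	 * Default path: queue the address on a free list so the
	 * vunmap() can be batched later; fall back to an immediate
	 * vunmap() if the bookkeeping allocation fails.
	 */
	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}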