From 8efa70009d7515657d43f317bf32aa2c2f58aadc Mon Sep 17 00:00:00 2001
From: Masakazu Mokuno
Date: Thu, 21 Aug 2008 06:18:56 +1000
Subject: [PATCH]

--- yaml ---
r: 109140
b: refs/heads/master
c: b47027795a22fe61f93de6010c120f26273fc693
h: refs/heads/master
v: v3
---
 [refs]                                 |  2 +-
 trunk/arch/powerpc/platforms/ps3/spu.c | 18 ++++++++++++++----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/[refs] b/[refs]
index 2930b66ee7e7..4557d6bb4f5f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9cfeb74e93cf3549b7fa67ffe407192da625c777
+refs/heads/master: b47027795a22fe61f93de6010c120f26273fc693
diff --git a/trunk/arch/powerpc/platforms/ps3/spu.c b/trunk/arch/powerpc/platforms/ps3/spu.c
index d135cef9ed6a..ccae3d446b98 100644
--- a/trunk/arch/powerpc/platforms/ps3/spu.c
+++ b/trunk/arch/powerpc/platforms/ps3/spu.c
@@ -186,14 +186,24 @@ static void spu_unmap(struct spu *spu)
 	iounmap(spu_pdata(spu)->shadow);
 }
 
+/**
+ * setup_areas - Map the spu regions into the address space.
+ *
+ * The current HV requires the spu shadow regs to be mapped with the
+ * PTE page protection bits set as read-only (PP=3). This implementation
+ * uses the low-level __ioremap() to bypass the page protection settings
+ * enforced by ioremap_flags() to get the needed PTE bits set for the
+ * shadow regs.
+ */
+
 static int __init setup_areas(struct spu *spu)
 {
 	struct table {char* name; unsigned long addr; unsigned long size;};
+	static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;
 
-	spu_pdata(spu)->shadow = ioremap_flags(spu_pdata(spu)->shadow_addr,
-					       sizeof(struct spe_shadow),
-					       pgprot_val(PAGE_READONLY) |
-					       _PAGE_NO_CACHE);
+	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
+					   sizeof(struct spe_shadow),
+					   shadow_flags);
 	if (!spu_pdata(spu)->shadow) {
 		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
 		goto fail_ioremap;
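
Note: per the new kernel-doc comment, the bare 3 in shadow_flags is the PTE PP
(page protection) field value; PP=3 gives the read-only mapping the hypervisor
requires, while _PAGE_NO_CACHE keeps the shadow regs uncached. A minimal sketch
of the resulting call pattern follows; it assumes the 2008-era powerpc
__ioremap() interface, which takes raw PTE flag bits rather than a pgprot_t,
and the PS3-specific helpers spu_pdata() and struct spe_shadow from spu.c.
The error path is simplified relative to the patch, which jumps to a
fail_ioremap label.

	/* Request an uncached, read-only (PP=3) mapping by passing raw
	 * PTE flag bits straight to __ioremap(); unlike ioremap_flags(),
	 * __ioremap() does not adjust the protection bits it is given. */
	static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;

	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
					   sizeof(struct spe_shadow),
					   shadow_flags);
	if (!spu_pdata(spu)->shadow)
		return -ENOMEM;	/* sketch only; the patch uses goto fail_ioremap */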