From 1a8a715a8b53967d743129abab8fae1b28dc6749 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 19 Jan 2013 15:27:30 +0000
Subject: [PATCH]

--- yaml ---
r: 349145
b: refs/heads/master
c: 210b1847b32951f52d19df229972399e5b987de2
h: refs/heads/master
i:
  349143: fe74c76cf43d67e126d3af1a6ffa9ce1b5477008
v: v3
---
 [refs]                          |  2 +-
 trunk/arch/arm/kernel/head.S    |  3 +++
 trunk/arch/arm/mm/dma-mapping.c | 18 ++++++++++--------
 trunk/arch/arm/mm/mmu.c         |  2 +-
 trunk/arch/arm/vfp/entry.S      |  6 +++---
 trunk/arch/arm/vfp/vfphw.S      |  4 ++--
 6 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/[refs] b/[refs]
index d4ccde8d66ad..95aab98e69de 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d01723479e6a6c70c83295f7847477a016d5e14a
+refs/heads/master: 210b1847b32951f52d19df229972399e5b987de2
diff --git a/trunk/arch/arm/kernel/head.S b/trunk/arch/arm/kernel/head.S
index 16abc8322f79..486a15ae9011 100644
--- a/trunk/arch/arm/kernel/head.S
+++ b/trunk/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
 
 	/*
 	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
 	addne	r3, r3, #PAGE_OFFSET
 	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index 6b2fb87c8698..076c26d43864 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages. But we still need to process highmem pages individually.
 	 * If highmem is not configured then the bulk of this loop gets
 	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index 9f0610243bd6..ce328c7f5c94 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED,
+				L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
diff --git a/trunk/arch/arm/vfp/entry.S b/trunk/arch/arm/vfp/entry.S
index cc926c985981..323ce1a62bbf 100644
--- a/trunk/arch/arm/vfp/entry.S
+++ b/trunk/arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	add	r11, r4, #1		@ increment it
 	str	r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
 
 	__INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
diff --git a/trunk/arch/arm/vfp/vfphw.S b/trunk/arch/arm/vfp/vfphw.S
index ea0349f63586..dd5e56f95f3f 100644
--- a/trunk/arch/arm/vfp/vfphw.S
+++ b/trunk/arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
 	@ not recognised by VFP
 
 	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
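
For reference, the per-page walk that dma_cache_maint_page() switches to above
can be sketched in plain C. This is a minimal userspace sketch of the loop
structure only, not kernel code: PAGE_SIZE, op() and cache_maint_sketch() are
stand-in names, the kmap/highmem handling is omitted, and unlike the lowmem
path in the patch it always clamps each chunk to a page boundary. The point it
illustrates is why the loop now advances a pfn and looks the page up each
iteration instead of doing page++: with sparse memory layouts, consecutive
pfns are not guaranteed to have adjacent struct page entries, so pfn
arithmetic is the safe way to step through the buffer.

/*
 * Userspace sketch (assumed names, not the kernel API): split a buffer
 * described by a starting pfn, an offset into that page and a total size
 * into per-page chunks, handing each chunk to op().
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* stand-in for the kernel's PAGE_SIZE */

static void op(unsigned long pfn, size_t offset, size_t len)
{
	/* stand-in for the cache maintenance callback */
	printf("pfn %lu: offset %zu, len %zu\n", pfn, offset, len);
}

static void cache_maint_sketch(unsigned long pfn, size_t offset, size_t size)
{
	size_t left = size;

	/* normalise so that offset always lies within the first page */
	pfn += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* never cross a page boundary in one step */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		op(pfn, offset, len);

		offset = 0;	/* later pages start at their beginning */
		pfn++;		/* advance by pfn, not by a struct page pointer */
		left -= len;
	} while (left);
}

int main(void)
{
	/* example: 10000 bytes starting 5000 bytes into page frame 42 */
	cache_maint_sketch(42, 5000, 10000);
	return 0;
}

Running the example produces three chunks (3192 + 4096 + 2712 bytes), the
first starting at offset 904 into pfn 43, which is exactly the split the
patched loop performs one page at a time.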