Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
 "A number of fixes:

  Patrik found a problem with preempt counting in the VFP assembly
  functions which can cause the preempt count to be upset.

  Nicolas fixed a problem with the parsing of the DT when it straddles a
  1MB boundary.

  Subhash Jadavani reported a problem with sparsemem and our highmem
  support for cache maintenance for DMA areas, and TI found a bug in
  their strongly ordered memory mapping type.

  Also, three fixes by way of Will Deacon's tree from Dave Martin for
  instruction compatibility and Marc Zyngier to fix hypervisor boot mode
  issues."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7629/1: mm: Fix missing XN flag for MT_MEMORY_SO
  ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
  ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area
  ARM: 7627/1: Predicate preempt logic on PREEMPT_COUNT not PREEMPT alone
  ARM: virt: simplify __hyp_stub_install epilog
  ARM: virt: boot secondary CPUs through the right entry point
  ARM: virt: Avoid bx instruction for compatibility with <=ARMv4
Linus Torvalds committed Jan 24, 2013
2 parents 1496ec1 + 210b184 commit 01acd3e
Showing 6 changed files with 26 additions and 27 deletions.
5 changes: 4 additions & 1 deletion arch/arm/kernel/head.S
@@ -246,13 +246,16 @@ __create_page_tables:
 
         /*
          * Then map boot params address in r2 if specified.
+         * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
          */
         mov r0, r2, lsr #SECTION_SHIFT
         movs r0, r0, lsl #SECTION_SHIFT
         subne r3, r0, r8
         addne r3, r3, #PAGE_OFFSET
         addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
         orrne r6, r7, r0
+        strne r6, [r3], #1 << PMD_ORDER
+        addne r6, r6, #1 << SECTION_SHIFT
         strne r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
          * as it has already been validated by the primary processor.
          */
 #ifdef CONFIG_ARM_VIRT_EXT
-        bl __hyp_stub_install
+        bl __hyp_stub_install_secondary
 #endif
         safe_svcmode_maskall r9
 
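
The extra section mapping above is needed because the ATAGs or DTB passed in r2 can start anywhere in RAM, including just below a 1 MB section boundary, in which case a single section mapping would cover only the start of the blob. A standalone sketch of the arithmetic, using a made-up DTB address and size (not values from the commit):

/* Sketch only: SECTION_SIZE is 1 MB with the ARM short-descriptor format;
 * the DTB address and size below are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define SECTION_SHIFT 20
#define SECTION_SIZE  (1UL << SECTION_SHIFT)

int main(void)
{
        uint32_t dtb_start = 0x800ff000;     /* hypothetical DTB load address */
        uint32_t dtb_size  = 0x00008000;     /* 32 KB blob */
        uint32_t first = dtb_start & ~(SECTION_SIZE - 1);
        uint32_t last  = (dtb_start + dtb_size - 1) & ~(SECTION_SIZE - 1);

        /* The blob ends in the next 1 MB section even though it is far
         * smaller than 1 MB, so two section mappings are required. */
        printf("sections to map: %lu\n",
               (unsigned long)((last - first) / SECTION_SIZE + 1));
        return 0;
}

With values like these the blob crosses into the next section, which is what the added strne/addne pair arranges: the first store writes the entry for the section containing the start of the blob, and the pre-existing store then writes the entry for the following section.
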
18 changes: 6 additions & 12 deletions arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
          * immediately.
          */
         compare_cpu_mode_with_primary r4, r5, r6, r7
-        bxne lr
+        movne pc, lr
 
         /*
          * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
          */
 
         cmp r4, #HYP_MODE
-        bxne lr                         @ give up if the CPU is not in HYP mode
+        movne pc, lr                    @ give up if the CPU is not in HYP mode
 
         /*
          * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
          * Eventually, CPU-specific code might be needed -- assume not for now
          *
          * This code relies on the "eret" instruction to synchronize the
-         * various coprocessor accesses.
+         * various coprocessor accesses. This is done when we switch to SVC
+         * (see safe_svcmode_maskall).
          */
         @ Now install the hypervisor stub:
         adr r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
 1:
 #endif
 
-        bic r7, r4, #MODE_MASK
-        orr r7, r7, #SVC_MODE
-THUMB(  orr r7, r7, #PSR_T_BIT )
-        msr spsr_cxsf, r7               @ This is SPSR_hyp.
-
-        __MSR_ELR_HYP(14)               @ msr elr_hyp, lr
-        __ERET                          @ return, switching to SVC mode
-                                        @ The boot CPU mode is left in r4.
+        bx lr                           @ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
         @ fall through
 ENTRY(__hyp_set_vectors)
         __HVC(0)
-        bx lr
+        mov pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
18 changes: 10 additions & 8 deletions arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
         size_t size, enum dma_data_direction dir,
         void (*op)(const void *, size_t, int))
 {
+        unsigned long pfn;
+        size_t left = size;
+
+        pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+        offset %= PAGE_SIZE;
+
         /*
          * A single sg entry may refer to multiple physically contiguous
          * pages. But we still need to process highmem pages individually.
          * If highmem is not configured then the bulk of this loop gets
          * optimized out.
          */
-        size_t left = size;
         do {
                 size_t len = left;
                 void *vaddr;
 
+                page = pfn_to_page(pfn);
+
                 if (PageHighMem(page)) {
-                        if (len + offset > PAGE_SIZE) {
-                                if (offset >= PAGE_SIZE) {
-                                        page += offset / PAGE_SIZE;
-                                        offset %= PAGE_SIZE;
-                                }
+                        if (len + offset > PAGE_SIZE)
                                 len = PAGE_SIZE - offset;
-                        }
                         vaddr = kmap_high_get(page);
                         if (vaddr) {
                                 vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                         op(vaddr, len, dir);
                 }
                 offset = 0;
-                page++;
+                pfn++;
                 left -= len;
         } while (left);
 }
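
The switch from a struct page pointer to a pfn matters because with CONFIG_SPARSEMEM the memory map is allocated per memory section, so struct page entries for physically adjacent pages are not guaranteed to sit next to each other in memory, and page++ can walk off the end of one section's map. Re-deriving the page from the pfn on each iteration is always valid. A simplified, lowmem-only sketch of that iteration pattern (maint_range() is a made-up name, not the kernel function; the highmem and kmap handling shown in the diff is omitted):

/* Hedged sketch of the pfn-based walk the fix adopts. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

static void maint_range(struct page *page, unsigned long offset, size_t size,
                        enum dma_data_direction dir,
                        void (*op)(const void *, size_t, int))
{
        unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
        size_t left = size;

        offset %= PAGE_SIZE;
        do {
                size_t len = min_t(size_t, left, PAGE_SIZE - offset);
                struct page *p = pfn_to_page(pfn);      /* re-lookup each page */

                op(page_address(p) + offset, len, dir);
                offset = 0;
                pfn++;                                  /* never "p++" across pages */
                left -= len;
        } while (left);
}

Only the page-advance strategy is illustrated here; the real dma_cache_maint_page() keeps the PageHighMem()/kmap_high_get() paths shown above.
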
2 changes: 1 addition & 1 deletion arch/arm/mm/mmu.c
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
         },
         [MT_MEMORY_SO] = {
                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-                             L_PTE_MT_UNCACHED,
+                             L_PTE_MT_UNCACHED | L_PTE_XN,
                 .prot_l1   = PMD_TYPE_TABLE,
                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
                              PMD_SECT_UNCACHED | PMD_SECT_XN,
6 changes: 3 additions & 3 deletions arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
 @ IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
         ldr r4, [r10, #TI_PREEMPT]      @ get preempt count
         add r11, r4, #1                 @ increment it
         str r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
         get_thread_info r10
         ldr r4, [r10, #TI_PREEMPT]      @ get preempt count
         sub r11, r4, #1                 @ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
 
 __INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
         get_thread_info r10
         ldr r4, [r10, #TI_PREEMPT]      @ get preempt count
         sub r11, r4, #1                 @ decrement it
4 changes: 2 additions & 2 deletions arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
                                         @ else it's one 32-bit instruction, so
                                         @ always subtract 4 from the following
                                         @ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
         get_thread_info r10
         ldr r4, [r10, #TI_PREEMPT]      @ get preempt count
         sub r11, r4, #1                 @ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
                                         @ not recognised by VFP
 
         DBGSTR "not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
         get_thread_info r10
         ldr r4, [r10, #TI_PREEMPT]      @ get preempt count
         sub r11, r4, #1                 @ decrement it
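
All of the VFP hunks make the same change: the hand-maintained preempt count bracket has to be compiled in whenever the kernel actually keeps a preempt counter, which is governed by CONFIG_PREEMPT_COUNT rather than CONFIG_PREEMPT (PREEMPT selects PREEMPT_COUNT, but PREEMPT_COUNT can also be enabled on a non-preemptible kernel, for example by DEBUG_ATOMIC_SLEEP). The counter, when it exists, is what the rest of the kernel uses to detect atomic context, so the guards in this assembly must match the option that controls the counter itself; guarding on CONFIG_PREEMPT alone is what allowed the count to be upset in the configuration Patrik reported. A rough C rendering of the bracket the assembly implements, with hypothetical helper names (the real code manipulates TI_PREEMPT directly in assembly):

/*
 * Sketch only: the point is the guard, which must be CONFIG_PREEMPT_COUNT
 * (the option that makes the counter live), not CONFIG_PREEMPT.
 */
#include <linux/thread_info.h>

#ifdef CONFIG_PREEMPT_COUNT
static inline void vfp_preempt_enter(void)
{
        current_thread_info()->preempt_count++;         /* mirror of the asm "add" */
}
static inline void vfp_preempt_leave(void)
{
        current_thread_info()->preempt_count--;         /* mirror of the asm "sub" */
}
#else
static inline void vfp_preempt_enter(void) { }
static inline void vfp_preempt_leave(void) { }
#endif
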
