Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull second set of ARM updates from Russell King:
 "This is the second set of ARM updates for this merge window.

  Contained within are changes to allow the kernel to boot in hypervisor
  mode on CPUs supporting virtualization, and cache flushing support to
  the point of inner shareable unification, which is used by the
  suspend/resume code to avoid having to do a full cache flush.

  Also included is one fix for VFP code identified by Michael Olbrich."

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels
  ARM: 7549/1: HYP: fix boot on some ARM1136 cores
  ARM: 7542/1: mm: fix cache LoUIS API for xscale and feroceon
  ARM: mm: update __v7_setup() to the new LoUIS cache maintenance API
  ARM: kernel: update __cpu_disable to use cache LoUIS maintenance API
  ARM: kernel: update cpu_suspend code to use cache LoUIS operations
  ARM: mm: rename jump labels in v7_flush_dcache_all function
  ARM: mm: implement LoUIS API for cache maintenance ops
  ARM: virt: arch_timers: enable access to physical timers
  ARM: virt: Add CONFIG_ARM_VIRT_EXT option
  ARM: virt: Add boot-time diagnostics
  ARM: virt: Update documentation for hyp mode entry support
  ARM: zImage/virt: hyp mode entry support for the zImage loader
  ARM: virt: allow the kernel to be entered in HYP mode
  ARM: opcodes: add __ERET/__MSR_ELR_HYP instruction encoding
Linus Torvalds committed Oct 12, 2012
2 parents 2fc07ef + a0f0dd5 commit 5cea24c
Showing 41 changed files with 623 additions and 23 deletions.
22 changes: 21 additions & 1 deletion Documentation/arm/Booting
@@ -154,13 +154,33 @@ In either case, the following conditions must be met:

- CPU mode
All forms of interrupts must be disabled (IRQs and FIQs)
The CPU must be in SVC mode. (A special exception exists for Angel)

For CPUs which do not include the ARM virtualization extensions, the
CPU must be in SVC mode. (A special exception exists for Angel)

CPUs which include support for the virtualization extensions can be
entered in HYP mode in order to enable the kernel to make full use of
these extensions. This is the recommended boot method for such CPUs,
unless the virtualization extensions are already in use by a pre-installed
hypervisor.

If the kernel is not entered in HYP mode for any reason, it must be
entered in SVC mode.

- Caches, MMUs
The MMU must be off.
Instruction cache may be on or off.
Data cache must be off.

If the kernel is entered in HYP mode, the above requirements apply to
the HYP mode configuration in addition to the ordinary PL1 (privileged
kernel modes) configuration. In addition, all traps into the
hypervisor must be disabled, and PL1 access must be granted for all
peripherals and CPU resources for which this is architecturally
possible. Except for entering in HYP mode, the system configuration
should be such that a kernel which does not include support for the
virtualization extensions can boot correctly without extra help.

- The boot loader is expected to call the kernel image by jumping
directly to the first instruction of the kernel image.

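As a rough standalone illustration of the entry conditions above, the C sketch below models the checks a boot shim could perform on the CPSR and SCTLR before jumping to the kernel. The bit positions (PSR I/F bits, mode field, SCTLR.M and SCTLR.C) are the standard ARMv7 ones; the helper and its test values are hypothetical and not part of this commit.

#include <stdbool.h>
#include <stdio.h>

#define PSR_I_BIT   0x00000080u   /* IRQs masked */
#define PSR_F_BIT   0x00000040u   /* FIQs masked */
#define MODE_MASK   0x0000001fu
#define SVC_MODE    0x00000013u
#define HYP_MODE    0x0000001au

#define SCTLR_M     (1u << 0)     /* MMU enable */
#define SCTLR_C     (1u << 2)     /* data cache enable */

/* Hypothetical check of the documented entry conditions. */
static bool entry_conditions_ok(unsigned int cpsr, unsigned int sctlr)
{
    unsigned int mode = cpsr & MODE_MASK;

    if (!(cpsr & PSR_I_BIT) || !(cpsr & PSR_F_BIT))
        return false;           /* all interrupts must be disabled */
    if (mode != SVC_MODE && mode != HYP_MODE)
        return false;           /* SVC, or HYP on CPUs with the virt. extensions */
    if (sctlr & SCTLR_M)
        return false;           /* MMU must be off */
    if (sctlr & SCTLR_C)
        return false;           /* data cache must be off */
    return true;
}

int main(void)
{
    /* Example: entered in HYP mode, IRQs/FIQs masked, MMU and D-cache off. */
    printf("%d\n", entry_conditions_ok(0x000001da, 0x00000000));
    return 0;
}
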
1 change: 1 addition & 0 deletions arch/arm/boot/compressed/.gitignore
@@ -1,6 +1,7 @@
ashldi3.S
font.c
lib1funcs.S
hyp-stub.S
piggy.gzip
piggy.lzo
piggy.lzma
9 changes: 8 additions & 1 deletion arch/arm/boot/compressed/Makefile
@@ -30,6 +30,10 @@ FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
OBJS += string.o
CFLAGS_string.o := -Os

ifeq ($(CONFIG_ARM_VIRT_EXT),y)
OBJS += hyp-stub.o
endif

#
# Architecture dependencies
#
@@ -126,7 +130,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif

ccflags-y := -fpic -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all
asflags-y := -Wa,-march=all -DZIMAGE

# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
@@ -198,3 +202,6 @@ $(obj)/font.c: $(FONTC)

$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
@sed "$(SEDFLAGS)" < $< > $@

$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
$(call cmd,shipped)
71 changes: 64 additions & 7 deletions arch/arm/boot/compressed/head.S
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>

/*
* Debugging stuff
@@ -132,7 +133,12 @@ start:
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
THUMB( .thumb )
1: mov r7, r1 @ save architecture ID
1:
mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
#endif
mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer

#ifndef __ARM_ARCH_2__
@@ -148,9 +154,9 @@ start:
ARM( swi 0x123456 ) @ angel_SWI_ARM
THUMB( svc 0xab ) @ angel_SWI_THUMB
not_angel:
mrs r2, cpsr @ turn off interrupts to
orr r2, r2, #0xc0 @ prevent angel from running
msr cpsr_c, r2
safe_svcmode_maskall r0
msr spsr_cxsf, r9 @ Save the CPU boot mode in
@ SPSR
#else
teqp pc, #0x0c000003 @ turn off interrupts
#endif
@@ -350,6 +356,20 @@ dtb_check_done:
adr r5, restart
bic r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
mrs r0, spsr
and r0, r0, #MODE_MASK
cmp r0, #HYP_MODE
bne 1f

bl __hyp_get_vectors
sub r0, r0, r5
add r0, r0, r10
bl __hyp_set_vectors
1:
#endif

sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
@@ -458,11 +478,29 @@ not_relocated: mov r0, #0
bl decompress_kernel
bl cache_clean_flush
bl cache_off
mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
ARM( mov pc, r4 ) @ call kernel
THUMB( bx r4 ) @ entry point is always ARM

#ifdef CONFIG_ARM_VIRT_EXT
mrs r0, spsr @ Get saved CPU boot mode
and r0, r0, #MODE_MASK
cmp r0, #HYP_MODE @ if not booted in HYP mode...
bne __enter_kernel @ boot kernel directly

adr r12, .L__hyp_reentry_vectors_offset
ldr r0, [r12]
add r0, r0, r12

bl __hyp_set_vectors
__HVC(0) @ otherwise bounce to hyp mode

b . @ should never be reached

.align 2
.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
#else
b __enter_kernel
#endif

.align 2
.type LC0, #object
@@ -1196,6 +1234,25 @@ memdump: mov r12, r0
#endif

.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
W(b) . @ reset
W(b) . @ undef
W(b) . @ svc
W(b) . @ pabort
W(b) . @ dabort
W(b) __enter_kernel @ hyp
W(b) . @ irq
W(b) . @ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
mov r0, #0 @ must be 0
ARM( mov pc, r4 ) @ call kernel
THUMB( bx r4 ) @ entry point is always ARM

reloc_code_end:

.align
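The hyp vector relocation added to head.S above is plain pointer arithmetic: the stub's vector base, returned by __hyp_get_vectors, is rebased by the same offset the decompressor moves itself by (r10 - r5) before being written back with __hyp_set_vectors. A small host-side C sketch of that adjustment, using made-up addresses in place of the register values:

#include <stdio.h>

int main(void)
{
    /* Hypothetical values standing in for the registers used in head.S:
     * r5  = cache-line aligned address the code currently runs at,
     * r10 = destination it is about to copy itself to,
     * r0  = vector base returned by __hyp_get_vectors. */
    unsigned long r5      = 0x80008000ul;
    unsigned long r10     = 0x80f00000ul;
    unsigned long vectors = 0x80008200ul;

    /* sub r0, r0, r5 ; add r0, r0, r10 : rebase by the relocation offset */
    unsigned long relocated = vectors - r5 + r10;

    printf("hyp vectors 0x%lx -> 0x%lx\n", vectors, relocated);
    return 0;
}
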
29 changes: 29 additions & 0 deletions arch/arm/include/asm/assembler.h
@@ -22,6 +22,7 @@

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>

#define IOMEM(x) (x)

@@ -239,6 +240,34 @@
.endm
#endif

/*
* Helper macro to enter SVC mode cleanly and mask interrupts. reg is
* a scratch register for the macro to overwrite.
*
* This macro is intended for forcing the CPU into SVC mode at boot time.
* You cannot return to the original mode.
*
* Beware, it also clobbers LR.
*/
.macro safe_svcmode_maskall reg:req
mrs \reg , cpsr
mov lr , \reg
and lr , lr , #MODE_MASK
cmp lr , #HYP_MODE
orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT
bic \reg , \reg , #MODE_MASK
orr \reg , \reg , #SVC_MODE
THUMB( orr \reg , \reg , #PSR_T_BIT )
bne 1f
orr \reg, \reg, #PSR_A_BIT
adr lr, BSYM(2f)
msr spsr_cxsf, \reg
__MSR_ELR_HYP(14)
__ERET
1: msr cpsr_c, \reg
2:
.endm

/*
* STRT/LDRT access macros with ARM and Thumb-2 variants
*/
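The net effect of safe_svcmode_maskall on the PSR can be modelled in a few lines of C: mask IRQs and FIQs, force SVC mode (plus the T bit for Thumb-2 kernels), and, when the CPU was entered in HYP mode, also set the A bit before the value is installed via SPSR/ELR_hyp and an ERET. The constants match asm/ptrace.h; the helper below is illustrative only.

#include <stdio.h>

#define PSR_A_BIT  0x00000100u
#define PSR_I_BIT  0x00000080u
#define PSR_F_BIT  0x00000040u
#define PSR_T_BIT  0x00000020u
#define MODE_MASK  0x0000001fu
#define SVC_MODE   0x00000013u
#define HYP_MODE   0x0000001au

/* PSR value the macro programs before the switch to SVC mode;
 * thumb_kernel mirrors the THUMB() line in the macro. */
static unsigned int svcmode_target_psr(unsigned int cpsr, int thumb_kernel)
{
    unsigned int psr = cpsr;

    psr |= PSR_I_BIT | PSR_F_BIT;       /* mask IRQs and FIQs */
    psr &= ~MODE_MASK;
    psr |= SVC_MODE;                    /* force SVC mode */
    if (thumb_kernel)
        psr |= PSR_T_BIT;
    if ((cpsr & MODE_MASK) == HYP_MODE)
        psr |= PSR_A_BIT;               /* came from HYP: also mask async aborts;
                                           value goes to SPSR and is used by ERET */
    return psr;
}

int main(void)
{
    printf("from HYP: 0x%08x\n", svcmode_target_psr(0x000001da, 0));
    printf("from SVC: 0x%08x\n", svcmode_target_psr(0x000001d3, 0));
    return 0;
}
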
15 changes: 15 additions & 0 deletions arch/arm/include/asm/cacheflush.h
@@ -49,6 +49,13 @@
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_kern_louis()
*
* Flush data cache levels up to the level of unification
* inner shareable and invalidate the I-cache.
* Only needed from v7 onwards, falls back to flush_cache_all()
* for all other processor versions.
*
* flush_user_all()
*
* Clean and invalidate all user space cache entries
@@ -97,6 +104,7 @@
struct cpu_cache_fns {
void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
void (*flush_kern_louis)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

@@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
@@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache;

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -204,6 +214,11 @@ static inline void __flush_icache_all(void)
__flush_icache_preferred();
}

/*
* Flush caches up to Level of Unification Inner Shareable
*/
#define flush_cache_louis() __cpuc_flush_kern_louis()

#define flush_cache_all() __cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
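The sketch below mocks the MULTI_CACHE indirection from this header in plain, standalone C: the new flush_kern_louis hook sits in cpu_cache_fns next to flush_kern_all, and flush_cache_louis() is the cheaper call that paths such as cpu_suspend can use instead of flush_cache_all(). The stub cache functions only print what the real v7 routines would do.

#include <stdio.h>

struct cpu_cache_fns {
    void (*flush_kern_all)(void);
    void (*flush_kern_louis)(void);
};

/* Stand-ins for the real v7 cache maintenance routines. */
static void v7_flush_kern_cache_all(void)   { puts("clean+invalidate all levels"); }
static void v7_flush_kern_cache_louis(void) { puts("clean+invalidate up to LoUIS"); }

static struct cpu_cache_fns cpu_cache = {
    .flush_kern_all   = v7_flush_kern_cache_all,
    .flush_kern_louis = v7_flush_kern_cache_louis,
};

#define __cpuc_flush_kern_all    cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis  cpu_cache.flush_kern_louis
#define flush_cache_all()        __cpuc_flush_kern_all()
#define flush_cache_louis()      __cpuc_flush_kern_louis()

int main(void)
{
    flush_cache_all();      /* full flush, as before */
    flush_cache_louis();    /* only to the Level of Unification Inner Shareable,
                               enough for the suspend/resume path */
    return 0;
}
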
1 change: 1 addition & 0 deletions arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,7 @@
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
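When only one cache type is built in (no MULTI_CACHE), the glue layer instead pastes the _CACHE token onto the function suffix at preprocessing time, so __cpuc_flush_kern_louis resolves directly to e.g. v7_flush_kern_cache_louis with no function-pointer indirection. A standalone sketch, with __glue written out the way asm/glue.h commonly defines it:

#include <stdio.h>

/* Two-level paste so that _CACHE is expanded before concatenation. */
#define ____glue(name, fn)  name##fn
#define __glue(name, fn)    ____glue(name, fn)

#define _CACHE v7
#define __cpuc_flush_kern_louis __glue(_CACHE, _flush_kern_cache_louis)

static void v7_flush_kern_cache_louis(void)
{
    puts("v7_flush_kern_cache_louis called");
}

int main(void)
{
    __cpuc_flush_kern_louis();   /* expands to v7_flush_kern_cache_louis() */
    return 0;
}
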
10 changes: 10 additions & 0 deletions arch/arm/include/asm/opcodes-virt.h
@@ -26,4 +26,14 @@
0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \
)

#define __ERET __inst_arm_thumb32( \
0xE160006E, \
0xF3DE8F00 \
)

#define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \
0xE12EF300 | regnum, \
0xF3808E30 | (regnum << 16) \
)

#endif /* ! __ASM_ARM_OPCODES_VIRT_H */
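The two words in each macro above are the ARM and Thumb-2 encodings handed to __inst_arm_thumb32(). A small host-side C program can reproduce them, for example for __MSR_ELR_HYP(14), the form the boot code uses to load ELR_hyp from lr before an ERET:

#include <stdio.h>

/* Same bit patterns as the header; printed rather than emitted as code. */
#define ERET_ARM             0xE160006Eu
#define ERET_THUMB           0xF3DE8F00u
#define MSR_ELR_HYP_ARM(r)   (0xE12EF300u | (r))
#define MSR_ELR_HYP_THUMB(r) (0xF3808E30u | ((r) << 16))

int main(void)
{
    printf("MSR ELR_hyp, r14: ARM %#010x  Thumb-2 %#010x\n",
           MSR_ELR_HYP_ARM(14), MSR_ELR_HYP_THUMB(14));
    printf("ERET:             ARM %#010x  Thumb-2 %#010x\n",
           ERET_ARM, ERET_THUMB);
    return 0;
}
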
1 change: 1 addition & 0 deletions arch/arm/include/asm/ptrace.h
@@ -44,6 +44,7 @@
#define IRQ_MODE 0x00000012
#define SVC_MODE 0x00000013
#define ABT_MODE 0x00000017
#define HYP_MODE 0x0000001a
#define UND_MODE 0x0000001b
#define SYSTEM_MODE 0x0000001f
#define MODE32_BIT 0x00000010
4 changes: 2 additions & 2 deletions arch/arm/include/asm/vfpmacros.h
@@ -28,7 +28,7 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPv3D16
ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
@@ -52,7 +52,7 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPv3D16
stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
69 changes: 69 additions & 0 deletions arch/arm/include/asm/virt.h
@@ -0,0 +1,69 @@
/*
* Copyright (c) 2012 Linaro Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#ifndef VIRT_H
#define VIRT_H

#include <asm/ptrace.h>

/*
* Flag indicating that the kernel was not entered in the same mode on every
* CPU. The zImage loader stashes this value in an SPSR, so we need an
* architecturally defined flag bit here (the N flag, as it happens)
*/
#define BOOT_CPU_MODE_MISMATCH (1<<31)

#ifndef __ASSEMBLY__

#ifdef CONFIG_ARM_VIRT_EXT
/*
* __boot_cpu_mode records what mode the primary CPU was booted in.
* A correctly-implemented bootloader must start all CPUs in the same mode:
* if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
* that some CPU(s) were booted in a different mode.
*
* This allows the kernel to flag an error when the secondaries have come up.
*/
extern int __boot_cpu_mode;

void __hyp_set_vectors(unsigned long phys_vector_base);
unsigned long __hyp_get_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
#endif

#ifndef ZIMAGE
void hyp_mode_check(void);

/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
}

/* Check if the bootloader has booted CPUs in different modes */
static inline bool is_hyp_mode_mismatched(void)
{
return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}
#endif

#endif /* __ASSEMBLY__ */

#endif /* ! VIRT_H */
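A standalone C model of the helpers above: __boot_cpu_mode is normally recorded by early assembly, so here it is simply set by hand to show how is_hyp_mode_available() and is_hyp_mode_mismatched() react to a clean HYP boot versus one where a secondary CPU came up in a different mode. (The variable is kept unsigned in this sketch to avoid shifting into the sign bit.)

#include <stdbool.h>
#include <stdio.h>

#define MODE_MASK               0x0000001fu
#define HYP_MODE                0x0000001au
#define BOOT_CPU_MODE_MISMATCH  (1u << 31)

static unsigned int __boot_cpu_mode;

static bool is_hyp_mode_available(void)
{
    return (__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
           !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}

static bool is_hyp_mode_mismatched(void)
{
    return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}

int main(void)
{
    __boot_cpu_mode = HYP_MODE;                           /* every CPU entered in HYP */
    printf("available=%d mismatched=%d\n",
           is_hyp_mode_available(), is_hyp_mode_mismatched());

    __boot_cpu_mode = HYP_MODE | BOOT_CPU_MODE_MISMATCH;  /* a secondary came up in SVC */
    printf("available=%d mismatched=%d\n",
           is_hyp_mode_available(), is_hyp_mode_mismatched());
    return 0;
}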