arm64: KVM: Refactor kern_hyp_va to deal with multiple offsets
As we move towards a selectable HYP VA range, it is obvious that
we don't want to test a variable to find out if we need to use
the bottom VA range, the top VA range, or use the address as is
(for VHE).

Instead, we can expand our current helper to generate the right
mask or nop with code patching. We default to using the top VA
space, with alternatives to switch to the bottom one or to nop
out the instructions.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Marc Zyngier authored and Christoffer Dall committed Jul 3, 2016
1 parent d53d9bc commit fd81e6b
Showing 2 changed files with 39 additions and 14 deletions.
arch/arm64/include/asm/kvm_hyp.h: 11 changes (0 additions, 11 deletions)
@@ -25,17 +25,6 @@
 
 #define __hyp_text __section(.hyp.text) notrace
 
-static inline unsigned long __kern_hyp_va(unsigned long v)
-{
-	asm volatile(ALTERNATIVE("and %0, %0, %1",
-				 "nop",
-				 ARM64_HAS_VIRT_HOST_EXTN)
-		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
-	return v;
-}
-
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
-
 #define read_sysreg_elx(r,nvh,vh)	\
 	({				\
 		u64 reg;		\
arch/arm64/include/asm/kvm_mmu.h: 42 changes (39 additions, 3 deletions)
@@ -90,13 +90,33 @@
 /*
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
+ *
+ * This generates the following sequences:
+ * - High mask:
+ *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
+ *		nop
+ * - Low mask:
+ *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
+ *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
+ * - VHE:
+ *		nop
+ *		nop
+ *
+ * The "low mask" version works because the mask is a strict subset of
+ * the "high mask", hence performing the first mask for nothing.
+ * Should be completely invisible on any viable CPU.
 */
 .macro kern_hyp_va	reg
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
 alternative_else
 	nop
 alternative_endif
+alternative_if_not ARM64_HYP_OFFSET_LOW
+	nop
+alternative_else
+	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
+alternative_endif
 .endm
 
 #else
@@ -107,7 +127,23 @@ alternative_endif
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 
-#define KERN_TO_HYP(kva)	((unsigned long)kva & HYP_PAGE_OFFSET_MASK)
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+	asm volatile(ALTERNATIVE("and %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v)
+		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
+	asm volatile(ALTERNATIVE("nop",
+				 "and %0, %0, %1",
+				 ARM64_HYP_OFFSET_LOW)
+		     : "+r" (v)
+		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
+	return v;
+}
+
+#define kern_hyp_va(v)	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define KERN_TO_HYP(v)	kern_hyp_va(v)
 
 /*
  * We currently only support a 40bit IPA.

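As a reading aid only (not part of the commit), here is a minimal C sketch of what the patched kern_hyp_va sequences compute once the alternatives have been applied at boot. The EXAMPLE_* masks, the kern_hyp_va_model name, and the has_vhe/hyp_offset_low parameters are illustrative stand-ins: the real masks are the kernel's HYP_PAGE_OFFSET_HIGH_MASK/HYP_PAGE_OFFSET_LOW_MASK (whose values depend on the VA configuration), and the real selection is made by patching the ARM64_HAS_VIRT_HOST_EXTN and ARM64_HYP_OFFSET_LOW alternatives rather than by runtime branches.

/*
 * Illustrative sketch only. The mask values below are assumptions standing
 * in for HYP_PAGE_OFFSET_{HIGH,LOW}_MASK; they are not the kernel's
 * definitions.
 */
#define EXAMPLE_VA_BITS		48
#define EXAMPLE_HIGH_MASK	((1UL << EXAMPLE_VA_BITS) - 1)
#define EXAMPLE_LOW_MASK	((1UL << (EXAMPLE_VA_BITS - 1)) - 1)

static unsigned long kern_hyp_va_model(unsigned long kva, int has_vhe,
				       int hyp_offset_low)
{
	if (has_vhe)
		return kva;		/* VHE: both instructions are patched to nop */

	kva &= EXAMPLE_HIGH_MASK;	/* default: top of the HYP VA range */
	if (hyp_offset_low)
		kva &= EXAMPLE_LOW_MASK;	/* bottom range: strict subset of the high mask */

	return kva;
}

This collapses to the three sequences documented in the comment added to kvm_mmu.h: mask plus nop for the high range, two masks for the low range, and two nops for VHE (a VHE system never has the low-offset capability set, so the early return loses nothing).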