Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

Paul Mackerras writes:

    The highlights are:

    * Reduced latency for interrupts from PCI pass-through devices, from
      Suresh Warrier and me.
    * Halt-polling implementation from Suraj Jitindar Singh.
    * 64-bit VCPU statistics, also from Suraj.
    * Various other minor fixes and improvements.
Paolo Bonzini committed Sep 13, 2016
2 parents 6f90f1d + aad9e5b commit ad53e35
Showing 36 changed files with 1,366 additions and 503 deletions.
12 changes: 6 additions & 6 deletions arch/arm/include/asm/kvm_host.h
@@ -183,15 +183,15 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_vm_stat {
-	u32 remote_tlb_flush;
+	ulong remote_tlb_flush;
 };
 
 struct kvm_vcpu_stat {
-	u32 halt_successful_poll;
-	u32 halt_attempted_poll;
-	u32 halt_poll_invalid;
-	u32 halt_wakeup;
-	u32 hvc_exit_stat;
+	u64 halt_successful_poll;
+	u64 halt_attempted_poll;
+	u64 halt_poll_invalid;
+	u64 halt_wakeup;
+	u64 hvc_exit_stat;
 	u64 wfe_exit_stat;
 	u64 wfi_exit_stat;
 	u64 mmio_exit_user;
12 changes: 6 additions & 6 deletions arch/arm64/include/asm/kvm_host.h
@@ -290,15 +290,15 @@ struct kvm_vcpu_arch {
 #endif
 
 struct kvm_vm_stat {
-	u32 remote_tlb_flush;
+	ulong remote_tlb_flush;
 };
 
 struct kvm_vcpu_stat {
-	u32 halt_successful_poll;
-	u32 halt_attempted_poll;
-	u32 halt_poll_invalid;
-	u32 halt_wakeup;
-	u32 hvc_exit_stat;
+	u64 halt_successful_poll;
+	u64 halt_attempted_poll;
+	u64 halt_poll_invalid;
+	u64 halt_wakeup;
+	u64 hvc_exit_stat;
 	u64 wfe_exit_stat;
 	u64 wfi_exit_stat;
 	u64 mmio_exit_user;
46 changes: 23 additions & 23 deletions arch/mips/include/asm/kvm_host.h
@@ -110,32 +110,32 @@
 extern atomic_t kvm_mips_instance;
 
 struct kvm_vm_stat {
-	u32 remote_tlb_flush;
+	ulong remote_tlb_flush;
 };
 
 struct kvm_vcpu_stat {
-	u32 wait_exits;
-	u32 cache_exits;
-	u32 signal_exits;
-	u32 int_exits;
-	u32 cop_unusable_exits;
-	u32 tlbmod_exits;
-	u32 tlbmiss_ld_exits;
-	u32 tlbmiss_st_exits;
-	u32 addrerr_st_exits;
-	u32 addrerr_ld_exits;
-	u32 syscall_exits;
-	u32 resvd_inst_exits;
-	u32 break_inst_exits;
-	u32 trap_inst_exits;
-	u32 msa_fpe_exits;
-	u32 fpe_exits;
-	u32 msa_disabled_exits;
-	u32 flush_dcache_exits;
-	u32 halt_successful_poll;
-	u32 halt_attempted_poll;
-	u32 halt_poll_invalid;
-	u32 halt_wakeup;
+	u64 wait_exits;
+	u64 cache_exits;
+	u64 signal_exits;
+	u64 int_exits;
+	u64 cop_unusable_exits;
+	u64 tlbmod_exits;
+	u64 tlbmiss_ld_exits;
+	u64 tlbmiss_st_exits;
+	u64 addrerr_st_exits;
+	u64 addrerr_ld_exits;
+	u64 syscall_exits;
+	u64 resvd_inst_exits;
+	u64 break_inst_exits;
+	u64 trap_inst_exits;
+	u64 msa_fpe_exits;
+	u64 fpe_exits;
+	u64 msa_disabled_exits;
+	u64 flush_dcache_exits;
+	u64 halt_successful_poll;
+	u64 halt_attempted_poll;
+	u64 halt_poll_invalid;
+	u64 halt_wakeup;
 };
 
 struct kvm_arch_memory_slot {
37 changes: 37 additions & 0 deletions arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -244,6 +244,43 @@ static inline int segment_shift(int ssize)
 	return SID_SHIFT_1T;
 }
 
+/*
+ * This array is indexed by the LP field of the HPTE second dword.
+ * Since this field may contain some RPN bits, some entries are
+ * replicated so that we get the same value irrespective of RPN.
+ * The top 4 bits are the page size index (MMU_PAGE_*) for the
+ * actual page size, the bottom 4 bits are the base page size.
+ */
+extern u8 hpte_page_sizes[1 << LP_BITS];
+
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
+{
+	unsigned int i, lp;
+
+	if (!(h & HPTE_V_LARGE))
+		return 1ul << 12;
+
+	/* Look at the 8 bit LP value */
+	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+	i = hpte_page_sizes[lp];
+	if (!i)
+		return 0;
+	if (!is_base_size)
+		i >>= 4;
+	return 1ul << mmu_psize_defs[i & 0xf].shift;
+}
+
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 /*
  * The current system page and segment sizes
  */
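To make the new two-nibble encoding concrete, here is a minimal sketch (not part of the patch) of how a caller might decode both sizes from an HPTE pair. The function name is hypothetical; a zero return from hpte_page_size() means the LP bits matched no entry in hpte_page_sizes[]:

	/* Hypothetical caller: report the sizes encoded in one HPTE. */
	static void report_hpte_sizes(unsigned long hpte_v, unsigned long hpte_r)
	{
		unsigned long actual = hpte_page_size(hpte_v, hpte_r);
		unsigned long base = hpte_base_page_size(hpte_v, hpte_r);

		if (!actual)
			pr_err("HPTE has an invalid LP encoding\n");
		else
			pr_info("actual 0x%lx, base 0x%lx\n", actual, base);
	}

For a non-large page (HPTE_V_LARGE clear) both calls return 4096; otherwise the high nibble of the hpte_page_sizes[] entry selects the actual MMU_PAGE_* index and the low nibble the base index, exactly as the comment in the hunk above describes.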
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/hmi.h
@@ -21,7 +21,7 @@
 #ifndef __ASM_PPC64_HMI_H__
 #define __ASM_PPC64_HMI_H__
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 
 #define	CORE_TB_RESYNC_REQ_BIT		63
 #define MAX_SUBCORE_PER_CORE		4
29 changes: 29 additions & 0 deletions arch/powerpc/include/asm/io.h
@@ -241,6 +241,35 @@ static inline void out_be64(volatile u64 __iomem *addr, u64 val)
 #endif
 #endif /* __powerpc64__ */
 
+
+/*
+ * Simple Cache inhibited accessors
+ * Unlike the DEF_MMIO_* macros, these don't include any h/w memory
+ * barriers, callers need to manage memory barriers on their own.
+ * These can only be used in hypervisor real mode.
+ */
+
+static inline u32 _lwzcix(unsigned long addr)
+{
+	u32 ret;
+
+	__asm__ __volatile__("lwzcix %0,0, %1"
+			     : "=r" (ret) : "r" (addr) : "memory");
+	return ret;
+}
+
+static inline void _stbcix(u64 addr, u8 val)
+{
+	__asm__ __volatile__("stbcix %0,0,%1"
+			     : : "r" (val), "r" (addr) : "memory");
+}
+
+static inline void _stwcix(u64 addr, u32 val)
+{
+	__asm__ __volatile__("stwcix %0,0,%1"
+			     : : "r" (val), "r" (addr) : "memory");
+}
+
 /*
  * Low level IO stream instructions are defined out of line for now
  */
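As a usage illustration (mine, not from this series): a real-mode helper that acknowledges an interrupt by storing to the XICS XIRR register through _stwcix(). The address argument and the 4-byte register offset are assumptions for the example, and the explicit barrier reflects the comment above that these accessors supply none of their own:

	/* Hypothetical: EOI an interrupt from hypervisor real mode. */
	static void rm_xics_eoi(unsigned long xics_phys, u32 xirr)
	{
		mb();	/* order prior accesses; _stwcix() has no barrier */
		_stwcix(xics_phys + 4, xirr);	/* 4 = assumed XIRR offset */
	}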
10 changes: 10 additions & 0 deletions arch/powerpc/include/asm/kvm_asm.h
@@ -105,6 +105,15 @@
 #define BOOK3S_INTERRUPT_FAC_UNAVAIL	0xf60
 #define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80
 
+/* book3s_hv */
+
+/*
+ * Special trap used to indicate to host that this is a
+ * passthrough interrupt that could not be handled
+ * completely in the guest.
+ */
+#define BOOK3S_INTERRUPT_HV_RM_HARD	0x5555
+
 #define BOOK3S_IRQPRIO_SYSTEM_RESET	0
 #define BOOK3S_IRQPRIO_DATA_SEGMENT	1
 #define BOOK3S_IRQPRIO_INST_SEGMENT	2
@@ -136,6 +145,7 @@
 #define RESUME_FLAG_NV		(1<<0)	/* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */
 #define RESUME_FLAG_ARCH1	(1<<2)
+#define RESUME_FLAG_ARCH2	(1<<3)
 
 #define RESUME_GUEST		0
 #define RESUME_GUEST_NV		RESUME_FLAG_NV
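A sketch of how the two additions in this file fit together (assumed; the surrounding handler is not in the visible hunks): the HV exit path can translate the BOOK3S_INTERRUPT_HV_RM_HARD trap into a passthrough-specific resume code built from the new RESUME_FLAG_ARCH2. The RESUME_PASSTHROUGH name and the dispatcher fragment are illustrative:

	#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

	/* Hypothetical fragment of an HV exit dispatcher. */
	static int handle_trap(struct kvm_vcpu *vcpu)
	{
		int r = RESUME_GUEST;

		switch (vcpu->arch.trap) {
		case BOOK3S_INTERRUPT_HV_RM_HARD:
			/* complete the passthrough interrupt in the host */
			r = RESUME_PASSTHROUGH;
			break;
		}
		return r;
	}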
37 changes: 37 additions & 0 deletions arch/powerpc/include/asm/kvm_book3s.h
@@ -69,6 +69,42 @@ struct hpte_cache {
 	int pagesize;
 };
 
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
+ */
+struct kvmppc_vcore {
+	int n_runnable;
+	int num_threads;
+	int entry_exit_map;
+	int napping_threads;
+	int first_vcpuid;
+	u16 pcpu;
+	u16 last_cpu;
+	u8 vcore_state;
+	u8 in_guest;
+	struct kvmppc_vcore *master_vcore;
+	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
+	struct list_head preempt_list;
+	spinlock_t lock;
+	struct swait_queue_head wq;
+	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
+	u64 stolen_tb;
+	u64 preempt_tb;
+	struct kvm_vcpu *runner;
+	struct kvm *kvm;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
+	ulong dpdes;		/* doorbell state (POWER8) */
+	ulong conferring_threads;
+	unsigned int halt_poll_ns;
+};
+
 struct kvmppc_vcpu_book3s {
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
@@ -191,6 +227,7 @@ extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 				 struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 				   struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
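The lock-free protocol described in the entry_exit_map comment can be made concrete with a short sketch (mine, not part of the diff): a thread's entry bit is set with cmpxchg() only while the exit half of the map is still zero. The helper name is hypothetical:

	/* Hypothetical: join a vcore iff no thread has begun exiting. */
	static bool try_enter_vcore(struct kvmppc_vcore *vc, int thread)
	{
		int old, new;

		do {
			old = vc->entry_exit_map;
			if (old >> 8)		/* exit map non-zero: refuse */
				return false;
			new = old | (1 << thread);	/* entry bit, low byte */
		} while (cmpxchg(&vc->entry_exit_map, old, new) != old);
		return true;
	}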
87 changes: 7 additions & 80 deletions arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,6 +20,8 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
+#include <asm/book3s/64/mmu-hash.h>
+
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
@@ -97,56 +99,20 @@ static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
 	hpte[0] = cpu_to_be64(hpte_v);
 }
 
-static inline int __hpte_actual_psize(unsigned int lp, int psize)
-{
-	int i, shift;
-	unsigned int mask;
-
-	/* start from 1 ignoring MMU_PAGE_4K */
-	for (i = 1; i < MMU_PAGE_COUNT; i++) {
-
-		/* invalid penc */
-		if (mmu_psize_defs[psize].penc[i] == -1)
-			continue;
-		/*
-		 * encoding bits per actual page size
-		 *        PTE LP     actual page size
-		 *    rrrr rrrz		>=8KB
-		 *    rrrr rrzz		>=16KB
-		 *    rrrr rzzz		>=32KB
-		 *    rrrr zzzz		>=64KB
-		 *    .......
-		 */
-		shift = mmu_psize_defs[i].shift - LP_SHIFT;
-		if (shift > LP_BITS)
-			shift = LP_BITS;
-		mask = (1 << shift) - 1;
-		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
-			return i;
-	}
-	return -1;
-}
-
 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 					     unsigned long pte_index)
 {
-	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
+	int i, b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
 	unsigned int penc;
 	unsigned long rb = 0, va_low, sllp;
 	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
 
 	if (v & HPTE_V_LARGE) {
-		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
-
-			/* valid entries have a shift value */
-			if (!mmu_psize_defs[b_psize].shift)
-				continue;
-
-			a_psize = __hpte_actual_psize(lp, b_psize);
-			if (a_psize != -1)
-				break;
-		}
+		i = hpte_page_sizes[lp];
+		b_psize = i & 0xf;
+		a_psize = i >> 4;
 	}
 
 	/*
	 * Ignore the top 14 bits of va
	 * v have top two bits covering segment size, hence move
@@ -215,45 +181,6 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
-					     bool is_base_size)
-{
-
-	int size, a_psize;
-	/* Look at the 8 bit LP value */
-	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
-
-	/* only handle 4k, 64k and 16M pages for now */
-	if (!(h & HPTE_V_LARGE))
-		return 1ul << 12;
-	else {
-		for (size = 0; size < MMU_PAGE_COUNT; size++) {
-			/* valid entries have a shift value */
-			if (!mmu_psize_defs[size].shift)
-				continue;
-
-			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1) {
-				if (is_base_size)
-					return 1ul << mmu_psize_defs[size].shift;
-				return 1ul << mmu_psize_defs[a_psize].shift;
-			}
-		}
-
-	}
-	return 0;
-}
-
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
-{
-	return __hpte_page_size(h, l, 0);
-}
-
-static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
-{
-	return __hpte_page_size(h, l, 1);
-}
-
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;