
Commit d7e1633

Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

Paolo Bonzini committed May 13, 2016
2 parents 14717e2 + b1a4286 commit d7e1633
Showing 5 changed files with 62 additions and 23 deletions.
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s_hv.c
@@ -27,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
 #include <linux/page-flags.h>

32 changes: 31 additions & 1 deletion arch/powerpc/kvm/book3s_pr.c
@@ -882,6 +882,24 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 }
 #endif
 
+static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
+{
+        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+                u64 msr = kvmppc_get_msr(vcpu);
+
+                kvmppc_set_msr(vcpu, msr | MSR_SE);
+        }
+}
+
+static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
+{
+        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+                u64 msr = kvmppc_get_msr(vcpu);
+
+                kvmppc_set_msr(vcpu, msr & ~MSR_SE);
+        }
+}
+
 int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int exit_nr)
 {
@@ -1207,10 +1225,18 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 break;
 #endif
         case BOOK3S_INTERRUPT_MACHINE_CHECK:
-        case BOOK3S_INTERRUPT_TRACE:
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 r = RESUME_GUEST;
                 break;
+        case BOOK3S_INTERRUPT_TRACE:
+                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+                        run->exit_reason = KVM_EXIT_DEBUG;
+                        r = RESUME_HOST;
+                } else {
+                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                        r = RESUME_GUEST;
+                }
+                break;
         default:
         {
                 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
@@ -1479,6 +1505,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                 goto out;
         }
 
+        kvmppc_setup_debug(vcpu);
+
         /*
          * Interrupts could be timers for the guest which we have to inject
          * again, so let's postpone them until we're in the guest and if we
@@ -1501,6 +1529,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
         ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
+        kvmppc_clear_debug(vcpu);
+
         /* No need for kvm_guest_exit. It's done in handle_exit.
            We also get here with interrupts enabled. */
 

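The KVM_GUESTDBG_SINGLESTEP handling added above is driven from userspace through the KVM_SET_GUEST_DEBUG ioctl: kvmppc_setup_debug() sets MSR_SE before entering the guest, the resulting trace interrupt is reported back as a KVM_EXIT_DEBUG exit, and kvmppc_clear_debug() clears MSR_SE again afterwards. A minimal sketch of the userspace side, assuming vcpu_fd is an already-created vCPU file descriptor and run is its mmapped kvm_run structure (error handling trimmed):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Sketch only: ask KVM to single-step one guest instruction. */
static int single_step_once(int vcpu_fd, struct kvm_run *run)
{
        struct kvm_guest_debug dbg = {
                .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
        };

        if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
                return -1;

        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

        /* With the change above, the trace interrupt taken after one
         * guest instruction surfaces as a debug exit. */
        if (run->exit_reason == KVM_EXIT_DEBUG)
                printf("stepped one guest instruction\n");

        return 0;
}
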
29 changes: 17 additions & 12 deletions arch/powerpc/kvm/book3s_xics.c
@@ -92,7 +92,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
          * we are the only setter, thus concurrent access is undefined
          * to begin with.
          */
-        if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
+        if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
                 state->asserted = 1;
         else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
                 state->asserted = 0;
@@ -280,7 +280,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
         if (!success)
                 goto bail;
 
-        XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+        XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                  icp->server_num,
                  old.cppr, old.mfrr, old.pending_pri, old.xisr,
                  old.need_resend, old.out_ee);
@@ -336,7 +336,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
         union kvmppc_icp_state old_state, new_state;
         bool success;
 
-        XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
+        XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
                  icp->server_num);
 
         do {
@@ -1174,9 +1174,11 @@ static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
                 prio = irqp->saved_priority;
         }
         val |= prio << KVM_XICS_PRIORITY_SHIFT;
-        if (irqp->asserted)
-                val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
-        else if (irqp->masked_pending || irqp->resend)
+        if (irqp->lsi) {
+                val |= KVM_XICS_LEVEL_SENSITIVE;
+                if (irqp->asserted)
+                        val |= KVM_XICS_PENDING;
+        } else if (irqp->masked_pending || irqp->resend)
                 val |= KVM_XICS_PENDING;
         ret = 0;
 }
@@ -1228,9 +1230,13 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
         irqp->priority = prio;
         irqp->resend = 0;
         irqp->masked_pending = 0;
+        irqp->lsi = 0;
         irqp->asserted = 0;
-        if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
-                irqp->asserted = 1;
+        if (val & KVM_XICS_LEVEL_SENSITIVE) {
+                irqp->lsi = 1;
+                if (val & KVM_XICS_PENDING)
+                        irqp->asserted = 1;
+        }
         irqp->exists = 1;
         arch_spin_unlock(&ics->lock);
         local_irq_restore(flags);
@@ -1249,11 +1255,10 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
         return ics_deliver_irq(xics, irq, level);
 }
 
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-                int irq_source_id, int level, bool line_status)
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+                              struct kvm *kvm, int irq_source_id,
+                              int level, bool line_status)
 {
-        if (!level)
-                return -1;
         return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
                            level, line_status);
 }

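With the new lsi flag, xics_get_source() reports KVM_XICS_LEVEL_SENSITIVE for level-triggered sources even when they are not currently pending, and xics_set_source() restores both the LSI nature and the asserted state. A hedged sketch of how a VMM might read one source's 64-bit state word from the in-kernel XICS device and test those bits; xics_dev_fd is a placeholder for the fd returned by KVM_CREATE_DEVICE, and the powerpc uapi header is assumed to provide the KVM_XICS_* and KVM_DEV_XICS_GRP_SOURCES definitions:

#include <linux/kvm.h>
#include <asm/kvm.h>            /* powerpc uapi: KVM_XICS_*, KVM_DEV_XICS_GRP_SOURCES */
#include <sys/ioctl.h>

/* Sketch only: fetch the state of interrupt source 'irq' and report
 * whether it is a level-sensitive source that is currently asserted. */
static int xics_source_is_asserted_lsi(int xics_dev_fd, unsigned long irq)
{
        __u64 state = 0;
        struct kvm_device_attr attr = {
                .group = KVM_DEV_XICS_GRP_SOURCES,
                .attr  = irq,
                .addr  = (__u64)(unsigned long)&state,
        };

        if (ioctl(xics_dev_fd, KVM_GET_DEVICE_ATTR, &attr) < 0)
                return -1;

        return !!((state & KVM_XICS_LEVEL_SENSITIVE) &&
                  (state & KVM_XICS_PENDING));
}
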
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s_xics.h
@@ -39,6 +39,7 @@ struct ics_irq_state {
         u8  saved_priority;
         u8  resend;
         u8  masked_pending;
+        u8  lsi;                /* level-sensitive interrupt */
         u8  asserted;           /* Only for LSI */
         u8  exists;
 };

22 changes: 12 additions & 10 deletions arch/powerpc/kvm/powerpc.c
@@ -800,9 +800,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
         }
 }
 
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                       unsigned int rt, unsigned int bytes,
-                       int is_default_endian)
+static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                                unsigned int rt, unsigned int bytes,
+                                int is_default_endian, int sign_extend)
 {
         int idx, ret;
         bool host_swabbed;
@@ -827,7 +827,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
         vcpu->arch.mmio_host_swabbed = host_swabbed;
         vcpu->mmio_needed = 1;
         vcpu->mmio_is_write = 0;
-        vcpu->arch.mmio_sign_extend = 0;
+        vcpu->arch.mmio_sign_extend = sign_extend;
 
         idx = srcu_read_lock(&vcpu->kvm->srcu);
 
@@ -844,19 +844,21 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
         return EMULATE_DO_MMIO;
 }
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                       unsigned int rt, unsigned int bytes,
+                       int is_default_endian)
+{
+        return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+}
 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
 {
-        int r;
-
-        vcpu->arch.mmio_sign_extend = 1;
-        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
-
-        return r;
+        return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,

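The powerpc.c change folds the sign-extension flag into a common __kvmppc_handle_load() helper instead of having kvmppc_handle_loads() set vcpu->arch.mmio_sign_extend by hand and then call kvmppc_handle_load(), which immediately reset the flag to 0. The flag only matters once the MMIO read completes and the value is widened into the guest register. A standalone illustration of that widening, not the kernel's code, just a sketch of the zero-extend vs. sign-extend distinction between the two entry points:

#include <stdint.h>

/* Illustrative only: widen a completed MMIO load of 'bytes' bytes to a
 * 64-bit register value.  sign_extend == 0 mirrors kvmppc_handle_load()
 * (e.g. lwz/lhz); sign_extend == 1 mirrors kvmppc_handle_loads()
 * (e.g. lwa/lha). */
static uint64_t mmio_widen(uint64_t raw, unsigned int bytes, int sign_extend)
{
        if (!sign_extend)
                return raw;                     /* zero-extended load */

        switch (bytes) {
        case 1: return (uint64_t)(int64_t)(int8_t)raw;
        case 2: return (uint64_t)(int64_t)(int16_t)raw;
        case 4: return (uint64_t)(int64_t)(int32_t)raw;
        default: return raw;                    /* 8-byte loads need no widening */
        }
}
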
