KVM: x86/xen: Support direct injection of event channel events
This adds a KVM_XEN_HVM_EVTCHN_SEND ioctl which allows direct injection
of events given an explicit { vcpu, port, priority } in precisely the
same form that those fields are given in the IRQ routing table.
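
For illustration, a minimal userspace sketch of invoking the new ioctl. It assumes vm_fd is a VM file descriptor on which Xen HVM support has already been configured, and the vcpu/port values passed in are placeholders:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int xen_evtchn_send(int vm_fd, __u32 vcpu, __u32 port)
{
	struct kvm_irq_routing_xen_evtchn evt;

	memset(&evt, 0, sizeof(evt));
	evt.vcpu = vcpu;	/* target vCPU, as in the IRQ routing table */
	evt.port = port;	/* event channel port number */
	evt.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

	/* 0 on success; -1 with errno set (e.g. EINVAL for a bad port) on failure */
	return ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &evt);
}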

Userspace is currently able to inject 2-level events purely by setting
the bits in the shared_info and vcpu_info, but FIFO event channels are
harder to deal with; we will need the kernel to take sole ownership of
delivery when we support those.
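
For context, a rough sketch of that existing 2-level path as seen from userspace, assuming the guest's shared_info page and the target vCPU's vcpu_info are already mapped and that the struct shared_info / struct vcpu_info layouts from the Xen public headers are in scope; atomics, barriers and the subsequent vCPU kick are elided:

static void set_evtchn_2level(volatile struct shared_info *shinfo,
			      volatile struct vcpu_info *vi,
			      unsigned int port)
{
	unsigned int bits = 8 * sizeof(unsigned long);
	unsigned long bit = 1UL << (port % bits);
	unsigned int word = port / bits;

	shinfo->evtchn_pending[word] |= bit;		/* mark the event pending */
	if (!(shinfo->evtchn_mask[word] & bit)) {	/* deliver only if unmasked */
		vi->evtchn_pending_sel |= 1UL << word;
		vi->evtchn_upcall_pending = 1;
		/* ...then kick the vCPU so it notices the pending upcall */
	}
}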

A subsequent patch will advertise this feature with a new bit in the
KVM_CAP_XEN_HVM ioctl.
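
Once that bit exists, userspace would check for it before using the ioctl; a rough sketch, where kvm_fd is an assumed open /dev/kvm descriptor and KVM_XEN_HVM_CONFIG_EVTCHN_SEND is the flag the follow-up patch introduces:

	int xen_caps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);

	if (xen_caps > 0 && (xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND)) {
		/* KVM_XEN_HVM_EVTCHN_SEND may be used on the VM fd */
	}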

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-9-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
David Woodhouse authored and Paolo Bonzini committed Apr 2, 2022
1 parent 8733068 commit 3502573
Showing 4 changed files with 45 additions and 0 deletions.
9 changes: 9 additions & 0 deletions arch/x86/kvm/x86.c
@@ -6505,6 +6505,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
		r = kvm_xen_hvm_set_attr(kvm, &xha);
		break;
	}
	case KVM_XEN_HVM_EVTCHN_SEND: {
		struct kvm_irq_routing_xen_evtchn uxe;

		r = -EFAULT;
		if (copy_from_user(&uxe, argp, sizeof(uxe)))
			goto out;
		r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
		break;
	}
#endif
	case KVM_SET_CLOCK:
		r = kvm_vm_ioctl_set_clock(kvm, argp);
32 changes: 32 additions & 0 deletions arch/x86/kvm/xen.c
@@ -1086,6 +1086,38 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
	return 0;
}

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
1 change: 1 addition & 0 deletions arch/x86/kvm/xen.h
@@ -20,6 +20,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
3 changes: 3 additions & 0 deletions include/uapi/linux/kvm.h
@@ -1699,6 +1699,9 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)

/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)

#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)

