Skip to content

Commit

Permalink
Merge tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Browse files Browse the repository at this point in the history

Pull xen bug fixes from David Vrabel:
 - XSA-155 security fixes to backend drivers.
 - XSA-157 security fixes to pciback.

* tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: fix up cleanup path when alloc fails
  xen/pciback: Don't allow MSI-X ops if PCI_COMMAND_MEMORY is not set.
  xen/pciback: For XEN_PCI_OP_disable_msi[|x] only disable if device has MSI(X) enabled.
  xen/pciback: Do not install an IRQ handler for MSI interrupts.
  xen/pciback: Return error on XEN_PCI_OP_enable_msix when device has MSI or MSI-X enabled
  xen/pciback: Return error on XEN_PCI_OP_enable_msi when device has MSI or MSI-X enabled
  xen/pciback: Save xen_pci_op commands before processing it
  xen-scsiback: safely copy requests
  xen-blkback: read from indirect descriptors only once
  xen-blkback: only read request operation from shared ring once
  xen-netback: use RING_COPY_REQUEST() throughout
  xen-netback: don't use last request to determine minimum Tx credit
  xen: Add RING_COPY_REQUEST()
  xen/x86/pvh: Use HVM's flush_tlb_others op
  xen: Resume PMU from non-atomic context
  xen/events/fifo: Consume unprocessed events when a CPU dies
  • Loading branch information
Linus Torvalds committed Dec 18, 2015
2 parents 83ad283 + 584a561 commit 3273cba
Show file tree
Hide file tree
Showing 11 changed files with 138 additions and 67 deletions.
9 changes: 2 additions & 7 deletions arch/x86/xen/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;

/* Optimization - we can use the HVM one but it has no idea which
* VCPUs are descheduled - which means that it will needlessly IPI
* them. Xen knows so let it do the job.
*/
if (xen_feature(XENFEAT_auto_translated_physmap)) {
pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
}

pv_mmu_ops = xen_mmu_ops;

memset(dummy_mapping, 0xff, PAGE_SIZE);
Expand Down
20 changes: 10 additions & 10 deletions arch/x86/xen/suspend.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)

void xen_arch_pre_suspend(void)
{
int cpu;

for_each_online_cpu(cpu)
xen_pmu_finish(cpu);

if (xen_pv_domain())
xen_pv_pre_suspend();
}

void xen_arch_post_suspend(int cancelled)
{
int cpu;

if (xen_pv_domain())
xen_pv_post_suspend(cancelled);
else
xen_hvm_post_suspend(cancelled);

for_each_online_cpu(cpu)
xen_pmu_init(cpu);
}

static void xen_vcpu_notify_restore(void *data)
Expand All @@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)

void xen_arch_resume(void)
{
int cpu;

on_each_cpu(xen_vcpu_notify_restore, NULL, 1);

for_each_online_cpu(cpu)
xen_pmu_init(cpu);
}

void xen_arch_suspend(void)
{
int cpu;

for_each_online_cpu(cpu)
xen_pmu_finish(cpu);

on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
}
15 changes: 10 additions & 5 deletions drivers/block/xen-blkback/blkback.c
Original file line number Diff line number Diff line change
Expand Up @@ -950,22 +950,27 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
goto unmap;

for (n = 0, i = 0; n < nseg; n++) {
uint8_t first_sect, last_sect;

if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
kunmap_atomic(segments);
segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
}
i = n % SEGS_PER_INDIRECT_FRAME;

pending_req->segments[n]->gref = segments[i].gref;
seg[n].nsec = segments[i].last_sect -
segments[i].first_sect + 1;
seg[n].offset = (segments[i].first_sect << 9);
if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
(segments[i].last_sect < segments[i].first_sect)) {

first_sect = READ_ONCE(segments[i].first_sect);
last_sect = READ_ONCE(segments[i].last_sect);
if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
rc = -EINVAL;
goto unmap;
}

seg[n].nsec = last_sect - first_sect + 1;
seg[n].offset = first_sect << 9;
preq->nr_sects += seg[n].nsec;
}

Expand Down
8 changes: 4 additions & 4 deletions drivers/block/xen-blkback/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation;
switch (src->operation) {
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
Expand Down Expand Up @@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation;
switch (src->operation) {
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
Expand Down
34 changes: 15 additions & 19 deletions drivers/net/xen-netback/netback.c
Original file line number Diff line number Diff line change
Expand Up @@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;
struct xen_netif_rx_request req;

req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);

meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
meta->id = req.id;

npo->copy_off = 0;
npo->copy_gref = req->gref;
npo->copy_gref = req.gref;

return meta;
}
Expand Down Expand Up @@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
struct xen_netif_rx_request *req;
struct xen_netif_rx_request req;
struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
Expand All @@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,

/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
meta->id = req.id;
}

req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;

if ((1 << gso_type) & vif->gso_mask) {
Expand All @@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}

meta->size = 0;
meta->id = req->id;
meta->id = req.id;
npo->copy_off = 0;
npo->copy_gref = req->gref;
npo->copy_gref = req.gref;

data = skb->data;
while (data < skb_tail_pointer(skb)) {
Expand Down Expand Up @@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
max_burst = min(max_burst, 131072UL);
max_burst = max(max_burst, queue->credit_bytes);
max_burst = max(131072UL, queue->credit_bytes);

/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = queue->remaining_credit + queue->credit_bytes;
Expand Down Expand Up @@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
txp = RING_GET_REQUEST(&queue->tx, cons++);
RING_COPY_REQUEST(&queue->tx, cons++, txp);
} while (1);
queue->tx.req_cons = cons;
}
Expand Down Expand Up @@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
if (drop_err)
txp = &dropped_tx;

memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
sizeof(*txp));
RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

/* If the guest submitted a frame >= 64 KiB then
* first->size overflowed and following slots will
Expand Down Expand Up @@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
return -EBADR;
}

memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
sizeof(extra));
RING_COPY_REQUEST(&queue->tx, cons, &extra);
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
queue->tx.req_cons = ++cons;
Expand Down Expand Up @@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,

idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
RING_COPY_REQUEST(&queue->tx, idx, &txreq);

/* Credit-based scheduling. */
if (txreq.size > queue->remaining_credit &&
Expand Down
23 changes: 18 additions & 5 deletions drivers/xen/events/events_fifo.c
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)

static void consume_one_event(unsigned cpu,
struct evtchn_fifo_control_block *control_block,
unsigned priority, unsigned long *ready)
unsigned priority, unsigned long *ready,
bool drop)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
Expand Down Expand Up @@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
if (head == 0)
clear_bit(priority, ready);

if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
handle_irq_for_port(port);
if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
if (unlikely(drop))
pr_warn("Dropping pending event for port %u\n", port);
else
handle_irq_for_port(port);
}

q->head[priority] = head;
}

static void evtchn_fifo_handle_events(unsigned cpu)
static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
Expand All @@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)

while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
consume_one_event(cpu, control_block, q, &ready);
consume_one_event(cpu, control_block, q, &ready, drop);
ready |= xchg(&control_block->ready, 0);
}
}

static void evtchn_fifo_handle_events(unsigned cpu)
{
__evtchn_fifo_handle_events(cpu, false);
}

static void evtchn_fifo_resume(void)
{
unsigned cpu;
Expand Down Expand Up @@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
if (!per_cpu(cpu_control_block, cpu))
ret = evtchn_fifo_alloc_control_block(cpu);
break;
case CPU_DEAD:
__evtchn_fifo_handle_events(cpu, true);
break;
default:
break;
}
Expand Down
1 change: 1 addition & 0 deletions drivers/xen/xen-pciback/pciback.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ struct xen_pcibk_device {
struct xen_pci_sharedinfo *sh_info;
unsigned long flags;
struct work_struct op_work;
struct xen_pci_op op;
};

struct xen_pcibk_dev_data {
Expand Down
Loading

0 comments on commit 3273cba

Please sign in to comment.