Commit
---
yaml
---
r: 104467
b: refs/heads/master
c: 2d9e1e2
h: refs/heads/master
i:
  104465: 6c1e5db
  104463: 5a8c9e1
v: v3
Jeremy Fitzhardinge authored and Ingo Molnar committed Jul 16, 2008
1 parent 77abf56 commit b3fffdb
Showing 5 changed files with 207 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 56397f8dadb40055479a8ffff23f21a890098a31
+refs/heads/master: 2d9e1e2f58b5612aa4eab0ab54c84308a29dbd79
172 changes: 171 additions & 1 deletion trunk/arch/x86/xen/smp.c
@@ -15,6 +15,7 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
+#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -35,6 +36,8 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
+static void __cpuinit xen_init_lock_cpu(int cpu);
+
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -179,6 +182,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
 
+	xen_init_lock_cpu(0);
+
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
 	set_cpu_sibling_map(0);
@@ -301,6 +306,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
 	xen_setup_timer(cpu);
+	xen_init_lock_cpu(cpu);
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
@@ -413,6 +419,170 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+struct xen_spinlock {
+	unsigned char lock; /* 0 -> free; 1 -> locked */
+	unsigned short spinners; /* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	/* Not strictly true; this is only the count of contended
+	   lock-takers entering the slow path. */
+	return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %b0,%1"
+	    : "+q" (old), "+m" (xl->lock) : : "memory");
+
+	return old == 0;
+}
+
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+static inline void spinning_lock(struct xen_spinlock *xl)
+{
+	__get_cpu_var(lock_spinners) = xl;
+	wmb(); /* set lock of interest before count */
+	asm(LOCK_PREFIX " incw %0"
+	    : "+m" (xl->spinners) : : "memory");
+}
+
+static inline void unspinning_lock(struct xen_spinlock *xl)
+{
+	asm(LOCK_PREFIX " decw %0"
+	    : "+m" (xl->spinners) : : "memory");
+	wmb(); /* decrement count before clearing lock */
+	__get_cpu_var(lock_spinners) = NULL;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int irq = __get_cpu_var(lock_kicker_irq);
+	int ret;
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return 0;
+
+	/* announce we're spinning */
+	spinning_lock(xl);
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+
+	/* check again make sure it didn't become free while
+	   we weren't looking */
+	ret = xen_spin_trylock(lock);
+	if (ret)
+		goto out;
+
+	/* block until irq becomes pending */
+	xen_poll_irq(irq);
+	kstat_this_cpu.irqs[irq]++;
+
+out:
+	unspinning_lock(xl);
+	return ret;
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int timeout;
+	u8 oldval;
+
+	do {
+		timeout = 1 << 10;
+
+		asm("1: xchgb %1,%0\n"
+		    " testb %1,%1\n"
+		    " jz 3f\n"
+		    "2: rep;nop\n"
+		    " cmpb $0,%0\n"
+		    " je 1b\n"
+		    " dec %2\n"
+		    " jnz 2b\n"
+		    "3:\n"
+		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+		    : "1" (1)
+		    : "memory");
+
+	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		/* XXX should mix up next cpu selection */
+		if (per_cpu(lock_spinners, cpu) == xl) {
+			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+			break;
+		}
+	}
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	smp_wmb(); /* make sure no writes get moved after unlock */
+	xl->lock = 0; /* release lock */
+
+	/* make sure unlock happens before kick */
+	barrier();
+
+	if (unlikely(xl->spinners))
+		xen_spin_unlock_slow(xl);
+}
+
+static __cpuinit void xen_init_lock_cpu(int cpu)
+{
+	int irq;
+	const char *name;
+
+	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+				     cpu,
+				     xen_reschedule_interrupt,
+				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     name,
+				     NULL);
+
+	if (irq >= 0) {
+		disable_irq(irq); /* make sure it's never delivered */
+		per_cpu(lock_kicker_irq, cpu) = irq;
+	}
+
+	printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+static void __init xen_init_spinlocks(void)
+{
+	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+	pv_lock_ops.spin_lock = xen_spin_lock;
+	pv_lock_ops.spin_trylock = xen_spin_trylock;
+	pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
+
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -430,5 +600,5 @@ void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
 	xen_fill_possible_map();
-	paravirt_use_bytelocks();
+	xen_init_spinlocks();
 }
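
The interesting part of the smp.c change is the lock/unlock pair: the fast path takes an ordinary byte lock with xchgb, and only after a bounded spin of roughly 1<<10 iterations does a CPU drop into xen_spin_lock_slow(), where it registers itself in lock_spinners, clears its per-CPU "kicker" irq, re-tries the lock once, and then blocks in the hypervisor until xen_spin_unlock() kicks it with XEN_SPIN_UNLOCK_VECTOR. The following is a minimal userspace sketch of that shape, assuming GCC __atomic builtins in place of the inline asm and sched_yield() in place of xen_poll_irq() and the IPI kick; byte_lock, byte_trylock, and byte_unlock are invented names for illustration, not symbols from this commit.

/*
 * Userspace sketch of the byte-lock scheme above.  The __atomic builtins
 * stand in for the xchgb/"lock incw" asm, and sched_yield() stands in for
 * xen_poll_irq()/xen_send_IPI_one(), which need the hypervisor.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct byte_lock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* threads waiting in the slow path */
};

static int byte_trylock(struct byte_lock *bl)
{
	/* exchange the lock byte with 1; we own it if the old value was 0 */
	return __atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) == 0;
}

static void byte_lock(struct byte_lock *bl)
{
	int timeout;

	for (;;) {
		/* fast path: spin for a bounded number of attempts */
		for (timeout = 1 << 10; timeout; timeout--)
			if (byte_trylock(bl))
				return;

		/* slow path: announce we're waiting, re-check, then back off
		 * (the real code blocks on a per-cpu Xen event channel) */
		__atomic_fetch_add(&bl->spinners, 1, __ATOMIC_SEQ_CST);
		if (byte_trylock(bl)) {
			__atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_SEQ_CST);
			return;
		}
		sched_yield();
		__atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_SEQ_CST);
	}
}

static void byte_unlock(struct byte_lock *bl)
{
	__atomic_store_n(&bl->lock, 0, __ATOMIC_RELEASE);
	/* the real code checks xl->spinners here and, if anyone registered,
	 * kicks one waiter with xen_send_IPI_one(); sched_yield() above makes
	 * that unnecessary in this sketch */
}

static struct byte_lock bl;
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		byte_lock(&bl);
		counter++;
		byte_unlock(&bl);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	printf("counter = %ld (expect 200000)\n", counter);
	return 0;
}

The bounded spin keeps the uncontended case about as cheap as a native spinlock; the hypervisor-blocking slow path only matters when the virtual CPU holding the lock has been preempted.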
27 changes: 27 additions & 0 deletions trunk/drivers/xen/events.c
@@ -734,6 +734,33 @@ static void restore_cpu_ipis(unsigned int cpu)
 	}
 }
 
+/* Clear an irq's pending state, in preparation for polling on it */
+void xen_clear_irq_pending(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		clear_evtchn(evtchn);
+}
+
+/* Poll waiting for an irq to become pending. In the usual case, the
+   irq will be disabled so it won't deliver an interrupt. */
+void xen_poll_irq(int irq)
+{
+	evtchn_port_t evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn)) {
+		struct sched_poll poll;
+
+		poll.nr_ports = 1;
+		poll.timeout = 0;
+		poll.ports = &evtchn;
+
+		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
+			BUG();
+	}
+}
+
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
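The two new helpers exist to support the blocking pattern in xen_spin_lock_slow() above: clear the (still disabled) kicker irq's pending bit first, re-check the guarded condition, and only then issue SCHEDOP_poll, so a kick that raced with the clear is caught by the re-check instead of being slept through. Below is a small userspace sketch of that ordering, assuming an eventfd as a stand-in for the event channel, poll(2) for SCHEDOP_poll, and a write() for xen_send_IPI_one(); none of these names come from the commit.

#include <poll.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

static int kick_fd;	/* stand-in for the disabled kicker event channel */
static int condition;	/* stand-in for "the lock became free" */

static void clear_pending(void)	/* like xen_clear_irq_pending() */
{
	uint64_t junk;
	ssize_t r = read(kick_fd, &junk, sizeof(junk));	/* non-blocking drain */
	(void)r;
}

static void poll_pending(void)	/* like xen_poll_irq() */
{
	struct pollfd pfd = { .fd = kick_fd, .events = POLLIN };
	poll(&pfd, 1, -1);	/* block until someone kicks us */
}

static void *waiter(void *arg)
{
	(void)arg;
	while (!__atomic_load_n(&condition, __ATOMIC_ACQUIRE)) {
		clear_pending();
		/* re-check *after* the clear: a kick that landed before the
		 * clear was wiped out, but whatever it signalled is still
		 * visible here, so we never sleep through it */
		if (__atomic_load_n(&condition, __ATOMIC_ACQUIRE))
			break;
		poll_pending();
	}
	printf("waiter: condition observed\n");
	return NULL;
}

int main(void)
{
	pthread_t t;
	uint64_t one = 1;

	kick_fd = eventfd(0, EFD_NONBLOCK);
	pthread_create(&t, NULL, waiter, NULL);

	usleep(10000);
	__atomic_store_n(&condition, 1, __ATOMIC_RELEASE);	/* like xl->lock = 0 */
	if (write(kick_fd, &one, sizeof(one)) != sizeof(one))	/* like xen_send_IPI_one() */
		return 1;

	pthread_join(t, NULL);
	return 0;
}

This is the same reason xen_spin_lock_slow() calls xen_clear_irq_pending() before its final xen_spin_trylock() and only then xen_poll_irq().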
1 change: 1 addition & 0 deletions trunk/include/asm-x86/xen/events.h
@@ -5,6 +5,7 @@ enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
 	XEN_CALL_FUNCTION_SINGLE_VECTOR,
+	XEN_SPIN_UNLOCK_VECTOR,
 
 	XEN_NR_IPIS,
 };
7 changes: 7 additions & 0 deletions trunk/include/xen/events.h
@@ -44,4 +44,11 @@ extern void notify_remote_via_irq(int irq);
 
 extern void xen_irq_resume(void);
 
+/* Clear an irq's pending state, in preparation for polling on it */
+void xen_clear_irq_pending(int irq);
+
+/* Poll waiting for an irq to become pending. In the usual case, the
+   irq will be disabled so it won't deliver an interrupt. */
+void xen_poll_irq(int irq);
+
 #endif /* _XEN_EVENTS_H */
