x86: MSR: add methods for writing of an MSR on several CPUs
Provide for concurrent MSR writes on all the CPUs in the cpumask. Also,
add a temporary workaround for smp_call_function_many(), which currently
skips the CPU we're executing on.

Bart: zero out the rv struct, which is allocated on the stack.

CC: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Borislav Petkov committed Jun 10, 2009
1 parent 6bc1096 commit b034c19
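
For context, here is a minimal sketch of how a driver might call the new interface this commit adds. It is not part of the commit; the function name, MSR number parameter, and bit value are hypothetical. It allocates one struct msr slot per online CPU, reads the MSR everywhere, flips a bit in each result, and writes the values back everywhere:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/msr.h>

/* Hypothetical caller: set @bit in MSR @msr_no on all online CPUs. */
static int example_set_bit_everywhere(u32 msr_no, u32 bit)
{
	struct msr *msrs;
	int cpu;

	/*
	 * One result slot per CPU. rdmsr_on_cpus() fills slot
	 * (cpu - cpumask_first(mask)), so indexing msrs[] directly by CPU
	 * number assumes the online map is contiguous and starts at CPU 0.
	 */
	msrs = kcalloc(num_online_cpus(), sizeof(*msrs), GFP_KERNEL);
	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(&cpu_online_map, msr_no, msrs);

	for_each_online_cpu(cpu)
		msrs[cpu].l |= bit;	/* modify the low 32 bits */

	wrmsr_on_cpus(&cpu_online_map, msr_no, msrs);

	kfree(msrs);
	return 0;
}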
Showing 2 changed files with 104 additions and 6 deletions.
12 changes: 12 additions & 0 deletions arch/x86/include/asm/msr.h
@@ -227,6 +227,8 @@ do { \
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else /* CONFIG_SMP */
@@ -240,6 +242,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	wrmsr(msr_no, l, h);
 	return 0;
 }
+static inline void rdmsr_on_cpus(const cpumask_t *m, u32 msr_no,
+				struct msr *msrs)
+{
+	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+}
+static inline void wrmsr_on_cpus(const cpumask_t *m, u32 msr_no,
+				struct msr *msrs)
+{
+	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+}
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
 {
98 changes: 92 additions & 6 deletions arch/x86/lib/msr.c
@@ -6,48 +6,133 @@
 struct msr_info {
 	u32 msr_no;
 	struct msr reg;
+	struct msr *msrs;
+	int off;
 	int err;
 };
 
 static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
 
-	rdmsr(rv->msr_no, rv->reg.l, rv->reg.h);
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;
+
+	rdmsr(rv->msr_no, reg->l, reg->h);
 }
 
 static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;
 
-	wrmsr(rv->msr_no, rv->reg.l, rv->reg.h);
+	wrmsr(rv->msr_no, reg->l, reg->h);
 }
 
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	*l = rv.reg.l;
 	*h = rv.reg.h;
 
 	return err;
 }
+EXPORT_SYMBOL(rdmsr_on_cpu);
 
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	rv.reg.l = l;
 	rv.reg.h = h;
 	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 
 	return err;
 }
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
 
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
@@ -70,28 +155,29 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
 	*l = rv.reg.l;
 	*h = rv.reg.h;
 
 	return err ? err : rv.err;
 }
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	rv.reg.l = l;
 	rv.reg.h = h;
 	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
 
 	return err ? err : rv.err;
 }
-
-EXPORT_SYMBOL(rdmsr_on_cpu);
-EXPORT_SYMBOL(wrmsr_on_cpu);
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
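
A note on the indexing scheme in __rdmsr_on_cpu()/__wrmsr_on_cpu() (an editorial observation, not part of the commit): each CPU stores its result at msrs[this_cpu - cpumask_first(mask)], which only lines up with an array of cpumask_weight(mask) slots when the CPUs in the mask are numbered contiguously. Worked through for a hypothetical sparse mask:

/*
 * Slot arithmetic used by the callbacks, traced for a hypothetical
 * sparse mask -- illustrative only, not code from the commit.
 *
 *   mask = { 2, 3, 5 }, off = cpumask_first(mask) = 2
 *
 *   CPU 2 -> msrs[2 - 2] = msrs[0]
 *   CPU 3 -> msrs[3 - 2] = msrs[1]
 *   CPU 5 -> msrs[5 - 2] = msrs[3]   <-- out of bounds if the caller
 *            allocated cpumask_weight(mask) == 3 slots; slot 2 unused.
 *
 * Callers should therefore pass a contiguous mask (e.g. all online
 * CPUs) or size the array as (last CPU - first CPU + 1).
 */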
