x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x).  This calculates
the address of the current processor's instance of the percpu variable x
based on an offset.

Other use cases are storing data to and retrieving data from the current
processor's percpu area.  __get_cpu_var() can be used as an lvalue when
writing data, or on the right side of an assignment.

__get_cpu_var() is defined as:

#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always performs only an address calculation. Store and
retrieve operations, however, can use a segment prefix (or a global register
on other platforms) and avoid the address calculation entirely.

this_cpu_write() and this_cpu_read() can directly take an offset into the
percpu area and use optimized assembly code to read and write per cpu
variables.
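
For illustration, a minimal sketch of the difference on x86-64, where the
percpu base lives in the %gs segment register (the assembly in the comments
is indicative of typical compiler output, not exact):

	DEFINE_PER_CPU(int, y);

	/* Pointer-based access: the address is materialized first.
	 *
	 *	mov %gs:this_cpu_off, %rax	# base offset of this CPU
	 *	mov y(%rax), %edx		# load through the pointer
	 */
	int a = *this_cpu_ptr(&y);

	/* Offset-based access: a single segment-prefixed instruction,
	 * no pointer held in a register.
	 *
	 *	mov %gs:y, %edx
	 */
	int b = this_cpu_read(y);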

This patch converts __get_cpu_var() into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
take the offset directly.  Thereby address calculations are avoided and
fewer registers are used when code is generated.

Transformations done to __get_cpu_var():

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

    Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1, but this time an array is involved, so the variable name
itself already yields an address.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

    Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per cpu
variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y);

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per cpu variable

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y) = x;

   Converts to

	__this_cpu_write(y, x);

6. Increment/Decrement etc of a per cpu variable

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++;

   Converts to

	__this_cpu_inc(y);
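
Note that the double-underscore forms used above do not disable preemption
themselves; like __get_cpu_var(), they assume the caller already has
exclusive access to the current CPU's data. A minimal sketch of the calling
convention (the counter name is hypothetical):

	DEFINE_PER_CPU(unsigned long, hypothetical_count);

	static void count_event(void)
	{
		/*
		 * The caller must pin the CPU: a preempt-disabled
		 * region, an irq-disabled section, or interrupt
		 * context.  On x86 the increment itself compiles
		 * to a single %gs-prefixed instruction.
		 */
		preempt_disable();
		__this_cpu_inc(hypothetical_count);
		preempt_enable();
	}

Callers that cannot guarantee this should use the preemption-safe
this_cpu_inc() and friends instead.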

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Christoph Lameter authored and Tejun Heo committed Aug 26, 2014
1 parent 532d0d0 commit 89cbc76
Showing 30 changed files with 147 additions and 147 deletions.
4 changes: 2 additions & 2 deletions arch/x86/include/asm/debugreg.h
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void);
 DECLARE_PER_CPU(int, debug_stack_usage);
 static inline void debug_stack_usage_inc(void)
 {
-	__get_cpu_var(debug_stack_usage)++;
+	__this_cpu_inc(debug_stack_usage);
 }
 static inline void debug_stack_usage_dec(void)
 {
-	__get_cpu_var(debug_stack_usage)--;
+	__this_cpu_dec(debug_stack_usage);
 }
 int is_debug_stack(unsigned long addr);
 void debug_stack_set_zero(void);
2 changes: 1 addition & 1 deletion arch/x86/include/asm/uv/uv_hub.h
@@ -164,7 +164,7 @@ struct uv_hub_info_s {
 };
 
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info		this_cpu_ptr(&__uv_hub_info)
 #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
 
 /*
4 changes: 2 additions & 2 deletions arch/x86/kernel/apb_timer.c
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void)
 static int __init apbt_clockevent_register(void)
 {
 	struct sfi_timer_table_entry *mtmr;
-	struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
+	struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
 
 	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
 	if (mtmr == NULL) {
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void)
 	if (!cpu)
 		return;
 
-	adev = &__get_cpu_var(cpu_apbt_dev);
+	adev = this_cpu_ptr(&cpu_apbt_dev);
 	if (!adev->timer) {
 		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
 			APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
4 changes: 2 additions & 2 deletions arch/x86/kernel/apic/apic.c
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  */
 static void setup_APIC_timer(void)
 {
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 
 	if (this_cpu_has(X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 
 static int __init calibrate_APIC_clock(void)
 {
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
 	long delta, deltatsc;
6 changes: 3 additions & 3 deletions arch/x86/kernel/cpu/common.c
@@ -1198,9 +1198,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
 
 int is_debug_stack(unsigned long addr)
 {
-	return __get_cpu_var(debug_stack_usage) ||
-		(addr <= __get_cpu_var(debug_stack_addr) &&
-		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+	return __this_cpu_read(debug_stack_usage) ||
+		(addr <= __this_cpu_read(debug_stack_addr) &&
+		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
 }
 NOKPROBE_SYMBOL(is_debug_stack);
 
6 changes: 3 additions & 3 deletions arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NMI_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 static void mce_irq_ipi(void *info)
 {
 	int cpu = smp_processor_id();
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 
 	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
 	    m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 	int context = MCJ_CTX(m->inject_flags);
 	int ret = 0;
 	int cpu = m->extcpu;
46 changes: 23 additions & 23 deletions arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 		if (offset < 0)
 			return 0;
-		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 	}
 
 	if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
 		int offset = msr_to_offset(msr);
 
 		if (offset >= 0)
-			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 		return;
 	}
 	wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
 /* Runs with CPU affinity in workqueue */
 static int mce_ring_empty(void)
 {
-	struct mce_ring *r = &__get_cpu_var(mce_ring);
+	struct mce_ring *r = this_cpu_ptr(&mce_ring);
 
 	return r->start == r->end;
 }
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
 
 	*pfn = 0;
 	get_cpu();
-	r = &__get_cpu_var(mce_ring);
+	r = this_cpu_ptr(&mce_ring);
 	if (r->start == r->end)
 		goto out;
 	*pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ static int mce_ring_get(unsigned long *pfn)
 /* Always runs in MCE context with preempt off */
 static int mce_ring_add(unsigned long pfn)
 {
-	struct mce_ring *r = &__get_cpu_var(mce_ring);
+	struct mce_ring *r = this_cpu_ptr(&mce_ring);
 	unsigned next;
 
 	next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
 static void mce_schedule_work(void)
 {
 	if (!mce_ring_empty())
-		schedule_work(&__get_cpu_var(mce_work));
+		schedule_work(this_cpu_ptr(&mce_work));
 }
 
 DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
 		return;
 	}
 
-	irq_work_queue(&__get_cpu_var(mce_irq_work));
+	irq_work_queue(this_cpu_ptr(&mce_irq_work));
 }
 
 /*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	mce_gather_info(&m, regs);
 
-	final = &__get_cpu_var(mces_seen);
+	final = this_cpu_ptr(&mces_seen);
 	*final = m;
 
 	memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 
 static int cmc_error_seen(void)
 {
-	unsigned long *v = &__get_cpu_var(mce_polled_error);
+	unsigned long *v = this_cpu_ptr(&mce_polled_error);
 
 	return test_and_clear_bit(0, v);
 }
 
 static void mce_timer_fn(unsigned long data)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv;
 	int notify;
 
 	WARN_ON(smp_processor_id() != data);
 
-	if (mce_available(__this_cpu_ptr(&cpu_info))) {
+	if (mce_available(this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
-				&__get_cpu_var(mce_poll_banks));
+				this_cpu_ptr(&mce_poll_banks));
 		mce_intel_cmci_poll();
 	}
 
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
  */
 void mce_timer_kick(unsigned long interval)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long when = jiffies + interval;
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
 	setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_timer();
-	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+	INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+	init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
 static void __mce_disable_bank(void *arg)
 {
 	int bank = *((int *)arg);
-	__clear_bit(bank, __get_cpu_var(mce_poll_banks));
+	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
 	cmci_disable_bank(bank);
 }
 
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
 static void mce_syscore_resume(void)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
 }
 
 static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_cmci(void *data)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
 	cmci_clear();
 }
 
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
 			 * event.
 			 */
 			machine_check_poll(MCP_TIMESTAMP,
-					&__get_cpu_var(mce_poll_banks));
+					this_cpu_ptr(&mce_poll_banks));
 
 			if (high & MASK_OVERFLOW_HI) {
 				rdmsrl(address, m.misc);
22 changes: 11 additions & 11 deletions arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
 {
 	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
 		return;
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 }
 
 void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
 	u64 val;
 
 	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-	owned = __get_cpu_var(mce_banks_owned);
+	owned = this_cpu_ptr(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
 {
 	if (cmci_storm_detect())
 		return;
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 	mce_notify_irq();
 }
 
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
  */
 static void cmci_discover(int banks)
 {
-	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
 	unsigned long flags;
 	int i;
 	int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
 		/* Already owned by someone else? */
 		if (val & MCI_CTL2_CMCI_EN) {
 			clear_bit(i, owned);
-			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
 			continue;
 		}
 
@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
 		/* Did the enable bit stick? -- the bank supports CMCI */
 		if (val & MCI_CTL2_CMCI_EN) {
 			set_bit(i, owned);
-			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
 			/*
 			 * We are able to set thresholds for some banks that
 			 * had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
 			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
 				bios_wrong_thresh = 1;
 		} else {
-			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
 		}
 	}
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
 	unsigned long flags;
 	int banks;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 	local_irq_restore(flags);
 }
 
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
 {
 	u64 val;
 
-	if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
 		return;
 	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	val &= ~MCI_CTL2_CMCI_EN;
 	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-	__clear_bit(bank, __get_cpu_var(mce_banks_owned));
+	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
 }
 
 /*
[diff for the remaining 21 changed files not shown]
