Skip to content

Commit

Permalink
percpu: make percpu symbols in x86 unique
Browse files Browse the repository at this point in the history
This patch updates percpu related symbols in x86 such that percpu
symbols are unique and don't clash with local symbols.  This serves
two purposes of decreasing the possibility of global percpu symbol
collision and allowing dropping per_cpu__ prefix from percpu symbols.

* arch/x86/kernel/cpu/common.c: rename local variable to avoid collision

* arch/x86/kvm/svm.c: s/svm_data/sd/ for local variables to avoid collision

* arch/x86/kernel/cpu/cpu_debug.c: s/cpu_arr/cpud_arr/
  				   s/priv_arr/cpud_priv_arr/
				   s/cpu_priv_count/cpud_priv_count/

* arch/x86/kernel/cpu/intel_cacheinfo.c: s/cpuid4_info/ici_cpuid4_info/
  					 s/cache_kobject/ici_cache_kobject/
					 s/index_kobject/ici_index_kobject/

* arch/x86/kernel/ds.c: s/cpu_context/cpu_ds_context/

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: (kvm) Avi Kivity <avi@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: x86@kernel.org
  • Loading branch information
Tejun Heo committed Oct 29, 2009
1 parent c6e22f9 commit 0fe1e00
Show file tree
Hide file tree
Showing 5 changed files with 79 additions and 80 deletions.
8 changes: 4 additions & 4 deletions arch/x86/kernel/cpu/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)

void __cpuinit cpu_init(void)
{
struct orig_ist *orig_ist;
struct orig_ist *oist;
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
Expand All @@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)

cpu = stack_smp_processor_id();
t = &per_cpu(init_tss, cpu);
orig_ist = &per_cpu(orig_ist, cpu);
oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
Expand Down Expand Up @@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
/*
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
if (!oist->ist[0]) {
char *estacks = per_cpu(exception_stacks, cpu);

for (v = 0; v < N_EXCEPTION_STACKS; v++) {
estacks += exception_stack_sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
oist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
}
Expand Down
30 changes: 15 additions & 15 deletions arch/x86/kernel/cpu/cpu_debug.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,9 @@
#include <asm/apic.h>
#include <asm/desc.h>

static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
static DEFINE_PER_CPU(int, cpud_priv_count);

static DEFINE_MUTEX(cpu_debug_lock);

Expand Down Expand Up @@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,

/* Already initialized */
if (file == CPU_INDEX_BIT)
if (per_cpu(cpu_arr[type].init, cpu))
if (per_cpu(cpud_arr[type].init, cpu))
return 0;

priv = kzalloc(sizeof(*priv), GFP_KERNEL);
Expand All @@ -543,19 +543,19 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
priv->reg = reg;
priv->file = file;
mutex_lock(&cpu_debug_lock);
per_cpu(priv_arr[type], cpu) = priv;
per_cpu(cpu_priv_count, cpu)++;
per_cpu(cpud_priv_arr[type], cpu) = priv;
per_cpu(cpud_priv_count, cpu)++;
mutex_unlock(&cpu_debug_lock);

if (file)
debugfs_create_file(cpu_file[file].name, S_IRUGO,
dentry, (void *)priv, &cpu_fops);
else {
debugfs_create_file(cpu_base[type].name, S_IRUGO,
per_cpu(cpu_arr[type].dentry, cpu),
per_cpu(cpud_arr[type].dentry, cpu),
(void *)priv, &cpu_fops);
mutex_lock(&cpu_debug_lock);
per_cpu(cpu_arr[type].init, cpu) = 1;
per_cpu(cpud_arr[type].init, cpu) = 1;
mutex_unlock(&cpu_debug_lock);
}

Expand Down Expand Up @@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
continue;
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;

if (type < CPU_TSS_BIT)
err = cpu_init_msr(cpu, type, cpu_dentry);
Expand Down Expand Up @@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
err = cpu_init_allreg(cpu, cpu_dentry);

pr_info("cpu%d(%d) debug files %d\n",
cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
pr_err("Register files count %d exceeds limit %d\n",
per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
err = -ENFILE;
}
if (err)
Expand All @@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
debugfs_remove_recursive(cpu_debugfs_dir);

for (cpu = 0; cpu < nr_cpu_ids; cpu++)
for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
kfree(per_cpu(priv_arr[i], cpu));
for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
kfree(per_cpu(cpud_priv_arr[i], cpu));
}

module_init(cpu_debug_init);
Expand Down
54 changes: 27 additions & 27 deletions arch/x86/kernel/cpu/intel_cacheinfo.c
Original file line number Diff line number Diff line change
Expand Up @@ -512,8 +512,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
Expand All @@ -526,7 +526,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
struct cpuinfo_x86 *d;
for_each_online_cpu(i) {
if (!per_cpu(cpuid4_info, i))
if (!per_cpu(ici_cpuid4_info, i))
continue;
d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
Expand All @@ -548,7 +548,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
c->apicid >> index_msb) {
cpumask_set_cpu(i,
to_cpumask(this_leaf->shared_cpu_map));
if (i != cpu && per_cpu(cpuid4_info, i)) {
if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
sibling_leaf =
CPUID4_INFO_IDX(i, index);
cpumask_set_cpu(cpu, to_cpumask(
Expand Down Expand Up @@ -587,8 +587,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);

kfree(per_cpu(cpuid4_info, cpu));
per_cpu(cpuid4_info, cpu) = NULL;
kfree(per_cpu(ici_cpuid4_info, cpu));
per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
Expand Down Expand Up @@ -627,15 +627,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
if (num_cache_leaves == 0)
return -ENOENT;

per_cpu(cpuid4_info, cpu) = kzalloc(
per_cpu(ici_cpuid4_info, cpu) = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
if (per_cpu(cpuid4_info, cpu) == NULL)
if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return -ENOMEM;

smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
if (retval) {
kfree(per_cpu(cpuid4_info, cpu));
per_cpu(cpuid4_info, cpu) = NULL;
kfree(per_cpu(ici_cpuid4_info, cpu));
per_cpu(ici_cpuid4_info, cpu) = NULL;
}

return retval;
Expand All @@ -647,7 +647,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
struct kobject kobj;
Expand All @@ -656,8 +656,8 @@ struct _index_kobject {
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
Expand Down Expand Up @@ -876,10 +876,10 @@ static struct kobj_type ktype_percpu_entry = {

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
kfree(per_cpu(cache_kobject, cpu));
kfree(per_cpu(index_kobject, cpu));
per_cpu(cache_kobject, cpu) = NULL;
per_cpu(index_kobject, cpu) = NULL;
kfree(per_cpu(ici_cache_kobject, cpu));
kfree(per_cpu(ici_index_kobject, cpu));
per_cpu(ici_cache_kobject, cpu) = NULL;
per_cpu(ici_index_kobject, cpu) = NULL;
free_cache_attributes(cpu);
}

Expand All @@ -895,14 +895,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
return err;

/* Allocate all required memory */
per_cpu(cache_kobject, cpu) =
per_cpu(ici_cache_kobject, cpu) =
kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
goto err_out;

per_cpu(index_kobject, cpu) = kzalloc(
per_cpu(ici_index_kobject, cpu) = kzalloc(
sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
if (unlikely(per_cpu(index_kobject, cpu) == NULL))
if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
goto err_out;

return 0;
Expand All @@ -926,7 +926,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
if (unlikely(retval < 0))
return retval;

retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
&ktype_percpu_entry,
&sys_dev->kobj, "%s", "cache");
if (retval < 0) {
Expand All @@ -940,20 +940,20 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_object->index = i;
retval = kobject_init_and_add(&(this_object->kobj),
&ktype_cache,
per_cpu(cache_kobject, cpu),
per_cpu(ici_cache_kobject, cpu),
"index%1lu", i);
if (unlikely(retval)) {
for (j = 0; j < i; j++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
kobject_put(per_cpu(cache_kobject, cpu));
kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
return retval;
}
kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
return 0;
}

Expand All @@ -962,15 +962,15 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;

if (per_cpu(cpuid4_info, cpu) == NULL)
if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return;
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
return;
cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

for (i = 0; i < num_cache_leaves; i++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
kobject_put(per_cpu(cache_kobject, cpu));
kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
}

Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kernel/ds.c
Original file line number Diff line number Diff line change
Expand Up @@ -265,13 +265,13 @@ struct ds_context {
int cpu;
};

static DEFINE_PER_CPU(struct ds_context *, cpu_context);
static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);


static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
{
struct ds_context **p_context =
(task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
(task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
struct ds_context *context = NULL;
struct ds_context *new_context = NULL;

Expand Down
Loading

0 comments on commit 0fe1e00

Please sign in to comment.