Commit

---
yaml
---
r: 69577
b: refs/heads/master
c: 0835761
h: refs/heads/master
i:
  69575: 1d45467
v: v3
Mike Travis authored and Linus Torvalds committed Oct 16, 2007
1 parent 451a366 commit 87d6ee4
Showing 13 changed files with 64 additions and 47 deletions.
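The diff converts the x86 cpu_core_map from a statically sized NR_CPUS array into a per-CPU variable, so each CPU's core-sibling mask lives in that CPU's per-CPU area rather than in one large array. The usual benefits are a smaller static footprint when NR_CPUS is much larger than the number of CPUs actually present, plus node-local access. A minimal sketch of the pattern applied throughout (the helper name is illustrative, not code from this commit):

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Before: one compile-time slot per possible CPU.
 *     cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 * After: one copy of the mask in each CPU's per-CPU area. */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);

/* Hypothetical helper: per_cpu(var, cpu) is an lvalue naming that CPU's
 * copy, so it substitutes directly for the old cpu_core_map[cpu]. */
static void mark_core_sibling(int cpu, int sibling)
{
	cpu_set(sibling, per_cpu(cpu_core_map, cpu));
}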
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: cc84634f29d5a92932400a2d52ca17dee2c8a462
refs/heads/master: 083576112940fda783d716fd5ccc744f81667b2f
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -595,7 +595,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
policy->cpus = cpu_core_map[cpu];
policy->cpus = per_cpu(cpu_core_map, cpu);
}
#endif

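The driver-side change in acpi-cpufreq.c is purely the accessor: cpumask_t is a plain struct wrapping a fixed-size bitmap, so assigning from per_cpu() copies the whole mask by value, exactly as the old array element did. A small sketch under that assumption (hypothetical helper name):

#include <linux/percpu.h>
#include <linux/cpumask.h>

DECLARE_PER_CPU(cpumask_t, cpu_core_map);

/* Hypothetical helper: structure assignment copies the entire mask. */
static void copy_core_siblings(cpumask_t *dst, int cpu)
{
	*dst = per_cpu(cpu_core_map, cpu);
}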
10 changes: 5 additions & 5 deletions trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -57,7 +57,7 @@ static struct powernow_k8_data *powernow_data[NR_CPUS];
static int cpu_family = CPU_OPTERON;

#ifndef CONFIG_SMP
static cpumask_t cpu_core_map[1];
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
#endif

/* Return a frequency in MHz, given an input fid */
@@ -667,7 +667,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,

dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
print_basics(data);

for (j = 0; j < data->numps; j++)
@@ -821,7 +821,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)

/* fill in data */
data->numps = data->acpi_data.state_count;
if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
print_basics(data);
powernow_k8_acpi_pst_values(data, 0);

@@ -1214,7 +1214,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (cpu_family == CPU_HW_PSTATE)
pol->cpus = cpumask_of_cpu(pol->cpu);
else
pol->cpus = cpu_core_map[pol->cpu];
pol->cpus = per_cpu(cpu_core_map, pol->cpu);
data->available_cores = &(pol->cpus);

/* Take a crude guess here.
@@ -1281,7 +1281,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
cpumask_t oldmask = current->cpus_allowed;
unsigned int khz = 0;

data = powernow_data[first_cpu(cpu_core_map[cpu])];
data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];

if (!data)
return -EINVAL;
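Two details in powernow-k8.c are worth calling out. On uniprocessor builds the architecture headers do not provide cpu_core_map, so the driver's old private one-element array becomes a private DEFINE_PER_CPU of the same name, and per_cpu() then resolves to that single copy. The driver also keys per-package work (such as print_basics()) off first_cpu() of the mask so it runs only once per core map. A hedged sketch of both points (helper name is illustrative):

#include <linux/percpu.h>
#include <linux/cpumask.h>

#ifndef CONFIG_SMP
/* UP build: supply a local definition so per_cpu(cpu_core_map, cpu)
 * still compiles; only one copy ever exists. */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
#else
DECLARE_PER_CPU(cpumask_t, cpu_core_map);	/* normally via <asm/smp.h> */
#endif

/* Hypothetical helper: true only for the first CPU listed in its own
 * core-sibling mask, i.e. once per core map. */
static int is_first_core_sibling(int cpu)
{
	return first_cpu(per_cpu(cpu_core_map, cpu)) == cpu;
}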
3 changes: 2 additions & 1 deletion trunk/arch/x86/kernel/cpu/proc.c
@@ -122,7 +122,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_X86_HT
if (c->x86_max_cores * smp_num_siblings > 1) {
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
seq_printf(m, "siblings\t: %d\n",
cpus_weight(per_cpu(cpu_core_map, n)));
seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
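The "siblings" line in /proc/cpuinfo is simply the population count of the CPU's core map, so only the lookup changes here. Sketch (hypothetical helper name):

#include <linux/percpu.h>
#include <linux/cpumask.h>

DECLARE_PER_CPU(cpumask_t, cpu_core_map);

/* Hypothetical helper: cpus_weight() counts the bits set in the mask. */
static int sibling_count(int cpu)
{
	return cpus_weight(per_cpu(cpu_core_map, cpu));
}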
6 changes: 3 additions & 3 deletions trunk/arch/x86/kernel/mce_amd_64.c
@@ -472,7 +472,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

#ifdef CONFIG_SMP
if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
i = first_cpu(cpu_core_map[cpu]);
i = first_cpu(per_cpu(cpu_core_map, cpu));

/* first core not up yet */
if (cpu_data[i].cpu_core_id)
@@ -492,7 +492,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;

b->cpus = cpu_core_map[cpu];
b->cpus = per_cpu(cpu_core_map, cpu);
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
@@ -509,7 +509,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
b->cpus = CPU_MASK_ALL;
#else
b->cpus = cpu_core_map[cpu];
b->cpus = per_cpu(cpu_core_map, cpu);
#endif
err = kobject_register(&b->kobj);
if (err)
3 changes: 2 additions & 1 deletion trunk/arch/x86/kernel/setup_64.c
@@ -1070,7 +1070,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
seq_printf(m, "siblings\t: %d\n",
cpus_weight(per_cpu(cpu_core_map, cpu)));
seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
34 changes: 17 additions & 17 deletions trunk/arch/x86/kernel/smpboot_32.c
@@ -74,8 +74,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
* And for power savings, we return cpu_core_map
*/
if (sched_mc_power_savings || sched_smt_power_savings)
return cpu_core_map[cpu];
return per_cpu(cpu_core_map, cpu);
else
return c->llc_shared_map;
}
@@ -321,8 +321,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
@@ -334,7 +334,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[cpu].llc_shared_map);

if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
@@ -346,8 +346,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[i].llc_shared_map);
}
if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
/*
* Does this new cpu bringup a new core?
*/
@@ -984,7 +984,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
" Using dummy APIC emulation.\n");
map_cpu_to_logical_apicid();
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
cpu_set(0, per_cpu(cpu_core_map, 0));
return;
}

@@ -1009,7 +1009,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
cpu_set(0, per_cpu(cpu_core_map, 0));
return;
}

@@ -1024,7 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
cpu_set(0, per_cpu(cpu_core_map, 0));
return;
}

@@ -1107,11 +1107,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
*/
for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
cpus_clear(per_cpu(cpu_core_map, cpu));
}

cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
cpu_set(0, per_cpu(cpu_core_map, 0));

smpboot_setup_io_apic();

@@ -1148,9 +1148,9 @@ void remove_siblinginfo(int cpu)
int sibling;
struct cpuinfo_x86 *c = cpu_data;

for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
cpu_clear(cpu, cpu_core_map[sibling]);
for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
/*
* last thread sibling in this cpu core going down
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1)
@@ -1160,7 +1160,7 @@ void remove_siblinginfo(int cpu)
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
cpus_clear(per_cpu(cpu_core_map, cpu));
c[cpu].phys_proc_id = 0;
c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
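smpboot_32.c owns the variable: the DEFINE_PER_CPU replaces the old array definition, and EXPORT_SYMBOL becomes EXPORT_PER_CPU_SYMBOL so modules such as cpufreq drivers keep linking against it. The maps are kept symmetric, so bring-up sets both directions and hot-unplug clears the departing CPU out of every sibling's mask. A condensed sketch of that shape (hypothetical helpers, not the kernel's exact functions):

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Hypothetical pair mirroring set_cpu_sibling_map()/remove_siblinginfo(). */
static void link_cores(int a, int b)
{
	cpu_set(b, per_cpu(cpu_core_map, a));
	cpu_set(a, per_cpu(cpu_core_map, b));
}

static void unlink_core(int cpu)
{
	int sibling;

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
	cpus_clear(per_cpu(cpu_core_map, cpu));
}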
24 changes: 12 additions & 12 deletions trunk/arch/x86/kernel/smpboot_64.c
@@ -95,8 +95,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/*
* Trampoline 80x86 program as an array.
@@ -243,7 +243,7 @@ cpumask_t cpu_coregroup_map(int cpu)
* And for power savings, we return cpu_core_map
*/
if (sched_mc_power_savings || sched_smt_power_savings)
return cpu_core_map[cpu];
return per_cpu(cpu_core_map, cpu);
else
return c->llc_shared_map;
}
@@ -264,8 +264,8 @@ static inline void set_cpu_sibling_map(int cpu)
c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
@@ -277,7 +277,7 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[cpu].llc_shared_map);

if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
@@ -289,8 +289,8 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[i].llc_shared_map);
}
if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i));
/*
* Does this new cpu bringup a new core?
*/
@@ -736,7 +736,7 @@ static __init void disable_smp(void)
else
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
cpu_set(0, per_cpu(cpu_core_map, 0));
}

#ifdef CONFIG_HOTPLUG_CPU
@@ -971,8 +971,8 @@ static void remove_siblinginfo(int cpu)
int sibling;
struct cpuinfo_x86 *c = cpu_data;

for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
cpu_clear(cpu, cpu_core_map[sibling]);
for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
/*
* last thread sibling in this cpu core going down
*/
@@ -983,7 +983,7 @@ static void remove_siblinginfo(int cpu)
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
cpus_clear(per_cpu(cpu_core_map, cpu));
c[cpu].phys_proc_id = 0;
c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
14 changes: 12 additions & 2 deletions trunk/arch/x86/xen/smp.c
@@ -148,7 +148,12 @@ void __init xen_smp_prepare_boot_cpu(void)

for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
/*
* cpu_core_map lives in a per cpu area that is cleared
* when the per cpu array is allocated.
*
* cpus_clear(per_cpu(cpu_core_map, cpu));
*/
}

xen_setup_vcpu_info_placement();
@@ -160,7 +165,12 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)

for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
/*
* cpu_core_map will be zeroed when the per
* cpu area is allocated.
*
* cpus_clear(per_cpu(cpu_core_map, cpu));
*/
}

smp_store_cpu_info(0);
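The Xen code drops the explicit clearing instead of converting it: a per-CPU variable defined without an initializer is zero in every CPU's copy, since each copy is produced from the zero-filled section image when the per-CPU areas are set up, which is what the comments above rely on. A sketch of the loop that is therefore no longer needed (assuming that zeroed-at-allocation behaviour):

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

DEFINE_PER_CPU(cpumask_t, cpu_core_map);	/* every copy starts zeroed */

/* Hypothetical and now redundant: each per-CPU copy is already empty. */
static void __init clear_core_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpus_clear(per_cpu(cpu_core_map, cpu));
}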
2 changes: 1 addition & 1 deletion trunk/include/asm-x86/smp_32.h
@@ -31,7 +31,7 @@ extern void smp_alloc_memory(void);
extern int pic_mode;
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
DECLARE_PER_CPU(cpumask_t, cpu_core_map);

extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
7 changes: 6 additions & 1 deletion trunk/include/asm-x86/smp_64.h
@@ -39,7 +39,12 @@ extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);

extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
/*
* cpu_core_map lives in a per cpu area
*
* extern cpumask_t cpu_core_map[NR_CPUS];
*/
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
extern u8 cpu_llc_id[NR_CPUS];

#define SMP_TRAMPOLINE_BASE 0x6000
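The header changes are the usual declare/define split for per-CPU variables: the header announces the variable to every includer, and exactly one translation unit provides the storage. Roughly:

/* In the shared header (asm-x86/smp_64.h): visible to all users. */
DECLARE_PER_CPU(cpumask_t, cpu_core_map);

/* In one .c file (smpboot_64.c): the actual definition and export. */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);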
2 changes: 1 addition & 1 deletion trunk/include/asm-x86/topology_32.h
@@ -30,7 +30,7 @@
#ifdef CONFIG_X86_HT
#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#endif

2 changes: 1 addition & 1 deletion trunk/include/asm-x86/topology_64.h
@@ -58,7 +58,7 @@ extern int __node_distance(int, int);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
#define smt_capable() (smp_num_siblings > 1)
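Because per_cpu() evaluates to an lvalue, topology_core_siblings() keeps working anywhere the old array element was used. A hedged usage sketch (hypothetical helper; the macro is only defined when CONFIG_X86_HT or CONFIG_SMP is set):

#include <linux/topology.h>
#include <linux/cpumask.h>

/* Hypothetical helper: do two CPUs appear in each other's core map? */
static int shares_core_map(int a, int b)
{
	return cpu_isset(b, topology_core_siblings(a));
}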
