x86: order functions in cpu/common.c and cpu/common_64.c v2
v2: make 64 bit get c->x86_cache_alignment = c->x86_clflush_size

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Yinghai Lu authored and Ingo Molnar committed Sep 4, 2008
1 parent 3da99c9 commit 9d31d35
Showing 2 changed files with 156 additions and 140 deletions.
arch/x86/kernel/cpu/common.c: 105 additions & 85 deletions
@@ -60,6 +60,18 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+        struct desc_ptr gdt_descr;
+
+        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+        gdt_descr.size = GDT_SIZE - 1;
+        load_gdt(&gdt_descr);
+        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
@@ -123,15 +135,15 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-        unsigned int n, dummy, ecx, edx, l2size;
+        unsigned int n, dummy, ebx, ecx, edx, l2size;
 
         n = c->extended_cpuid_level;
 
         if (n >= 0x80000005) {
-                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-                                edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-                c->x86_cache_size = (ecx>>24)+(edx>>24);
+                        edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+                c->x86_cache_size = (ecx>>24) + (edx>>24);
         }
 
         if (n < 0x80000006)     /* Some chips just has a large L1. */
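
For reference, CPUID leaf 0x80000005 (AMD-defined) packs the L1 size in KB into bits 31:24 and the line size into bits 7:0, with the D-cache reported in ECX and the I-cache in EDX; the &ebx added above replaces a second &dummy, bringing the 32-bit function in line with its 64-bit counterpart. A minimal user-space sketch of the same decoding, on made-up register values rather than a real cpuid call:

    #include <stdio.h>

    /* Illustrative decode of CPUID leaf 0x80000005, mirroring what
     * display_cacheinfo() does; the values below are invented. */
    int main(void)
    {
        unsigned int ecx = 0x40020140;  /* example: L1 D-cache, 64K, 64-byte lines */
        unsigned int edx = 0x40020140;  /* example: L1 I-cache, 64K, 64-byte lines */

        printf("CPU: L1 I Cache: %uK (%u bytes/line), D cache %uK (%u bytes/line)\n",
               edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
        printf("x86_cache_size = %uK\n", (ecx >> 24) + (edx >> 24));
        return 0;
    }
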
@@ -185,6 +197,51 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
         return NULL;            /* Not found */
 }
 
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+{
+        u32 eax, ebx, ecx, edx;
+        int index_msb, core_bits;
+
+        cpuid(1, &eax, &ebx, &ecx, &edx);
+
+        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+                return;
+
+        smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+        if (smp_num_siblings == 1) {
+                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+        } else if (smp_num_siblings > 1) {
+
+                if (smp_num_siblings > NR_CPUS) {
+                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+                                        smp_num_siblings);
+                        smp_num_siblings = 1;
+                        return;
+                }
+
+                index_msb = get_count_order(smp_num_siblings);
+                c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+                       c->phys_proc_id);
+
+                smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+                index_msb = get_count_order(smp_num_siblings);
+
+                core_bits = get_count_order(c->x86_max_cores);
+
+                c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+                                 ((1 << core_bits) - 1);
+
+                if (c->x86_max_cores > 1)
+                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+                               c->cpu_core_id);
+        }
+}
+#endif
 
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
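
The topology arithmetic above is easier to follow with concrete numbers. In the kernel, get_count_order(n) behaves as ceil(log2(n)), and on the common flat-APIC path phys_pkg_id(apicid, shift) reduces to apicid >> shift. The sketch below uses simplified stand-ins for both helpers (not the kernel implementations) and runs them on an invented 4-core, 8-thread package:

    #include <stdio.h>

    /* Stand-in for the kernel's get_count_order(): ceil(log2(count)). */
    static int count_order(unsigned int count)
    {
        int order = 0;
        while ((1u << order) < count)
            order++;
        return order;
    }

    int main(void)
    {
        /* Example topology: 4 cores x 2 threads, initial APIC ID 5 (0b101). */
        unsigned int initial_apicid = 5;
        int x86_max_cores = 4;
        int smp_num_siblings = 8;   /* logical CPUs per package, CPUID.1 EBX[23:16] */

        /* phys_pkg_id(apicid, n) is essentially apicid >> n here. */
        int index_msb = count_order(smp_num_siblings);
        int phys_proc_id = initial_apicid >> index_msb;

        smp_num_siblings /= x86_max_cores;          /* threads per core */
        index_msb = count_order(smp_num_siblings);
        int core_bits = count_order(x86_max_cores);
        int cpu_core_id = (initial_apicid >> index_msb) & ((1 << core_bits) - 1);

        /* Prints: Physical Processor ID: 0, Core ID: 2 */
        printf("Physical Processor ID: %d, Core ID: %d\n",
               phys_proc_id, cpu_core_id);
        return 0;
    }
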
@@ -258,7 +315,26 @@ static int __cpuinit have_cpuid_p(void)
         return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-void __init cpu_detect(struct cpuinfo_x86 *c)
+static void __init early_cpu_support_print(void)
+{
+        int i,j;
+        struct cpu_dev *cpu_devx;
+
+        printk("KERNEL supported cpus:\n");
+        for (i = 0; i < X86_VENDOR_NUM; i++) {
+                cpu_devx = cpu_devs[i];
+                if (!cpu_devx)
+                        continue;
+                for (j = 0; j < 2; j++) {
+                        if (!cpu_devx->c_ident[j])
+                                continue;
+                        printk("  %s %s\n", cpu_devx->c_vendor,
+                                cpu_devx->c_ident[j]);
+                }
+        }
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
         /* Get vendor name */
         cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -267,19 +343,20 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
               (unsigned int *)&c->x86_vendor_id[4]);
 
         c->x86 = 4;
+        /* Intel-defined flags: level 0x00000001 */
         if (c->cpuid_level >= 0x00000001) {
                 u32 junk, tfms, cap0, misc;
                 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-                c->x86 = (tfms >> 8) & 15;
-                c->x86_model = (tfms >> 4) & 15;
+                c->x86 = (tfms >> 8) & 0xf;
+                c->x86_model = (tfms >> 4) & 0xf;
+                c->x86_mask = tfms & 0xf;
                 if (c->x86 == 0xf)
                         c->x86 += (tfms >> 20) & 0xff;
                 if (c->x86 >= 0x6)
-                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
-                c->x86_mask = tfms & 15;
+                        c->x86_model += ((tfms >> 16) & 0xf) << 4;
                 if (cap0 & (1<<19)) {
-                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
                         c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+                        c->x86_cache_alignment = c->x86_clflush_size;
                 }
         }
 }
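
tfms here is EAX of CPUID leaf 1: stepping in bits 3:0, model in 7:4, family in 11:8, extended model in 19:16, and extended family in 27:20; cap0 bit 19 (CLFSH) gates the CLFLUSH line size, which misc (EBX) reports in bits 15:8 as a count of 8-byte chunks. A self-contained sketch of the same field extraction, using a Nehalem-era signature purely as sample input (no cpuid is executed):

    #include <stdio.h>

    /* Decode CPUID.1:EAX the way cpu_detect() does. */
    int main(void)
    {
        unsigned int tfms = 0x000106a5;  /* EAX: family/model/stepping (sample) */
        unsigned int misc = 0x00000800;  /* EBX: CLFLUSH field = 8 -> 64 bytes */

        unsigned int family   = (tfms >> 8) & 0xf;
        unsigned int model    = (tfms >> 4) & 0xf;
        unsigned int stepping = tfms & 0xf;

        if (family == 0xf)               /* extended family kicks in at 0xf */
            family += (tfms >> 20) & 0xff;
        if (family >= 0x6)               /* extended model widens the model */
            model += ((tfms >> 16) & 0xf) << 4;

        /* Prints: family 0x6, model 0x1a, stepping 0x5, clflush 64 bytes */
        printf("family 0x%x, model 0x%x, stepping 0x%x, clflush %u bytes\n",
               family, model, stepping, ((misc >> 8) & 0xff) * 8);
        return 0;
    }
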
@@ -341,6 +418,17 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
         validate_pat_support(c);
 }
 
+void __init early_cpu_init(void)
+{
+        struct cpu_vendor_dev *cvdev;
+
+        for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
+        early_cpu_support_print();
+        early_identify_cpu(&boot_cpu_data);
+}
+
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
  * family >= 6, unfortunately, that's not true in practice because
@@ -500,7 +588,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
          */
         if (c != &boot_cpu_data) {
                 /* AND the already accumulated flags with these */
-                for (i = 0 ; i < NCAPINTS ; i++)
+                for (i = 0; i < NCAPINTS; i++)
                         boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
         }
 
@@ -529,52 +617,6 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
         mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-        u32 eax, ebx, ecx, edx;
-        int index_msb, core_bits;
-
-        cpuid(1, &eax, &ebx, &ecx, &edx);
-
-        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-                return;
-
-        smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-        if (smp_num_siblings == 1) {
-                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-        } else if (smp_num_siblings > 1) {
-
-                if (smp_num_siblings > NR_CPUS) {
-                        printk(KERN_WARNING "CPU: Unsupported number of the "
-                                        "siblings %d", smp_num_siblings);
-                        smp_num_siblings = 1;
-                        return;
-                }
-
-                index_msb = get_count_order(smp_num_siblings);
-                c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-                       c->phys_proc_id);
-
-                smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-                index_msb = get_count_order(smp_num_siblings) ;
-
-                core_bits = get_count_order(c->x86_max_cores);
-
-                c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-                                 ((1 << core_bits) - 1);
-
-                if (c->x86_max_cores > 1)
-                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-                               c->cpu_core_id);
-        }
-}
-#endif
-
 static __init int setup_noclflush(char *arg)
 {
         setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -592,17 +634,17 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
                 vendor = c->x86_vendor_id;
 
         if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-                printk("%s ", vendor);
+                printk(KERN_CONT "%s ", vendor);
 
-        if (!c->x86_model_id[0])
-                printk("%d86", c->x86);
+        if (c->x86_model_id[0])
+                printk(KERN_CONT "%s", c->x86_model_id);
         else
-                printk("%s", c->x86_model_id);
+                printk(KERN_CONT "%d86", c->x86);
 
         if (c->x86_mask || c->cpuid_level >= 0)
-                printk(" stepping %02x\n", c->x86_mask);
+                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
         else
-                printk("\n");
+                printk(KERN_CONT "\n");
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -618,16 +660,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-        struct cpu_vendor_dev *cvdev;
-
-        for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
-                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-        early_identify_cpu(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -636,18 +668,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
         return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-        struct desc_ptr gdt_descr;
-
-        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-        gdt_descr.size = GDT_SIZE - 1;
-        load_gdt(&gdt_descr);
-        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
