[PATCH] init call cleanup
Trivial patch for CPU hotplug.  In the CPU-identify path, the cleanup is only
done for Intel CPUs; the other vendors still need the same treatment if they
are to support S3 with SMP.

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Li Shaohua authored and Linus Torvalds committed Jun 25, 2005
1 parent d720803 commit 0bb3184
Showing 10 changed files with 44 additions and 44 deletions.
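
What the patch changes, in one place: functions and data that must run again when a CPU is brought up after boot (CPU hotplug, or re-onlining secondaries on resume from S3) move from __init/__initdata, which the kernel discards once booting completes, to __devinit/__devinitdata, which stay resident when hotplug support is configured. The snippet below is a simplified sketch of those annotations, paraphrased from include/linux/init.h of that era; the exact section names and the CONFIG_HOTPLUG dependency are stated from memory, not copied from this tree.

/*
 * Simplified sketch -- not the literal kernel header.
 *
 * __init code and __initdata live in discardable sections that
 * free_initmem() releases once booting is complete, so a later call
 * into them (for example from the CPU-online path) would hit freed memory.
 */
#define __init          __attribute__((__section__(".init.text")))
#define __initdata      __attribute__((__section__(".init.data")))

#ifdef CONFIG_HOTPLUG
/*
 * Hotplug configured: keep "device init" code resident, because bringing
 * a CPU online later re-walks roughly the same setup path seen in this
 * diff (do_boot_cpu() -> start_secondary() -> cpu_init()/smp_callin()
 * -> smp_store_cpu_info() -> identify_cpu(), and so on).
 */
#define __devinit
#define __devinitdata
#else
/* No hotplug: safe to discard along with the rest of the init code. */
#define __devinit       __init
#define __devinitdata   __initdata
#endif

With that in mind, each hunk below is a one-line annotation swap at a definition that the CPU-online path can reach.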
14 changes: 7 additions & 7 deletions arch/i386/kernel/apic.c
@@ -364,7 +364,7 @@ void __init init_bsp_APIC(void)
apic_write_around(APIC_LVT1, value);
}

-void __init setup_local_APIC (void)
+void __devinit setup_local_APIC(void)
{
unsigned long oldvalue, value, ver, maxlvt;

@@ -635,7 +635,7 @@ static struct sys_device device_lapic = {
.cls = &lapic_sysclass,
};

-static void __init apic_pm_activate(void)
+static void __devinit apic_pm_activate(void)
{
apic_pm_state.active = 1;
}
@@ -856,7 +856,7 @@ void __init init_apic_mappings(void)
* but we do not accept timer interrupts yet. We only allow the BP
* to calibrate.
*/
-static unsigned int __init get_8254_timer_count(void)
+static unsigned int __devinit get_8254_timer_count(void)
{
extern spinlock_t i8253_lock;
unsigned long flags;
@@ -875,7 +875,7 @@ static unsigned int __init get_8254_timer_count(void)
}

/* next tick in 8254 can be caught by catching timer wraparound */
-static void __init wait_8254_wraparound(void)
+static void __devinit wait_8254_wraparound(void)
{
unsigned int curr_count, prev_count;

@@ -895,7 +895,7 @@ static void __init wait_8254_wraparound(void)
* Default initialization for 8254 timers. If we use other timers like HPET,
* we override this later
*/
-void (*wait_timer_tick)(void) __initdata = wait_8254_wraparound;
+void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;

/*
* This function sets up the local APIC timer, with a timeout of
@@ -931,7 +931,7 @@ static void __setup_APIC_LVTT(unsigned int clocks)
apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
}

-static void __init setup_APIC_timer(unsigned int clocks)
+static void __devinit setup_APIC_timer(unsigned int clocks)
{
unsigned long flags;

@@ -1044,7 +1044,7 @@ void __init setup_boot_APIC_clock(void)
local_irq_enable();
}

-void __init setup_secondary_APIC_clock(void)
+void __devinit setup_secondary_APIC_clock(void)
{
setup_APIC_timer(calibration_result);
}
30 changes: 15 additions & 15 deletions arch/i386/kernel/cpu/common.c
@@ -24,9 +24,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

-static int cachesize_override __initdata = -1;
-static int disable_x86_fxsr __initdata = 0;
-static int disable_x86_serial_nr __initdata = 1;
+static int cachesize_override __devinitdata = -1;
+static int disable_x86_fxsr __devinitdata = 0;
+static int disable_x86_serial_nr __devinitdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str)
}
__setup("cachesize=", cachesize_setup);

-int __init get_model_name(struct cpuinfo_x86 *c)
+int __devinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
char *p, *q;
@@ -89,7 +89,7 @@ int __init get_model_name(struct cpuinfo_x86 *c)
}


-void __init display_cacheinfo(struct cpuinfo_x86 *c)
+void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, ecx, edx, l2size;

@@ -130,7 +130,7 @@ void __init display_cacheinfo(struct cpuinfo_x86 *c)
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
struct cpu_model_info *info;

@@ -151,7 +151,7 @@ static char __init *table_lookup_model(struct cpuinfo_x86 *c)
}


-void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
char *v = c->x86_vendor_id;
int i;
@@ -202,7 +202,7 @@ static inline int flag_is_changeable_p(u32 flag)


/* Probe for the CPUID instruction */
-static int __init have_cpuid_p(void)
+static int __devinit have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
@@ -249,7 +249,7 @@ static void __init early_cpu_detect(void)
#endif
}

-void __init generic_identify(struct cpuinfo_x86 * c)
+void __devinit generic_identify(struct cpuinfo_x86 * c)
{
u32 tfms, xlvl;
int junk;
@@ -296,7 +296,7 @@ void __init generic_identify(struct cpuinfo_x86 * c)
}
}

-static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
/* Disable processor serial number */
@@ -324,7 +324,7 @@ __setup("serialnumber", x86_serial_nr_setup);
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
-void __init identify_cpu(struct cpuinfo_x86 *c)
+void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
int i;

@@ -438,7 +438,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
}

#ifdef CONFIG_X86_HT
-void __init detect_ht(struct cpuinfo_x86 *c)
+void __devinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
int index_msb, tmp;
@@ -493,7 +493,7 @@ void __init detect_ht(struct cpuinfo_x86 *c)
}
#endif

-void __init print_cpu_info(struct cpuinfo_x86 *c)
+void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
char *vendor = NULL;

@@ -516,7 +516,7 @@ void __init print_cpu_info(struct cpuinfo_x86 *c)
printk("\n");
}

-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;

/* This is hacky. :)
* We're emulating future behavior.
@@ -563,7 +563,7 @@ void __init early_cpu_init(void)
* and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across.
*/
-void __init cpu_init (void)
+void __devinit cpu_init(void)
{
int cpu = smp_processor_id();
struct tss_struct * t = &per_cpu(init_tss, cpu);
12 changes: 6 additions & 6 deletions arch/i386/kernel/cpu/intel.c
@@ -28,7 +28,7 @@ extern int trap_init_f00f_bug(void);
struct movsl_mask movsl_mask;
#endif

-void __init early_intel_workaround(struct cpuinfo_x86 *c)
+void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
{
if (c->x86_vendor != X86_VENDOR_INTEL)
return;
@@ -43,7 +43,7 @@ void __init early_intel_workaround(struct cpuinfo_x86 *c)
* This is called before we do cpu ident work
*/

-int __init ppro_with_ram_bug(void)
+int __devinit ppro_with_ram_bug(void)
{
/* Uses data from early_cpu_detect now */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -61,7 +61,7 @@ int __init ppro_with_ram_bug(void)
* P4 Xeon errata 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
-static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
unsigned long lo, hi;

@@ -80,7 +80,7 @@ static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
/*
* find out the number of processor cores on the die
*/
-static int __init num_cpu_cores(struct cpuinfo_x86 *c)
+static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
{
unsigned int eax;

@@ -98,7 +98,7 @@ static int __init num_cpu_cores(struct cpuinfo_x86 *c)
return 1;
}

-static void __init init_intel(struct cpuinfo_x86 *c)
+static void __devinit init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;
char *p = NULL;
@@ -204,7 +204,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
return size;
}

-static struct cpu_dev intel_cpu_dev __initdata = {
+static struct cpu_dev intel_cpu_dev __devinitdata = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
.c_models = {
4 changes: 2 additions & 2 deletions arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -28,7 +28,7 @@ struct _cache_table
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __initdata =
+static struct _cache_table cache_table[] __devinitdata =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -160,7 +160,7 @@ static int __init find_num_cache_leaves(void)
return retval;
}

-unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
2 changes: 1 addition & 1 deletion arch/i386/kernel/cpu/mcheck/mce.c
@@ -31,7 +31,7 @@ static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_
void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;

/* This has to be run for each processor */
-void __init mcheck_init(struct cpuinfo_x86 *c)
+void __devinit mcheck_init(struct cpuinfo_x86 *c)
{
if (mce_disabled==1)
return;
2 changes: 1 addition & 1 deletion arch/i386/kernel/cpu/mcheck/p5.c
@@ -29,7 +29,7 @@ static fastcall void pentium_machine_check(struct pt_regs * regs, long error_cod
}

/* Set up machine check reporting for processors with Intel style MCE */
-void __init intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;

2 changes: 1 addition & 1 deletion arch/i386/kernel/process.c
@@ -260,7 +260,7 @@ static void mwait_idle(void)
}
}

-void __init select_idle_routine(const struct cpuinfo_x86 *c)
+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_MWAIT)) {
printk("monitor/mwait feature present.\n");
2 changes: 1 addition & 1 deletion arch/i386/kernel/setup.c
@@ -60,7 +60,7 @@
address, and must not be in the .bss segment! */
unsigned long init_pg_tables_end __initdata = ~0UL;

-int disable_pse __initdata = 0;
+int disable_pse __devinitdata = 0;

/*
* Machine setup..
18 changes: 9 additions & 9 deletions arch/i386/kernel/smpboot.c
@@ -59,7 +59,7 @@
#include <smpboot_hooks.h>

/* Set if we find a B stepping CPU */
-static int __initdata smp_b_stepping;
+static int __devinitdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
* has made sure it's suitably aligned.
*/

-static unsigned long __init setup_trampoline(void)
+static unsigned long __devinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
return virt_to_phys(trampoline_base);
@@ -148,7 +148,7 @@ void __init smp_alloc_memory(void)
* a given CPU
*/

-static void __init smp_store_cpu_info(int id)
+static void __devinit smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = cpu_data + id;

@@ -342,7 +342,7 @@ extern void calibrate_delay(void);

static atomic_t init_deasserted;

-static void __init smp_callin(void)
+static void __devinit smp_callin(void)
{
int cpuid, phys_id;
unsigned long timeout;
@@ -468,7 +468,7 @@ set_cpu_sibling_map(int cpu)
/*
* Activate a secondary processor.
*/
-static void __init start_secondary(void *unused)
+static void __devinit start_secondary(void *unused)
{
/*
* Dont put anything before smp_callin(), SMP
@@ -521,7 +521,7 @@ static void __init start_secondary(void *unused)
* from the task structure
* This function must not return.
*/
-void __init initialize_secondary(void)
+void __devinit initialize_secondary(void)
{
/*
* We don't actually need to load the full TSS,
@@ -635,7 +635,7 @@ static inline void __inquire_remote_apic(int apicid)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later.
*/
-static int __init
+static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
unsigned long send_status = 0, accept_status = 0;
@@ -681,7 +681,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
#endif /* WAKE_SECONDARY_VIA_NMI */

#ifdef WAKE_SECONDARY_VIA_INIT
-static int __init
+static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
unsigned long send_status = 0, accept_status = 0;
@@ -817,7 +817,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)

extern cpumask_t cpu_initialized;

-static int __init do_boot_cpu(int apicid)
+static int __devinit do_boot_cpu(int apicid)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
2 changes: 1 addition & 1 deletion arch/i386/kernel/timers/timer_tsc.c
@@ -33,7 +33,7 @@ static struct timer_opts timer_tsc;

static inline void cpufreq_delayed_get(void);

-int tsc_disable __initdata = 0;
+int tsc_disable __devinitdata = 0;

extern spinlock_t i8253_lock;
