Commit 6684e32: Merge branch 'origin'
Trond Myklebust authored and committed May 15, 2007
2 parents 7531d69 + 0560551
Showing 39 changed files with 588 additions and 630 deletions.
1 change: 1 addition & 0 deletions MAINTAINERS
@@ -3267,6 +3267,7 @@ W: http://tpmdd.sourceforge.net
 P:	Marcel Selhorst
 M:	tpm@selhorst.net
 W:	http://www.prosec.rub.de/tpm/
+L:	tpmdd-devel@lists.sourceforge.net
 S:	Maintained
 
 Telecom Clock Driver for MCPL0010
1 change: 1 addition & 0 deletions arch/i386/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_MICROCODE) += microcode.o
 obj-$(CONFIG_APM) += apm.o
 obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP) += smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
 obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
36 changes: 28 additions & 8 deletions arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -341,15 +341,17 @@ static int powernow_acpi_init(void)
 	pc.val = (unsigned long) acpi_processor_perf->states[0].control;
 	for (i = 0; i < number_scales; i++) {
 		u8 fid, vid;
-		unsigned int speed;
+		struct acpi_processor_px *state =
+			&acpi_processor_perf->states[i];
+		unsigned int speed, speed_mhz;
 
-		pc.val = (unsigned long) acpi_processor_perf->states[i].control;
+		pc.val = (unsigned long) state->control;
 		dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
 			 i,
-			 (u32) acpi_processor_perf->states[i].core_frequency,
-			 (u32) acpi_processor_perf->states[i].power,
-			 (u32) acpi_processor_perf->states[i].transition_latency,
-			 (u32) acpi_processor_perf->states[i].control,
+			 (u32) state->core_frequency,
+			 (u32) state->power,
+			 (u32) state->transition_latency,
+			 (u32) state->control,
 			 pc.bits.sgtc);
 
 		vid = pc.bits.vid;
@@ -360,6 +362,18 @@ static int powernow_acpi_init(void)
 		powernow_table[i].index |= (vid << 8); /* upper 8 bits */
 
 		speed = powernow_table[i].frequency;
+		speed_mhz = speed / 1000;
+
+		/* processor_perflib will multiply the MHz value by 1000 to
+		 * get a KHz value (e.g. 1266000). However, powernow-k7 works
+		 * with true KHz values (e.g. 1266768). To ensure that all
+		 * powernow frequencies are available, we must ensure that
+		 * ACPI doesn't restrict them, so we round up the MHz value
+		 * to ensure that perflib's computed KHz value is greater than
+		 * or equal to powernow's KHz value.
+		 */
+		if (speed % 1000 > 0)
+			speed_mhz++;
 
 		if ((fid_codes[fid] % 10)==5) {
 			if (have_a0 == 1)
@@ -368,10 +382,16 @@
 
 		dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
 			 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
-			 fid_codes[fid] % 10, speed/1000, vid,
+			 fid_codes[fid] % 10, speed_mhz, vid,
 			 mobile_vid_table[vid]/1000,
 			 mobile_vid_table[vid]%1000);
 
+		if (state->core_frequency != speed_mhz) {
+			state->core_frequency = speed_mhz;
+			dprintk("  Corrected ACPI frequency to %d\n",
+				speed_mhz);
+		}
+
 		if (latency < pc.bits.sgtc)
 			latency = pc.bits.sgtc;
 
@@ -602,7 +622,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
 		result = powernow_acpi_init();
 		if (result) {
 			printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
-			printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
+			printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
 		}
 	} else {
 		/* SGTC use the bus clock as timer */
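The rounding rule added above can be checked with a quick standalone calculation. Here is a minimal sketch using the example value from the commit's own comment (1266768 KHz); the program and its names are illustrative, not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned int speed = 1266768;          /* true powernow KHz value */
		unsigned int speed_mhz = speed / 1000; /* truncates to 1266 MHz */

		if (speed % 1000 > 0)
			speed_mhz++;                   /* round up to 1267 MHz */

		/* perflib multiplies back: 1267 * 1000 = 1267000 KHz >= 1266768,
		 * so ACPI no longer filters out this powernow frequency */
		printf("%u KHz -> %u MHz -> perflib sees %u KHz\n",
		       speed, speed_mhz, speed_mhz * 1000);
		return 0;
	}

Without the round-up, perflib would recompute 1266 * 1000 = 1266000 KHz, which is below powernow's true 1266768 KHz and would cause that frequency to be rejected.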
2 changes: 1 addition & 1 deletion arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static int check_supported_cpu(unsigned int cpu)
 
 	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
 		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
-		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
+		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
 			printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
 			goto out;
 		}
4 changes: 2 additions & 2 deletions arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -46,8 +46,8 @@ struct powernow_k8_data {
 #define CPUID_XFAM 0x0ff00000 /* extended family */
 #define CPUID_XFAM_K8 0
 #define CPUID_XMOD 0x000f0000 /* extended model */
-#define CPUID_XMOD_REV_G 0x00060000
-#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
+#define CPUID_XMOD_REV_MASK 0x00080000
+#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
 #define CPUID_USE_XFAM_XMOD 0x00000f00
 #define CPUID_GET_MAX_CAPABILITIES 0x80000000
 #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
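To see what the widened cutoff changes, here is a small self-contained sketch of the check from check_supported_cpu() above; the eax value is hypothetical, chosen only to exercise the masks defined in this header:

	#include <stdio.h>

	#define CPUID_XFAM		0x0ff00000	/* extended family */
	#define CPUID_XFAM_K8		0
	#define CPUID_XMOD		0x000f0000	/* extended model */
	#define CPUID_XMOD_REV_MASK	0x00080000
	#define CPUID_USE_XFAM_XMOD	0x00000f00

	int main(void)
	{
		/* hypothetical CPUID fn 1 output: extended model 8, which the
		 * old REV_G cutoff (0x00060000) would have rejected */
		unsigned int eax = 0x00080ff0;

		/* mirrors the kernel test, inverted to express acceptance */
		if ((eax & CPUID_XFAM) == CPUID_XFAM_K8 &&
		    (eax & CPUID_USE_XFAM_XMOD) == CPUID_USE_XFAM_XMOD &&
		    (eax & CPUID_XMOD) <= CPUID_XMOD_REV_MASK)
			printf("supported: xmod 0x%05x <= 0x%05x\n",
			       eax & CPUID_XMOD, CPUID_XMOD_REV_MASK);
		else
			printf("not supported\n");
		return 0;
	}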
65 changes: 6 additions & 59 deletions arch/i386/kernel/smp.c
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,9 +546,10 @@ static void __smp_call_function(void (*func) (void *info), void *info,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+			      void (*func)(void *), void *info,
+			      int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumask_t mask,
 	return 0;
 }
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		WARN_ON(1);
-		put_cpu();
-		return -EBUSY;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 static void stop_this_cpu (void * dummy)
 {
 	local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
22 changes: 0 additions & 22 deletions arch/i386/kernel/smpboot.c
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc_idle_task(int cpu)
 #define alloc_idle_task(cpu) fork_idle(cpu)
 #endif
 
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
-   (still using the master per-cpu area), or a CPU doing it for a
-   secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-			__per_cpu_offset[cpu], 0xFFFFF,
-			0x80 | DESCTYPE_S | 0x2, 0x8);
-
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-	per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
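The init_gdt() removed here reappears unchanged in the new smpcommon.c below. As a side note on what its pack_descriptor() call encodes: under the standard x86 descriptor layout, 0x80 | DESCTYPE_S | 0x2 marks a present, writable data segment based at the CPU's per-cpu offset, and flags 0x8 sets the granularity bit so the 0xFFFFF limit is counted in 4 KiB pages. A hedged standalone sketch of that packing follows; the layout is textbook x86, the base value is made up, and this is not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t base  = 0x12340000;	/* stand-in for __per_cpu_offset[cpu] */
		uint32_t limit = 0xFFFFF;	/* in 4 KiB units when G=1 */
		uint32_t type  = 0x80 | 0x10 | 0x2;	/* present | code/data | writable */
		uint32_t flags = 0x8;		/* G=1: page-granular limit */

		/* classic two-word GDT descriptor packing */
		uint32_t a = ((base & 0xffff) << 16) | (limit & 0xffff);
		uint32_t b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
			     (limit & 0xf0000) | (type << 8) | (flags << 20);

		printf("descriptor: a=%08x b=%08x\n", a, b);
		return 0;
	}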
79 changes: 79 additions & 0 deletions arch/i386/kernel/smpcommon.c
@@ -0,0 +1,79 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT. This is either the boot CPU doing itself
+   (still using the master per-cpu area), or a CPU doing it for a
+   secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+			__per_cpu_offset[cpu], 0xFFFFF,
+			0x80 | DESCTYPE_S | 0x2, 0x8);
+
+	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+	per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+		      int wait)
+{
+	return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
+{
+	/* prevent preemption and reschedule on another processor */
+	int ret;
+	int me = get_cpu();
+	if (cpu == me) {
+		WARN_ON(1);
+		put_cpu();
+		return -EBUSY;
+	}
+
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
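Since the kernel-doc for smp_call_function() is terse, here is a hedged usage sketch; the caller and callback are hypothetical and not part of this commit, but they follow the documented rules (process context, interrupts enabled, fast non-blocking callback):

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <asm/atomic.h>

	static atomic_t acks = ATOMIC_INIT(0);

	/* runs on every other online CPU, in IPI context: keep it fast and
	 * non-blocking, exactly as the kernel-doc above demands */
	static void ack_ipi(void *unused)
	{
		atomic_inc(&acks);
	}

	/* call from process context with interrupts enabled; wait=1 returns
	 * only after ack_ipi() has finished everywhere; nonatomic is unused */
	static void ping_other_cpus(void)
	{
		if (smp_call_function(ack_ipi, NULL, 0, 1) == 0)
			printk(KERN_INFO "got acks from %d other cpus\n",
			       atomic_read(&acks));
	}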