Commit

---
r: 136502
b: refs/heads/master
c: 6470aff
h: refs/heads/master
v: v3
Brian Gerst authored and Tejun Heo committed Jan 27, 2009
1 parent 706585d commit 25523f1
Showing 4 changed files with 229 additions and 233 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0d77e7f04d5da160307f4f5c030a171e004f602b
+refs/heads/master: 6470aff619fbb9dff8dfe8afa5033084cd55ca20
6 changes: 6 additions & 0 deletions trunk/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -120,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
+extern void setup_node_to_cpumask_map(void);
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -218,6 +222,8 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
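The topology.h additions are the interface side of the move: one hunk declares the real setup_node_to_cpumask_map() extern in the configuration that actually implements it, and the other two hunks add empty static inline stubs, so callers can invoke the function unconditionally without #ifdef guards around the call site. A minimal, self-contained sketch of that idiom follows; it is an illustration, not the kernel header, and HAVE_NUMA_MAP is an invented stand-in for the real config test:

/* stub_idiom.c - "extern where implemented, empty inline stub otherwise". */
#include <stdio.h>

#ifdef HAVE_NUMA_MAP
/* Real implementation is provided by another translation unit. */
extern void setup_node_to_cpumask_map(void);
#else
/* Empty stub: the call below compiles away to nothing. */
static inline void setup_node_to_cpumask_map(void) { }
#endif

int main(void)
{
	setup_node_to_cpumask_map();	/* no #ifdef needed at the call site */
	printf("setup done\n");
	return 0;
}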
237 changes: 5 additions & 232 deletions trunk/arch/x86/kernel/setup_percpu.c
@@ -51,32 +51,6 @@ DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA	1	/* (used later) */
-DEFINE_PER_CPU(int, node_number) = 0;
-EXPORT_PER_CPU_SYMBOL(node_number);
-
-/*
- * Map cpu index to node index
- */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/*
- * Which logical CPUs are on which nodes
- */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/*
- * Setup node_to_cpumask_map
- */
-static void __init setup_node_to_cpumask_map(void);
-
-#else
-static inline void setup_node_to_cpumask_map(void) { }
-#endif
-
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
@@ -163,13 +137,13 @@ void __init setup_per_cpu_areas(void)
 			early_per_cpu_map(x86_cpu_to_apicid, cpu);
 		per_cpu(x86_bios_cpu_apicid, cpu) =
 			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-		per_cpu(x86_cpu_to_node_map, cpu) =
-			early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
 #ifdef CONFIG_X86_64
 		per_cpu(irq_stack_ptr, cpu) =
 			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+		per_cpu(x86_cpu_to_node_map, cpu) =
+			early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
 		/*
 		 * Up to this point, CPU0 has been using .data.init
 		 * area. Reload %gs offset for CPU0.
@@ -184,7 +158,7 @@ void __init setup_per_cpu_areas(void)
 	/* indicate the early static arrays will soon be gone */
 	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
 	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
 	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif

@@ -197,204 +171,3 @@ void __init setup_per_cpu_areas(void)
 
 #endif
 
-#ifdef X86_64_NUMA
-
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: node_to_cpumask() is not valid until after this is done.
- * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
- */
-static void __init setup_node_to_cpumask_map(void)
-{
-	unsigned int node, num = 0;
-	cpumask_t *map;
-
-	/* setup nr_node_ids if not done yet */
-	if (nr_node_ids == MAX_NUMNODES) {
-		for_each_node_mask(node, node_possible_map)
-			num = node;
-		nr_node_ids = num + 1;
-	}
-
-	/* allocate the map */
-	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
-
-	pr_debug("Node to cpumask map at %p for %d nodes\n",
-		 map, nr_node_ids);
-
-	/* node_to_cpumask() will now work */
-	node_to_cpumask_map = map;
-}
-
-void __cpuinit numa_set_node(int cpu, int node)
-{
-	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-	/* early setting, no percpu area yet */
-	if (cpu_to_node_map) {
-		cpu_to_node_map[cpu] = node;
-		return;
-	}
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
-		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
-		dump_stack();
-		return;
-	}
-#endif
-	per_cpu(x86_cpu_to_node_map, cpu) = node;
-
-	if (node != NUMA_NO_NODE)
-		per_cpu(node_number, cpu) = node;
-}
-
-void __cpuinit numa_clear_node(int cpu)
-{
-	numa_set_node(cpu, NUMA_NO_NODE);
-}
-
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
-	int node = early_cpu_to_node(cpu);
-	cpumask_t *mask;
-	char buf[64];
-
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map NULL\n");
-		dump_stack();
-		return;
-	}
-
-	mask = &node_to_cpumask_map[node];
-	if (enable)
-		cpu_set(cpu, *mask);
-	else
-		cpu_clear(cpu, *mask);
-
-	cpulist_scnprintf(buf, sizeof(buf), mask);
-	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
-}
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-	numa_set_cpumask(cpu, 1);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-	numa_set_cpumask(cpu, 0);
-}
-
-int cpu_to_node(int cpu)
-{
-	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-		printk(KERN_WARNING
-			"cpu_to_node(%d): usage too early!\n", cpu);
-		dump_stack();
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-	}
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-EXPORT_SYMBOL(cpu_to_node);
-
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
-{
-	if (early_per_cpu_ptr(x86_cpu_to_node_map))
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-	if (!per_cpu_offset(cpu)) {
-		printk(KERN_WARNING
-			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-		dump_stack();
-		return NUMA_NO_NODE;
-	}
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
-
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
-			node);
-		dump_stack();
-		return (const cpumask_t *)&cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return &cpu_mask_none;
-	}
-	return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used. The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-		dump_stack();
-		return cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return cpu_mask_none;
-	}
-	return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-#endif /* X86_64_NUMA */

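The setup_per_cpu_areas() hunks keep the early-to-percpu handoff the removed code relied on: x86_cpu_to_node_map values collected in a static early array before the per-cpu areas exist are copied into each CPU's per-cpu slot, and the early pointer is then cleared so that later lookups (as in the removed numa_set_node() and early_cpu_to_node() above) take the per-cpu path. A condensed, self-contained sketch of that handoff, with the per-cpu machinery reduced to plain arrays; early_map, percpu_map and NR_CPUS_DEMO are invented names for illustration, not kernel symbols:

/* handoff.c - simplified model of the early map -> per-cpu copy. */
#include <stdio.h>

#define NR_CPUS_DEMO	4

/* Early boot: a static array is the only storage available. */
static int early_map_storage[NR_CPUS_DEMO] = { 0, 0, 1, 1 };
static int *early_map = early_map_storage;

/* Stand-in for the real per-cpu area (one slot per CPU). */
static int percpu_map[NR_CPUS_DEMO];

static int cpu_to_node_demo(int cpu)
{
	/* Before the handoff, fall back to the early array. */
	if (early_map)
		return early_map[cpu];
	return percpu_map[cpu];
}

int main(void)
{
	int cpu;

	/* setup_per_cpu_areas()-style handoff: copy each value, then
	 * retire the early array so later lookups use the per-cpu copy. */
	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		percpu_map[cpu] = early_map[cpu];
	early_map = NULL;

	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		printf("cpu %d -> node %d\n", cpu, cpu_to_node_demo(cpu));
	return 0;
}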