
Commit

---
yaml
---
r: 234834
b: refs/heads/master
c: 8db78cc
h: refs/heads/master
v: v3
Tejun Heo authored and Ingo Molnar committed Jan 28, 2011
1 parent 4a05a69 commit 0a5aa09
Showing 7 changed files with 78 additions and 85 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: de2d9445f1627830ed2ebd00ee9d851986c940b5
refs/heads/master: 8db78cc4b4048e3add40bca1bc3e55057c319256
4 changes: 4 additions & 0 deletions trunk/arch/x86/include/asm/numa.h
@@ -34,11 +34,15 @@ static inline void set_apicid_to_node(int apicid, s16 node)
#ifdef CONFIG_NUMA
extern void __cpuinit numa_set_node(int cpu, int node);
extern void __cpuinit numa_clear_node(int cpu);
extern void __init numa_init_array(void);
extern void __init init_cpu_to_node(void);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void numa_init_array(void) { }
static inline void init_cpu_to_node(void) { }
static inline void numa_add_cpu(int cpu) { }
static inline void numa_remove_cpu(int cpu) { }
#endif /* CONFIG_NUMA */
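The #else branch above gives each newly shared declaration an empty static inline stub, so code built without CONFIG_NUMA can still call numa_init_array() and init_cpu_to_node() unconditionally; now that numa.h declares init_cpu_to_node() for both 32-bit and 64-bit, the call site in setup_arch() no longer needs a guard (see the setup.c hunk below). A minimal, self-contained sketch of that stub pattern follows; the DEMO_CONFIG_NUMA toggle and the *_demo name are illustrative and not part of the patch.

/* Illustrative sketch only: the stub pattern, outside the kernel. */
#include <stdio.h>

#define DEMO_CONFIG_NUMA 1              /* flip to 0 to exercise the stub path */

#if DEMO_CONFIG_NUMA
static void init_cpu_to_node_demo(void)
{
        printf("real NUMA setup runs\n");
}
#else
static inline void init_cpu_to_node_demo(void) { }     /* compiles to nothing */
#endif

int main(void)
{
        /* No #ifdef at the call site, mirroring the setup_arch() change below. */
        init_cpu_to_node_demo();
        return 0;
}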
3 changes: 0 additions & 3 deletions trunk/arch/x86/include/asm/numa_64.h
@@ -13,7 +13,6 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks,

#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))

extern void numa_init_array(void);
extern int numa_off;

extern unsigned long numa_free_all_bootmem(void);
@@ -28,7 +27,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
*/
#define NODE_MIN_SIZE (4*1024*1024)

extern void __init init_cpu_to_node(void);
extern int __cpuinit numa_cpu_node(int cpu);

#ifdef CONFIG_NUMA_EMU
@@ -37,7 +35,6 @@ extern int __cpuinit numa_cpu_node(int cpu);
void numa_emu_cmdline(char *);
#endif /* CONFIG_NUMA_EMU */
#else
static inline void init_cpu_to_node(void) { }
static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
#endif

2 changes: 0 additions & 2 deletions trunk/arch/x86/kernel/setup.c
@@ -1040,9 +1040,7 @@ void __init setup_arch(char **cmdline_p)

prefill_possible_map();

#ifdef CONFIG_X86_64
init_cpu_to_node();
#endif

init_apic_mappings();
ioapic_and_gsi_init();
76 changes: 72 additions & 4 deletions trunk/arch/x86/mm/numa.c
@@ -38,11 +38,7 @@ EXPORT_SYMBOL(node_to_cpumask_map);
/*
* Map cpu index to node index
*/
#ifdef CONFIG_X86_32
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, 0);
#else
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
#endif
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void __cpuinit numa_set_node(int cpu, int node)
@@ -99,6 +95,78 @@ void __init setup_node_to_cpumask_map(void)
pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

/*
* There are unfortunately some poorly designed mainboards around that
* only connect memory to a single CPU. This breaks the 1:1 cpu->node
* mapping. To avoid this fill in the mapping for all possible CPUs,
* as the number of CPUs is not known yet. We round robin the existing
* nodes.
*/
void __init numa_init_array(void)
{
int rr, i;

rr = first_node(node_online_map);
for (i = 0; i < nr_cpu_ids; i++) {
if (early_cpu_to_node(i) != NUMA_NO_NODE)
continue;
numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
rr = first_node(node_online_map);
}
}

static __init int find_near_online_node(int node)
{
int n, val;
int min_val = INT_MAX;
int best_node = -1;

for_each_online_node(n) {
val = node_distance(node, n);

if (val < min_val) {
min_val = val;
best_node = n;
}
}

return best_node;
}

/*
* Setup early cpu_to_node.
*
* Populate cpu_to_node[] only if x86_cpu_to_apicid[],
* and apicid_to_node[] tables have valid entries for a CPU.
* This means we skip cpu_to_node[] initialisation for NUMA
* emulation and faking node case (when running a kernel compiled
* for NUMA on a non NUMA box), which is OK as cpu_to_node[]
* is already initialized in a round robin manner at numa_init_array,
* prior to this call, and this initialization is good enough
* for the fake NUMA cases.
*
* Called before the per_cpu areas are setup.
*/
void __init init_cpu_to_node(void)
{
int cpu;
u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

BUG_ON(cpu_to_apicid == NULL);

for_each_possible_cpu(cpu) {
int node = numa_cpu_node(cpu);

if (node == NUMA_NO_NODE)
continue;
if (!node_online(node))
node = find_near_online_node(node);
numa_set_node(cpu, node);
}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
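The two comments added above describe the policy that the moved code implements: CPUs the firmware left without a node are round-robined across the online nodes by numa_init_array(), and a CPU whose node is offline is pushed to the nearest online node via node_distance() in init_cpu_to_node(). The userspace sketch below simulates that policy end to end; the node count, the online map, the distance table, and every *_demo identifier are assumptions made for the example, not values taken from the kernel.

/* Illustrative simulation of the round-robin and nearest-node fallbacks. */
#include <limits.h>
#include <stdio.h>

#define DEMO_NR_CPUS   8
#define DEMO_NR_NODES  4
#define DEMO_NO_NODE  (-1)

/* Hypothetical topology: nodes 0 and 2 online, SLIT-style distance table. */
static const int demo_node_online[DEMO_NR_NODES] = { 1, 0, 1, 0 };
static const int demo_node_distance[DEMO_NR_NODES][DEMO_NR_NODES] = {
        { 10, 20, 20, 30 },
        { 20, 10, 30, 20 },
        { 20, 30, 10, 20 },
        { 30, 20, 20, 10 },
};

/* Firmware mapped CPUs 0-2 only; CPU 2 points at offline node 3. */
static int demo_cpu_to_node[DEMO_NR_CPUS] = {
        0, 2, 3, DEMO_NO_NODE, DEMO_NO_NODE, DEMO_NO_NODE, DEMO_NO_NODE, DEMO_NO_NODE
};

static int demo_first_online_node(void)
{
        for (int n = 0; n < DEMO_NR_NODES; n++)
                if (demo_node_online[n])
                        return n;
        return DEMO_NO_NODE;
}

static int demo_next_online_node(int n)
{
        while (++n < DEMO_NR_NODES)
                if (demo_node_online[n])
                        return n;
        return demo_first_online_node();        /* wrap, like the MAX_NUMNODES check */
}

/* Mirrors numa_init_array(): round-robin CPUs that have no node yet. */
static void demo_numa_init_array(void)
{
        int rr = demo_first_online_node();

        for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++) {
                if (demo_cpu_to_node[cpu] != DEMO_NO_NODE)
                        continue;
                demo_cpu_to_node[cpu] = rr;
                rr = demo_next_online_node(rr);
        }
}

/* Mirrors find_near_online_node(): online node with the smallest distance. */
static int demo_find_near_online_node(int node)
{
        int best = DEMO_NO_NODE, min_val = INT_MAX;

        for (int n = 0; n < DEMO_NR_NODES; n++) {
                if (!demo_node_online[n])
                        continue;
                if (demo_node_distance[node][n] < min_val) {
                        min_val = demo_node_distance[node][n];
                        best = n;
                }
        }
        return best;
}

int main(void)
{
        demo_numa_init_array();

        /* Mirrors the init_cpu_to_node() loop: repair CPUs on offline nodes. */
        for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++) {
                int node = demo_cpu_to_node[cpu];

                if (node == DEMO_NO_NODE)
                        continue;
                if (!demo_node_online[node])
                        demo_cpu_to_node[cpu] = demo_find_near_online_node(node);
        }

        for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
                printf("cpu%d -> node %d\n", cpu, demo_cpu_to_node[cpu]);
        return 0;
}

With these demo inputs, CPU 2 ends up on node 2 (the nearest online node to its offline node 3), and CPUs 3 through 7 alternate between nodes 0 and 2, which is the behavior the comments above describe.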
1 change: 1 addition & 0 deletions trunk/arch/x86/mm/numa_32.c
@@ -367,6 +367,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
*/

get_memcfg_numa();
numa_init_array();

kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);

75 changes: 0 additions & 75 deletions trunk/arch/x86/mm/numa_64.c
@@ -224,28 +224,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
node_set_online(nodeid);
}

/*
* There are unfortunately some poorly designed mainboards around that
* only connect memory to a single CPU. This breaks the 1:1 cpu->node
* mapping. To avoid this fill in the mapping for all possible CPUs,
* as the number of CPUs is not known yet. We round robin the existing
* nodes.
*/
void __init numa_init_array(void)
{
int rr, i;

rr = first_node(node_online_map);
for (i = 0; i < nr_cpu_ids; i++) {
if (early_cpu_to_node(i) != NUMA_NO_NODE)
continue;
numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
rr = first_node(node_online_map);
}
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
@@ -664,59 +642,6 @@ unsigned long __init numa_free_all_bootmem(void)
return pages;
}

#ifdef CONFIG_NUMA

static __init int find_near_online_node(int node)
{
int n, val;
int min_val = INT_MAX;
int best_node = -1;

for_each_online_node(n) {
val = node_distance(node, n);

if (val < min_val) {
min_val = val;
best_node = n;
}
}

return best_node;
}

/*
* Setup early cpu_to_node.
*
* Populate cpu_to_node[] only if x86_cpu_to_apicid[],
* and apicid_to_node[] tables have valid entries for a CPU.
* This means we skip cpu_to_node[] initialisation for NUMA
* emulation and faking node case (when running a kernel compiled
* for NUMA on a non NUMA box), which is OK as cpu_to_node[]
* is already initialized in a round robin manner at numa_init_array,
* prior to this call, and this initialization is good enough
* for the fake NUMA cases.
*
* Called before the per_cpu areas are setup.
*/
void __init init_cpu_to_node(void)
{
int cpu;
u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

BUG_ON(cpu_to_apicid == NULL);

for_each_possible_cpu(cpu) {
int node = numa_cpu_node(cpu);

if (node == NUMA_NO_NODE)
continue;
if (!node_online(node))
node = find_near_online_node(node);
numa_set_node(cpu, node);
}
}
#endif

int __cpuinit numa_cpu_node(int cpu)
{
int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
