
x86, NUMA: Enable CONFIG_AMD_NUMA on 32bit too
Now that the NUMA init path is unified, amdtopology can be enabled on
32bit.  Make amdtopology.c safe on 32bit by explicitly using u64 and
drop the X86_64 dependency from Kconfig.

linux/bootmem.h is now included for the max_pfn declaration.
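
As a rough illustration of why the u64 switch matters (a standalone sketch, not
kernel code; the PAGE_SHIFT value and the example pfn are made up for the demo):
on 32bit x86 an unsigned long is only 32 bits wide, so a physical address above
4GB, such as PFN_PHYS(max_pfn) on a PAE machine with more than 4GB of RAM, would
be silently truncated, while a u64 keeps the full value.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4KB pages, as on x86 */

int main(void)
{
	uint64_t max_pfn = 0x140000;			/* hypothetical: 5GB of RAM */
	uint64_t phys_end = max_pfn << PAGE_SHIFT;	/* what a PFN_PHYS()-style macro yields */

	uint32_t truncated = (uint32_t)phys_end;	/* 32bit unsigned long: high bits lost */
	uint64_t kept = phys_end;			/* u64: full physical address kept */

	printf("truncated: %#x\n", truncated);			/* 0x40000000 (1GB)  */
	printf("kept:      %#llx\n", (unsigned long long)kept);	/* 0x140000000 (5GB) */
	return 0;
}

The same width change is why the printk format specifiers in the diff below move
from %lx to %Lx.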

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Tejun Heo committed May 2, 2011
1 parent c6f5887 commit 2706a0b
Showing 2 changed files with 12 additions and 11 deletions.
2 changes: 1 addition & 1 deletion arch/x86/Kconfig
@@ -1174,7 +1174,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
-	depends on X86_64 && NUMA && PCI
+	depends on NUMA && PCI
 	---help---
 	  Enable AMD NUMA node topology detection. You should say Y here if
 	  you have a multi processor AMD system. This uses an old method to
21 changes: 11 additions & 10 deletions arch/x86/mm/amdtopology.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/io.h>
 #include <linux/pci_ids.h>
@@ -69,10 +70,10 @@ static __init void early_get_boot_cpu_id(void)
 
 int __init amd_numa_init(void)
 {
-	unsigned long start = PFN_PHYS(0);
-	unsigned long end = PFN_PHYS(max_pfn);
+	u64 start = PFN_PHYS(0);
+	u64 end = PFN_PHYS(max_pfn);
 	unsigned numnodes;
-	unsigned long prevbase;
+	u64 prevbase;
 	int i, j, nb;
 	u32 nodeid, reg;
 	unsigned int bits, cores, apicid_base;
@@ -95,7 +96,7 @@ int __init amd_numa_init(void)
 
 	prevbase = 0;
 	for (i = 0; i < 8; i++) {
-		unsigned long base, limit;
+		u64 base, limit;
 
 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
@@ -107,18 +108,18 @@ int __init amd_numa_init(void)
 			continue;
 		}
 		if (nodeid >= numnodes) {
-			pr_info("Ignoring excess node %d (%lx:%lx)\n", nodeid,
+			pr_info("Ignoring excess node %d (%Lx:%Lx)\n", nodeid,
 				base, limit);
 			continue;
 		}
 
 		if (!limit) {
-			pr_info("Skipping node entry %d (base %lx)\n",
+			pr_info("Skipping node entry %d (base %Lx)\n",
 				i, base);
 			continue;
 		}
 		if ((base >> 8) & 3 || (limit >> 8) & 3) {
-			pr_err("Node %d using interleaving mode %lx/%lx\n",
+			pr_err("Node %d using interleaving mode %Lx/%Lx\n",
 				nodeid, (base >> 8) & 3, (limit >> 8) & 3);
 			return -EINVAL;
 		}
@@ -150,19 +151,19 @@ int __init amd_numa_init(void)
 			continue;
 		}
 		if (limit < base) {
-			pr_err("Node %d bogus settings %lx-%lx.\n",
+			pr_err("Node %d bogus settings %Lx-%Lx.\n",
 				nodeid, base, limit);
 			continue;
 		}
 
 		/* Could sort here, but pun for now. Should not happen anyroads. */
 		if (prevbase > base) {
-			pr_err("Node map not sorted %lx,%lx\n",
+			pr_err("Node map not sorted %Lx,%Lx\n",
 				prevbase, base);
 			return -EINVAL;
 		}
 
-		pr_info("Node %d MemBase %016lx Limit %016lx\n",
+		pr_info("Node %d MemBase %016Lx Limit %016Lx\n",
 			nodeid, base, limit);
 
 		prevbase = base;
