x86, amd-nb: Complete the rename of AMD NB and related code
Not only was the naming of the files confusing, it was even more so for
the function and variable names.

Rename the K8 northbridge (NB) and NUMA code that is also used on other
AMD platforms. This also renames the CONFIG_K8_NUMA option to
CONFIG_AMD_NUMA and the related file k8topology_64.c to
amdtopology_64.c. No functional changes intended.

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Hans Rosenfeld authored and Borislav Petkov committed Nov 18, 2010
1 parent e53beac commit eec1d4f
Showing 12 changed files with 119 additions and 119 deletions.
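
Before the per-file diffs, a minimal sketch of how a caller of the renamed interface would look after this patch. This is hypothetical illustration code, not part of the commit: the function example_count_amd_nbs() and its pr_info() output are invented for the example, while cache_amd_northbridges() and the amd_northbridges descriptor are the renamed symbols declared in amd_nb.h below.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/amd_nb.h>

/* Hypothetical caller: cache_amd_northbridges() replaces
 * cache_k8_northbridges(), and amd_northbridges replaces k8_northbridges. */
static int __init example_count_amd_nbs(void)
{
	int i;

	if (cache_amd_northbridges() < 0)
		return -ENODEV;

	for (i = 0; i < amd_northbridges.num; i++) {
		struct pci_dev *misc = amd_northbridges.nb_misc[i];

		if (misc)
			pr_info("AMD NB %d: misc device %s\n", i, pci_name(misc));
	}

	return 0;
}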
arch/x86/Kconfig: 6 additions & 6 deletions
@@ -1141,16 +1141,16 @@ config NUMA
comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)

config K8_NUMA
config AMD_NUMA
def_bool y
prompt "Old style AMD Opteron NUMA detection"
depends on X86_64 && NUMA && PCI
---help---
Enable K8 NUMA node topology detection. You should say Y here if
you have a multi processor AMD K8 system. This uses an old
method to read the NUMA configuration directly from the builtin
Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
instead, which also takes priority if both are compiled in.
Enable AMD NUMA node topology detection. You should say Y here if
you have a multi processor AMD system. This uses an old method to
read the NUMA configuration directly from the builtin Northbridge
of Opteron. It is recommended to use X86_64_ACPI_NUMA instead,
which also takes priority if both are compiled in.

config X86_64_ACPI_NUMA
def_bool y
arch/x86/include/asm/amd_nb.h: 12 additions & 12 deletions
@@ -3,33 +3,33 @@

#include <linux/pci.h>

extern struct pci_device_id k8_nb_ids[];
extern struct pci_device_id amd_nb_ids[];
struct bootnode;

extern int early_is_k8_nb(u32 value);
extern int cache_k8_northbridges(void);
extern void k8_flush_garts(void);
extern int k8_get_nodes(struct bootnode *nodes);
extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int k8_scan_nodes(void);
extern int early_is_amd_nb(u32 value);
extern int cache_amd_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_get_nodes(struct bootnode *nodes);
extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int amd_scan_nodes(void);

struct k8_northbridge_info {
struct amd_northbridge_info {
u16 num;
u8 gart_supported;
struct pci_dev **nb_misc;
};
extern struct k8_northbridge_info k8_northbridges;
extern struct amd_northbridge_info amd_northbridges;

#ifdef CONFIG_AMD_NB

static inline struct pci_dev *node_to_k8_nb_misc(int node)
static inline struct pci_dev *node_to_amd_nb_misc(int node)
{
return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL;
}

#else

static inline struct pci_dev *node_to_k8_nb_misc(int node)
static inline struct pci_dev *node_to_amd_nb_misc(int node)
{
return NULL;
}
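For the CONFIG_AMD_NB case above, a minimal sketch of the renamed per-node accessor. This is hypothetical illustration code, not part of the commit: example_read_nb_misc() is invented, and the 0x9c offset is simply the GART flush-word register that amd_nb.c reads elsewhere in this diff.

#include <linux/pci.h>
#include <asm/amd_nb.h>

/* Hypothetical helper: read a config dword from the misc (function 3)
 * device of a node's northbridge; nodes without one return 0. */
static u32 example_read_nb_misc(int node)
{
	struct pci_dev *misc = node_to_amd_nb_misc(node);
	u32 val = 0;

	if (misc)
		pci_read_config_dword(misc, 0x9c, &val);

	return val;
}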
arch/x86/kernel/amd_nb.c: 36 additions & 36 deletions
@@ -12,95 +12,95 @@

static u32 *flush_words;

struct pci_device_id k8_nb_ids[] = {
struct pci_device_id amd_nb_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
{}
};
EXPORT_SYMBOL(k8_nb_ids);
EXPORT_SYMBOL(amd_nb_ids);

struct k8_northbridge_info k8_northbridges;
EXPORT_SYMBOL(k8_northbridges);
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
{
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (!dev)
break;
} while (!pci_match_id(&k8_nb_ids[0], dev));
} while (!pci_match_id(&amd_nb_ids[0], dev));
return dev;
}

int cache_k8_northbridges(void)
int cache_amd_northbridges(void)
{
int i;
struct pci_dev *dev;

if (k8_northbridges.num)
if (amd_northbridges.num)
return 0;

dev = NULL;
while ((dev = next_k8_northbridge(dev)) != NULL)
k8_northbridges.num++;
while ((dev = next_amd_northbridge(dev)) != NULL)
amd_northbridges.num++;

/* some CPU families (e.g. family 0x11) do not support GART */
if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
boot_cpu_data.x86 == 0x15)
k8_northbridges.gart_supported = 1;
amd_northbridges.gart_supported = 1;

k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
sizeof(void *), GFP_KERNEL);
if (!k8_northbridges.nb_misc)
if (!amd_northbridges.nb_misc)
return -ENOMEM;

if (!k8_northbridges.num) {
k8_northbridges.nb_misc[0] = NULL;
if (!amd_northbridges.num) {
amd_northbridges.nb_misc[0] = NULL;
return 0;
}

if (k8_northbridges.gart_supported) {
flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
if (amd_northbridges.gart_supported) {
flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
GFP_KERNEL);
if (!flush_words) {
kfree(k8_northbridges.nb_misc);
kfree(amd_northbridges.nb_misc);
return -ENOMEM;
}
}

dev = NULL;
i = 0;
while ((dev = next_k8_northbridge(dev)) != NULL) {
k8_northbridges.nb_misc[i] = dev;
if (k8_northbridges.gart_supported)
while ((dev = next_amd_northbridge(dev)) != NULL) {
amd_northbridges.nb_misc[i] = dev;
if (amd_northbridges.gart_supported)
pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
}
k8_northbridges.nb_misc[i] = NULL;
amd_northbridges.nb_misc[i] = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(cache_k8_northbridges);
EXPORT_SYMBOL_GPL(cache_amd_northbridges);

/* Ignores subdevice/subvendor but as far as I can figure out
they're useless anyways */
int __init early_is_k8_nb(u32 device)
int __init early_is_amd_nb(u32 device)
{
struct pci_device_id *id;
u32 vendor = device & 0xffff;
device >>= 16;
for (id = k8_nb_ids; id->vendor; id++)
for (id = amd_nb_ids; id->vendor; id++)
if (vendor == id->vendor && device == id->device)
return 1;
return 0;
}

void k8_flush_garts(void)
void amd_flush_garts(void)
{
int flushed, i;
unsigned long flags;
static DEFINE_SPINLOCK(gart_lock);

if (!k8_northbridges.gart_supported)
if (!amd_northbridges.gart_supported)
return;

/* Avoid races between AGP and IOMMU. In theory it's not needed
@@ -109,16 +109,16 @@ void k8_flush_garts(void)
that it doesn't matter to serialize more. -AK */
spin_lock_irqsave(&gart_lock, flags);
flushed = 0;
for (i = 0; i < k8_northbridges.num; i++) {
pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
for (i = 0; i < amd_northbridges.num; i++) {
pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
flush_words[i]|1);
flushed++;
}
for (i = 0; i < k8_northbridges.num; i++) {
for (i = 0; i < amd_northbridges.num; i++) {
u32 w;
/* Make sure the hardware actually executed the flush*/
for (;;) {
pci_read_config_dword(k8_northbridges.nb_misc[i],
pci_read_config_dword(amd_northbridges.nb_misc[i],
0x9c, &w);
if (!(w & 1))
break;
@@ -129,19 +129,19 @@ void k8_flush_garts(void)
if (!flushed)
printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(k8_flush_garts);
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_k8_nbs(void)
static __init int init_amd_nbs(void)
{
int err = 0;

err = cache_k8_northbridges();
err = cache_amd_northbridges();

if (err < 0)
printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_k8_nbs);
fs_initcall(init_amd_nbs);
arch/x86/kernel/aperture_64.c: 5 additions & 5 deletions
@@ -206,7 +206,7 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
* Do an PCI bus scan by hand because we're running before the PCI
* subsystem.
*
* All K8 AGP bridges are AGPv3 compliant, so we can do this scan
* All AMD AGP bridges are AGPv3 compliant, so we can do this scan
* generically. It's probably overkill to always scan all slots because
* the AGP bridges should be always an own bus on the HT hierarchy,
* but do it here for future safety.
@@ -303,7 +303,7 @@ void __init early_gart_iommu_check(void)
dev_limit = bus_dev_ranges[i].dev_limit;

for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;

ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -358,7 +358,7 @@ void __init early_gart_iommu_check(void)
dev_limit = bus_dev_ranges[i].dev_limit;

for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;

ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -400,7 +400,7 @@ int __init gart_iommu_hole_init(void)
dev_limit = bus_dev_ranges[i].dev_limit;

for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;

iommu_detected = 1;
@@ -518,7 +518,7 @@ int __init gart_iommu_hole_init(void)
dev_base = bus_dev_ranges[i].dev_base;
dev_limit = bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;

write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
arch/x86/kernel/cpu/intel_cacheinfo.c: 3 additions & 3 deletions
@@ -333,7 +333,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
{
struct amd_l3_cache *l3;
struct pci_dev *dev = node_to_k8_nb_misc(node);
struct pci_dev *dev = node_to_amd_nb_misc(node);

l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
if (!l3) {
@@ -370,15 +370,15 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
return;

/* not in virtualized environments */
if (k8_northbridges.num == 0)
if (amd_northbridges.num == 0)
return;

/*
* Strictly speaking, the amount in @size below is leaked since it is
* never freed but this is done only on shutdown so it doesn't matter.
*/
if (!l3_caches) {
int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);

l3_caches = kzalloc(size, GFP_ATOMIC);
if (!l3_caches)
(Diffs for the remaining 7 changed files are not shown here.)
