
Commit

Merge branch 'x86/amd-nb' into x86/urgent
Merge reason: This is one followup commit that was not in x86/mm - merge it via the urgent path

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar committed Mar 16, 2011
2 parents b87cf80 + 84fd1d3 commit 344c21c
Showing 7 changed files with 169 additions and 37 deletions.
arch/x86/include/asm/amd_nb.h: 16 changes (10 additions, 6 deletions)
@@ -9,15 +9,17 @@ struct amd_nb_bus_dev_range {
 	u8 dev_limit;
 };
 
-extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
-extern int early_is_amd_nb(u32 value);
+extern bool early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, int);
 
 #ifdef CONFIG_NUMA_EMU
 extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
@@ -26,6 +28,7 @@ extern void amd_get_nodes(struct bootnode *nodes);
 
 struct amd_northbridge {
 	struct pci_dev *misc;
+	struct pci_dev *link;
 };
 
 struct amd_northbridge_info {
@@ -35,17 +38,18 @@ struct amd_northbridge_info {
 };
 extern struct amd_northbridge_info amd_northbridges;
 
-#define AMD_NB_GART			0x1
-#define AMD_NB_L3_INDEX_DISABLE		0x2
+#define AMD_NB_GART			BIT(0)
+#define AMD_NB_L3_INDEX_DISABLE		BIT(1)
+#define AMD_NB_L3_PARTITIONING		BIT(2)
 
 #ifdef CONFIG_AMD_NB
 
-static inline int amd_nb_num(void)
+static inline u16 amd_nb_num(void)
 {
 	return amd_northbridges.num;
 }
 
-static inline int amd_nb_has_feature(int feature)
+static inline bool amd_nb_has_feature(unsigned feature)
 {
 	return ((amd_northbridges.flags & feature) == feature);
 }
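
Note: the header changes above switch the northbridge capability flags to BIT() masks and tighten the helper types (u16 count, bool feature test). As a rough illustration of how these helpers fit together, here is a minimal sketch; it is not part of this commit, example_query_l3_partitioning() is a hypothetical caller, and it assumes CONFIG_AMD_NB=y:

/* Minimal illustrative sketch, not part of this commit. */
#include <linux/errno.h>
#include <asm/amd_nb.h>

static int example_query_l3_partitioning(void)
{
	/* Populates amd_northbridges (num, flags, per-node misc/link devices). */
	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	/* The BIT()-based flags are tested with amd_nb_has_feature(). */
	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -ENODEV;

	/* amd_nb_num() now returns a u16 count of cached northbridges. */
	return amd_nb_num();
}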
arch/x86/kernel/amd_nb.c: 100 changes (88 additions, 12 deletions)
@@ -12,14 +12,19 @@
 
 static u32 *flush_words;
 
-struct pci_device_id amd_nb_misc_ids[] = {
+const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+static struct pci_device_id amd_nb_link_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
+	{}
+};
+
 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 	{ 0x00, 0x18, 0x20 },
 	{ 0xff, 0x00, 0x20 },
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
 static struct pci_dev *next_northbridge(struct pci_dev *dev,
-					struct pci_device_id *ids)
+					const struct pci_device_id *ids)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
@@ -43,9 +48,9 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,
 
 int amd_cache_northbridges(void)
 {
-	int i = 0;
+	u16 i = 0;
 	struct amd_northbridge *nb;
-	struct pci_dev *misc;
+	struct pci_dev *misc, *link;
 
 	if (amd_nb_num())
 		return 0;
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void)
 	amd_northbridges.nb = nb;
 	amd_northbridges.num = i;
 
-	misc = NULL;
+	link = misc = NULL;
 	for (i = 0; i != amd_nb_num(); i++) {
 		node_to_amd_nb(i)->misc = misc =
 			next_northbridge(misc, amd_nb_misc_ids);
+		node_to_amd_nb(i)->link = link =
+			next_northbridge(link, amd_nb_link_ids);
 	}
 
 	/* some CPU families (e.g. family 0x11) do not support GART */
@@ -85,26 +92,95 @@ int amd_cache_northbridges(void)
 	     boot_cpu_data.x86_mask >= 0x1))
 		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 
+	if (boot_cpu_data.x86 == 0x15)
+		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+
+	/* L3 cache partitioning is supported on family 0x15 */
+	if (boot_cpu_data.x86 == 0x15)
+		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
 
-/* Ignores subdevice/subvendor but as far as I can figure out
-   they're useless anyways */
-int __init early_is_amd_nb(u32 device)
+/*
+ * Ignores subdevice/subvendor but as far as I can figure out
+ * they're useless anyways
+ */
+bool __init early_is_amd_nb(u32 device)
 {
-	struct pci_device_id *id;
+	const struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 
 	device >>= 16;
 	for (id = amd_nb_misc_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
-			return 1;
-	return 0;
+			return true;
+	return false;
 }
 
+int amd_get_subcaches(int cpu)
+{
+	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+	unsigned int mask;
+	int cuid = 0;
+
+	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		return 0;
+
+	pci_read_config_dword(link, 0x1d4, &mask);
+
+#ifdef CONFIG_SMP
+	cuid = cpu_data(cpu).compute_unit_id;
+#endif
+	return (mask >> (4 * cuid)) & 0xf;
+}
+
+int amd_set_subcaches(int cpu, int mask)
+{
+	static unsigned int reset, ban;
+	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+	unsigned int reg;
+	int cuid = 0;
+
+	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
+		return -EINVAL;
+
+	/* if necessary, collect reset state of L3 partitioning and BAN mode */
+	if (reset == 0) {
+		pci_read_config_dword(nb->link, 0x1d4, &reset);
+		pci_read_config_dword(nb->misc, 0x1b8, &ban);
+		ban &= 0x180000;
+	}
+
+	/* deactivate BAN mode if any subcaches are to be disabled */
+	if (mask != 0xf) {
+		pci_read_config_dword(nb->misc, 0x1b8, &reg);
+		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
+	}
+
+#ifdef CONFIG_SMP
+	cuid = cpu_data(cpu).compute_unit_id;
+#endif
+	mask <<= 4 * cuid;
+	mask |= (0xf ^ (1 << cuid)) << 26;
+
+	pci_write_config_dword(nb->link, 0x1d4, mask);
+
+	/* reset BAN mode if L3 partitioning returned to reset state */
+	pci_read_config_dword(nb->link, 0x1d4, &reg);
+	if (reg == reset) {
+		pci_read_config_dword(nb->misc, 0x1b8, &reg);
+		reg &= ~0x180000;
+		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
+	}
+
+	return 0;
+}
+
-int amd_cache_gart(void)
+static int amd_cache_gart(void)
 {
-	int i;
+	u16 i;
 
 	if (!amd_nb_has_feature(AMD_NB_GART))
 		return 0;
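
Note: in the hunks above, amd_get_subcaches() reads register 0x1d4 on the node's link PCI device and treats it as an array of 4-bit subcache-enable fields, one nibble per compute unit, while amd_set_subcaches() shifts a new nibble into place before writing it back (plus BAN-mode handling via misc register 0x1b8). A small standalone sketch of just that nibble arithmetic follows; it is illustrative only, the example_* helpers are hypothetical and not kernel code:

/* Minimal illustrative sketch, not part of this commit. */
#include <stdio.h>

/* Extract the 4-bit subcache-enable field of compute unit 'cuid',
 * as amd_get_subcaches() does after reading register 0x1d4. */
static unsigned int example_get_nibble(unsigned int reg, int cuid)
{
	return (reg >> (4 * cuid)) & 0xf;
}

/* Position a new 4-bit mask for 'cuid', mirroring the "mask <<= 4 * cuid"
 * step in amd_set_subcaches() (the high update bits are left out here). */
static unsigned int example_put_nibble(unsigned int mask, int cuid)
{
	return (mask & 0xf) << (4 * cuid);
}

int main(void)
{
	unsigned int reg = 0xffff;	/* all subcaches enabled for CUs 0-3 */

	printf("CU1 field: 0x%x\n", example_get_nibble(reg, 1));	/* 0xf */
	printf("CU2 write: 0x%x\n", example_put_nibble(0x3, 2));	/* 0x300 */
	return 0;
}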
arch/x86/kernel/cpu/amd.c: 10 changes (7 additions, 3 deletions)
@@ -261,7 +261,7 @@ static int __cpuinit nearby_node(int apicid)
 #ifdef CONFIG_X86_HT
 static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 {
-	u32 nodes;
+	u32 nodes, cores_per_cu = 1;
 	u8 node_id;
 	int cpu = smp_processor_id();
 
@@ -276,6 +276,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 		/* get compute unit information */
 		smp_num_siblings = ((ebx >> 8) & 3) + 1;
 		c->compute_unit_id = ebx & 0xff;
+		cores_per_cu += ((ebx >> 8) & 3);
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
@@ -288,15 +289,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 	/* fixup multi-node processor information */
 	if (nodes > 1) {
 		u32 cores_per_node;
+		u32 cus_per_node;
 
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 		cores_per_node = c->x86_max_cores / nodes;
+		cus_per_node = cores_per_node / cores_per_cu;
 
 		/* store NodeID, use llc_shared_map to store sibling info */
 		per_cpu(cpu_llc_id, cpu) = node_id;
 
-		/* core id to be in range from 0 to (cores_per_node - 1) */
-		c->cpu_core_id = c->cpu_core_id % cores_per_node;
+		/* core id has to be in the [0 .. cores_per_node - 1] range */
+		c->cpu_core_id %= cores_per_node;
+		c->compute_unit_id %= cus_per_node;
 	}
 }
 #endif
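
Note: the cpu/amd.c change above normalizes IDs on multi-node processors: cores_per_node = x86_max_cores / nodes, cus_per_node = cores_per_node / cores_per_cu, and both cpu_core_id and compute_unit_id are folded into their per-node ranges with a modulo. A standalone sketch of that arithmetic with made-up numbers follows; it is illustrative only and not part of the commit:

/* Minimal illustrative sketch, not part of this commit. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical multi-node CPU: 16 cores total, 2 nodes,
	 * 2 cores per compute unit (as derived from CPUID in the hunk above). */
	unsigned int x86_max_cores = 16, nodes = 2, cores_per_cu = 2;

	unsigned int cores_per_node = x86_max_cores / nodes;		/* 8 */
	unsigned int cus_per_node = cores_per_node / cores_per_cu;	/* 4 */

	/* A core on the second node: global core id 11, compute unit 5. */
	unsigned int cpu_core_id = 11, compute_unit_id = 5;

	cpu_core_id %= cores_per_node;		/* 11 % 8 = 3 */
	compute_unit_id %= cus_per_node;	/* 5 % 4 = 1 */

	printf("core %u, compute unit %u within its node\n",
	       cpu_core_id, compute_unit_id);
	return 0;
}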
(Diffs for the remaining 4 changed files are not shown.)
