Merge tag 'x86_cpu_for_v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Borislav Petkov:
 "Only AMD-specific changes this time:

   - Save the AMD physical die ID into cpuinfo_x86.cpu_die_id and
     convert all code to use it (Yazen Ghannam)

   - Remove a dead and unused TSEG region remapping workaround on AMD
     (Arvind Sankar)"

* tag 'x86_cpu_for_v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu/amd: Remove dead code for TSEG region remapping
  x86/topology: Set cpu_die_id only if DIE_TYPE found
  EDAC/mce_amd: Use struct cpuinfo_x86.cpu_die_id for AMD NodeId
  x86/CPU/AMD: Remove amd_get_nb_id()
  x86/CPU/AMD: Save AMD NodeId as cpu_die_id
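
The conversion performed by these commits is mechanical: callers that used to ask the AMD-specific helper amd_get_nb_id() for the node ID now go through the generic topology_die_id() macro, which reads the cpu_die_id field the series fills in from the AMD NodeId. A minimal sketch of the resulting pattern (the wrapper function below is illustrative, not from the tree):

/* Illustrative only: resolve the AMD northbridge for a CPU via its die ID. */
#include <linux/topology.h>	/* topology_die_id() */
#include <asm/amd_nb.h>		/* node_to_amd_nb(), struct amd_northbridge */

static struct amd_northbridge *example_nb_for_cpu(int cpu)
{
	/* Before this series: node_to_amd_nb(amd_get_nb_id(cpu)) */
	return node_to_amd_nb(topology_die_id(cpu));
}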
torvalds committed Dec 14, 2020
2 parents 5583ff6 + 262bd57 commit 0d71297
Showing 13 changed files with 44 additions and 80 deletions.
9 changes: 9 additions & 0 deletions Documentation/x86/topology.rst
@@ -41,6 +41,8 @@ Package
Packages contain a number of cores plus shared resources, e.g. DRAM
controller, shared caches etc.

Modern systems may also use the term 'Die' for package.

AMD nomenclature for package is 'Node'.

Package-related topology information in the kernel:
@@ -53,11 +55,18 @@ Package-related topology information in the kernel:

The number of dies in a package. This information is retrieved via CPUID.

- cpuinfo_x86.cpu_die_id:

The physical ID of the die. This information is retrieved via CPUID.

- cpuinfo_x86.phys_proc_id:

The physical ID of the package. This information is retrieved via CPUID
and deduced from the APIC IDs of the cores in the package.

Modern systems use this value for the socket. There may be multiple
packages within a socket. This value may differ from cpu_die_id.

- cpuinfo_x86.logical_proc_id:

The logical ID of the package. As we do not trust BIOSes to enumerate the
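
For reference, these per-package and per-die IDs are normally consumed through the existing topology accessors rather than by reading cpuinfo_x86 directly. A minimal sketch (illustrative helper built on the standard kernel macros):

/* Illustrative only: report a CPU's package and die using the topology macros. */
#include <linux/topology.h>
#include <linux/printk.h>

static void example_report_topology(int cpu)
{
	int pkg = topology_physical_package_id(cpu);	/* cpuinfo_x86.phys_proc_id */
	int die = topology_die_id(cpu);			/* cpuinfo_x86.cpu_die_id */

	pr_info("CPU%d: package %d, die %d\n", cpu, pkg, die);
}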
2 changes: 1 addition & 1 deletion arch/x86/events/amd/core.c
@@ -538,7 +538,7 @@ static void amd_pmu_cpu_starting(int cpu)
if (!x86_pmu.amd_nb_constraints)
return;

nb_id = amd_get_nb_id(cpu);
nb_id = topology_die_id(cpu);
WARN_ON_ONCE(nb_id == BAD_APICID);

for_each_online_cpu(i) {
4 changes: 2 additions & 2 deletions arch/x86/include/asm/cacheinfo.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_CACHEINFO_H
#define _ASM_X86_CACHEINFO_H

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);

#endif /* _ASM_X86_CACHEINFO_H */
2 changes: 0 additions & 2 deletions arch/x86/include/asm/processor.h
@@ -813,10 +813,8 @@ extern int set_tsc_mode(unsigned int val);
DECLARE_PER_CPU(u64, msr_misc_features_shadow);

#ifdef CONFIG_CPU_SUP_AMD
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
static inline u16 amd_get_nb_id(int cpu) { return 0; }
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
#endif

4 changes: 2 additions & 2 deletions arch/x86/kernel/amd_nb.c
@@ -384,7 +384,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)

int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
unsigned int mask;

if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
@@ -398,7 +398,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
int amd_set_subcaches(int cpu, unsigned long mask)
{
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
unsigned int reg;
int cuid;

38 changes: 5 additions & 33 deletions arch/x86/kernel/cpu/amd.c
@@ -23,7 +23,6 @@

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"
@@ -330,7 +329,6 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
*/
static void amd_get_topology(struct cpuinfo_x86 *c)
{
u8 node_id;
int cpu = smp_processor_id();

/* get information required for multi-node processors */
@@ -340,7 +338,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)

cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

node_id = ecx & 0xff;
c->cpu_die_id = ecx & 0xff;

if (c->x86 == 0x15)
c->cu_id = ebx & 0xff;
@@ -360,15 +358,15 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);

cacheinfo_amd_init_llc_id(c, cpu, node_id);
cacheinfo_amd_init_llc_id(c, cpu);

} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;

rdmsrl(MSR_FAM10H_NODE_ID, value);
node_id = value & 7;
c->cpu_die_id = value & 7;

per_cpu(cpu_llc_id, cpu) = node_id;
per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
} else
return;
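
The NodeId itself is still read the same way; only where it is stored changes. As a standalone sketch of the lookup on CPUs with topology extensions (illustrative helper, assuming CPUID leaf 0x8000001e is present):

/* Illustrative only: fetch the AMD NodeId from CPUID Fn8000_001E ECX[7:0]. */
#include <asm/processor.h>	/* cpuid() */

static u8 example_amd_node_id(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

	/* This series stores the value in cpuinfo_x86.cpu_die_id. */
	return ecx & 0xff;
}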

Expand All @@ -393,7 +391,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}

static void amd_detect_ppin(struct cpuinfo_x86 *c)
@@ -425,12 +423,6 @@ static void amd_detect_ppin(struct cpuinfo_x86 *c)
clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
}

u16 amd_get_nb_id(int cpu)
{
return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
return nodes_per_socket;
@@ -516,26 +508,6 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
if (c->x86 >= 0xf) {
unsigned long long tseg;

/*
* Split up direct mapping around the TSEG SMM area.
* Don't do it for gbpages because there seems very little
* benefit in doing so.
*/
if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
unsigned long pfn = tseg >> PAGE_SHIFT;

pr_debug("tseg: %010llx\n", tseg);
if (pfn_range_is_mapped(pfn, pfn + 1))
set_memory_4k((unsigned long)__va(tseg), 1);
}
}
#endif

if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

if (c->x86 > 0x10 ||
8 changes: 4 additions & 4 deletions arch/x86/kernel/cpu/cacheinfo.c
@@ -580,7 +580,7 @@ static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
if (index < 3)
return;

node = amd_get_nb_id(smp_processor_id());
node = topology_die_id(smp_processor_id());
this_leaf->nb = node_to_amd_nb(node);
if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
amd_calc_l3_indices(this_leaf->nb);
@@ -646,7 +646,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
return i;
}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
{
/*
* We may have multiple LLCs if L3 caches exist, so check if we
@@ -657,7 +657,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)

if (c->x86 < 0x17) {
/* LLC is at the node level. */
per_cpu(cpu_llc_id, cpu) = node_id;
per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
/*
* LLC is at the core complex level.
@@ -684,7 +684,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
}
}

void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
{
/*
* We may have multiple LLCs if L3 caches exist, so check if we
31 changes: 5 additions & 26 deletions arch/x86/kernel/cpu/hygon.c
@@ -14,9 +14,6 @@
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
#ifdef CONFIG_X86_64
# include <asm/set_memory.h>
#endif

#include "cpu.h"

@@ -65,7 +62,6 @@ static void hygon_get_topology_early(struct cpuinfo_x86 *c)
*/
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
u8 node_id;
int cpu = smp_processor_id();

/* get information required for multi-node processors */
@@ -75,7 +71,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)

cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

node_id = ecx & 0xff;
c->cpu_die_id = ecx & 0xff;

c->cpu_core_id = ebx & 0xff;

@@ -93,14 +89,14 @@
/* Socket ID is ApicId[6] for these processors. */
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;

cacheinfo_hygon_init_llc_id(c, cpu, node_id);
cacheinfo_hygon_init_llc_id(c, cpu);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;

rdmsrl(MSR_FAM10H_NODE_ID, value);
node_id = value & 7;
c->cpu_die_id = value & 7;

per_cpu(cpu_llc_id, cpu) = node_id;
per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
} else
return;

@@ -123,7 +119,7 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c)
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}

static void srat_detect_node(struct cpuinfo_x86 *c)
@@ -204,23 +200,6 @@ static void early_init_hygon_mc(struct cpuinfo_x86 *c)

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
unsigned long long tseg;

/*
* Split up direct mapping around the TSEG SMM area.
* Don't do it for gbpages because there seems very little
* benefit in doing so.
*/
if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
unsigned long pfn = tseg >> PAGE_SHIFT;

pr_debug("tseg: %010llx\n", tseg);
if (pfn_range_is_mapped(pfn, pfn + 1))
set_memory_4k((unsigned long)__va(tseg), 1);
}
#endif

if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
u64 val;

4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/mce/amd.c
@@ -1341,7 +1341,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
return -ENODEV;

if (is_shared_bank(bank)) {
nb = node_to_amd_nb(amd_get_nb_id(cpu));
nb = node_to_amd_nb(topology_die_id(cpu));

/* threshold descriptor already initialized on this node? */
if (nb && nb->bank4) {
@@ -1445,7 +1445,7 @@ static void threshold_remove_bank(struct threshold_bank *bank)
* The last CPU on this node using the shared bank is going
* away, remove that bank now.
*/
nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
nb = node_to_amd_nb(topology_die_id(smp_processor_id()));
nb->bank4 = NULL;
}

4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/mce/inject.c
@@ -522,8 +522,8 @@ static void do_inject(void)
if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
b == 4 &&
boot_cpu_data.x86 < 0x17) {
toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
cpu = get_nbc_for_node(amd_get_nb_id(cpu));
toggle_nb_mca_mst_cpu(topology_die_id(cpu));
cpu = get_nbc_for_node(topology_die_id(cpu));
}

get_online_cpus();
10 changes: 8 additions & 2 deletions arch/x86/kernel/cpu/topology.c
@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
unsigned int die_select_mask, die_level_siblings;
bool die_level_present = false;
int leaf;

leaf = detect_extended_topology_leaf(c);
@@ -126,6 +127,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) {
die_level_present = true;
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
@@ -139,8 +141,12 @@

c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
ht_mask_width) & core_select_mask;
c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
core_plus_mask_width) & die_select_mask;

if (die_level_present) {
c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
core_plus_mask_width) & die_select_mask;
}

c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
die_plus_mask_width);
/*
4 changes: 2 additions & 2 deletions drivers/edac/amd64_edac.c
@@ -1136,7 +1136,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
u16 mce_nid = amd_get_nb_id(m->extcpu);
u16 mce_nid = topology_die_id(m->extcpu);
struct mem_ctl_info *mci;
u8 start_bit = 1;
u8 end_bit = 47;
@@ -3047,7 +3047,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
int cpu;

for_each_online_cpu(cpu)
if (amd_get_nb_id(cpu) == nid)
if (topology_die_id(cpu) == nid)
cpumask_set_cpu(cpu, mask);
}

4 changes: 2 additions & 2 deletions drivers/edac/mce_amd.c
@@ -869,7 +869,7 @@ static void decode_mc3_mce(struct mce *m)
static void decode_mc4_mce(struct mce *m)
{
unsigned int fam = x86_family(m->cpuid);
int node_id = amd_get_nb_id(m->extcpu);
int node_id = topology_die_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u8 offset = 0;
@@ -1003,7 +1003,7 @@ static void decode_smca_error(struct mce *m)
pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);

if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
decode_dram_ecc(cpu_to_node(m->extcpu), m);
decode_dram_ecc(topology_die_id(m->extcpu), m);
}

static inline void amd_decode_err_code(u16 ec)
