+// SPDX-License-Identifier: GPL-2.0-only
/*
* Support Intel RAPL energy consumption counters
* Copyright (C) 2013 Google, Inc., Stephane Eranian
struct rapl_pmus {
struct pmu pmu;
- unsigned int maxpkg;
+ unsigned int maxdie;
struct rapl_pmu *pmus[];
};
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
- unsigned int pkgid = topology_logical_package_id(cpu);
+ unsigned int dieid = topology_logical_die_id(cpu);
/*
* The unsigned check also catches the '-1' return value for non-existent
* mappings in the topology map.
*/
- return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
+ return dieid < rapl_pmus->maxdie ? rapl_pmus->pmus[dieid] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
pmu->cpu = -1;
/* Find a new cpu to collect rapl events */
- target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
/* Migrate rapl events to the new target */
if (target < nr_cpu_ids) {
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
rapl_hrtimer_init(pmu);
- rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+ rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
}
/*
- * Check if there is an online cpu in the package which collects rapl
+ * Check if there is an online cpu on the die which collects rapl
* events already.
*/
- target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
+ target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
{
int i;
- for (i = 0; i < rapl_pmus->maxpkg; i++)
+ for (i = 0; i < rapl_pmus->maxdie; i++)
kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
static int __init init_rapl_pmus(void)
{
- int maxpkg = topology_max_packages();
+ int maxdie = topology_max_packages() * topology_max_die_per_package();
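+ /* e.g. two packages with two dies each leave room for four PMU pointers */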
size_t size;
- size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
+ size = sizeof(*rapl_pmus) + maxdie * sizeof(struct rapl_pmu *);
rapl_pmus = kzalloc(size, GFP_KERNEL);
if (!rapl_pmus)
return -ENOMEM;
- rapl_pmus->maxpkg = maxpkg;
+ rapl_pmus->maxdie = maxdie;
rapl_pmus->pmu.attr_groups = rapl_attr_groups;
rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
rapl_pmus->pmu.event_init = rapl_pmu_event_init;
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <asm/cpu_device_id.h>
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
- static int max_packages;
+ static int max_dies;
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
- unsigned int pkgid = topology_logical_package_id(cpu);
+ unsigned int dieid = topology_logical_die_id(cpu);
/*
* The unsigned check also catches the '-1' return value for non-existent
* mappings in the topology map.
*/
- return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
+ return dieid < max_dies ? pmu->boxes[dieid] : NULL;
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
uncore_pmu_init_hrtimer(box);
box->cpu = -1;
box->pci_phys_id = -1;
- box->pkgid = -1;
+ box->dieid = -1;
/* set default hrtimer timeout */
box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
- int pkg;
+ int die;
- for (pkg = 0; pkg < max_packages; pkg++)
- kfree(pmu->boxes[pkg]);
+ for (die = 0; die < max_dies; die++)
+ kfree(pmu->boxes[die]);
kfree(pmu->boxes);
}
if (!pmus)
return -ENOMEM;
- size = max_packages * sizeof(struct intel_uncore_box *);
+ size = max_dies * sizeof(struct intel_uncore_box *);
for (i = 0; i < type->num_boxes; i++) {
pmus[i].func_id = setid ? i : -1;
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu = NULL;
struct intel_uncore_box *box;
- int phys_id, pkg, ret;
+ int phys_id, die, ret;
phys_id = uncore_pcibus_to_physid(pdev->bus);
if (phys_id < 0)
return -ENODEV;
- pkg = topology_phys_to_logical_pkg(phys_id);
- if (pkg < 0)
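+ /*
+ * With more than one die per package the physical id is used as the
+ * index directly; otherwise it is mapped to the logical package id.
+ */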
+ die = (topology_max_die_per_package() > 1) ? phys_id :
+ topology_phys_to_logical_pkg(phys_id);
+ if (die < 0)
return -EINVAL;
if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
- uncore_extra_pci_dev[pkg].dev[idx] = pdev;
+ uncore_extra_pci_dev[die].dev[idx] = pdev;
pci_set_drvdata(pdev, NULL);
return 0;
}
pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
}
- if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
+ if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
return -EINVAL;
box = uncore_alloc_box(type, NUMA_NO_NODE);
atomic_inc(&box->refcnt);
box->pci_phys_id = phys_id;
- box->pkgid = pkg;
+ box->dieid = die;
box->pci_dev = pdev;
box->pmu = pmu;
uncore_box_init(box);
pci_set_drvdata(pdev, box);
- pmu->boxes[pkg] = box;
+ pmu->boxes[die] = box;
if (atomic_inc_return(&pmu->activeboxes) > 1)
return 0;
ret = uncore_pmu_register(pmu);
if (ret) {
pci_set_drvdata(pdev, NULL);
- pmu->boxes[pkg] = NULL;
+ pmu->boxes[die] = NULL;
uncore_box_exit(box);
kfree(box);
}
{
struct intel_uncore_box *box;
struct intel_uncore_pmu *pmu;
- int i, phys_id, pkg;
+ int i, phys_id, die;
phys_id = uncore_pcibus_to_physid(pdev->bus);
box = pci_get_drvdata(pdev);
if (!box) {
- pkg = topology_phys_to_logical_pkg(phys_id);
+ die = (topology_max_die_per_package() > 1) ? phys_id :
+ topology_phys_to_logical_pkg(phys_id);
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
- if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
- uncore_extra_pci_dev[pkg].dev[i] = NULL;
+ if (uncore_extra_pci_dev[die].dev[i] == pdev) {
+ uncore_extra_pci_dev[die].dev[i] = NULL;
break;
}
}
return;
pci_set_drvdata(pdev, NULL);
- pmu->boxes[box->pkgid] = NULL;
+ pmu->boxes[box->dieid] = NULL;
if (atomic_dec_return(&pmu->activeboxes) == 0)
uncore_pmu_unregister(pmu);
uncore_box_exit(box);
size_t size;
int ret;
- size = max_packages * sizeof(struct pci_extra_dev);
+ size = max_dies * sizeof(struct pci_extra_dev);
uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
if (!uncore_extra_pci_dev) {
ret = -ENOMEM;
{
struct intel_uncore_pmu *pmu = type->pmus;
struct intel_uncore_box *box;
- int i, pkg;
+ int i, die;
- pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
+ die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
+ box = pmu->boxes[die];
if (!box)
continue;
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
- int i, pkg, target;
+ int i, die, target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
goto unref;
/* Find a new cpu to collect uncore events */
- target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
/* Migrate uncore events to the new target */
if (target < nr_cpu_ids)
unref:
/* Clear the references */
- pkg = topology_logical_package_id(cpu);
+ die = topology_logical_die_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
+ box = pmu->boxes[die];
if (box && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box);
}
}
static int allocate_boxes(struct intel_uncore_type **types,
- unsigned int pkg, unsigned int cpu)
+ unsigned int die, unsigned int cpu)
{
struct intel_uncore_box *box, *tmp;
struct intel_uncore_type *type;
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
- if (pmu->boxes[pkg])
+ if (pmu->boxes[die])
continue;
box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
goto cleanup;
box->pmu = pmu;
- box->pkgid = pkg;
+ box->dieid = die;
list_add(&box->active_list, &allocated);
}
}
/* Install them in the pmus */
list_for_each_entry_safe(box, tmp, &allocated, active_list) {
list_del_init(&box->active_list);
- box->pmu->boxes[pkg] = box;
+ box->pmu->boxes[die] = box;
}
return 0;
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
- int i, ret, pkg, target;
+ int i, ret, die, target;
- pkg = topology_logical_package_id(cpu);
- ret = allocate_boxes(types, pkg, cpu);
+ die = topology_logical_die_id(cpu);
+ ret = allocate_boxes(types, die, cpu);
if (ret)
return ret;
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
+ box = pmu->boxes[die];
if (box && atomic_inc_return(&box->refcnt) == 1)
uncore_box_init(box);
}
- * Check if there is an online cpu in the package
+ * Check if there is an online cpu on the die
* which collects uncore events already.
*/
- target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init),
{},
};
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return -ENODEV;
- max_packages = topology_max_packages();
+ max_dies = topology_max_packages() * topology_max_die_per_package();
uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
if (uncore_init->pci_init) {
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */
- u16  x86_max_cores;
+ u16 x86_max_cores;
u16 apicid;
u16 initial_apicid;
u16 x86_clflush_size;
u16 logical_proc_id;
/* Core id: */
u16 cpu_core_id;
+ u16 cpu_die_id;
+ u16 logical_die_id;
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_HYGON 9
-#define X86_VENDOR_NUM 10
+#define X86_VENDOR_ZHAOXIN 10
+#define X86_VENDOR_NUM 11
#define X86_VENDOR_UNKNOWN 0xff
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
+#define raw_smp_processor_id() this_cpu_read(cpu_number)
+#define __smp_processor_id() __this_cpu_read(cpu_number)
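+/*
+ * The double-underscore variant uses the non-volatile __this_cpu_read(),
+ * letting the compiler cache the result; this is only safe in contexts
+ * already known not to migrate between CPUs.
+ */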
#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
+// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
cr4_clear_bits(X86_CR4_UMIP);
}
+DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
+EXPORT_SYMBOL(cr_pinning);
+unsigned long cr4_pinned_bits __ro_after_init;
+EXPORT_SYMBOL(cr4_pinned_bits);
+
+/*
+ * Once CPU feature detection is finished (and boot params have been
+ * parsed), record any of the sensitive CR bits that are set, and
+ * enable CR pinning.
+ */
+static void __init setup_cr_pinning(void)
+{
+ unsigned long mask;
+
+ mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
+ cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+ static_key_enable(&cr_pinning.key);
+}
+
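+/*
+ * A minimal sketch (hypothetical name, not part of this hunk) of how a
+ * pinning-aware CR4 write can enforce the pinned bits: a write that
+ * drops a pinned bit is redone with the bit restored, with a warning.
+ */
+static void cr4_write_pinned_sketch(unsigned long val)
+{
+ if (static_branch_likely(&cr_pinning) &&
+     (val & cr4_pinned_bits) != cr4_pinned_bits) {
+ WARN_ONCE(1, "CR4 pinned bits went missing: %lx!?\n",
+ ~val & cr4_pinned_bits);
+ val |= cr4_pinned_bits;
+ }
+ asm volatile("mov %0,%%cr4" : : "r" (val) : "memory");
+}
+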
/*
* Protection Keys are not available in 32-bit mode.
*/
}
}
+static void init_cqm(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+ c->x86_cache_max_rmid = -1;
+ c->x86_cache_occ_scale = -1;
+ return;
+ }
+
+ /* will be overridden if occupancy monitoring exists */
+ c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+ cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+ cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+ u32 eax, ebx, ecx, edx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
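+ /* ECX = highest valid RMID, EBX = bytes per reported occupancy count */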
+ c->x86_cache_max_rmid = ecx;
+ c->x86_cache_occ_scale = ebx;
+ }
+}
+
void get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
c->x86_capability[CPUID_7_0_EBX] = ebx;
c->x86_capability[CPUID_7_ECX] = ecx;
c->x86_capability[CPUID_7_EDX] = edx;
+
+ /* Check valid sub-leaf index before accessing it */
+ if (eax >= 1) {
+ cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[CPUID_7_1_EAX] = eax;
+ }
}
/* Extended state features: level 0x0000000d */
c->x86_capability[CPUID_D_1_EAX] = eax;
}
- /* Additional Intel-defined flags: level 0x0000000F */
- if (c->cpuid_level >= 0x0000000F) {
-
- /* QoS sub-leaf, EAX=0Fh, ECX=0 */
- cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
- c->x86_capability[CPUID_F_0_EDX] = edx;
-
- if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
- /* will be overridden if occupancy monitoring exists */
- c->x86_cache_max_rmid = ebx;
-
- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
- cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
- c->x86_capability[CPUID_F_1_EDX] = edx;
-
- if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
- ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
- (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
- c->x86_cache_max_rmid = ecx;
- c->x86_cache_occ_scale = ebx;
- }
- } else {
- c->x86_cache_max_rmid = -1;
- c->x86_cache_occ_scale = -1;
- }
- }
-
/* AMD-defined flags: level 0x80000001 */
eax = cpuid_eax(0x80000000);
c->extended_cpuid_level = eax;
init_scattered_cpuid_features(c);
init_speculation_control(c);
+ init_cqm(c);
/*
* Clear/Set all flags overridden by options, after probe.
cpu, apicid, c->initial_apicid);
}
BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
+ BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
#else
c->logical_proc_id = 0;
#endif
enable_sep_cpu();
#endif
cpu_detect_tlb(&boot_cpu_data);
+ setup_cr_pinning();
}
void identify_secondary_cpu(struct cpuinfo_x86 *c)
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x86 SMP booting functions
*
* Pentium Pro and Pentium-II/Xeon MP machines.
* Original development of Linux SMP code supported by Caldera.
*
- * This code is released under the GNU General Public License version 2 or
- * later.
- *
* Fixes
* Felix Koop : NR_CPUS used properly
* Jose Renau : Handle single CPU case.
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
+ /* representing HT, core, and die siblings of each logical CPU */
+ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
+ EXPORT_PER_CPU_SYMBOL(cpu_die_map);
+
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
/* Per CPU bogomips and other parameters */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
+ static unsigned int logical_die __read_mostly;
/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;
*/
static void notrace start_secondary(void *unused)
{
+ unsigned long cr4 = __read_cr4();
+
/*
* Don't put *anything* except direct CPU state initialization
* before cpu_init(); SMP booting is so fragile that we want to
* limit the things done here to the most necessary things.
*/
if (boot_cpu_has(X86_FEATURE_PCID))
- __write_cr4(__read_cr4() | X86_CR4_PCIDE);
+ cr4 |= X86_CR4_PCIDE;
+ if (static_branch_likely(&cr_pinning))
+ cr4 |= cr4_pinned_bits;
+
+ __write_cr4(cr4);
#ifdef CONFIG_X86_32
/* switch away from the initial page table */
return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);
+ /**
+ * topology_phys_to_logical_die - Map a physical die id to logical
+ * @die_id: The physical die id as retrieved via CPUID
+ * @cur_cpu: A CPU in the same package as the die
+ *
+ * Returns logical die id or -1 if not found
+ */
+ int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
+ {
+ int cpu;
+ int proc_id = cpu_data(cur_cpu).phys_proc_id;
+
+ for_each_possible_cpu(cpu) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && c->cpu_die_id == die_id &&
+ c->phys_proc_id == proc_id)
+ return c->logical_die_id;
+ }
+ return -1;
+ }
+ EXPORT_SYMBOL(topology_phys_to_logical_die);
/**
* topology_update_package_map - Update the physical to logical package map
cpu_data(cpu).logical_proc_id = new;
return 0;
}
+ /**
+ * topology_update_die_map - Update the physical to logical die map
+ * @die: The die id as retrieved via CPUID
+ * @cpu: The cpu for which this is updated
+ */
+ int topology_update_die_map(unsigned int die, unsigned int cpu)
+ {
+ int new;
+
+ /* Already available somewhere? */
+ new = topology_phys_to_logical_die(die, cpu);
+ if (new >= 0)
+ goto found;
+
+ new = logical_die++;
+ if (new != die) {
+ pr_info("CPU %u Converting physical %u to logical die %u\n",
+ cpu, die, new);
+ }
+ found:
+ cpu_data(cpu).logical_die_id = new;
+ return 0;
+ }
void __init smp_store_boot_cpu_info(void)
{
*c = boot_cpu_data;
c->cpu_index = id;
topology_update_package_map(c->phys_proc_id, id);
+ topology_update_die_map(c->cpu_die_id, id);
c->initialized = true;
}
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
if (c->phys_proc_id == o->phys_proc_id &&
+ c->cpu_die_id == o->cpu_die_id &&
per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
if (c->cpu_core_id == o->cpu_core_id)
return topology_sane(c, o, "smt");
}
} else if (c->phys_proc_id == o->phys_proc_id &&
+ c->cpu_die_id == o->cpu_die_id &&
c->cpu_core_id == o->cpu_core_id) {
return topology_sane(c, o, "smt");
}
return false;
}
+ static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ {
+ if ((c->phys_proc_id == o->phys_proc_id) &&
+ (c->cpu_die_id == o->cpu_die_id))
+ return true;
+ return false;
+ }
+
#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
+ cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
c->booted_cores = 1;
return;
}
}
if (match_pkg(c, o) && !topology_same_node(c, o))
x86_has_numa_in_package = true;
+
+ if ((i == cpu) || (has_mp && match_die(c, o)))
+ link_mask(topology_die_cpumask, cpu, i);
}
threads = cpumask_weight(topology_sibling_cpumask(cpu));
physid_set_mask_of_physid(0, &phys_cpu_present_map);
cpumask_set_cpu(0, topology_sibling_cpumask(0));
cpumask_set_cpu(0, topology_core_cpumask(0));
+ cpumask_set_cpu(0, topology_die_cpumask(0));
}
/*
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
}
cpu_data(sibling).booted_cores--;
}
+ for_each_cpu(sibling, topology_die_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
for_each_cpu(sibling, topology_sibling_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
cpumask_clear(cpu_llc_shared_mask(cpu));
cpumask_clear(topology_sibling_cpumask(cpu));
cpumask_clear(topology_core_cpumask(cpu));
+ cpumask_clear(topology_die_cpumask(cpu));
c->cpu_core_id = 0;
c->booted_cores = 0;
cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
+// SPDX-License-Identifier: GPL-2.0-only
/*
* coretemp.c - Linux kernel module for hardware monitoring
*
* Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
*
* Inspired from many hwmon drivers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301 USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
struct device_attribute name_attr;
};
- /* Keep track of how many package pointers we allocated in init() */
- static int max_packages __read_mostly;
- /* Array of package pointers. Serialized by cpu hotplug lock */
- static struct platform_device **pkg_devices;
+ /* Keep track of how many zone pointers we allocated in init() */
+ static int max_zones __read_mostly;
+ /* Array of zone pointers. Serialized by cpu hotplug lock */
+ static struct platform_device **zone_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
- if (pkgid >= 0 && pkgid < max_packages)
- return pkg_devices[pkgid];
+ if (id >= 0 && id < max_zones)
+ return zone_devices[id];
return NULL;
}
struct device *dev = &pdev->dev;
struct platform_data *pdata;
- /* Initialize the per-package data structures */
+ /* Initialize the per-zone data structures */
pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
static struct platform_device *coretemp_device_add(unsigned int cpu)
{
- int err, pkgid = topology_logical_package_id(cpu);
+ int err, zoneid = topology_logical_die_id(cpu);
struct platform_device *pdev;
- if (pkgid < 0)
+ if (zoneid < 0)
return ERR_PTR(-ENOMEM);
- pdev = platform_device_alloc(DRVNAME, pkgid);
+ pdev = platform_device_alloc(DRVNAME, zoneid);
if (!pdev)
return ERR_PTR(-ENOMEM);
return ERR_PTR(err);
}
- pkg_devices[pkgid] = pdev;
+ zone_devices[zoneid] = pdev;
return pdev;
}
* the rest.
*/
if (cpumask_empty(&pd->cpumask)) {
- pkg_devices[topology_logical_package_id(cpu)] = NULL;
+ zone_devices[topology_logical_die_id(cpu)] = NULL;
platform_device_unregister(pdev);
return 0;
}
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
- max_packages = topology_max_packages();
- pkg_devices = kcalloc(max_packages, sizeof(struct platform_device *),
+ max_zones = topology_max_packages() * topology_max_die_per_package();
+ zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
GFP_KERNEL);
- if (!pkg_devices)
+ if (!zone_devices)
return -ENOMEM;
err = platform_driver_register(&coretemp_driver);
outdrv:
platform_driver_unregister(&coretemp_driver);
- kfree(pkg_devices);
+ kfree(zone_devices);
return err;
}
module_init(coretemp_init)
{
cpuhp_remove_state(coretemp_hp_online);
platform_driver_unregister(&coretemp_driver);
- kfree(pkg_devices);
+ kfree(zone_devices);
}
module_exit(coretemp_exit)
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Running Average Power Limit (RAPL) Driver
* Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
+ /* maximum rapl package domain name: package-%d-die-%d */
+ #define PACKAGE_DOMAIN_NAME_LENGTH 30
- /* Each physical package contains multiple domains, these are the common
+
+ /* Each rapl package contains multiple domains; these are the common
* data across RAPL domains within a package.
*/
struct rapl_package {
- unsigned int id; /* physical package/socket id */
+ unsigned int id; /* logical die id, equals physical id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
unsigned int power_unit;
int lead_cpu; /* one active cpu per package for access */
/* Track active cpus */
struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
};
struct rapl_defaults {
static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */
/* caller to ensure CPU hotplug lock is held */
- static struct rapl_package *find_package_by_id(int id)
+ static struct rapl_package *rapl_find_package_domain(int cpu)
{
+ int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
list_for_each_entry(rp, &rapl_packages, plist) {
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value);
- pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
- rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Core CPU %s energy=%dpJ, time=%dus, power=%duW\n",
+ rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0;
}
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value);
- pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
- rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Atom %s energy=%dpJ, time=%dus, power=%duW\n",
+ rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0;
}
u64 val;
for (dmn = 0; dmn < rp->nr_domains; dmn++) {
- pr_debug("update package %d domain %s data\n", rp->id,
+ pr_debug("update %s domain %s data\n", rp->name,
rp->domains[dmn].name);
/* exclude non-raw primitives */
for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
static int rapl_package_register_powercap(struct rapl_package *rp)
{
struct rapl_domain *rd;
- char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
int nr_pl, ret;
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
if (rd->id == RAPL_DOMAIN_PACKAGE) {
nr_pl = find_nr_power_limit(rd);
- pr_debug("register socket %d package domain %s\n",
- rp->id, rd->name);
- memset(dev_name, 0, sizeof(dev_name));
- snprintf(dev_name, sizeof(dev_name), "%s-%d",
- rd->name, rp->id);
+ pr_debug("register package domain %s\n", rp->name);
power_zone = powercap_register_zone(&rd->power_zone,
control_type,
- dev_name, NULL,
+ rp->name, NULL,
&zone_ops[rd->id],
nr_pl,
&constraint_ops);
if (IS_ERR(power_zone)) {
- pr_debug("failed to register package, %d\n",
- rp->id);
+ pr_debug("failed to register power zone %s\n",
+ rp->name);
return PTR_ERR(power_zone);
}
/* track parent zone in per package/socket data */
&constraint_ops);
if (IS_ERR(power_zone)) {
- pr_debug("failed to register power_zone, %d:%s:%s\n",
- rp->id, rd->name, dev_name);
+ pr_debug("failed to register power_zone, %s:%s\n",
+ rp->name, rd->name);
ret = PTR_ERR(power_zone);
goto err_cleanup;
}
* failed after the first domain setup.
*/
while (--rd >= rp->domains) {
- pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+ pr_debug("unregister %s domain %s\n", rp->name, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
rd->rpl[0].name = pl1_name;
rd->rpl[1].prim_id = PL2_ENABLE;
rd->rpl[1].name = pl2_name;
- rd->rp = find_package_by_id(0);
+ rd->rp = rapl_find_package_domain(0);
power_zone = powercap_register_zone(&rd->power_zone, control_type,
"psys", NULL,
/* check if the domain is locked by BIOS, ignore if MSR doesn't exist */
if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) {
if (val64) {
- pr_info("RAPL package %d domain %s locked by BIOS\n",
- rd->rp->id, rd->name);
+ pr_info("RAPL %s domain %s locked by BIOS\n",
+ rd->rp->name, rd->name);
rd->state |= DOMAIN_STATE_BIOS_LOCKED;
}
}
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
- pr_debug("no valid rapl domains found in package %d\n", rp->id);
+ pr_debug("no valid rapl domains found in %s\n", rp->name);
return -ENODEV;
}
- pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+ pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
GFP_KERNEL);
rd_package = rd;
continue;
}
- pr_debug("remove package, undo power limit on %d: %s\n",
- rp->id, rd->name);
+ pr_debug("remove package, undo power limit on %s: %s\n",
+ rp->name, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
/* do parent zone last */
}
/* called from CPU hotplug notifier, hotplug lock held */
- static struct rapl_package *rapl_add_package(int cpu, int pkgid)
+ static struct rapl_package *rapl_add_package(int cpu)
{
+ int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
return ERR_PTR(-ENOMEM);
/* add the new package to the list */
- rp->id = pkgid;
+ rp->id = id;
rp->lead_cpu = cpu;
+ if (topology_max_die_per_package() > 1)
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH,
+ "package-%d-die-%d", c->phys_proc_id, c->cpu_die_id);
+ else
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
+ c->phys_proc_id);
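+ /* yields e.g. "package-1-die-0" on multi-die parts, "package-1" otherwise */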
+
/* check if the package contains valid domains */
if (rapl_detect_domains(rp, cpu) ||
rapl_defaults->check_unit(rp, cpu)) {
*/
static int rapl_cpu_online(unsigned int cpu)
{
- int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
- rp = find_package_by_id(pkgid);
+ rp = rapl_find_package_domain(cpu);
if (!rp) {
- rp = rapl_add_package(cpu, pkgid);
+ rp = rapl_add_package(cpu);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
static int rapl_cpu_down_prep(unsigned int cpu)
{
- int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
int lead_cpu;
- rp = find_package_by_id(pkgid);
+ rp = rapl_find_package_domain(cpu);
if (!rp)
return 0;
+// SPDX-License-Identifier: GPL-2.0-only
/*
* x86_pkg_temp_thermal driver
* Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
*/
#define MAX_NUMBER_OF_TRIPS 2
- struct pkg_device {
+ struct zone_device {
int cpu;
bool work_scheduled;
u32 tj_max;
.no_hwmon = true,
};
- /* Keep track of how many package pointers we allocated in init() */
- static int max_packages __read_mostly;
- /* Array of package pointers */
- static struct pkg_device **packages;
+ /* Keep track of how many zone pointers we allocated in init() */
+ static int max_id __read_mostly;
+ /* Array of zone pointers */
+ static struct zone_device **zones;
/* Serializes interrupt notification, work and hotplug */
static DEFINE_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
*
* - Other callsites: Must hold pkg_temp_lock
*/
- static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu)
+ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
- if (pkgid >= 0 && pkgid < max_packages)
- return packages[pkgid];
+ if (id >= 0 && id < max_id)
+ return zones[id];
return NULL;
}
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
u32 eax, edx;
- rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx);
+ rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS,
+ &eax, &edx);
if (eax & 0x80000000) {
- *temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000;
+ *temp = zonedev->tj_max - ((eax >> 16) & 0x7f) * 1000;
pr_debug("sys_get_curr_temp %d\n", *temp);
return 0;
}
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
int trip, int *temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
unsigned long thres_reg_value;
u32 mask, shift, eax, edx;
int ret;
shift = THERM_SHIFT_THRESHOLD0;
}
- ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&eax, &edx);
if (ret < 0)
return ret;
thres_reg_value = (eax & mask) >> shift;
if (thres_reg_value)
- *temp = pkgdev->tj_max - thres_reg_value * 1000;
+ *temp = zonedev->tj_max - thres_reg_value * 1000;
else
*temp = 0;
pr_debug("sys_get_trip_temp %d\n", *temp);
static int
sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
u32 l, h, mask, shift, intr;
int ret;
- if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max)
+ if (trip >= MAX_NUMBER_OF_TRIPS || temp >= zonedev->tj_max)
return -EINVAL;
- ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&l, &h);
if (ret < 0)
return ret;
if (!temp) {
l &= ~intr;
} else {
- l |= (pkgdev->tj_max - temp)/1000 << shift;
+ l |= (zonedev->tj_max - temp)/1000 << shift;
l |= intr;
}
- return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l, h);
}
static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
{
struct thermal_zone_device *tzone = NULL;
int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
u64 msr_val, wr_val;
mutex_lock(&thermal_zone_mutex);
spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;
- pkgdev = pkg_temp_thermal_get_dev(cpu);
- if (!pkgdev) {
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (!zonedev) {
spin_unlock_irq(&pkg_temp_lock);
mutex_unlock(&thermal_zone_mutex);
return;
}
- pkgdev->work_scheduled = false;
+ zonedev->work_scheduled = false;
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
if (wr_val != msr_val) {
wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val);
- tzone = pkgdev->tzone;
+ tzone = zonedev->tzone;
}
enable_pkg_thres_interrupt();
static int pkg_thermal_notify(u64 msr_val)
{
int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
unsigned long flags;
spin_lock_irqsave(&pkg_temp_lock, flags);
disable_pkg_thres_interrupt();
- /* Work is per package, so scheduling it once is enough. */
+ /* Work is per zone, so scheduling it once is enough. */
- pkgdev = pkg_temp_thermal_get_dev(cpu);
- if (pkgdev && !pkgdev->work_scheduled) {
- pkgdev->work_scheduled = true;
- pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (zonedev && !zonedev->work_scheduled) {
+ zonedev->work_scheduled = true;
+ pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
}
spin_unlock_irqrestore(&pkg_temp_lock, flags);
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
u32 tj_max, eax, ebx, ecx, edx;
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
int thres_count, err;
- if (pkgid >= max_packages)
+ if (id >= max_id)
return -ENOMEM;
cpuid(6, &eax, &ebx, &ecx, &edx);
if (err)
return err;
- pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL);
- if (!pkgdev)
+ zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
+ if (!zonedev)
return -ENOMEM;
- INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn);
- pkgdev->cpu = cpu;
- pkgdev->tj_max = tj_max;
- pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp",
+ INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
+ zonedev->cpu = cpu;
+ zonedev->tj_max = tj_max;
+ zonedev->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
- pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
- if (IS_ERR(pkgdev->tzone)) {
- err = PTR_ERR(pkgdev->tzone);
- kfree(pkgdev);
+ zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ if (IS_ERR(zonedev->tzone)) {
+ err = PTR_ERR(zonedev->tzone);
+ kfree(zonedev);
return err;
}
/* Store MSR value for package thermal interrupt, to restore at exit */
- rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, pkgdev->msr_pkg_therm_low,
- pkgdev->msr_pkg_therm_high);
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low,
+ zonedev->msr_pkg_therm_high);
- cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
spin_lock_irq(&pkg_temp_lock);
- packages[pkgid] = pkgdev;
+ zones[id] = zonedev;
spin_unlock_irq(&pkg_temp_lock);
return 0;
}
static int pkg_thermal_cpu_offline(unsigned int cpu)
{
- struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
bool lastcpu, was_target;
int target;
- if (!pkgdev)
+ if (!zonedev)
return 0;
- target = cpumask_any_but(&pkgdev->cpumask, cpu);
- cpumask_clear_cpu(cpu, &pkgdev->cpumask);
+ target = cpumask_any_but(&zonedev->cpumask, cpu);
+ cpumask_clear_cpu(cpu, &zonedev->cpumask);
lastcpu = target >= nr_cpu_ids;
/*
* If this is the last cpu in the package, remove the sysfs files
* before doing further cleanups.
*/
if (lastcpu) {
- struct thermal_zone_device *tzone = pkgdev->tzone;
+ struct thermal_zone_device *tzone = zonedev->tzone;
/*
* We must protect against a work function calling
* won't try to call.
*/
mutex_lock(&thermal_zone_mutex);
- pkgdev->tzone = NULL;
+ zonedev->tzone = NULL;
mutex_unlock(&thermal_zone_mutex);
thermal_zone_device_unregister(tzone);
* one. When we drop the lock, then the interrupt notify function
* will see the new target.
*/
- was_target = pkgdev->cpu == cpu;
- pkgdev->cpu = target;
+ was_target = zonedev->cpu == cpu;
+ zonedev->cpu = target;
/*
* If this is the last CPU in the package remove the package
* worker will see the package anymore.
*/
if (lastcpu) {
- packages[topology_logical_package_id(cpu)] = NULL;
+ zones[topology_logical_die_id(cpu)] = NULL;
/* After this point nothing touches the MSR anymore. */
wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
- pkgdev->msr_pkg_therm_low, pkgdev->msr_pkg_therm_high);
+ zonedev->msr_pkg_therm_low, zonedev->msr_pkg_therm_high);
}
/*
* Check whether there is work scheduled and whether the work is
* targeted at the outgoing CPU.
*/
- if (pkgdev->work_scheduled && was_target) {
+ if (zonedev->work_scheduled && was_target) {
/*
* To cancel the work we need to drop the lock, otherwise
* we might deadlock if the work needs to be flushed.
*/
spin_unlock_irq(&pkg_temp_lock);
- cancel_delayed_work_sync(&pkgdev->work);
+ cancel_delayed_work_sync(&zonedev->work);
spin_lock_irq(&pkg_temp_lock);
/*
* If this is not the last cpu in the package and the work
* need to reschedule the work, otherwise the interrupt
* stays disabled forever.
*/
- if (!lastcpu && pkgdev->work_scheduled)
- pkg_thermal_schedule_work(target, &pkgdev->work);
+ if (!lastcpu && zonedev->work_scheduled)
+ pkg_thermal_schedule_work(target, &zonedev->work);
}
spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
if (lastcpu)
- kfree(pkgdev);
+ kfree(zonedev);
return 0;
}
static int pkg_thermal_cpu_online(unsigned int cpu)
{
- struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
/* Paranoia check */
return -ENODEV;
- /* If the package exists, nothing to do */
+ /* If the zone exists, nothing to do */
- if (pkgdev) {
- cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ if (zonedev) {
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
return 0;
}
return pkg_temp_thermal_device_add(cpu);
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
- max_packages = topology_max_packages();
- packages = kcalloc(max_packages, sizeof(struct pkg_device *),
+ max_id = topology_max_packages() * topology_max_die_per_package();
+ zones = kcalloc(max_id, sizeof(struct zone_device *),
GFP_KERNEL);
- if (!packages)
+ if (!zones)
return -ENOMEM;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
return 0;
err:
- kfree(packages);
+ kfree(zones);
return ret;
}
module_init(pkg_temp_thermal_init)
cpuhp_remove_state(pkg_thermal_hp_state);
debugfs_remove_recursive(debugfs);
- kfree(packages);
+ kfree(zones);
}
module_exit(pkg_temp_thermal_exit)