1 // SPDX-License-Identifier: GPL-2.0-only
3 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
5 * (C) Copyright 2014, 2015 Linaro Ltd.
6 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
8 * CPPC describes a few methods for controlling CPU performance using
9 * information from a per CPU table called CPC. This table is described in
10 * the ACPI v5.0+ specification. The table consists of a list of
11 * registers which may be memory mapped or hardware registers and also may
12 * include some static integer values.
14 * CPU performance is on an abstract continuous scale, as opposed to a
15 * discretized P-state scale which is tied to CPU frequency only. In brief,
16 * the basic operation involves:
18 * - OS makes a CPU performance request. (Can provide min and max bounds)
20 * - Platform (such as BMC) is free to optimize request within requested bounds
21 * depending on power/thermal budgets etc.
23 * - Platform conveys its decision back to OS
25 * The communication between OS and platform occurs through another medium
26 * called the Platform Communication Channel (PCC). This is a generic
27 * mailbox-like mechanism which includes doorbell semantics to indicate
28 * register updates. See drivers/mailbox/pcc.c for details on PCC.
30 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
31 * above specifications.
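 *
 * Illustrative usage from a cpufreq-style driver (a sketch, not part of
 * this file; error handling elided):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.min_perf = caps.lowest_perf;
 *		ctrls.max_perf = caps.highest_perf;
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */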
34 #define pr_fmt(fmt) "ACPI CPPC: " fmt
36 #include <linux/delay.h>
37 #include <linux/iopoll.h>
38 #include <linux/ktime.h>
39 #include <linux/rwsem.h>
40 #include <linux/wait.h>
41 #include <linux/topology.h>
42 #include <linux/dmi.h>
43 #include <linux/units.h>
44 #include <asm/unaligned.h>
46 #include <acpi/cppc_acpi.h>
48 struct cppc_pcc_data {
49 struct pcc_mbox_chan *pcc_channel;
50 void __iomem *pcc_comm_addr;
51 bool pcc_channel_acquired;
52 unsigned int deadline_us;
53 unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
55 bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
56 bool platform_owns_pcc; /* Ownership of PCC subspace */
57 unsigned int pcc_write_cnt; /* Running count of PCC write commands */
59 /*
60 * Lock to provide controlled access to the PCC channel.
61 *
62 * For performance-critical use cases (currently cppc_set_perf), we need
63 * to take the read_lock and check if the channel belongs to the OSPM
64 * before reading or writing to the PCC subspace. We need to take the
65 * write_lock before transferring the channel ownership to the platform
66 * via a doorbell. This allows us to batch a number of CPPC requests if
67 * they happen to originate at about the same time.
68 *
70 * For non-performance-critical use cases (init), take the write_lock
71 * for all purposes, which gives exclusive access.
72 */
73 struct rw_semaphore pcc_lock;
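	/*
	 * Illustrative locking pattern (a sketch of how the code below uses
	 * pcc_lock, not a verbatim excerpt):
	 *
	 *	down_read(&pcc_ss_data->pcc_lock);	// Phase-I fast path
	 *	if (pcc_ss_data->platform_owns_pcc)
	 *		check_pcc_chan(pcc_ss_id, false);
	 *	cpc_write(cpu, desired_reg, val);
	 *	up_read(&pcc_ss_data->pcc_lock);
	 *
	 *	down_write(&pcc_ss_data->pcc_lock);	// doorbell/init path
	 *	send_pcc_cmd(pcc_ss_id, CMD_WRITE);
	 *	up_write(&pcc_ss_data->pcc_lock);
	 */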
75 /* Wait queue for CPUs whose requests were batched */
76 wait_queue_head_t pcc_write_wait_q;
77 ktime_t last_cmd_cmpl_time;
78 ktime_t last_mpar_reset;
79 int mpar_count;
80 int refcount;
81 };
83 /* Array to represent the PCC channel per subspace ID */
84 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
85 /* The cpu_pcc_subspace_idx contains per CPU subspace ID */
86 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
89 * The cpc_desc structure contains the ACPI register details
90 * as described in the per CPU _CPC tables. The details
91 * include the type of register (e.g. PCC, System IO, FFH etc.)
92 * and destination addresses which lets us READ/WRITE CPU performance
93 * information using the appropriate I/O methods.
95 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
97 /* pcc mapped address + header size + offset within PCC subspace */
98 #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
99 		0x8 + (offs))
101 /* Check if a CPC register is in PCC */
102 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
103 (cpc)->cpc_entry.reg.space_id == \
104 ACPI_ADR_SPACE_PLATFORM_COMM)
106 /* Check if a CPC register is in SystemMemory */
107 #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
108 (cpc)->cpc_entry.reg.space_id == \
109 ACPI_ADR_SPACE_SYSTEM_MEMORY)
111 /* Check if a CPC register is in SystemIo */
112 #define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
113 (cpc)->cpc_entry.reg.space_id == \
114 ACPI_ADR_SPACE_SYSTEM_IO)
116 /* Evaluates to True if reg is a NULL register descriptor */
117 #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
118 (reg)->address == 0 && \
119 (reg)->bit_width == 0 && \
120 (reg)->bit_offset == 0 && \
121 (reg)->access_width == 0)
123 /* Evaluates to True if an optional cpc field is supported */
124 #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
125 !!(cpc)->cpc_entry.int_value : \
126 !IS_NULL_REG(&(cpc)->cpc_entry.reg))
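/*
 * Example use of the classification helpers above (an illustrative sketch,
 * given some struct cpc_desc *cpc_desc):
 *
 *	struct cpc_register_resource *reg = &cpc_desc->cpc_regs[DESIRED_PERF];
 *
 *	if (CPC_IN_PCC(reg))
 *		...	// access must go through the PCC channel
 *	else if (CPC_SUPPORTED(reg))
 *		...	// plain register, or a non-zero integer entry
 */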
127 /*
128 * Arbitrary number of retries in case the remote processor is slow to
129 * respond to PCC commands. Keeping it high enough to cover emulators
130 * where the processors run painfully slow.
131 */
132 #define NUM_RETRIES 500ULL
134 #define OVER_16BTS_MASK ~0xFFFFULL
136 #define define_one_cppc_ro(_name) \
137 static struct kobj_attribute _name = \
138 __ATTR(_name, 0444, show_##_name, NULL)
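/*
 * For example, define_one_cppc_ro(highest_perf) expands to:
 *
 *	static struct kobj_attribute highest_perf =
 *		__ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */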
140 #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
142 #define show_cppc_data(access_fn, struct_name, member_name)	\
143 static ssize_t show_##member_name(struct kobject *kobj,	\
144 		struct kobj_attribute *attr, char *buf)		\
145 {								\
146 	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
147 	struct struct_name st_name = {0};			\
148 	int ret;						\
150 	ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
151 	if (ret)						\
152 		return ret;					\
154 	return sysfs_emit(buf, "%llu\n",			\
155 			(u64)st_name.member_name);		\
156 }								\
157 define_one_cppc_ro(member_name)
159 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
160 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
161 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
162 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
163 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
164 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
166 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
167 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
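/*
 * The attributes defined above surface under each CPU's acpi_cppc kobject,
 * e.g. (illustrative paths):
 *
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 */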
169 static ssize_t show_feedback_ctrs(struct kobject *kobj,
170 		struct kobj_attribute *attr, char *buf)
171 {
172 	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
173 	struct cppc_perf_fb_ctrs fb_ctrs = {0};
174 	int ret;
176 	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
177 	if (ret)
178 		return ret;
180 	return sysfs_emit(buf, "ref:%llu del:%llu\n",
181 			fb_ctrs.reference, fb_ctrs.delivered);
182 }
183 define_one_cppc_ro(feedback_ctrs);
185 static struct attribute *cppc_attrs[] = {
186 	&feedback_ctrs.attr,
187 	&reference_perf.attr,
188 	&wraparound_time.attr,
189 	&highest_perf.attr,
190 	&lowest_perf.attr,
191 	&lowest_nonlinear_perf.attr,
192 	&nominal_perf.attr,
193 	&nominal_freq.attr,
194 	&lowest_freq.attr,
195 	NULL
196 };
197 ATTRIBUTE_GROUPS(cppc);
199 static const struct kobj_type cppc_ktype = {
200 .sysfs_ops = &kobj_sysfs_ops,
201 .default_groups = cppc_groups,
202 };
204 static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
207 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
208 struct acpi_pcct_shared_memory __iomem *generic_comm_base =
209 pcc_ss_data->pcc_comm_addr;
211 if (!pcc_ss_data->platform_owns_pcc)
212 return 0;
214 /*
215 * Poll the PCC status register every 3 us (delay_us) for a maximum of
216 * deadline_us (timeout_us) until the PCC command-complete bit is set (cond).
217 */
218 ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
219 status & PCC_CMD_COMPLETE_MASK, 3,
220 pcc_ss_data->deadline_us);
223 pcc_ss_data->platform_owns_pcc = false;
224 if (chk_err_bit && (status & PCC_ERROR_MASK))
229 pr_err("PCC check channel failed for ss: %d. ret=%d\n",
235 /*
236 * This function transfers the ownership of the PCC to the platform,
237 * so it must be called while holding the write_lock (pcc_lock).
238 */
239 static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
242 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
243 struct acpi_pcct_shared_memory __iomem *generic_comm_base =
244 pcc_ss_data->pcc_comm_addr;
245 unsigned int time_delta;
248 * For CMD_WRITE we know for a fact the caller should have checked
249 * the channel before writing to PCC space
251 if (cmd == CMD_READ) {
253 * If there are pending cpc_writes, then we stole the channel
254 * before write completion, so first send a WRITE command to
257 if (pcc_ss_data->pending_pcc_write_cmd)
258 send_pcc_cmd(pcc_ss_id, CMD_WRITE);
260 ret = check_pcc_chan(pcc_ss_id, false);
263 } else /* CMD_WRITE */
264 pcc_ss_data->pending_pcc_write_cmd = FALSE;
266 /*
267 * Handle the Minimum Request Turnaround Time (MRTT):
268 * "The minimum amount of time that OSPM must wait after the completion
269 * of a command before issuing the next command, in microseconds."
270 */
271 if (pcc_ss_data->pcc_mrtt) {
272 time_delta = ktime_us_delta(ktime_get(),
273 pcc_ss_data->last_cmd_cmpl_time);
274 if (pcc_ss_data->pcc_mrtt > time_delta)
275 udelay(pcc_ss_data->pcc_mrtt - time_delta);
276 }
278 /*
279 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
280 * "The maximum number of periodic requests that the subspace channel can
281 * support, reported in commands per minute. 0 indicates no limitation."
282 *
283 * This parameter should ideally be zero or large enough so that it can
284 * handle the maximum number of requests that all the cores in the system
285 * can collectively generate. If it is not, we will follow the spec and
286 * just not send the request to the platform after hitting the MPAR limit
287 * in any 60s window.
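 *
 * Worked example (illustrative): with pcc_mpar == 600, at most 600
 * commands go out per 60 s window; mpar_count is reloaded from pcc_mpar
 * when the window expires, and further commands within the window fail
 * with -EIO.
 */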
289 if (pcc_ss_data->pcc_mpar) {
290 if (pcc_ss_data->mpar_count == 0) {
291 time_delta = ktime_ms_delta(ktime_get(),
292 pcc_ss_data->last_mpar_reset);
293 if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
294 pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
299 pcc_ss_data->last_mpar_reset = ktime_get();
300 pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
302 pcc_ss_data->mpar_count--;
305 /* Write to the shared comm region. */
306 writew_relaxed(cmd, &generic_comm_base->command);
308 /* Clear the command-complete bit; the platform will set it when done */
309 writew_relaxed(0, &generic_comm_base->status);
311 pcc_ss_data->platform_owns_pcc = true;
314 ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
316 pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
317 pcc_ss_id, cmd, ret);
321 /* wait for completion and check for PCC error bit */
322 ret = check_pcc_chan(pcc_ss_id, true);
324 if (pcc_ss_data->pcc_mrtt)
325 pcc_ss_data->last_cmd_cmpl_time = ktime_get();
327 if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
328 mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
330 mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
333 if (cmd == CMD_WRITE) {
335 for_each_possible_cpu(i) {
336 struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
341 if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
342 desc->write_cmd_status = ret;
345 pcc_ss_data->pcc_write_cnt++;
346 wake_up_all(&pcc_ss_data->pcc_write_wait_q);
352 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
355 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
358 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
362 static struct mbox_client cppc_mbox_cl = {
363 .tx_done = cppc_chan_tx_done,
364 .knows_txdone = true,
365 };
367 static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
369 int result = -EFAULT;
370 acpi_status status = AE_OK;
371 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
372 struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
373 struct acpi_buffer state = {0, NULL};
374 union acpi_object *psd = NULL;
375 struct acpi_psd_package *pdomain;
377 status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
378 &buffer, ACPI_TYPE_PACKAGE);
379 if (status == AE_NOT_FOUND) /* _PSD is optional */
381 if (ACPI_FAILURE(status))
384 psd = buffer.pointer;
385 if (!psd || psd->package.count != 1) {
386 pr_debug("Invalid _PSD data\n");
390 pdomain = &(cpc_ptr->domain_info);
392 state.length = sizeof(struct acpi_psd_package);
393 state.pointer = pdomain;
395 status = acpi_extract_package(&(psd->package.elements[0]),
397 if (ACPI_FAILURE(status)) {
398 pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
402 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
403 pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
407 if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
408 pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
412 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
413 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
414 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
415 pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
421 kfree(buffer.pointer);
425 bool acpi_cpc_valid(void)
427 struct cpc_desc *cpc_ptr;
433 for_each_present_cpu(cpu) {
434 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
435 if (!cpc_ptr)
436 	return false;
437 }
439 return true;
440 }
441 EXPORT_SYMBOL_GPL(acpi_cpc_valid);
443 bool cppc_allow_fast_switch(void)
445 struct cpc_register_resource *desired_reg;
446 struct cpc_desc *cpc_ptr;
449 for_each_possible_cpu(cpu) {
450 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
451 desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
452 if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
453 	!CPC_IN_SYSTEM_IO(desired_reg))
454 	return false;
455 }
457 return true;
458 }
459 EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
462 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
463 * @cpu: Find all CPUs that share a domain with cpu.
464 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
466 * Return: 0 for success or negative value for err.
468 int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
470 struct cpc_desc *cpc_ptr, *match_cpc_ptr;
471 struct acpi_psd_package *match_pdomain;
472 struct acpi_psd_package *pdomain;
476 * Now that we have _PSD data from all CPUs, let's set up P-state
477 * domain info.
478 */
479 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
483 pdomain = &(cpc_ptr->domain_info);
484 cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
485 if (pdomain->num_processors <= 1)
488 /* Validate the Domain info */
489 count_target = pdomain->num_processors;
490 if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
491 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
492 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
493 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
494 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
495 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
497 for_each_possible_cpu(i) {
501 match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
505 match_pdomain = &(match_cpc_ptr->domain_info);
506 if (match_pdomain->domain != pdomain->domain)
509 /* Here i and cpu are in the same domain */
510 if (match_pdomain->num_processors != count_target)
513 if (pdomain->coord_type != match_pdomain->coord_type)
516 cpumask_set_cpu(i, cpu_data->shared_cpu_map);
522 /* Assume no coordination on any error parsing domain info */
523 cpumask_clear(cpu_data->shared_cpu_map);
524 cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
525 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
529 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
531 static int register_pcc_channel(int pcc_ss_idx)
533 struct pcc_mbox_chan *pcc_chan;
536 if (pcc_ss_idx >= 0) {
537 pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
539 if (IS_ERR(pcc_chan)) {
540 pr_err("Failed to find PCC channel for subspace %d\n",
545 pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
546 /*
547 * pcc_chan->latency is just a nominal value. In reality the remote
548 * processor could be much slower to reply, so add an arbitrary
549 * amount of wait on top of the nominal latency.
550 */
551 usecs_lat = NUM_RETRIES * pcc_chan->latency;
552 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
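		/* e.g. (illustrative): a nominal latency of 100 us and NUM_RETRIES of 500 yield a 50 ms deadline */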
553 pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
554 pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
555 pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
557 pcc_data[pcc_ss_idx]->pcc_comm_addr =
558 acpi_os_ioremap(pcc_chan->shmem_base_addr,
559 pcc_chan->shmem_size);
560 if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
561 pr_err("Failed to ioremap PCC comm region mem for %d\n",
566 /* Set flag so that we don't come here for each CPU. */
567 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
574 * cpc_ffh_supported() - check if FFH reading supported
576 * Check if the architecture has support for functional fixed hardware
577 * read/write capability.
579 * Return: true for supported, false for not supported
581 bool __weak cpc_ffh_supported(void)
587 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
589 * Check if the architectural support for CPPC is present even
590 * if the _OSC hasn't prescribed it
592 * Return: true for supported, false for not supported
594 bool __weak cpc_supported_by_cpu(void)
600 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
601 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
603 * Check and allocate the cppc_pcc_data memory.
604 * In some processor configurations it is possible that the same subspace
605 * is shared between multiple CPUs. This is seen especially on CPUs
606 * with hardware multi-threading support.
608 * Return: 0 for success, errno for failure
610 static int pcc_data_alloc(int pcc_ss_id)
612 if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
615 if (pcc_data[pcc_ss_id]) {
616 pcc_data[pcc_ss_id]->refcount++;
618 pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
620 if (!pcc_data[pcc_ss_id])
622 pcc_data[pcc_ss_id]->refcount++;
629 * An example CPC table looks like the following.
631 * Name (_CPC, Package() {
632 *     17,							// NumEntries
633 *     3,							// Revision
634 * ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance
635 * ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance
636 * ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance
637 * ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance
638 * ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register
639 * ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register
640 * ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
645 * Each Register() encodes how to access that specific register.
646 * e.g. a sample PCC entry has the following encoding:
648 * Register (
649 *     PCC,	// AddressSpaceKeyword
650 * 8, // RegisterBitWidth
651 * 8, // RegisterBitOffset
652 * 0x30, // RegisterAddress
653 * 9, // AccessSize (subspace ID)
657 #ifndef arch_init_invariance_cppc
658 static inline void arch_init_invariance_cppc(void) { }
662 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
663 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
665 * Return: 0 for success or negative value for err.
667 int acpi_cppc_processor_probe(struct acpi_processor *pr)
669 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
670 union acpi_object *out_obj, *cpc_obj;
671 struct cpc_desc *cpc_ptr;
672 struct cpc_reg *gas_t;
673 struct device *cpu_dev;
674 acpi_handle handle = pr->handle;
675 unsigned int num_ent, i, cpc_rev;
676 int pcc_subspace_id = -1;
680 if (!osc_sb_cppc2_support_acked) {
681 pr_debug("CPPC v2 _OSC not acked\n");
682 if (!cpc_supported_by_cpu())
683 	return -ENODEV;
684 }
686 /* Parse the ACPI _CPC table for this CPU. */
687 status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
689 if (ACPI_FAILURE(status)) {
694 out_obj = (union acpi_object *) output.pointer;
696 cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
702 /* First entry is NumEntries. */
703 cpc_obj = &out_obj->package.elements[0];
704 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
705 num_ent = cpc_obj->integer.value;
707 pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
712 pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
713 cpc_obj->type, pr->id);
717 /* Second entry should be revision. */
718 cpc_obj = &out_obj->package.elements[1];
719 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
720 cpc_rev = cpc_obj->integer.value;
722 pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
723 cpc_obj->type, pr->id);
727 if (cpc_rev < CPPC_V2_REV) {
728 pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
734 * Disregard _CPC if the number of entries in the return package is not
735 * as expected, but support future revisions being proper supersets of
736 * v3 and only causing more entries to be returned by _CPC.
737 */
738 if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
739 (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
740 (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
741 pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
745 if (cpc_rev > CPPC_V3_REV) {
746 num_ent = CPPC_V3_NUM_ENT;
747 cpc_rev = CPPC_V3_REV;
750 cpc_ptr->num_entries = num_ent;
751 cpc_ptr->version = cpc_rev;
753 /* Iterate through remaining entries in _CPC */
754 for (i = 2; i < num_ent; i++) {
755 cpc_obj = &out_obj->package.elements[i];
757 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
758 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
759 cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
760 } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
761 gas_t = (struct cpc_reg *)
762 cpc_obj->buffer.pointer;
765 * The PCC Subspace index is encoded inside
766 * the CPC table entries. The same PCC index
767 * will be used for all the PCC entries,
768 * so extract it only once.
770 if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
771 if (pcc_subspace_id < 0) {
772 pcc_subspace_id = gas_t->access_width;
773 if (pcc_data_alloc(pcc_subspace_id))
775 } else if (pcc_subspace_id != gas_t->access_width) {
776 pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
780 } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
781 if (gas_t->address) {
784 if (!osc_cpc_flexible_adr_space_confirmed) {
785 pr_debug("Flexible address space capability not supported\n");
786 if (!cpc_supported_by_cpu())
787 	goto out_free;
788 }
790 addr = ioremap(gas_t->address, gas_t->bit_width/8);
793 cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
795 } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
796 if (gas_t->access_width < 1 || gas_t->access_width > 3) {
798 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
799 * SystemIO doesn't implement 64-bit
800 * registers.
801 */
802 pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
803 gas_t->access_width);
806 if (gas_t->address & OVER_16BTS_MASK) {
807 /* SystemIO registers use 16-bit integer addresses */
808 pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
812 if (!osc_cpc_flexible_adr_space_confirmed) {
813 pr_debug("Flexible address space capability not supported\n");
814 if (!cpc_supported_by_cpu())
815 	goto out_free;
816 }
817 } else {
818 if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
819 /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
820 pr_debug("Unsupported register type (%d) in _CPC\n",
826 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
827 memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
829 pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
834 per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
837 * Initialize the remaining cpc_regs as unsupported.
838 * Example: In case FW exposes CPPC v2, the below loop will initialize
839 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
841 for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
842 cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
843 cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
847 /* Store CPU Logical ID */
848 cpc_ptr->cpu_id = pr->id;
850 /* Parse PSD data for this CPU */
851 ret = acpi_get_psd(cpc_ptr, handle);
855 /* Register the PCC channel once per PCC subspace ID. */
856 if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
857 ret = register_pcc_channel(pcc_subspace_id);
861 init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
862 init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
865 /* Everything looks okay */
866 pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
868 /* Add per logical CPU nodes for reading its feedback counters. */
869 cpu_dev = get_cpu_device(pr->id);
875 /* Plug PSD data into this CPU's CPC descriptor. */
876 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
878 ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
881 per_cpu(cpc_desc_ptr, pr->id) = NULL;
882 kobject_put(&cpc_ptr->kobj);
886 arch_init_invariance_cppc();
888 kfree(output.pointer);
892 /* Free all the mapped sys mem areas for this CPU */
893 for (i = 2; i < cpc_ptr->num_entries; i++) {
894 void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
902 kfree(output.pointer);
905 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
908 * acpi_cppc_processor_exit - Cleanup CPC structs.
909 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
913 void acpi_cppc_processor_exit(struct acpi_processor *pr)
915 struct cpc_desc *cpc_ptr;
918 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
920 if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
921 if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
922 pcc_data[pcc_ss_id]->refcount--;
923 if (!pcc_data[pcc_ss_id]->refcount) {
924 pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
925 kfree(pcc_data[pcc_ss_id]);
926 pcc_data[pcc_ss_id] = NULL;
931 cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
935 /* Free all the mapped sys mem areas for this CPU */
936 for (i = 2; i < cpc_ptr->num_entries; i++) {
937 addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
942 kobject_put(&cpc_ptr->kobj);
945 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
948 * cpc_read_ffh() - Read FFH register
949 * @cpunum: CPU number to read
950 * @reg: cppc register information
951 * @val: place holder for return value
953 * Read bit_width bits from a specified address and bit_offset
955 * Return: 0 on success, negative errno on failure.
957 int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
963 * cpc_write_ffh() - Write FFH register
964 * @cpunum: CPU number to write
965 * @reg: cppc register information
966 * @val: value to write
968 * Write value of bit_width bits to a specified address and bit_offset
970 * Return: 0 on success, negative errno on failure.
972 int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
977 /*
978 * Since cpc_read() and cpc_write() are called while holding pcc_lock,
979 * they should be as fast as possible. We have already mapped the PCC
980 * subspace during init, so we can directly read from/write to it.
981 */
983 static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
985 void __iomem *vaddr = NULL;
986 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
987 struct cpc_reg *reg = &reg_res->cpc_entry.reg;
989 if (reg_res->type == ACPI_TYPE_INTEGER) {
990 *val = reg_res->cpc_entry.int_value;
991 return 0;
992 }
994 *val = 0;
996 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
997 u32 width = 8 << (reg->access_width - 1);
1001 status = acpi_os_read_port((acpi_io_address)reg->address,
1003 if (ACPI_FAILURE(status)) {
1004 pr_debug("Error: Failed to read SystemIO port %llx\n",
1011 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1012 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1013 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1014 vaddr = reg_res->sys_mem_vaddr;
1015 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1016 return cpc_read_ffh(cpu, reg, val);
1017 else
1018 	return acpi_os_read_memory((acpi_physical_address)reg->address,
1019 			val, reg->bit_width);
1021 switch (reg->bit_width) {
1022 case 8:
1023 	*val = readb_relaxed(vaddr);
1024 	break;
1025 case 16:
1026 	*val = readw_relaxed(vaddr);
1027 	break;
1028 case 32:
1029 	*val = readl_relaxed(vaddr);
1030 	break;
1031 case 64:
1032 	*val = readq_relaxed(vaddr);
1033 	break;
1034 default:
1035 	pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1036 		reg->bit_width, pcc_ss_id);
1037 	return -EFAULT;
1038 }
1040 return 0;
1041 }
1043 static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1044 {
1045 int ret_val = 0;
1046 void __iomem *vaddr = NULL;
1047 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1048 struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1050 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1051 u32 width = 8 << (reg->access_width - 1);
1054 status = acpi_os_write_port((acpi_io_address)reg->address,
1056 if (ACPI_FAILURE(status)) {
1057 pr_debug("Error: Failed to write SystemIO port %llx\n",
1063 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1064 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1065 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1066 vaddr = reg_res->sys_mem_vaddr;
1067 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1068 return cpc_write_ffh(cpu, reg, val);
1069 else
1070 	return acpi_os_write_memory((acpi_physical_address)reg->address,
1071 			val, reg->bit_width);
1073 switch (reg->bit_width) {
1074 case 8:
1075 	writeb_relaxed(val, vaddr);
1076 	break;
1077 case 16:
1078 	writew_relaxed(val, vaddr);
1079 	break;
1080 case 32:
1081 	writel_relaxed(val, vaddr);
1082 	break;
1083 case 64:
1084 	writeq_relaxed(val, vaddr);
1085 	break;
1086 default:
1087 	pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1088 		reg->bit_width, pcc_ss_id);
1089 	ret_val = -EFAULT;
1090 	break;
1091 }
1093 return ret_val;
1094 }
1096 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
1098 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1099 struct cpc_register_resource *reg;
1102 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1106 reg = &cpc_desc->cpc_regs[reg_idx];
1108 if (CPC_IN_PCC(reg)) {
1109 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1110 struct cppc_pcc_data *pcc_ss_data = NULL;
1116 pcc_ss_data = pcc_data[pcc_ss_id];
1118 down_write(&pcc_ss_data->pcc_lock);
1120 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1121 cpc_read(cpunum, reg, perf);
1125 up_write(&pcc_ss_data->pcc_lock);
1130 cpc_read(cpunum, reg, perf);
1136 * cppc_get_desired_perf - Get the desired performance register value.
1137 * @cpunum: CPU from which to get desired performance.
1138 * @desired_perf: Return address.
1140 * Return: 0 for success, -EIO otherwise.
1142 int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1144 return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
1146 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1149 * cppc_get_nominal_perf - Get the nominal performance register value.
1150 * @cpunum: CPU from which to get nominal performance.
1151 * @nominal_perf: Return address.
1153 * Return: 0 for success, -EIO otherwise.
1155 int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
1157 return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
1161 * cppc_get_epp_perf - Get the epp register value.
1162 * @cpunum: CPU from which to get epp preference value.
1163 * @epp_perf: Return address.
1165 * Return: 0 for success, -EIO otherwise.
1167 int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
1169 return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
1171 EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
1174 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1175 * @cpunum: CPU from which to get capabilities info.
1176 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1178 * Return: 0 for success with perf_caps populated else -ERRNO.
1180 int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1182 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1183 struct cpc_register_resource *highest_reg, *lowest_reg,
1184 *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1185 *low_freq_reg = NULL, *nom_freq_reg = NULL;
1186 u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1187 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1188 struct cppc_pcc_data *pcc_ss_data = NULL;
1189 int ret = 0, regs_in_pcc = 0;
1192 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1196 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1197 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1198 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1199 nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1200 low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1201 nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1202 guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1204 /* Are any of the regs PCC? */
1205 if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1206 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1207 CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1208 if (pcc_ss_id < 0) {
1209 pr_debug("Invalid pcc_ss_id\n");
1212 pcc_ss_data = pcc_data[pcc_ss_id];
1214 down_write(&pcc_ss_data->pcc_lock);
1215 /* Ring doorbell once to update PCC subspace */
1216 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1222 cpc_read(cpunum, highest_reg, &high);
1223 perf_caps->highest_perf = high;
1225 cpc_read(cpunum, lowest_reg, &low);
1226 perf_caps->lowest_perf = low;
1228 cpc_read(cpunum, nominal_reg, &nom);
1229 perf_caps->nominal_perf = nom;
1231 if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
1232 IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1233 perf_caps->guaranteed_perf = 0;
1235 cpc_read(cpunum, guaranteed_reg, &guaranteed);
1236 perf_caps->guaranteed_perf = guaranteed;
1239 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1240 perf_caps->lowest_nonlinear_perf = min_nonlinear;
1242 if (!high || !low || !nom || !min_nonlinear)
1245 /* Read optional lowest and nominal frequencies if present */
1246 if (CPC_SUPPORTED(low_freq_reg))
1247 cpc_read(cpunum, low_freq_reg, &low_f);
1249 if (CPC_SUPPORTED(nom_freq_reg))
1250 cpc_read(cpunum, nom_freq_reg, &nom_f);
1252 perf_caps->lowest_freq = low_f;
1253 perf_caps->nominal_freq = nom_f;
1258 up_write(&pcc_ss_data->pcc_lock);
1261 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1264 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
1266 * CPPC has flexibility about how CPU performance counters are accessed.
1267 * One of the choices is PCC regions, which can have a high access latency. This
1268 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
1270 * Return: true if any of the counters are in PCC regions, false otherwise
1272 bool cppc_perf_ctrs_in_pcc(void)
1276 for_each_present_cpu(cpu) {
1277 struct cpc_register_resource *ref_perf_reg;
1278 struct cpc_desc *cpc_desc;
1280 cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1282 if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
1283 CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
1284 CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
1288 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1291 * If reference perf register is not supported then we should
1292 * use the nominal perf value
1294 if (!CPC_SUPPORTED(ref_perf_reg))
1295 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1297 if (CPC_IN_PCC(ref_perf_reg))
1303 EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
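/*
 * Usage sketch (illustrative): a caller such as a cpufreq driver can check
 * this once at init to decide whether reading the feedback counters from a
 * hot path is acceptable:
 *
 *	if (cppc_perf_ctrs_in_pcc())
 *		pr_info("CPPC perf counters are in PCC; reads may be slow\n");
 */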
1306 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1307 * @cpunum: CPU from which to read counters.
1308 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1310 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1312 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1314 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1315 struct cpc_register_resource *delivered_reg, *reference_reg,
1316 *ref_perf_reg, *ctr_wrap_reg;
1317 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1318 struct cppc_pcc_data *pcc_ss_data = NULL;
1319 u64 delivered, reference, ref_perf, ctr_wrap_time;
1320 int ret = 0, regs_in_pcc = 0;
1323 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1327 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1328 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1329 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1330 ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1333 * If reference perf register is not supported then we should
1334 * use the nominal perf value
1336 if (!CPC_SUPPORTED(ref_perf_reg))
1337 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1339 /* Are any of the regs PCC? */
1340 if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1341 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1342 if (pcc_ss_id < 0) {
1343 pr_debug("Invalid pcc_ss_id\n");
1346 pcc_ss_data = pcc_data[pcc_ss_id];
1347 down_write(&pcc_ss_data->pcc_lock);
1349 /* Ring doorbell once to update PCC subspace */
1350 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1356 cpc_read(cpunum, delivered_reg, &delivered);
1357 cpc_read(cpunum, reference_reg, &reference);
1358 cpc_read(cpunum, ref_perf_reg, &ref_perf);
1361 * Per spec, if ctr_wrap_time optional register is unsupported, then the
1362 	 * performance counters are assumed to never wrap during the lifetime of
1363 	 * the platform.
1364 	 */
1365 ctr_wrap_time = (u64)(~((u64)0));
1366 if (CPC_SUPPORTED(ctr_wrap_reg))
1367 cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1369 if (!delivered || !reference || !ref_perf) {
1374 perf_fb_ctrs->delivered = delivered;
1375 perf_fb_ctrs->reference = reference;
1376 perf_fb_ctrs->reference_perf = ref_perf;
1377 perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1380 up_write(&pcc_ss_data->pcc_lock);
1383 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
1386 * Set Energy Performance Preference Register value through
1387 * Performance Controls Interface
1389 int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
1391 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1392 struct cpc_register_resource *epp_set_reg;
1393 struct cpc_register_resource *auto_sel_reg;
1394 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1395 struct cppc_pcc_data *pcc_ss_data = NULL;
1399 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1403 auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1404 epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
1406 if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
1407 if (pcc_ss_id < 0) {
1408 pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
1412 if (CPC_SUPPORTED(auto_sel_reg)) {
1413 ret = cpc_write(cpu, auto_sel_reg, enable);
1418 if (CPC_SUPPORTED(epp_set_reg)) {
1419 ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1424 pcc_ss_data = pcc_data[pcc_ss_id];
1426 down_write(&pcc_ss_data->pcc_lock);
1427 /* after writing CPC, transfer the ownership of PCC to platform */
1428 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1429 up_write(&pcc_ss_data->pcc_lock);
1432 pr_debug("_CPC in PCC is not supported\n");
1437 EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
1440 * cppc_get_auto_sel_caps - Read autonomous selection register.
1441 * @cpunum : CPU from which to read register.
1442 * @perf_caps : struct where autonomous selection register value is updated.
1444 int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1446 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1447 struct cpc_register_resource *auto_sel_reg;
1451 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1455 auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1457 if (!CPC_SUPPORTED(auto_sel_reg))
1458 pr_warn_once("Autonomous mode is not supported!\n");
1460 if (CPC_IN_PCC(auto_sel_reg)) {
1461 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1462 struct cppc_pcc_data *pcc_ss_data = NULL;
1468 pcc_ss_data = pcc_data[pcc_ss_id];
1470 down_write(&pcc_ss_data->pcc_lock);
1472 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
1473 cpc_read(cpunum, auto_sel_reg, &auto_sel);
1474 perf_caps->auto_sel = (bool)auto_sel;
1479 up_write(&pcc_ss_data->pcc_lock);
1486 EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
1489 * cppc_set_auto_sel - Write autonomous selection register.
1490 * @cpu : CPU to which to write register.
1491 * @enable : the desired value of the autonomous selection register.
1493 int cppc_set_auto_sel(int cpu, bool enable)
1495 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1496 struct cpc_register_resource *auto_sel_reg;
1497 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1498 struct cppc_pcc_data *pcc_ss_data = NULL;
1502 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1506 auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1508 if (CPC_IN_PCC(auto_sel_reg)) {
1509 if (pcc_ss_id < 0) {
1510 pr_debug("Invalid pcc_ss_id\n");
1514 if (CPC_SUPPORTED(auto_sel_reg)) {
1515 ret = cpc_write(cpu, auto_sel_reg, enable);
1520 pcc_ss_data = pcc_data[pcc_ss_id];
1522 down_write(&pcc_ss_data->pcc_lock);
1523 /* after writing CPC, transfer the ownership of PCC to platform */
1524 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1525 up_write(&pcc_ss_data->pcc_lock);
1528 pr_debug("_CPC in PCC is not supported\n");
1533 EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
1536 * cppc_set_enable - Enable or disable CPPC on the processor by writing
1537 * the Continuous Performance Control package EnableRegister field.
1538 * @cpu: CPU for which to set the enable register.
1539 * @enable: 0 - disable, 1 - enable the CPPC feature on the processor.
1541 * Return: 0 for success, -ERRNO or -EIO otherwise.
1543 int cppc_set_enable(int cpu, bool enable)
1545 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1546 struct cpc_register_resource *enable_reg;
1547 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1548 struct cppc_pcc_data *pcc_ss_data = NULL;
1552 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1556 enable_reg = &cpc_desc->cpc_regs[ENABLE];
1558 if (CPC_IN_PCC(enable_reg)) {
1563 ret = cpc_write(cpu, enable_reg, enable);
1567 pcc_ss_data = pcc_data[pcc_ss_id];
1569 down_write(&pcc_ss_data->pcc_lock);
1570 /* After writing CPC, transfer the ownership of PCC to the platform */
1571 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1572 up_write(&pcc_ss_data->pcc_lock);
1576 return cpc_write(cpu, enable_reg, enable);
1578 EXPORT_SYMBOL_GPL(cppc_set_enable);
1581 * cppc_set_perf - Set a CPU's performance controls.
1582 * @cpu: CPU for which to set performance controls.
1583 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1585 * Return: 0 for success, -ERRNO otherwise.
1587 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1589 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1590 struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
1591 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1592 struct cppc_pcc_data *pcc_ss_data = NULL;
1596 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1600 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1601 min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
1602 max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
1604 /*
1605 * This is Phase-I, where we want to write to the CPC registers.
1606 * We want all CPUs to be able to execute this phase in parallel.
1608 * Since the read_lock can be acquired by multiple CPUs simultaneously,
1609 * we achieve that goal here.
1610 */
1611 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1612 if (pcc_ss_id < 0) {
1613 pr_debug("Invalid pcc_ss_id\n");
1616 pcc_ss_data = pcc_data[pcc_ss_id];
1617 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1618 if (pcc_ss_data->platform_owns_pcc) {
1619 ret = check_pcc_chan(pcc_ss_id, false);
1621 up_read(&pcc_ss_data->pcc_lock);
1626 * Update the pending_write to make sure a PCC CMD_READ will not
1627 * arrive and steal the channel during the switch to write lock
1629 pcc_ss_data->pending_pcc_write_cmd = true;
1630 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1631 cpc_desc->write_cmd_status = 0;
1634 cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1637 * Only write if min_perf and max_perf not zero. Some drivers pass zero
1638 * value to min and max perf, but they don't mean to set the zero value,
1639 * they just don't want to write to those registers.
1641 if (perf_ctrls->min_perf)
1642 cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
1643 if (perf_ctrls->max_perf)
1644 cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
1646 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
1647 up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
1649 * This is Phase-II where we transfer the ownership of PCC to Platform
1651 * Short Summary: Basically if we think of a group of cppc_set_perf
1652 * requests that happened in short overlapping interval. The last CPU to
1653 * come out of Phase-I will enter Phase-II and ring the doorbell.
1655 * We have the following requirements for Phase-II:
1656 * 1. We want to execute Phase-II only when there are no CPUs
1657 * currently executing in Phase-I
1658 * 2. Once we start Phase-II we want to avoid all other CPUs from
1659 * entering Phase-I.
1660 * 3. We want only one CPU among all those who went through Phase-I
1661 * to run Phase-II.
1663 * If write_trylock fails to get the lock and doesn't transfer the
1664 * PCC ownership to the platform, then one of the following will be TRUE
1665 * 1. There is at-least one CPU in Phase-I which will later execute
1666 * write_trylock, so the CPUs in Phase-I will be responsible for
1667 * executing the Phase-II.
1668 * 2. Some other CPU has beaten this CPU to successfully execute the
1669 * write_trylock and has already acquired the write_lock. We know for a
1670 * fact it (other CPU acquiring the write_lock) couldn't have happened
1671 * before this CPU's Phase-I as we held the read_lock.
1672 * 3. Some other CPU executing pcc CMD_READ has stolen the
1673 * down_write, in which case, send_pcc_cmd will check for pending
1674 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
1675 * So this CPU can be certain that its request will be delivered.
1676 * In all cases, then, this CPU knows that its request will be delivered
1677 * by another CPU and can return.
1679 * After getting the down_write we still need to check for
1680 * pending_pcc_write_cmd to take care of the following scenario
1681 * The thread running this code could be scheduled out between
1682 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1683 * could have delivered the request to Platform by triggering the
1684 * doorbell and transferred the ownership of PCC to platform. So this
1685 * avoids triggering an unnecessary doorbell and more importantly before
1686 * triggering the doorbell it makes sure that the PCC channel ownership
1687 * is still with OSPM.
1688 * pending_pcc_write_cmd can also be cleared by a different CPU, if
1689 * there was a pcc CMD_READ waiting on down_write and it steals the lock
1690 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1691 * case during a CMD_READ and, if there are pending writes, it delivers
1692 * the write command before servicing the read command.
1693 */
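	/*
	 * Illustrative timeline (a sketch): CPUs A and B both write their
	 * desired perf under the read_lock (Phase-I). A wins the
	 * write_trylock and rings the doorbell once on behalf of both
	 * (Phase-II); B's trylock fails, so B sleeps on pcc_write_wait_q
	 * until pcc_write_cnt advances past its write_cmd_id.
	 */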
1694 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1695 if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1696 /* Update only if there are pending write commands */
1697 if (pcc_ss_data->pending_pcc_write_cmd)
1698 send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1699 up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */
1701 /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1702 wait_event(pcc_ss_data->pcc_write_wait_q,
1703 cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1705 /* send_pcc_cmd updates the status in case of failure */
1706 ret = cpc_desc->write_cmd_status;
1710 EXPORT_SYMBOL_GPL(cppc_set_perf);
1713 * cppc_get_transition_latency - returns frequency transition latency in ns
1714 * @cpu_num: CPU number for per_cpu().
1716 * ACPI CPPC does not explicitly specify how a platform can specify the
1717 * transition latency for performance change requests. The closest we have
1718 * is the timing information from the PCCT tables which provides the info
1719 * on the number and frequency of PCC commands the platform can handle.
1721 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
1722 * then assume there is no latency.
1724 unsigned int cppc_get_transition_latency(int cpu_num)
1725 {
1726 /*
1727 * Expected transition latency is based on the PCCT timing values.
1728 * Below are definitions from the ACPI spec:
1729 * pcc_nominal - Expected latency to process a command, in microseconds
1730 * pcc_mpar - The maximum number of periodic requests that the subspace
1731 * channel can support, reported in commands per minute. 0
1732 * indicates no limitation.
1733 * pcc_mrtt - The minimum amount of time that OSPM must wait after the
1734 *            completion of a command before issuing the next command,
1735 *            in microseconds.
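 *
 * Worked example (illustrative): pcc_mpar == 6000 commands/min yields a
 * floor of 60 * (10^9 / 6000) ns, i.e. roughly 10 ms between requests;
 * the returned value is the max of that, pcc_nominal and pcc_mrtt (both
 * converted from us to ns).
 */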
1737 unsigned int latency_ns = 0;
1738 struct cpc_desc *cpc_desc;
1739 struct cpc_register_resource *desired_reg;
1740 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1741 struct cppc_pcc_data *pcc_ss_data;
1743 cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1745 return CPUFREQ_ETERNAL;
1747 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1748 if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
1750 else if (!CPC_IN_PCC(desired_reg))
1751 return CPUFREQ_ETERNAL;
1754 return CPUFREQ_ETERNAL;
1756 pcc_ss_data = pcc_data[pcc_ss_id];
1757 if (pcc_ss_data->pcc_mpar)
1758 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1760 latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1761 latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1765 EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
1767 /* Minimum struct length needed for the DMI processor entry we want */
1768 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
1770 /* Offset in the DMI processor structure for the max frequency */
1771 #define DMI_PROCESSOR_MAX_SPEED 0x14
1773 /* Callback function used to retrieve the max frequency from DMI */
1774 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
1776 const u8 *dmi_data = (const u8 *)dm;
1777 u16 *mhz = (u16 *)private;
1779 if (dm->type == DMI_ENTRY_PROCESSOR &&
1780 dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
1781 u16 val = (u16)get_unaligned((const u16 *)
1782 (dmi_data + DMI_PROCESSOR_MAX_SPEED));
1783 *mhz = val > *mhz ? val : *mhz;
1787 /* Look up the max frequency in DMI */
1788 static u64 cppc_get_dmi_max_khz(void)
1792 dmi_walk(cppc_find_dmi_mhz, &mhz);
1794 /*
1795 * Real stupid fallback value, just in case there is no
1796 * actual value set.
1797 */
1798 mhz = mhz ? mhz : 1;
1800 return KHZ_PER_MHZ * mhz;
1804 * If the CPPC lowest_freq and nominal_freq registers are exposed, then we
1805 * can use them to convert perf to freq and vice versa. The conversion is
1806 * extrapolated as an affine function passing through the 2 points:
1807 * - (Low perf, Low freq)
1808 * - (Nominal perf, Nominal freq)
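 *
 * Worked example (illustrative numbers): with lowest_perf = 10,
 * lowest_freq = 1000 MHz, nominal_perf = 30, nominal_freq = 2400 MHz:
 *
 *	mul    = (2400 - 1000) * KHZ_PER_MHZ = 1400000 kHz
 *	div    = 30 - 10 = 20
 *	offset = 2400000 - (30 * 1400000) / 20 = 300000 kHz
 *
 * so perf 20 maps to 300000 + (20 * 1400000) / 20 = 1700000 kHz (1.7 GHz).
 */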
1810 unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
1812 s64 retval, offset = 0;
1813 static u64 max_khz;
1814 u64 mul, div;
1816 if (caps->lowest_freq && caps->nominal_freq) {
1817 mul = caps->nominal_freq - caps->lowest_freq;
1818 mul *= KHZ_PER_MHZ;
1819 div = caps->nominal_perf - caps->lowest_perf;
1820 offset = caps->nominal_freq * KHZ_PER_MHZ -
1821 div64_u64(caps->nominal_perf * mul, div);
1822 } else {
1823 	if (!max_khz)
1824 		max_khz = cppc_get_dmi_max_khz();
1825 	mul = max_khz;
1826 	div = caps->highest_perf;
1827 }
1829 retval = offset + div64_u64(perf * mul, div);
1830 if (retval >= 0)
1831 	return retval;
1832 return 0;
1833 }
1834 EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
1836 unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
1838 s64 retval, offset = 0;
1839 static u64 max_khz;
1840 u64 mul, div;
1842 if (caps->lowest_freq && caps->nominal_freq) {
1843 mul = caps->nominal_perf - caps->lowest_perf;
1844 div = caps->nominal_freq - caps->lowest_freq;
1846 * We don't need to convert to kHz for computing offset and can
1847 * directly use nominal_freq and lowest_freq as the div64_u64
1848 * will remove the frequency unit.
1850 offset = caps->nominal_perf -
1851 div64_u64(caps->nominal_freq * mul, div);
1852 	/* But we need it for computing the perf level. */
1853 	div *= KHZ_PER_MHZ;
1854 } else {
1855 	if (!max_khz)
1856 		max_khz = cppc_get_dmi_max_khz();
1857 	mul = caps->highest_perf;
1858 	div = max_khz;
1859 }
1861 retval = offset + div64_u64(freq * mul, div);
1862 if (retval >= 0)
1863 	return retval;
1864 return 0;
1865 }
1866 EXPORT_SYMBOL_GPL(cppc_khz_to_perf);