/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may also
 * include some static integer values.
 *
 * CPU performance is expressed on an abstract continuous scale, as opposed to
 * a discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS
 *
 * The communication between the OS and the platform occurs through another
 * medium called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

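/*
 * Illustrative usage (a minimal sketch, not part of this file's logic):
 * a CPPC-aware CPUfreq driver would typically discover the performance
 * range once and then request a level within it, e.g.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	cppc_get_perf_caps(cpu, &caps);
 *	ctrls.desired_perf = caps.nominal_perf;
 *	cppc_set_perf(cpu, &ctrls);
 *
 * The function and field names above match the definitions in
 * <acpi/cppc_acpi.h> and in this file; picking nominal_perf as the
 * desired level is purely illustrative.
 */
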
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include <acpi/cppc_acpi.h>
/*
 * Lock to provide mutually exclusive access to the PCC
 * channel. e.g. when the remote updates the shared region
 * with new data, the reader needs to be protected from
 * other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses, which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static bool pcc_channel_acquired;
static ktime_t deadline;
static unsigned int pcc_mpar, pcc_mrtt;

/* PCC mapped address + shared memory header size (0x8) + offset within PCC subspace */
#define GET_PCC_VADDR(offs)	(pcc_comm_addr + 0x8 + (offs))

/*
 * Arbitrary number of retries in case the remote processor is slow to respond
 * to PCC commands. Keep it high enough to cover emulators, where the
 * processors run painfully slowly.
 */
#define NUM_RETRIES 500

static int check_pcc_chan(void)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), deadline);

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per the spec, prior to boot the PCC space will be initialized
		 * by the platform, which should have set the command completion
		 * bit when PCC can be used by the OSPM.
		 */
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			ret = 0;
			break;
		}
		/*
		 * Reduce the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	return ret;
}

static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller has already checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_mrtt > time_delta)
			udelay(pcc_mrtt - time_delta);
	}
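	/*
	 * Illustrative example: if MRTT is 10us and only 4us have elapsed
	 * since the last command completed, the udelay() above waits out
	 * the remaining 6us before this command is issued.
	 */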

	/*
	 * Handle a non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * Ideally, this parameter should be zero or large enough to handle the
	 * maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we follow the spec and simply
	 * do not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				return -EIO;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_mpar;
		}
		mpar_count--;
	}
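	/*
	 * Illustrative example: with MPAR == 600 the platform accepts at most
	 * 600 commands in any 60s window; mpar_count is reloaded from pcc_mpar
	 * when a new window starts, and further commands are rejected with
	 * -EIO once the budget is exhausted before the window expires.
	 */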

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed so that the ensuing
	 * read()s can proceed. For WRITEs we don't care, because the actual
	 * write()s are done before coming here and the next READ or WRITE
	 * will check whether the channel is busy/free at the entry of this
	 * call.
	 *
	 * If the Minimum Request Turnaround Time is non-zero, we need to
	 * record the completion time of both READ and WRITE commands for
	 * proper handling of MRTT, so we check for pcc_mrtt in addition to
	 * CMD_READ.
	 */
	if (cmd == CMD_READ || pcc_mrtt) {
		ret = check_pcc_chan();
		if (pcc_mrtt)
			last_cmd_cmpl_time = ktime_get();
	}

	mbox_client_txdone(pcc_channel, ret);
	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr)
			continue;

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * that the OS and the platform communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply,
		 * so add an arbitrary amount of wait on top of the
		 * nominal latency.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
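		/*
		 * For example, a nominal latency of 40us gives a polling
		 * deadline of 500 * 40us = 20ms in check_pcc_chan().
		 */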
		pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_mpar = cppc_ss->max_access_rate;

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set the flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *			{
 *			17,
 *			NumEntries
 *			1,
 *			// Revision
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *			// Highest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *			// Nominal Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *			// Lowest Nonlinear Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *			// Lowest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *			// Guaranteed Performance Register
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *			// Desired Performance Register
 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *			..
 *			..
 *			..
 *
 *		}
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,
 *		AddressSpaceKeyword
 *		8,
 *		//RegisterBitWidth
 *		8,
 *		//RegisterBitOffset
 *		0x30,
 *		//RegisterAddress
 *		9
 *		//AccessSize (subspace ID)
 *		0
 *		)
 *	}
 */

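/*
 * Note on the PCC encoding above (descriptive only): for Register() entries
 * in the PCC address space, acpi_cppc_processor_probe() below takes the
 * AccessSize field (gas_t->access_width) as the PCC subspace ID and expects
 * it to be the same for every PCC register in the _CPC package.
 */
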
/**
 * acpi_cppc_processor_probe - Search for per-CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Plug it into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
 * be as fast as possible. We have already mapped the PCC subspace during init,
 * so we can directly write to it.
 */

77e3d86f | 648 | static int cpc_read(struct cpc_reg *reg, u64 *val) |
337aadff | 649 | { |
77e3d86f PP |
650 | int ret_val = 0; |
651 | ||
652 | *val = 0; | |
653 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | |
654 | void __iomem *vaddr = GET_PCC_VADDR(reg->address); | |
337aadff | 655 | |
77e3d86f PP |
656 | switch (reg->bit_width) { |
657 | case 8: | |
beee23ae | 658 | *val = readb_relaxed(vaddr); |
77e3d86f PP |
659 | break; |
660 | case 16: | |
beee23ae | 661 | *val = readw_relaxed(vaddr); |
77e3d86f PP |
662 | break; |
663 | case 32: | |
beee23ae | 664 | *val = readl_relaxed(vaddr); |
77e3d86f PP |
665 | break; |
666 | case 64: | |
beee23ae | 667 | *val = readq_relaxed(vaddr); |
77e3d86f PP |
668 | break; |
669 | default: | |
670 | pr_debug("Error: Cannot read %u bit width from PCC\n", | |
671 | reg->bit_width); | |
672 | ret_val = -EFAULT; | |
673 | } | |
674 | } else | |
675 | ret_val = acpi_os_read_memory((acpi_physical_address)reg->address, | |
676 | val, reg->bit_width); | |
677 | return ret_val; | |
337aadff AC |
678 | } |
679 | ||
77e3d86f | 680 | static int cpc_write(struct cpc_reg *reg, u64 val) |
337aadff | 681 | { |
77e3d86f PP |
682 | int ret_val = 0; |
683 | ||
684 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | |
685 | void __iomem *vaddr = GET_PCC_VADDR(reg->address); | |
337aadff | 686 | |
77e3d86f PP |
687 | switch (reg->bit_width) { |
688 | case 8: | |
beee23ae | 689 | writeb_relaxed(val, vaddr); |
77e3d86f PP |
690 | break; |
691 | case 16: | |
beee23ae | 692 | writew_relaxed(val, vaddr); |
77e3d86f PP |
693 | break; |
694 | case 32: | |
beee23ae | 695 | writel_relaxed(val, vaddr); |
77e3d86f PP |
696 | break; |
697 | case 64: | |
beee23ae | 698 | writeq_relaxed(val, vaddr); |
77e3d86f PP |
699 | break; |
700 | default: | |
701 | pr_debug("Error: Cannot write %u bit width to PCC\n", | |
702 | reg->bit_width); | |
703 | ret_val = -EFAULT; | |
704 | break; | |
705 | } | |
706 | } else | |
707 | ret_val = acpi_os_write_memory((acpi_physical_address)reg->address, | |
708 | val, reg->bit_width); | |
709 | return ret_val; | |
337aadff AC |
710 | } |
711 | ||
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
								 *nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
			(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

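	/*
	 * Report deltas since the last call; a caller can then estimate the
	 * average delivered performance over that interval, e.g. as
	 * reference_perf * delivered / reference (illustrative formula, not
	 * computed here).
	 */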
	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/* If this is a PCC reg, check if the channel is free before writing */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		ret = check_pcc_chan();
		if (ret)
			goto busy_channel;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so the remote can get our perf request. */
		if (send_pcc_cmd(CMD_WRITE) < 0)
			ret = -EIO;
	}
busy_channel:
	spin_unlock(&pcc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);