Commit | Line | Data |
---|---|---|
b886d83c | 1 | // SPDX-License-Identifier: GPL-2.0-only |
337aadff AC |
2 | /* |
3 | * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers. | |
4 | * | |
5 | * (C) Copyright 2014, 2015 Linaro Ltd. | |
6 | * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> | |
7 | * | |
337aadff AC |
8 | * CPPC describes a few methods for controlling CPU performance using |
9 | * information from a per CPU table called CPC. This table is described in | |
10 | * the ACPI v5.0+ specification. The table consists of a list of | |
11 | * registers which may be memory mapped or hardware registers and also may | |
12 | * include some static integer values. | |
13 | * | |
14 | * CPU performance is on an abstract continuous scale as against a discretized | |
15 | * P-state scale which is tied to CPU frequency only. In brief, the basic | |
16 | * operation involves: | |
17 | * | |
18 | * - OS makes a CPU performance request. (Can provide min and max bounds) | |
19 | * | |
20 | * - Platform (such as BMC) is free to optimize request within requested bounds | |
21 | * depending on power/thermal budgets etc. | |
22 | * | |
23 | * - Platform conveys its decision back to OS | |
24 | * | |
25 | * The communication between OS and platform occurs through another medium | |
26 | * called the Platform Communication Channel (PCC). This is a generic mailbox-like | |
27 | * mechanism which includes doorbell semantics to indicate register updates. | |
28 | * See drivers/mailbox/pcc.c for details on PCC. | |
29 | * | |
30 | * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and | |
31 | * above specifications. | |
32 | */ | |
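/*
 * A minimal usage sketch (illustrative only, not part of this driver):
 * how a CPUfreq driver might drive the interfaces exported below, assuming
 * it already knows the target CPU number. Only functions defined in this
 * file are used (cppc_get_perf_caps, cppc_set_perf, cppc_get_perf_ctrs).
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *	struct cppc_perf_fb_ctrs fb_ctrs = {0};
 *	int cpu = 0;	// hypothetical target CPU
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		// Request the highest advertised performance level.
 *		ctrls.desired_perf = caps.highest_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 *
 *	// Later, sample the feedback counters to estimate delivered performance.
 *	if (!cppc_get_perf_ctrs(cpu, &fb_ctrs))
 *		pr_debug("ref:%llu del:%llu\n", fb_ctrs.reference, fb_ctrs.delivered);
 */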
33 | ||
34 | #define pr_fmt(fmt) "ACPI CPPC: " fmt | |
35 | ||
36 | #include <linux/cpufreq.h> | |
37 | #include <linux/delay.h> | |
58e1c035 | 38 | #include <linux/iopoll.h> |
ad62e1e6 | 39 | #include <linux/ktime.h> |
80b8286a PP |
40 | #include <linux/rwsem.h> |
41 | #include <linux/wait.h> | |
337aadff AC |
42 | |
43 | #include <acpi/cppc_acpi.h> | |
80b8286a | 44 | |
8482ef8c PP |
45 | struct cppc_pcc_data { |
46 | struct mbox_chan *pcc_channel; | |
47 | void __iomem *pcc_comm_addr; | |
8482ef8c | 48 | bool pcc_channel_acquired; |
58e1c035 | 49 | unsigned int deadline_us; |
8482ef8c | 50 | unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; |
80b8286a | 51 | |
8482ef8c | 52 | bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ |
139aee73 | 53 | bool platform_owns_pcc; /* Ownership of PCC subspace */ |
8482ef8c | 54 | unsigned int pcc_write_cnt; /* Running count of PCC write commands */ |
80b8286a | 55 | |
8482ef8c PP |
56 | /* |
57 | * Lock to provide controlled access to the PCC channel. | |
58 | * | |
59 | * For performance-critical use cases (currently cppc_set_perf), | |
60 | * we need to take the read_lock and check if the channel belongs to OSPM | |
61 | * before reading or writing to the PCC subspace. | |
62 | * We need to take the write_lock before transferring the channel | |
63 | * ownership to the platform via a doorbell. | |
64 | * This allows us to batch a number of CPPC requests if they happen | |
65 | * to originate at about the same time. | |
66 | * | |
67 | * For non-performance-critical use cases (init), | |
68 | * take the write_lock for all purposes, which gives exclusive access. | |
69 | */ | |
70 | struct rw_semaphore pcc_lock; | |
71 | ||
72 | /* Wait queue for CPUs whose requests were batched */ | |
73 | wait_queue_head_t pcc_write_wait_q; | |
85b1407b GC |
74 | ktime_t last_cmd_cmpl_time; |
75 | ktime_t last_mpar_reset; | |
76 | int mpar_count; | |
77 | int refcount; | |
8482ef8c | 78 | }; |
80b8286a | 79 | |
603fadf3 | 80 | /* Array to represent the PCC channel per subspace ID */ |
85b1407b | 81 | static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES]; |
603fadf3 | 82 | /* The cpu_pcc_subspace_idx contains per CPU subspace ID */ |
85b1407b | 83 | static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx); |
337aadff AC |
84 | |
85 | /* | |
86 | * The cpc_desc structure contains the ACPI register details | |
87 | * as described in the per CPU _CPC tables. The details | |
88 | * include the type of register (e.g. PCC, System IO, FFH etc.) | |
89 | * and destination addresses which lets us READ/WRITE CPU performance | |
90 | * information using the appropriate I/O methods. | |
91 | */ | |
92 | static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); | |
93 | ||
77e3d86f | 94 | /* pcc mapped address + header size + offset within PCC subspace */ |
85b1407b GC |
95 | #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \ |
96 | 0x8 + (offs)) | |
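/*
 * For example (illustrative values): a _CPC entry declared as
 * Register(PCC, 32, 0, 0x120, 2) names offset 0x120 within PCC subspace 2,
 * so GET_PCC_VADDR(0x120, 2) evaluates to
 * pcc_data[2]->pcc_comm_addr + 0x8 + 0x120, where the 0x8 skips the
 * generic PCC shared-memory header (signature, command and status).
 */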
77e3d86f | 97 | |
ad61dd30 | 98 | /* Check if a CPC register is in PCC */ |
80b8286a PP |
99 | #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ |
100 | (cpc)->cpc_entry.reg.space_id == \ | |
101 | ACPI_ADR_SPACE_PLATFORM_COMM) | |
102 | ||
158c998e AC |
103 | /* Evaluates to true if reg is a NULL register descriptor */ | |
104 | #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ | |
105 | (reg)->address == 0 && \ | |
106 | (reg)->bit_width == 0 && \ | |
107 | (reg)->bit_offset == 0 && \ | |
108 | (reg)->access_width == 0) | |
109 | ||
110 | /* Evaluates to true if an optional cpc field is supported */ | |
111 | #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ | |
112 | !!(cpc)->cpc_entry.int_value : \ | |
113 | !IS_NULL_REG(&(cpc)->cpc_entry.reg)) | |
337aadff AC |
114 | /* |
115 | * Arbitrary Retries in case the remote processor is slow to respond | |
ad62e1e6 AC |
116 | * to PCC commands. Keeping it high enough to cover emulators where |
117 | * the processors run painfully slowly. | |
337aadff | 118 | */ |
b52f4511 | 119 | #define NUM_RETRIES 500ULL |
337aadff | 120 | |
158c998e AC |
121 | struct cppc_attr { |
122 | struct attribute attr; | |
123 | ssize_t (*show)(struct kobject *kobj, | |
124 | struct attribute *attr, char *buf); | |
125 | ssize_t (*store)(struct kobject *kobj, | |
126 | struct attribute *attr, const char *c, ssize_t count); | |
127 | }; | |
128 | ||
129 | #define define_one_cppc_ro(_name) \ | |
130 | static struct cppc_attr _name = \ | |
131 | __ATTR(_name, 0444, show_##_name, NULL) | |
132 | ||
133 | #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) | |
134 | ||
2c74d847 PP |
135 | #define show_cppc_data(access_fn, struct_name, member_name) \ |
136 | static ssize_t show_##member_name(struct kobject *kobj, \ | |
137 | struct attribute *attr, char *buf) \ | |
138 | { \ | |
139 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \ | |
140 | struct struct_name st_name = {0}; \ | |
141 | int ret; \ | |
142 | \ | |
143 | ret = access_fn(cpc_ptr->cpu_id, &st_name); \ | |
144 | if (ret) \ | |
145 | return ret; \ | |
146 | \ | |
147 | return scnprintf(buf, PAGE_SIZE, "%llu\n", \ | |
148 | (u64)st_name.member_name); \ | |
149 | } \ | |
150 | define_one_cppc_ro(member_name) | |
151 | ||
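/*
 * For example, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf)
 * below expands to a show_highest_perf() helper that calls cppc_get_perf_caps()
 * and prints the highest_perf member, and define_one_cppc_ro() then creates the
 * matching read-only "highest_perf" attribute.
 */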
152 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf); | |
153 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf); | |
154 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf); | |
155 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf); | |
4773e77c PP |
156 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq); |
157 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); | |
158 | ||
2c74d847 PP |
159 | show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); |
160 | show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); | |
161 | ||
158c998e AC |
162 | static ssize_t show_feedback_ctrs(struct kobject *kobj, |
163 | struct attribute *attr, char *buf) | |
164 | { | |
165 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); | |
166 | struct cppc_perf_fb_ctrs fb_ctrs = {0}; | |
2c74d847 | 167 | int ret; |
158c998e | 168 | |
2c74d847 PP |
169 | ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); |
170 | if (ret) | |
171 | return ret; | |
158c998e AC |
172 | |
173 | return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n", | |
174 | fb_ctrs.reference, fb_ctrs.delivered); | |
175 | } | |
176 | define_one_cppc_ro(feedback_ctrs); | |
177 | ||
158c998e AC |
178 | static struct attribute *cppc_attrs[] = { |
179 | &feedback_ctrs.attr, | |
180 | &reference_perf.attr, | |
181 | &wraparound_time.attr, | |
2c74d847 PP |
182 | &highest_perf.attr, |
183 | &lowest_perf.attr, | |
184 | &lowest_nonlinear_perf.attr, | |
185 | &nominal_perf.attr, | |
4773e77c PP |
186 | &nominal_freq.attr, |
187 | &lowest_freq.attr, | |
158c998e AC |
188 | NULL |
189 | }; | |
190 | ||
191 | static struct kobj_type cppc_ktype = { | |
192 | .sysfs_ops = &kobj_sysfs_ops, | |
193 | .default_attrs = cppc_attrs, | |
194 | }; | |
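/*
 * Once acpi_cppc_processor_probe() registers this ktype via
 * kobject_init_and_add() below, the attributes above appear as read-only
 * files in an "acpi_cppc" directory under each CPU device, typically
 * /sys/devices/system/cpu/cpuN/acpi_cppc/ (e.g. highest_perf, lowest_perf,
 * nominal_perf and feedback_ctrs).
 */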
195 | ||
85b1407b | 196 | static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit) |
ad62e1e6 | 197 | { |
58e1c035 | 198 | int ret, status; |
85b1407b GC |
199 | struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; |
200 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = | |
201 | pcc_ss_data->pcc_comm_addr; | |
ad62e1e6 | 202 | |
85b1407b | 203 | if (!pcc_ss_data->platform_owns_pcc) |
139aee73 PP |
204 | return 0; |
205 | ||
58e1c035 PP |
206 | /* |
207 | * Poll the PCC status register every 3 us (delay_us) for a maximum of | |
208 | * deadline_us (timeout_us) until the PCC command complete bit is set (cond) | |
209 | */ | |
210 | ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status, | |
211 | status & PCC_CMD_COMPLETE_MASK, 3, | |
212 | pcc_ss_data->deadline_us); | |
ad62e1e6 | 213 | |
58e1c035 | 214 | if (likely(!ret)) { |
85b1407b | 215 | pcc_ss_data->platform_owns_pcc = false; |
58e1c035 PP |
216 | if (chk_err_bit && (status & PCC_ERROR_MASK)) |
217 | ret = -EIO; | |
218 | } | |
219 | ||
220 | if (unlikely(ret)) | |
221 | pr_err("PCC check channel failed for ss: %d. ret=%d\n", | |
222 | pcc_ss_id, ret); | |
139aee73 | 223 | |
ad62e1e6 AC |
224 | return ret; |
225 | } | |
226 | ||
80b8286a PP |
227 | /* |
228 | * This function transfers the ownership of the PCC to the platform, | |
229 | * so it must be called while holding write_lock(pcc_lock). | |
230 | */ | |
85b1407b | 231 | static int send_pcc_cmd(int pcc_ss_id, u16 cmd) |
337aadff | 232 | { |
80b8286a | 233 | int ret = -EIO, i; |
85b1407b | 234 | struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; |
337aadff | 235 | struct acpi_pcct_shared_memory *generic_comm_base = |
85b1407b | 236 | (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr; |
f387e5b9 | 237 | unsigned int time_delta; |
337aadff | 238 | |
ad62e1e6 AC |
239 | /* |
240 | * For CMD_WRITE we know for a fact the caller should have checked | |
241 | * the channel before writing to PCC space | |
242 | */ | |
243 | if (cmd == CMD_READ) { | |
80b8286a PP |
244 | /* |
245 | * If there are pending cpc_writes, then we stole the channel | |
246 | * before write completion, so first send a WRITE command to | |
247 | * platform | |
248 | */ | |
85b1407b GC |
249 | if (pcc_ss_data->pending_pcc_write_cmd) |
250 | send_pcc_cmd(pcc_ss_id, CMD_WRITE); | |
80b8286a | 251 | |
85b1407b | 252 | ret = check_pcc_chan(pcc_ss_id, false); |
ad62e1e6 | 253 | if (ret) |
80b8286a PP |
254 | goto end; |
255 | } else /* CMD_WRITE */ | |
85b1407b | 256 | pcc_ss_data->pending_pcc_write_cmd = FALSE; |
337aadff | 257 | |
f387e5b9 PP |
258 | /* |
259 | * Handle the Minimum Request Turnaround Time(MRTT) | |
260 | * "The minimum amount of time that OSPM must wait after the completion | |
261 | * of a command before issuing the next command, in microseconds" | |
262 | */ | |
85b1407b GC |
263 | if (pcc_ss_data->pcc_mrtt) { |
264 | time_delta = ktime_us_delta(ktime_get(), | |
265 | pcc_ss_data->last_cmd_cmpl_time); | |
266 | if (pcc_ss_data->pcc_mrtt > time_delta) | |
267 | udelay(pcc_ss_data->pcc_mrtt - time_delta); | |
f387e5b9 PP |
268 | } |
269 | ||
270 | /* | |
271 | * Handle the non-zero Maximum Periodic Access Rate(MPAR) | |
272 | * "The maximum number of periodic requests that the subspace channel can | |
273 | * support, reported in commands per minute. 0 indicates no limitation." | |
274 | * | |
275 | * This parameter should ideally be zero or large enough so that it can | |
276 | * handle the maximum number of requests that all the cores in the system can | |
277 | * collectively generate. If it is not, we will follow the spec and just | |
278 | * not send the request to the platform after hitting the MPAR limit in | |
279 | * any 60s window. | |
280 | */ | |
85b1407b GC |
281 | if (pcc_ss_data->pcc_mpar) { |
282 | if (pcc_ss_data->mpar_count == 0) { | |
283 | time_delta = ktime_ms_delta(ktime_get(), | |
284 | pcc_ss_data->last_mpar_reset); | |
285 | if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) { | |
d29abc83 GC |
286 | pr_debug("PCC cmd for subspace %d not sent due to MPAR limit", |
287 | pcc_ss_id); | |
80b8286a PP |
288 | ret = -EIO; |
289 | goto end; | |
f387e5b9 | 290 | } |
85b1407b GC |
291 | pcc_ss_data->last_mpar_reset = ktime_get(); |
292 | pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar; | |
f387e5b9 | 293 | } |
85b1407b | 294 | pcc_ss_data->mpar_count--; |
f387e5b9 PP |
295 | } |
296 | ||
337aadff | 297 | /* Write to the shared comm region. */ |
beee23ae | 298 | writew_relaxed(cmd, &generic_comm_base->command); |
337aadff AC |
299 | |
300 | /* Flip CMD COMPLETE bit */ | |
beee23ae | 301 | writew_relaxed(0, &generic_comm_base->status); |
337aadff | 302 | |
85b1407b | 303 | pcc_ss_data->platform_owns_pcc = true; |
139aee73 | 304 | |
337aadff | 305 | /* Ring doorbell */ |
85b1407b | 306 | ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd); |
ad62e1e6 | 307 | if (ret < 0) { |
d29abc83 GC |
308 | pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n", |
309 | pcc_ss_id, cmd, ret); | |
80b8286a | 310 | goto end; |
337aadff AC |
311 | } |
312 | ||
139aee73 | 313 | /* Wait for completion and check for PCC error bit */ |
85b1407b | 314 | ret = check_pcc_chan(pcc_ss_id, true); |
139aee73 | 315 | |
85b1407b GC |
316 | if (pcc_ss_data->pcc_mrtt) |
317 | pcc_ss_data->last_cmd_cmpl_time = ktime_get(); | |
337aadff | 318 | |
85b1407b GC |
319 | if (pcc_ss_data->pcc_channel->mbox->txdone_irq) |
320 | mbox_chan_txdone(pcc_ss_data->pcc_channel, ret); | |
b59c4b3d | 321 | else |
85b1407b | 322 | mbox_client_txdone(pcc_ss_data->pcc_channel, ret); |
80b8286a PP |
323 | |
324 | end: | |
325 | if (cmd == CMD_WRITE) { | |
326 | if (unlikely(ret)) { | |
327 | for_each_possible_cpu(i) { | |
328 | struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); | |
329 | if (!desc) | |
330 | continue; | |
331 | ||
85b1407b | 332 | if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt) |
80b8286a PP |
333 | desc->write_cmd_status = ret; |
334 | } | |
335 | } | |
85b1407b GC |
336 | pcc_ss_data->pcc_write_cnt++; |
337 | wake_up_all(&pcc_ss_data->pcc_write_wait_q); | |
80b8286a PP |
338 | } |
339 | ||
ad62e1e6 | 340 | return ret; |
337aadff AC |
341 | } |
342 | ||
343 | static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret) | |
344 | { | |
ad62e1e6 | 345 | if (ret < 0) |
337aadff AC |
346 | pr_debug("TX did not complete: CMD sent:%x, ret:%d\n", |
347 | *(u16 *)msg, ret); | |
348 | else | |
349 | pr_debug("TX completed. CMD sent:%x, ret:%d\n", | |
350 | *(u16 *)msg, ret); | |
351 | } | |
352 | ||
353 | struct mbox_client cppc_mbox_cl = { | |
354 | .tx_done = cppc_chan_tx_done, | |
355 | .knows_txdone = true, | |
356 | }; | |
357 | ||
358 | static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) | |
359 | { | |
360 | int result = -EFAULT; | |
361 | acpi_status status = AE_OK; | |
362 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | |
363 | struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"}; | |
364 | struct acpi_buffer state = {0, NULL}; | |
365 | union acpi_object *psd = NULL; | |
366 | struct acpi_psd_package *pdomain; | |
367 | ||
368 | status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer, | |
369 | ACPI_TYPE_PACKAGE); | |
370 | if (ACPI_FAILURE(status)) | |
371 | return -ENODEV; | |
372 | ||
373 | psd = buffer.pointer; | |
374 | if (!psd || psd->package.count != 1) { | |
375 | pr_debug("Invalid _PSD data\n"); | |
376 | goto end; | |
377 | } | |
378 | ||
379 | pdomain = &(cpc_ptr->domain_info); | |
380 | ||
381 | state.length = sizeof(struct acpi_psd_package); | |
382 | state.pointer = pdomain; | |
383 | ||
384 | status = acpi_extract_package(&(psd->package.elements[0]), | |
385 | &format, &state); | |
386 | if (ACPI_FAILURE(status)) { | |
387 | pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id); | |
388 | goto end; | |
389 | } | |
390 | ||
391 | if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { | |
392 | pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id); | |
393 | goto end; | |
394 | } | |
395 | ||
396 | if (pdomain->revision != ACPI_PSD_REV0_REVISION) { | |
397 | pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id); | |
398 | goto end; | |
399 | } | |
400 | ||
401 | if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && | |
402 | pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && | |
403 | pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { | |
404 | pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id); | |
405 | goto end; | |
406 | } | |
407 | ||
408 | result = 0; | |
409 | end: | |
410 | kfree(buffer.pointer); | |
411 | return result; | |
412 | } | |
413 | ||
414 | /** | |
415 | * acpi_get_psd_map - Map the CPUs in a common freq domain. | |
416 | * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info. | |
417 | * | |
418 | * Return: 0 for success or negative value for err. | |
419 | */ | |
41dd6403 | 420 | int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) |
337aadff AC |
421 | { |
422 | int count_target; | |
423 | int retval = 0; | |
424 | unsigned int i, j; | |
425 | cpumask_var_t covered_cpus; | |
41dd6403 | 426 | struct cppc_cpudata *pr, *match_pr; |
337aadff AC |
427 | struct acpi_psd_package *pdomain; |
428 | struct acpi_psd_package *match_pdomain; | |
429 | struct cpc_desc *cpc_ptr, *match_cpc_ptr; | |
430 | ||
431 | if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) | |
432 | return -ENOMEM; | |
433 | ||
434 | /* | |
603fadf3 | 435 | * Now that we have _PSD data from all CPUs, let's setup P-state |
337aadff AC |
436 | * domain info. |
437 | */ | |
438 | for_each_possible_cpu(i) { | |
439 | pr = all_cpu_data[i]; | |
440 | if (!pr) | |
441 | continue; | |
442 | ||
443 | if (cpumask_test_cpu(i, covered_cpus)) | |
444 | continue; | |
445 | ||
446 | cpc_ptr = per_cpu(cpc_desc_ptr, i); | |
8343c40d HT |
447 | if (!cpc_ptr) { |
448 | retval = -EFAULT; | |
449 | goto err_ret; | |
450 | } | |
337aadff AC |
451 | |
452 | pdomain = &(cpc_ptr->domain_info); | |
453 | cpumask_set_cpu(i, pr->shared_cpu_map); | |
454 | cpumask_set_cpu(i, covered_cpus); | |
455 | if (pdomain->num_processors <= 1) | |
456 | continue; | |
457 | ||
458 | /* Validate the Domain info */ | |
459 | count_target = pdomain->num_processors; | |
460 | if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) | |
461 | pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; | |
462 | else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) | |
463 | pr->shared_type = CPUFREQ_SHARED_TYPE_HW; | |
464 | else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) | |
465 | pr->shared_type = CPUFREQ_SHARED_TYPE_ANY; | |
466 | ||
467 | for_each_possible_cpu(j) { | |
468 | if (i == j) | |
469 | continue; | |
470 | ||
471 | match_cpc_ptr = per_cpu(cpc_desc_ptr, j); | |
8343c40d HT |
472 | if (!match_cpc_ptr) { |
473 | retval = -EFAULT; | |
474 | goto err_ret; | |
475 | } | |
337aadff AC |
476 | |
477 | match_pdomain = &(match_cpc_ptr->domain_info); | |
478 | if (match_pdomain->domain != pdomain->domain) | |
479 | continue; | |
480 | ||
481 | /* Here i and j are in the same domain */ | |
482 | if (match_pdomain->num_processors != count_target) { | |
483 | retval = -EFAULT; | |
484 | goto err_ret; | |
485 | } | |
486 | ||
487 | if (pdomain->coord_type != match_pdomain->coord_type) { | |
488 | retval = -EFAULT; | |
489 | goto err_ret; | |
490 | } | |
491 | ||
492 | cpumask_set_cpu(j, covered_cpus); | |
493 | cpumask_set_cpu(j, pr->shared_cpu_map); | |
494 | } | |
495 | ||
496 | for_each_possible_cpu(j) { | |
497 | if (i == j) | |
498 | continue; | |
499 | ||
500 | match_pr = all_cpu_data[j]; | |
501 | if (!match_pr) | |
502 | continue; | |
503 | ||
504 | match_cpc_ptr = per_cpu(cpc_desc_ptr, j); | |
8343c40d HT |
505 | if (!match_cpc_ptr) { |
506 | retval = -EFAULT; | |
507 | goto err_ret; | |
508 | } | |
337aadff AC |
509 | |
510 | match_pdomain = &(match_cpc_ptr->domain_info); | |
511 | if (match_pdomain->domain != pdomain->domain) | |
512 | continue; | |
513 | ||
514 | match_pr->shared_type = pr->shared_type; | |
515 | cpumask_copy(match_pr->shared_cpu_map, | |
516 | pr->shared_cpu_map); | |
517 | } | |
518 | } | |
519 | ||
520 | err_ret: | |
521 | for_each_possible_cpu(i) { | |
522 | pr = all_cpu_data[i]; | |
523 | if (!pr) | |
524 | continue; | |
525 | ||
526 | /* Assume no coordination on any error parsing domain info */ | |
527 | if (retval) { | |
528 | cpumask_clear(pr->shared_cpu_map); | |
529 | cpumask_set_cpu(i, pr->shared_cpu_map); | |
530 | pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; | |
531 | } | |
532 | } | |
533 | ||
534 | free_cpumask_var(covered_cpus); | |
535 | return retval; | |
536 | } | |
537 | EXPORT_SYMBOL_GPL(acpi_get_psd_map); | |
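/*
 * As an illustration: if two CPUs expose _PSD packages with the same domain,
 * num_processors = 2 and coord_type = SW_ANY, both CPUs end up with a
 * shared_cpu_map containing the pair and shared_type set to
 * CPUFREQ_SHARED_TYPE_ANY. On any parsing inconsistency, each CPU falls back
 * to a map containing only itself with CPUFREQ_SHARED_TYPE_ALL.
 */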
538 | ||
85b1407b | 539 | static int register_pcc_channel(int pcc_ss_idx) |
337aadff | 540 | { |
d29d6735 | 541 | struct acpi_pcct_hw_reduced *cppc_ss; |
ad62e1e6 | 542 | u64 usecs_lat; |
337aadff | 543 | |
85b1407b GC |
544 | if (pcc_ss_idx >= 0) { |
545 | pcc_data[pcc_ss_idx]->pcc_channel = | |
546 | pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx); | |
337aadff | 547 | |
85b1407b | 548 | if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) { |
d29abc83 GC |
549 | pr_err("Failed to find PCC channel for subspace %d\n", |
550 | pcc_ss_idx); | |
337aadff AC |
551 | return -ENODEV; |
552 | } | |
553 | ||
554 | /* | |
555 | * The PCC mailbox controller driver should | |
556 | * have parsed the PCCT (global table of all | |
557 | * PCC channels) and stored pointers to the | |
558 | * subspace communication region in con_priv. | |
559 | */ | |
85b1407b | 560 | cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv; |
337aadff AC |
561 | |
562 | if (!cppc_ss) { | |
d29abc83 GC |
563 | pr_err("No PCC subspace found for %d CPPC\n", |
564 | pcc_ss_idx); | |
337aadff AC |
565 | return -ENODEV; |
566 | } | |
567 | ||
ad62e1e6 AC |
568 | /* |
569 | * cppc_ss->latency is just a Nominal value. In reality | |
570 | * the remote processor could be much slower to reply. | |
571 | * So add an arbitrary amount of wait on top of Nominal. | |
572 | */ | |
573 | usecs_lat = NUM_RETRIES * cppc_ss->latency; | |
58e1c035 | 574 | pcc_data[pcc_ss_idx]->deadline_us = usecs_lat; |
85b1407b GC |
575 | pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time; |
576 | pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate; | |
577 | pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency; | |
578 | ||
579 | pcc_data[pcc_ss_idx]->pcc_comm_addr = | |
580 | acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); | |
581 | if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) { | |
d29abc83 GC |
582 | pr_err("Failed to ioremap PCC comm region mem for %d\n", |
583 | pcc_ss_idx); | |
337aadff AC |
584 | return -ENOMEM; |
585 | } | |
586 | ||
603fadf3 | 587 | /* Set flag so that we don't come here for each CPU. */ |
85b1407b | 588 | pcc_data[pcc_ss_idx]->pcc_channel_acquired = true; |
337aadff AC |
589 | } |
590 | ||
591 | return 0; | |
592 | } | |
593 | ||
a6cbcdd5 SP |
594 | /** |
595 | * cpc_ffh_supported() - check if FFH reading supported | |
596 | * | |
597 | * Check if the architecture has support for functional fixed hardware | |
598 | * read/write capability. | |
599 | * | |
600 | * Return: true for supported, false for not supported | |
601 | */ | |
602 | bool __weak cpc_ffh_supported(void) | |
603 | { | |
604 | return false; | |
605 | } | |
606 | ||
85b1407b GC |
607 | /** |
608 | * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace | |
609 | * | |
610 | * Check and allocate the cppc_pcc_data memory. | |
611 | * In some processor configurations it is possible that the same subspace | |
603fadf3 | 612 | * is shared between multiple CPUs. This is seen especially in CPUs |
85b1407b GC |
613 | * with hardware multi-threading support. |
614 | * | |
615 | * Return: 0 for success, errno for failure | |
616 | */ | |
617 | int pcc_data_alloc(int pcc_ss_id) | |
618 | { | |
619 | if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES) | |
620 | return -EINVAL; | |
621 | ||
622 | if (pcc_data[pcc_ss_id]) { | |
623 | pcc_data[pcc_ss_id]->refcount++; | |
624 | } else { | |
625 | pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data), | |
626 | GFP_KERNEL); | |
627 | if (!pcc_data[pcc_ss_id]) | |
628 | return -ENOMEM; | |
629 | pcc_data[pcc_ss_id]->refcount++; | |
630 | } | |
631 | ||
632 | return 0; | |
633 | } | |
4773e77c PP |
634 | |
635 | /* Check if CPPC revision + num_ent combination is supported */ | |
636 | static bool is_cppc_supported(int revision, int num_ent) | |
637 | { | |
638 | int expected_num_ent; | |
639 | ||
640 | switch (revision) { | |
641 | case CPPC_V2_REV: | |
642 | expected_num_ent = CPPC_V2_NUM_ENT; | |
643 | break; | |
644 | case CPPC_V3_REV: | |
645 | expected_num_ent = CPPC_V3_NUM_ENT; | |
646 | break; | |
647 | default: | |
648 | pr_debug("Firmware exports unsupported CPPC revision: %d\n", | |
649 | revision); | |
650 | return false; | |
651 | } | |
652 | ||
653 | if (expected_num_ent != num_ent) { | |
654 | pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n", | |
655 | num_ent, expected_num_ent, revision); | |
656 | return false; | |
657 | } | |
658 | ||
659 | return true; | |
660 | } | |
661 | ||
337aadff AC |
662 | /* |
663 | * An example CPC table looks like the following. | |
664 | * | |
665 | * Name(_CPC, Package() | |
666 | * { | |
667 | * 17, | |
668 | * NumEntries | |
669 | * 1, | |
670 | * // Revision | |
671 | * ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)}, | |
672 | * // Highest Performance | |
673 | * ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)}, | |
674 | * // Nominal Performance | |
675 | * ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)}, | |
676 | * // Lowest Nonlinear Performance | |
677 | * ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)}, | |
678 | * // Lowest Performance | |
679 | * ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)}, | |
680 | * // Guaranteed Performance Register | |
681 | * ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)}, | |
682 | * // Desired Performance Register | |
683 | * ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)}, | |
684 | * .. | |
685 | * .. | |
686 | * .. | |
687 | * | |
688 | * } | |
689 | * Each Register() encodes how to access that specific register. | |
690 | * e.g. a sample PCC entry has the following encoding: | |
691 | * | |
692 | * Register ( | |
693 | * PCC, | |
694 | * AddressSpaceKeyword | |
695 | * 8, | |
696 | * //RegisterBitWidth | |
697 | * 8, | |
698 | * //RegisterBitOffset | |
699 | * 0x30, | |
700 | * //RegisterAddress | |
701 | * 9 | |
702 | * //AccessSize (subspace ID) | |
703 | * 0 | |
704 | * ) | |
705 | * } | |
706 | */ | |
707 | ||
708 | /** | |
709 | * acpi_cppc_processor_probe - Search for per CPU _CPC objects. | |
603fadf3 | 710 | * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
337aadff AC |
711 | * |
712 | * Return: 0 for success or negative value for err. | |
713 | */ | |
714 | int acpi_cppc_processor_probe(struct acpi_processor *pr) | |
715 | { | |
716 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | |
717 | union acpi_object *out_obj, *cpc_obj; | |
718 | struct cpc_desc *cpc_ptr; | |
719 | struct cpc_reg *gas_t; | |
158c998e | 720 | struct device *cpu_dev; |
337aadff AC |
721 | acpi_handle handle = pr->handle; |
722 | unsigned int num_ent, i, cpc_rev; | |
85b1407b | 723 | int pcc_subspace_id = -1; |
337aadff AC |
724 | acpi_status status; |
725 | int ret = -EFAULT; | |
726 | ||
603fadf3 | 727 | /* Parse the ACPI _CPC table for this CPU. */ |
337aadff AC |
728 | status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output, |
729 | ACPI_TYPE_PACKAGE); | |
730 | if (ACPI_FAILURE(status)) { | |
731 | ret = -ENODEV; | |
732 | goto out_buf_free; | |
733 | } | |
734 | ||
735 | out_obj = (union acpi_object *) output.pointer; | |
736 | ||
737 | cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL); | |
738 | if (!cpc_ptr) { | |
739 | ret = -ENOMEM; | |
740 | goto out_buf_free; | |
741 | } | |
742 | ||
743 | /* First entry is NumEntries. */ | |
744 | cpc_obj = &out_obj->package.elements[0]; | |
745 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { | |
746 | num_ent = cpc_obj->integer.value; | |
747 | } else { | |
748 | pr_debug("Unexpected entry type(%d) for NumEntries\n", | |
749 | cpc_obj->type); | |
750 | goto out_free; | |
751 | } | |
5bbb86aa AC |
752 | cpc_ptr->num_entries = num_ent; |
753 | ||
337aadff AC |
754 | /* Second entry should be revision. */ |
755 | cpc_obj = &out_obj->package.elements[1]; | |
756 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { | |
757 | cpc_rev = cpc_obj->integer.value; | |
758 | } else { | |
759 | pr_debug("Unexpected entry type(%d) for Revision\n", | |
760 | cpc_obj->type); | |
761 | goto out_free; | |
762 | } | |
4773e77c | 763 | cpc_ptr->version = cpc_rev; |
337aadff | 764 | |
4773e77c | 765 | if (!is_cppc_supported(cpc_rev, num_ent)) |
337aadff | 766 | goto out_free; |
337aadff AC |
767 | |
768 | /* Iterate through remaining entries in _CPC */ | |
769 | for (i = 2; i < num_ent; i++) { | |
770 | cpc_obj = &out_obj->package.elements[i]; | |
771 | ||
772 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { | |
773 | cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER; | |
774 | cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value; | |
775 | } else if (cpc_obj->type == ACPI_TYPE_BUFFER) { | |
776 | gas_t = (struct cpc_reg *) | |
777 | cpc_obj->buffer.pointer; | |
778 | ||
779 | /* | |
780 | * The PCC Subspace index is encoded inside | |
781 | * the CPC table entries. The same PCC index | |
782 | * will be used for all the PCC entries, | |
783 | * so extract it only once. | |
784 | */ | |
785 | if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | |
85b1407b GC |
786 | if (pcc_subspace_id < 0) { |
787 | pcc_subspace_id = gas_t->access_width; | |
788 | if (pcc_data_alloc(pcc_subspace_id)) | |
789 | goto out_free; | |
790 | } else if (pcc_subspace_id != gas_t->access_width) { | |
337aadff AC |
791 | pr_debug("Mismatched PCC ids.\n"); |
792 | goto out_free; | |
793 | } | |
5bbb86aa AC |
794 | } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
795 | if (gas_t->address) { | |
796 | void __iomem *addr; | |
797 | ||
798 | addr = ioremap(gas_t->address, gas_t->bit_width/8); | |
799 | if (!addr) | |
800 | goto out_free; | |
801 | cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; | |
802 | } | |
803 | } else { | |
a6cbcdd5 SP |
804 | if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { |
805 | /* Support only PCC, SYS MEM and FFH type regs */ | |
806 | pr_debug("Unsupported register type: %d\n", gas_t->space_id); | |
807 | goto out_free; | |
808 | } | |
337aadff AC |
809 | } |
810 | ||
811 | cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; | |
812 | memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t)); | |
813 | } else { | |
814 | pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id); | |
815 | goto out_free; | |
816 | } | |
817 | } | |
85b1407b | 818 | per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id; |
4773e77c PP |
819 | |
820 | /* | |
821 | * Initialize the remaining cpc_regs as unsupported. | |
822 | * Example: In case FW exposes CPPC v2, the below loop will initialize | |
823 | * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported | |
824 | */ | |
825 | for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) { | |
826 | cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER; | |
827 | cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0; | |
828 | } | |
829 | ||
830 | ||
337aadff AC |
831 | /* Store CPU Logical ID */ |
832 | cpc_ptr->cpu_id = pr->id; | |
833 | ||
337aadff AC |
834 | /* Parse PSD data for this CPU */ |
835 | ret = acpi_get_psd(cpc_ptr, handle); | |
836 | if (ret) | |
837 | goto out_free; | |
838 | ||
603fadf3 | 839 | /* Register PCC channel once for all PCC subspace ID. */ |
85b1407b GC |
840 | if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) { |
841 | ret = register_pcc_channel(pcc_subspace_id); | |
337aadff AC |
842 | if (ret) |
843 | goto out_free; | |
8482ef8c | 844 | |
85b1407b GC |
845 | init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock); |
846 | init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q); | |
337aadff AC |
847 | } |
848 | ||
849 | /* Everything looks okay */ | |
850 | pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); | |
851 | ||
158c998e AC |
852 | /* Add per logical CPU nodes for reading its feedback counters. */ |
853 | cpu_dev = get_cpu_device(pr->id); | |
50163475 DC |
854 | if (!cpu_dev) { |
855 | ret = -EINVAL; | |
158c998e | 856 | goto out_free; |
50163475 | 857 | } |
158c998e | 858 | |
603fadf3 | 859 | /* Plug PSD data into this CPU's CPC descriptor. */ |
28076483 RW |
860 | per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; |
861 | ||
158c998e AC |
862 | ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, |
863 | "acpi_cppc"); | |
28076483 RW |
864 | if (ret) { |
865 | per_cpu(cpc_desc_ptr, pr->id) = NULL; | |
158c998e | 866 | goto out_free; |
28076483 | 867 | } |
158c998e | 868 | |
337aadff AC |
869 | kfree(output.pointer); |
870 | return 0; | |
871 | ||
872 | out_free: | |
5bbb86aa AC |
873 | /* Free all the mapped sys mem areas for this CPU */ |
874 | for (i = 2; i < cpc_ptr->num_entries; i++) { | |
875 | void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; | |
876 | ||
877 | if (addr) | |
878 | iounmap(addr); | |
879 | } | |
337aadff AC |
880 | kfree(cpc_ptr); |
881 | ||
882 | out_buf_free: | |
883 | kfree(output.pointer); | |
884 | return ret; | |
885 | } | |
886 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe); | |
887 | ||
888 | /** | |
889 | * acpi_cppc_processor_exit - Cleanup CPC structs. | |
603fadf3 | 890 | * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
337aadff AC |
891 | * |
892 | * Return: Void | |
893 | */ | |
894 | void acpi_cppc_processor_exit(struct acpi_processor *pr) | |
895 | { | |
896 | struct cpc_desc *cpc_ptr; | |
5bbb86aa AC |
897 | unsigned int i; |
898 | void __iomem *addr; | |
85b1407b GC |
899 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id); |
900 | ||
901 | if (pcc_ss_id >=0 && pcc_data[pcc_ss_id]) { | |
902 | if (pcc_data[pcc_ss_id]->pcc_channel_acquired) { | |
903 | pcc_data[pcc_ss_id]->refcount--; | |
904 | if (!pcc_data[pcc_ss_id]->refcount) { | |
905 | pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel); | |
906 | pcc_data[pcc_ss_id]->pcc_channel_acquired = 0; | |
907 | kfree(pcc_data[pcc_ss_id]); | |
908 | } | |
909 | } | |
910 | } | |
158c998e | 911 | |
337aadff | 912 | cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); |
9e9d68da SAS |
913 | if (!cpc_ptr) |
914 | return; | |
5bbb86aa AC |
915 | |
916 | /* Free all the mapped sys mem areas for this CPU */ | |
917 | for (i = 2; i < cpc_ptr->num_entries; i++) { | |
918 | addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; | |
919 | if (addr) | |
920 | iounmap(addr); | |
921 | } | |
922 | ||
158c998e | 923 | kobject_put(&cpc_ptr->kobj); |
337aadff AC |
924 | kfree(cpc_ptr); |
925 | } | |
926 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); | |
927 | ||
a6cbcdd5 SP |
928 | /** |
929 | * cpc_read_ffh() - Read FFH register | |
603fadf3 | 930 | * @cpunum: CPU number to read |
a6cbcdd5 SP |
931 | * @reg: cppc register information |
932 | * @val: place holder for return value | |
933 | * | |
934 | * Read bit_width bits from a specified address and bit_offset | |
935 | * | |
936 | * Return: 0 for success and error code | |
937 | */ | |
938 | int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) | |
939 | { | |
940 | return -ENOTSUPP; | |
941 | } | |
942 | ||
943 | /** | |
944 | * cpc_write_ffh() - Write FFH register | |
603fadf3 | 945 | * @cpunum: CPU number to write |
a6cbcdd5 SP |
946 | * @reg: cppc register information |
947 | * @val: value to write | |
948 | * | |
949 | * Write value of bit_width bits to a specified address and bit_offset | |
950 | * | |
951 | * Return: 0 for success and error code | |
952 | */ | |
953 | int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) | |
954 | { | |
955 | return -ENOTSUPP; | |
956 | } | |
957 | ||
77e3d86f PP |
958 | /* |
959 | * Since cpc_read and cpc_write are called while holding pcc_lock, they should be | |
960 | * as fast as possible. We have already mapped the PCC subspace during init, so | |
961 | * we can directly write to it. | |
962 | */ | |
337aadff | 963 | |
a6cbcdd5 | 964 | static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) |
337aadff | 965 | { |
77e3d86f | 966 | int ret_val = 0; |
5bbb86aa | 967 | void __iomem *vaddr = 0; |
85b1407b | 968 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
5bbb86aa AC |
969 | struct cpc_reg *reg = ®_res->cpc_entry.reg; |
970 | ||
971 | if (reg_res->type == ACPI_TYPE_INTEGER) { | |
972 | *val = reg_res->cpc_entry.int_value; | |
973 | return ret_val; | |
974 | } | |
77e3d86f PP |
975 | |
976 | *val = 0; | |
1ecbd717 | 977 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) |
85b1407b | 978 | vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); |
5bbb86aa AC |
979 | else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
980 | vaddr = reg_res->sys_mem_vaddr; | |
a6cbcdd5 SP |
981 | else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) |
982 | return cpc_read_ffh(cpu, reg, val); | |
5bbb86aa AC |
983 | else |
984 | return acpi_os_read_memory((acpi_physical_address)reg->address, | |
985 | val, reg->bit_width); | |
337aadff | 986 | |
5bbb86aa | 987 | switch (reg->bit_width) { |
77e3d86f | 988 | case 8: |
beee23ae | 989 | *val = readb_relaxed(vaddr); |
77e3d86f PP |
990 | break; |
991 | case 16: | |
beee23ae | 992 | *val = readw_relaxed(vaddr); |
77e3d86f PP |
993 | break; |
994 | case 32: | |
beee23ae | 995 | *val = readl_relaxed(vaddr); |
77e3d86f PP |
996 | break; |
997 | case 64: | |
beee23ae | 998 | *val = readq_relaxed(vaddr); |
77e3d86f PP |
999 | break; |
1000 | default: | |
d29abc83 GC |
1001 | pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n", |
1002 | reg->bit_width, pcc_ss_id); | |
77e3d86f | 1003 | ret_val = -EFAULT; |
5bbb86aa AC |
1004 | } |
1005 | ||
77e3d86f | 1006 | return ret_val; |
337aadff AC |
1007 | } |
1008 | ||
a6cbcdd5 | 1009 | static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) |
337aadff | 1010 | { |
77e3d86f | 1011 | int ret_val = 0; |
5bbb86aa | 1012 | void __iomem *vaddr = 0; |
85b1407b | 1013 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
5bbb86aa | 1014 | struct cpc_reg *reg = ®_res->cpc_entry.reg; |
77e3d86f | 1015 | |
1ecbd717 | 1016 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) |
85b1407b | 1017 | vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); |
5bbb86aa AC |
1018 | else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
1019 | vaddr = reg_res->sys_mem_vaddr; | |
a6cbcdd5 SP |
1020 | else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) |
1021 | return cpc_write_ffh(cpu, reg, val); | |
5bbb86aa AC |
1022 | else |
1023 | return acpi_os_write_memory((acpi_physical_address)reg->address, | |
1024 | val, reg->bit_width); | |
337aadff | 1025 | |
5bbb86aa | 1026 | switch (reg->bit_width) { |
77e3d86f | 1027 | case 8: |
beee23ae | 1028 | writeb_relaxed(val, vaddr); |
77e3d86f PP |
1029 | break; |
1030 | case 16: | |
beee23ae | 1031 | writew_relaxed(val, vaddr); |
77e3d86f PP |
1032 | break; |
1033 | case 32: | |
beee23ae | 1034 | writel_relaxed(val, vaddr); |
77e3d86f PP |
1035 | break; |
1036 | case 64: | |
beee23ae | 1037 | writeq_relaxed(val, vaddr); |
77e3d86f PP |
1038 | break; |
1039 | default: | |
d29abc83 GC |
1040 | pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n", |
1041 | reg->bit_width, pcc_ss_id); | |
77e3d86f PP |
1042 | ret_val = -EFAULT; |
1043 | break; | |
5bbb86aa AC |
1044 | } |
1045 | ||
77e3d86f | 1046 | return ret_val; |
337aadff AC |
1047 | } |
1048 | ||
1757d05f XW |
1049 | /** |
1050 | * cppc_get_desired_perf - Get the value of desired performance register. | |
1051 | * @cpunum: CPU from which to get desired performance. | |
1052 | * @desired_perf: address of a variable to store the returned desired performance | |
1053 | * | |
1054 | * Return: 0 for success, -EIO otherwise. | |
1055 | */ | |
1056 | int cppc_get_desired_perf(int cpunum, u64 *desired_perf) | |
1057 | { | |
1058 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | |
1059 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); | |
1060 | struct cpc_register_resource *desired_reg; | |
1061 | struct cppc_pcc_data *pcc_ss_data = NULL; | |
1062 | ||
1063 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | |
1064 | ||
1065 | if (CPC_IN_PCC(desired_reg)) { | |
1066 | int ret = 0; | |
1067 | ||
1068 | if (pcc_ss_id < 0) | |
1069 | return -EIO; | |
1070 | ||
1071 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
1072 | ||
1073 | down_write(&pcc_ss_data->pcc_lock); | |
1074 | ||
1075 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) | |
1076 | cpc_read(cpunum, desired_reg, desired_perf); | |
1077 | else | |
1078 | ret = -EIO; | |
1079 | ||
1080 | up_write(&pcc_ss_data->pcc_lock); | |
1081 | ||
1082 | return ret; | |
1083 | } | |
1084 | ||
1085 | cpc_read(cpunum, desired_reg, desired_perf); | |
1086 | ||
1087 | return 0; | |
1088 | } | |
1089 | EXPORT_SYMBOL_GPL(cppc_get_desired_perf); | |
1090 | ||
337aadff | 1091 | /** |
603fadf3 | 1092 | * cppc_get_perf_caps - Get a CPU's performance capabilities. |
337aadff AC |
1093 | * @cpunum: CPU from which to get capabilities info. |
1094 | * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h | |
1095 | * | |
1096 | * Return: 0 for success with perf_caps populated else -ERRNO. | |
1097 | */ | |
1098 | int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |
1099 | { | |
1100 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | |
368520a6 | 1101 | struct cpc_register_resource *highest_reg, *lowest_reg, |
29523f09 | 1102 | *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg, |
4773e77c | 1103 | *low_freq_reg = NULL, *nom_freq_reg = NULL; |
29523f09 | 1104 | u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0; |
85b1407b | 1105 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
6fa12d58 | 1106 | struct cppc_pcc_data *pcc_ss_data = NULL; |
850d64a4 | 1107 | int ret = 0, regs_in_pcc = 0; |
337aadff | 1108 | |
6fa12d58 | 1109 | if (!cpc_desc) { |
337aadff AC |
1110 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); |
1111 | return -ENODEV; | |
1112 | } | |
1113 | ||
1114 | highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF]; | |
1115 | lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF]; | |
368520a6 PP |
1116 | lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF]; |
1117 | nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; | |
4773e77c PP |
1118 | low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ]; |
1119 | nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ]; | |
29523f09 | 1120 | guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF]; |
337aadff | 1121 | |
337aadff | 1122 | /* Are any of the regs PCC? */ |
80b8286a | 1123 | if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || |
4773e77c PP |
1124 | CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) || |
1125 | CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) { | |
6fa12d58 PP |
1126 | if (pcc_ss_id < 0) { |
1127 | pr_debug("Invalid pcc_ss_id\n"); | |
1128 | return -ENODEV; | |
1129 | } | |
1130 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
850d64a4 | 1131 | regs_in_pcc = 1; |
85b1407b | 1132 | down_write(&pcc_ss_data->pcc_lock); |
337aadff | 1133 | /* Ring doorbell once to update PCC subspace */ |
85b1407b | 1134 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) { |
337aadff AC |
1135 | ret = -EIO; |
1136 | goto out_err; | |
1137 | } | |
1138 | } | |
1139 | ||
a6cbcdd5 | 1140 | cpc_read(cpunum, highest_reg, &high); |
337aadff AC |
1141 | perf_caps->highest_perf = high; |
1142 | ||
a6cbcdd5 | 1143 | cpc_read(cpunum, lowest_reg, &low); |
337aadff AC |
1144 | perf_caps->lowest_perf = low; |
1145 | ||
368520a6 | 1146 | cpc_read(cpunum, nominal_reg, &nom); |
337aadff AC |
1147 | perf_caps->nominal_perf = nom; |
1148 | ||
edef1ef1 SP |
1149 | if (guaranteed_reg->type != ACPI_TYPE_BUFFER || |
1150 | IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { | |
1151 | perf_caps->guaranteed_perf = 0; | |
1152 | } else { | |
1153 | cpc_read(cpunum, guaranteed_reg, &guaranteed); | |
1154 | perf_caps->guaranteed_perf = guaranteed; | |
1155 | } | |
29523f09 | 1156 | |
368520a6 PP |
1157 | cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); |
1158 | perf_caps->lowest_nonlinear_perf = min_nonlinear; | |
1159 | ||
1160 | if (!high || !low || !nom || !min_nonlinear) | |
337aadff AC |
1161 | ret = -EFAULT; |
1162 | ||
4773e77c PP |
1163 | /* Read optional lowest and nominal frequencies if present */ |
1164 | if (CPC_SUPPORTED(low_freq_reg)) | |
1165 | cpc_read(cpunum, low_freq_reg, &low_f); | |
1166 | ||
1167 | if (CPC_SUPPORTED(nom_freq_reg)) | |
1168 | cpc_read(cpunum, nom_freq_reg, &nom_f); | |
1169 | ||
1170 | perf_caps->lowest_freq = low_f; | |
1171 | perf_caps->nominal_freq = nom_f; | |
1172 | ||
1173 | ||
337aadff | 1174 | out_err: |
850d64a4 | 1175 | if (regs_in_pcc) |
85b1407b | 1176 | up_write(&pcc_ss_data->pcc_lock); |
337aadff AC |
1177 | return ret; |
1178 | } | |
1179 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); | |
1180 | ||
1181 | /** | |
603fadf3 | 1182 | * cppc_get_perf_ctrs - Read a CPU's performance feedback counters. |
337aadff AC |
1183 | * @cpunum: CPU from which to read counters. |
1184 | * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h | |
1185 | * | |
1186 | * Return: 0 for success with perf_fb_ctrs populated else -ERRNO. | |
1187 | */ | |
1188 | int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | |
1189 | { | |
1190 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | |
158c998e AC |
1191 | struct cpc_register_resource *delivered_reg, *reference_reg, |
1192 | *ref_perf_reg, *ctr_wrap_reg; | |
85b1407b | 1193 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
6fa12d58 | 1194 | struct cppc_pcc_data *pcc_ss_data = NULL; |
158c998e | 1195 | u64 delivered, reference, ref_perf, ctr_wrap_time; |
850d64a4 | 1196 | int ret = 0, regs_in_pcc = 0; |
337aadff | 1197 | |
6fa12d58 | 1198 | if (!cpc_desc) { |
337aadff AC |
1199 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); |
1200 | return -ENODEV; | |
1201 | } | |
1202 | ||
1203 | delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; | |
1204 | reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; | |
158c998e AC |
1205 | ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; |
1206 | ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; | |
1207 | ||
1208 | /* | |
603fadf3 | 1209 | * If reference perf register is not supported then we should |
158c998e AC |
1210 | * use the nominal perf value |
1211 | */ | |
1212 | if (!CPC_SUPPORTED(ref_perf_reg)) | |
1213 | ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; | |
337aadff | 1214 | |
337aadff | 1215 | /* Are any of the regs PCC? */ |
158c998e AC |
1216 | if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || |
1217 | CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { | |
6fa12d58 PP |
1218 | if (pcc_ss_id < 0) { |
1219 | pr_debug("Invalid pcc_ss_id\n"); | |
1220 | return -ENODEV; | |
1221 | } | |
1222 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
85b1407b | 1223 | down_write(&pcc_ss_data->pcc_lock); |
850d64a4 | 1224 | regs_in_pcc = 1; |
337aadff | 1225 | /* Ring doorbell once to update PCC subspace */ |
85b1407b | 1226 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) { |
337aadff AC |
1227 | ret = -EIO; |
1228 | goto out_err; | |
1229 | } | |
1230 | } | |
1231 | ||
a6cbcdd5 SP |
1232 | cpc_read(cpunum, delivered_reg, &delivered); |
1233 | cpc_read(cpunum, reference_reg, &reference); | |
1234 | cpc_read(cpunum, ref_perf_reg, &ref_perf); | |
158c998e AC |
1235 | |
1236 | /* | |
1237 | * Per spec, if ctr_wrap_time optional register is unsupported, then the | |
1238 | * performance counters are assumed to never wrap during the lifetime of | |
1239 | * the platform. | |
1240 | */ | |
1241 | ctr_wrap_time = (u64)(~((u64)0)); | |
1242 | if (CPC_SUPPORTED(ctr_wrap_reg)) | |
a6cbcdd5 | 1243 | cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time); |
337aadff | 1244 | |
158c998e | 1245 | if (!delivered || !reference || !ref_perf) { |
337aadff AC |
1246 | ret = -EFAULT; |
1247 | goto out_err; | |
1248 | } | |
1249 | ||
1250 | perf_fb_ctrs->delivered = delivered; | |
1251 | perf_fb_ctrs->reference = reference; | |
158c998e | 1252 | perf_fb_ctrs->reference_perf = ref_perf; |
2c74d847 | 1253 | perf_fb_ctrs->wraparound_time = ctr_wrap_time; |
337aadff | 1254 | out_err: |
850d64a4 | 1255 | if (regs_in_pcc) |
85b1407b | 1256 | up_write(&pcc_ss_data->pcc_lock); |
337aadff AC |
1257 | return ret; |
1258 | } | |
1259 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); | |
1260 | ||
1261 | /** | |
603fadf3 | 1262 | * cppc_set_perf - Set a CPU's performance controls. |
337aadff AC |
1263 | * @cpu: CPU for which to set performance controls. |
1264 | * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h | |
1265 | * | |
1266 | * Return: 0 for success, -ERRNO otherwise. | |
1267 | */ | |
1268 | int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |
1269 | { | |
1270 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); | |
1271 | struct cpc_register_resource *desired_reg; | |
85b1407b | 1272 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
6fa12d58 | 1273 | struct cppc_pcc_data *pcc_ss_data = NULL; |
337aadff AC |
1274 | int ret = 0; |
1275 | ||
6fa12d58 | 1276 | if (!cpc_desc) { |
337aadff AC |
1277 | pr_debug("No CPC descriptor for CPU:%d\n", cpu); |
1278 | return -ENODEV; | |
1279 | } | |
1280 | ||
1281 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | |
1282 | ||
80b8286a PP |
1283 | /* |
1284 | * This is Phase-I where we want to write to CPC registers | |
1285 | * -> We want all CPUs to be able to execute this phase in parallel | |
1286 | * | |
1287 | * Since read_lock can be acquired by multiple CPUs simultaneously we | |
1288 | * achieve that goal here | |
1289 | */ | |
1290 | if (CPC_IN_PCC(desired_reg)) { | |
6fa12d58 PP |
1291 | if (pcc_ss_id < 0) { |
1292 | pr_debug("Invalid pcc_ss_id\n"); | |
1293 | return -ENODEV; | |
1294 | } | |
1295 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
85b1407b GC |
1296 | down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */ |
1297 | if (pcc_ss_data->platform_owns_pcc) { | |
1298 | ret = check_pcc_chan(pcc_ss_id, false); | |
80b8286a | 1299 | if (ret) { |
85b1407b | 1300 | up_read(&pcc_ss_data->pcc_lock); |
80b8286a PP |
1301 | return ret; |
1302 | } | |
80b8286a | 1303 | } |
139aee73 PP |
1304 | /* |
1305 | * Update the pending_write to make sure a PCC CMD_READ will not | |
1306 | * arrive and steal the channel during the switch to write lock | |
1307 | */ | |
85b1407b GC |
1308 | pcc_ss_data->pending_pcc_write_cmd = true; |
1309 | cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt; | |
80b8286a | 1310 | cpc_desc->write_cmd_status = 0; |
ad62e1e6 AC |
1311 | } |
1312 | ||
337aadff AC |
1313 | /* |
1314 | * Skip writing MIN/MAX until Linux knows how to come up with | |
1315 | * useful values. | |
1316 | */ | |
a6cbcdd5 | 1317 | cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); |
337aadff | 1318 | |
80b8286a | 1319 | if (CPC_IN_PCC(desired_reg)) |
85b1407b | 1320 | up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ |
80b8286a PP |
1321 | /* |
1322 | * This is Phase-II where we transfer the ownership of PCC to Platform | |
1323 | * | |
1324 | * Short Summary: Basically, if we think of a group of cppc_set_perf | |
1325 | * requests that happen in a short overlapping interval, the last CPU to | |
1326 | * come out of Phase-I will enter Phase-II and ring the doorbell. | |
1327 | * | |
1328 | * We have the following requirements for Phase-II: | |
1329 | * 1. We want to execute Phase-II only when there are no CPUs | |
1330 | * currently executing in Phase-I | |
1331 | * 2. Once we start Phase-II we want to avoid all other CPUs from | |
1332 | * entering Phase-I. | |
1333 | * 3. We want only one CPU among all those who went through Phase-I | |
1334 | * to run phase-II | |
1335 | * | |
1336 | * If write_trylock fails to get the lock and doesn't transfer the | |
1337 | * PCC ownership to the platform, then one of the following will be TRUE | |
1338 | * 1. There is at-least one CPU in Phase-I which will later execute | |
1339 | * write_trylock, so the CPUs in Phase-I will be responsible for | |
1340 | * executing the Phase-II. | |
1341 | * 2. Some other CPU has beaten this CPU to successfully execute the | |
1342 | * write_trylock and has already acquired the write_lock. We know for a | |
603fadf3 | 1343 | * fact it (other CPU acquiring the write_lock) couldn't have happened |
80b8286a PP |
1344 | * before this CPU's Phase-I as we held the read_lock. |
1345 | * 3. Some other CPU executing pcc CMD_READ has stolen the | |
1346 | * down_write, in which case, send_pcc_cmd will check for pending | |
1347 | * CMD_WRITE commands by checking the pending_pcc_write_cmd. | |
1348 | * So this CPU can be certain that its request will be delivered. | |
1349 | * Thus, in all cases, this CPU knows that its request will be delivered | |
1350 | * by another CPU and can return. | |
1351 | * | |
1352 | * After getting the down_write we still need to check for | |
1353 | * pending_pcc_write_cmd to take care of the following scenario | |
1354 | * The thread running this code could be scheduled out between | |
1355 | * Phase-I and Phase-II. Before it is scheduled back on, another CPU | |
1356 | * could have delivered the request to Platform by triggering the | |
1357 | * doorbell and transferred the ownership of PCC to platform. So this | |
1358 | * avoids triggering an unnecessary doorbell and more importantly before | |
1359 | * triggering the doorbell it makes sure that the PCC channel ownership | |
1360 | * is still with OSPM. | |
1361 | * pending_pcc_write_cmd can also be cleared by a different CPU, if | |
1362 | * there was a pcc CMD_READ waiting on down_write and it steals the lock | |
1363 | * before the pcc CMD_WRITE is completed. pcc_send_cmd checks for this | |
1364 | * case during a CMD_READ and if there are pending writes it delivers | |
1365 | * the write command before servicing the read command | |
1366 | */ | |
1367 | if (CPC_IN_PCC(desired_reg)) { | |
85b1407b | 1368 | if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ |
80b8286a | 1369 | /* Update only if there are pending write commands */ |
85b1407b GC |
1370 | if (pcc_ss_data->pending_pcc_write_cmd) |
1371 | send_pcc_cmd(pcc_ss_id, CMD_WRITE); | |
1372 | up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */ | |
80b8286a PP |
1373 | } else |
1374 | /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ | |
85b1407b GC |
1375 | wait_event(pcc_ss_data->pcc_write_wait_q, |
1376 | cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt); | |
80b8286a PP |
1377 | |
1378 | /* send_pcc_cmd updates the status in case of failure */ | |
1379 | ret = cpc_desc->write_cmd_status; | |
337aadff | 1380 | } |
337aadff AC |
1381 | return ret; |
1382 | } | |
1383 | EXPORT_SYMBOL_GPL(cppc_set_perf); | |
be8b88d7 PP |
1384 | |
1385 | /** | |
1386 | * cppc_get_transition_latency - returns frequency transition latency in ns | |
1387 | * | |
1388 | * ACPI CPPC does not explicitly specify how a platform can specify the | |
1389 | * transition latency for performance change requests. The closest we have | |
1390 | * is the timing information from the PCCT tables which provides the info | |
1391 | * on the number and frequency of PCC commands the platform can handle. | |
1392 | */ | |
1393 | unsigned int cppc_get_transition_latency(int cpu_num) | |
1394 | { | |
1395 | /* | |
1396 | * Expected transition latency is based on the PCCT timing values | |
1397 | * Below are definition from ACPI spec: | |
1398 | * pcc_nominal- Expected latency to process a command, in microseconds | |
1399 | * pcc_mpar - The maximum number of periodic requests that the subspace | |
1400 | * channel can support, reported in commands per minute. 0 | |
1401 | * indicates no limitation. | |
1402 | * pcc_mrtt - The minimum amount of time that OSPM must wait after the | |
1403 | * completion of a command before issuing the next command, | |
1404 | * in microseconds. | |
1405 | */ | |
1406 | unsigned int latency_ns = 0; | |
1407 | struct cpc_desc *cpc_desc; | |
1408 | struct cpc_register_resource *desired_reg; | |
85b1407b | 1409 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num); |
1ecbd717 | 1410 | struct cppc_pcc_data *pcc_ss_data; |
be8b88d7 PP |
1411 | |
1412 | cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); | |
1413 | if (!cpc_desc) | |
1414 | return CPUFREQ_ETERNAL; | |
1415 | ||
1416 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | |
1417 | if (!CPC_IN_PCC(desired_reg)) | |
1418 | return CPUFREQ_ETERNAL; | |
1419 | ||
1ecbd717 GC |
1420 | if (pcc_ss_id < 0) |
1421 | return CPUFREQ_ETERNAL; | |
1422 | ||
1423 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
85b1407b GC |
1424 | if (pcc_ss_data->pcc_mpar) |
1425 | latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar); | |
be8b88d7 | 1426 | |
85b1407b GC |
1427 | latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000); |
1428 | latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000); | |
be8b88d7 PP |
1429 | |
1430 | return latency_ns; | |
1431 | } | |
1432 | EXPORT_SYMBOL_GPL(cppc_get_transition_latency); |
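
/*
 * A worked example with hypothetical PCCT values: for pcc_mpar = 6000
 * commands/min the MPAR term is 60 * (1000 * 1000 * 1000 / 6000), i.e.
 * roughly 10,000,000 ns. With pcc_nominal = 1000 us (1,000,000 ns) and
 * pcc_mrtt = 100 us (100,000 ns), the returned transition latency is the
 * maximum of the three, about 10 ms.
 */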