// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a discretized
 * P-state scale which is tied to CPU frequency only. In brief, the basic
 * operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox-like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

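/*
 * A minimal sketch of how a CPUfreq driver is expected to consume this
 * interface (see drivers/cpufreq/cppc_cpufreq.c for the real consumer;
 * the error handling and the chosen performance level below are
 * illustrative only):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	if (cppc_get_perf_caps(cpu, &caps))
 *		return -ENODEV;
 *
 *	ctrls.desired_perf = caps.nominal_perf;	(request sustained perf)
 *	if (cppc_set_perf(cpu, &ctrls))
 *		return -EIO;
 */
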
struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (currently cppc_set_perf),
	 * we need to take the read_lock and check if the channel belongs
	 * to the OSPM before reading from or writing to the PCC subspace.
	 * We need to take the write_lock before transferring channel
	 * ownership to the platform via a doorbell. This allows us to
	 * batch a number of CPPC requests if they happen to originate
	 * at about the same time.
	 *
	 * For non-performance-critical usecases (init), take the
	 * write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};
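
/*
 * Sketch of the intended pcc_lock usage on the performance-critical
 * (cppc_set_perf) path described in the comment above; pseudo-code under
 * that comment's assumptions, not a verbatim copy of the real function:
 *
 *	down_read(&pcc_ss_data->pcc_lock);
 *	if (pcc_ss_data->platform_owns_pcc)
 *		check_pcc_chan(pcc_ss_id, false);	(wait for OSPM ownership)
 *	cpc_write(cpu, desired_reg, perf);		(this write can be batched)
 *	up_read(&pcc_ss_data->pcc_lock);
 *	...
 *	down_write(&pcc_ss_data->pcc_lock);		(one CPU rings the doorbell)
 *	send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 *	up_write(&pcc_ss_data->pcc_lock);
 */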

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))
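
/*
 * The 0x8 above is the size of the generic PCC shared-memory region header
 * (struct acpi_pcct_shared_memory: a 32-bit signature followed by 16-bit
 * command and status fields); register addresses in _CPC PCC entries are
 * offsets relative to the communication space that follows that header.
 */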

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,	\
					struct kobj_attribute *attr,	\
					char *buf)			\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return sysfs_emit(buf, "%llu\n",			\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

/* Check for a valid access_width; otherwise, fall back to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)

/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) &		\
					GENMASK(((reg)->bit_width) - 1, 0))

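/*
 * Worked example (illustrative values, not from any particular platform):
 * for a SystemMemory register with access_width == 2, bit_offset == 8 and
 * bit_width == 8, GET_BIT_WIDTH() yields 8 << (2 - 1) == 16, so the access
 * is performed as a 16-bit read/write, while MASK_VAL() extracts the
 * register payload from it: MASK_VAL(reg, 0xABCD) == (0xABCD >> 8) & 0xFF
 * == 0xAB.
 */
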
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
ATTRIBUTE_GROUPS(cppc);

static const struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll the PCC status register every 3 us (delay_us) for a maximum
	 * of deadline_us (timeout_us) until the PCC command-complete bit is
	 * set (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit\n",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

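	/*
	 * Worked example for the MPAR logic above (hypothetical numbers):
	 * with pcc_mpar == 120, the first command in a window records
	 * last_mpar_reset and refills mpar_count to 120; every command then
	 * consumes one unit, and once the budget is exhausted any further
	 * command inside the same 60 s window fails with -EIO until the
	 * window expires and the budget is refilled.
	 */
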
	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	if (acpi_disabled)
		return false;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
		    !CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

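/*
 * For reference, the _PSD coordination types handled above (values per the
 * ACPI spec): 0xFC (SW_ALL) means the OS coordinates and must set all CPUs
 * in the domain to the same state, 0xFD (SW_ANY) means the OS may set the
 * domain's state via any CPU in it, and 0xFE (HW_ALL) means the hardware
 * coordinates among the CPUs in the domain.
 */
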
static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name (_CPC, Package() {
 *		17,						// NumEntries
 *		1,						// Revision
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *		...
 *		...
 *	}
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9,	// AccessSize (subspace ID)
 *	)
 */

#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (!osc_sb_cppc2_support_acked) {
		pr_debug("CPPC v2 _OSC not acked\n");
		if (!cpc_supported_by_cpu()) {
			pr_debug("CPPC is not supported by the CPU\n");
			return -ENODEV;
		}
	}

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;
					size_t access_width;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						if (!cpc_supported_by_cpu())
							goto out_free;
					}

					access_width = GET_BIT_WIDTH(gas_t) / 8;
					addr = ioremap(gas_t->address, access_width);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					if (!cpc_supported_by_cpu())
						goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
				 i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	arch_init_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

917 | ||
918 | /** | |
919 | * acpi_cppc_processor_exit - Cleanup CPC structs. | |
603fadf3 | 920 | * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
337aadff AC |
921 | * |
922 | * Return: Void | |
923 | */ | |
924 | void acpi_cppc_processor_exit(struct acpi_processor *pr) | |
925 | { | |
926 | struct cpc_desc *cpc_ptr; | |
5bbb86aa AC |
927 | unsigned int i; |
928 | void __iomem *addr; | |
85b1407b GC |
929 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id); |
930 | ||
e69ae675 | 931 | if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) { |
85b1407b GC |
932 | if (pcc_data[pcc_ss_id]->pcc_channel_acquired) { |
933 | pcc_data[pcc_ss_id]->refcount--; | |
934 | if (!pcc_data[pcc_ss_id]->refcount) { | |
935 | pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel); | |
85b1407b | 936 | kfree(pcc_data[pcc_ss_id]); |
56a0b978 | 937 | pcc_data[pcc_ss_id] = NULL; |
85b1407b GC |
938 | } |
939 | } | |
940 | } | |
158c998e | 941 | |
337aadff | 942 | cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); |
9e9d68da SAS |
943 | if (!cpc_ptr) |
944 | return; | |
5bbb86aa AC |
945 | |
946 | /* Free all the mapped sys mem areas for this CPU */ | |
947 | for (i = 2; i < cpc_ptr->num_entries; i++) { | |
948 | addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; | |
949 | if (addr) | |
950 | iounmap(addr); | |
951 | } | |
952 | ||
158c998e | 953 | kobject_put(&cpc_ptr->kobj); |
337aadff AC |
954 | kfree(cpc_ptr); |
955 | } | |
956 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); | |
957 | ||
/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock,
 * they should be as fast as possible. We have already mapped the PCC
 * subspace during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int size;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;
	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, size);

	switch (size) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		return -EFAULT;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		*val = MASK_VAL(reg, *val);

	return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	int size;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, size);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		val = MASK_VAL(reg, val);

	switch (size) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
	return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);

/**
 * cppc_get_epp_perf - Get the epp register value.
 * @cpunum: CPU from which to get epp preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
	return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

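/*
 * Note on the capability values read above: per the CPPC spec they all live
 * on the same abstract performance scale and are expected to satisfy
 * lowest_perf <= lowest_nonlinear_perf <= nominal_perf <= highest_perf;
 * lowest_freq and nominal_freq (when supported) give the frequencies, in
 * MHz, corresponding to lowest_perf and nominal_perf respectively.
 */
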
/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency. This
 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct cpc_register_resource *ref_perf_reg;
		struct cpc_desc *cpc_desc;

		cpc_desc = per_cpu(cpc_desc_ptr, cpu);

		if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
			return true;

		ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];

		/*
		 * If the reference perf register is not supported then we should
		 * use the nominal perf value
		 */
		if (!CPC_SUPPORTED(ref_perf_reg))
			ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

		if (CPC_IN_PCC(ref_perf_reg))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);

337aadff | 1362 | /** |
603fadf3 | 1363 | * cppc_get_perf_ctrs - Read a CPU's performance feedback counters. |
337aadff AC |
1364 | * @cpunum: CPU from which to read counters. |
1365 | * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h | |
1366 | * | |
1367 | * Return: 0 for success with perf_fb_ctrs populated else -ERRNO. | |
1368 | */ | |
1369 | int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | |
1370 | { | |
1371 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | |
158c998e AC |
1372 | struct cpc_register_resource *delivered_reg, *reference_reg, |
1373 | *ref_perf_reg, *ctr_wrap_reg; | |
85b1407b | 1374 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
6fa12d58 | 1375 | struct cppc_pcc_data *pcc_ss_data = NULL; |
158c998e | 1376 | u64 delivered, reference, ref_perf, ctr_wrap_time; |
850d64a4 | 1377 | int ret = 0, regs_in_pcc = 0; |
337aadff | 1378 | |
6fa12d58 | 1379 | if (!cpc_desc) { |
337aadff AC |
1380 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); |
1381 | return -ENODEV; | |
1382 | } | |
1383 | ||
1384 | delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; | |
1385 | reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; | |
158c998e AC |
1386 | ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; |
1387 | ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; | |
1388 | ||
1389 | /* | |
603fadf3 | 1390 | * If reference perf register is not supported then we should |
158c998e AC |
1391 | * use the nominal perf value |
1392 | */ | |
1393 | if (!CPC_SUPPORTED(ref_perf_reg)) | |
1394 | ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; | |
337aadff | 1395 | |
337aadff | 1396 | /* Are any of the regs PCC ?*/ |
158c998e AC |
1397 | if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || |
1398 | CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { | |
6fa12d58 PP |
1399 | if (pcc_ss_id < 0) { |
1400 | pr_debug("Invalid pcc_ss_id\n"); | |
1401 | return -ENODEV; | |
1402 | } | |
1403 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
85b1407b | 1404 | down_write(&pcc_ss_data->pcc_lock); |
850d64a4 | 1405 | regs_in_pcc = 1; |
337aadff | 1406 | /* Ring doorbell once to update PCC subspace */ |
85b1407b | 1407 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) { |
337aadff AC |
1408 | ret = -EIO; |
1409 | goto out_err; | |
1410 | } | |
1411 | } | |
1412 | ||
a6cbcdd5 SP |
1413 | cpc_read(cpunum, delivered_reg, &delivered); |
1414 | cpc_read(cpunum, reference_reg, &reference); | |
1415 | cpc_read(cpunum, ref_perf_reg, &ref_perf); | |
158c998e AC |
1416 | |
1417 | /* | |
1418 | * Per spec, if the optional ctr_wrap_time register is unsupported, the | |
1419 | * performance counters are assumed to never wrap during the lifetime | |
1420 | * of the platform. | |
1421 | */ | |
1422 | ctr_wrap_time = (u64)(~((u64)0)); | |
1423 | if (CPC_SUPPORTED(ctr_wrap_reg)) | |
a6cbcdd5 | 1424 | cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time); |
337aadff | 1425 | |
158c998e | 1426 | if (!delivered || !reference || !ref_perf) { |
337aadff AC |
1427 | ret = -EFAULT; |
1428 | goto out_err; | |
1429 | } | |
1430 | ||
1431 | perf_fb_ctrs->delivered = delivered; | |
1432 | perf_fb_ctrs->reference = reference; | |
158c998e | 1433 | perf_fb_ctrs->reference_perf = ref_perf; |
2c74d847 | 1434 | perf_fb_ctrs->wraparound_time = ctr_wrap_time; |
337aadff | 1435 | out_err: |
850d64a4 | 1436 | if (regs_in_pcc) |
85b1407b | 1437 | up_write(&pcc_ss_data->pcc_lock); |
337aadff AC |
1438 | return ret; |
1439 | } | |
1440 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); | |
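/*
 * Illustrative sketch, not part of this file: computing the average
 * delivered performance over a short window from two counter snapshots,
 * as a cpufreq driver might. Counter wraparound (see wraparound_time) is
 * ignored for brevity; example_avg_delivered_perf() is hypothetical.
 */
static u64 example_avg_delivered_perf(int cpu)
{
	struct cppc_perf_fb_ctrs t0, t1;
	u64 d_delivered, d_reference;

	if (cppc_get_perf_ctrs(cpu, &t0))
		return 0;
	udelay(2);		/* sampling window */
	if (cppc_get_perf_ctrs(cpu, &t1))
		return 0;

	d_delivered = t1.delivered - t0.delivered;
	d_reference = t1.reference - t0.reference;
	if (!d_reference)
		return 0;

	/* delivered perf = reference perf scaled by the counter ratio */
	return div64_u64(t0.reference_perf * d_delivered, d_reference);
}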
1441 | ||
7bc1fcd3 PY |
1442 | /* |
1443 | * Set the Energy Performance Preference register value through | |
1444 | * the Performance Controls Interface. | |
1445 | */ | |
1446 | int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable) | |
1447 | { | |
1448 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); | |
1449 | struct cpc_register_resource *epp_set_reg; | |
1450 | struct cpc_register_resource *auto_sel_reg; | |
1451 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); | |
1452 | struct cppc_pcc_data *pcc_ss_data = NULL; | |
1453 | int ret; | |
1454 | ||
1455 | if (!cpc_desc) { | |
1456 | pr_debug("No CPC descriptor for CPU:%d\n", cpu); | |
1457 | return -ENODEV; | |
1458 | } | |
1459 | ||
1460 | auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; | |
1461 | epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF]; | |
1462 | ||
1463 | if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) { | |
1464 | if (pcc_ss_id < 0) { | |
1465 | pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu); | |
1466 | return -ENODEV; | |
1467 | } | |
1468 | ||
1469 | if (CPC_SUPPORTED(auto_sel_reg)) { | |
1470 | ret = cpc_write(cpu, auto_sel_reg, enable); | |
1471 | if (ret) | |
1472 | return ret; | |
1473 | } | |
1474 | ||
1475 | if (CPC_SUPPORTED(epp_set_reg)) { | |
1476 | ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf); | |
1477 | if (ret) | |
1478 | return ret; | |
1479 | } | |
1480 | ||
1481 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
1482 | ||
1483 | down_write(&pcc_ss_data->pcc_lock); | |
1484 | /* after writing CPC, transfer the ownership of PCC to platform */ | |
1485 | ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); | |
1486 | up_write(&pcc_ss_data->pcc_lock); | |
1487 | } else { | |
1488 | ret = -ENOTSUPP; | |
1489 | pr_debug("_CPC in PCC is not supported\n"); | |
1490 | } | |
1491 | ||
1492 | return ret; | |
1493 | } | |
1494 | EXPORT_SYMBOL_GPL(cppc_set_epp_perf); | |
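/*
 * Illustrative sketch, not part of this file: requesting a "balanced"
 * energy/performance bias. Per the ACPI spec the EPP scale runs from 0
 * (maximum performance) to 0xFF (maximum energy efficiency); 0x80 here is
 * an example midpoint, and example_set_balanced_epp() is hypothetical.
 */
static int example_set_balanced_epp(int cpu)
{
	struct cppc_perf_ctrls ctrls = { .energy_perf = 0x80 };

	/* also writes AUTO_SEL_ENABLE when that register is supported */
	return cppc_set_epp_perf(cpu, &ctrls, true);
}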
1495 | ||
c984f5d5 WK |
1496 | /** |
1497 | * cppc_get_auto_sel_caps - Read autonomous selection register. | |
1498 | * @cpunum : CPU from which to read register. | |
1499 | * @perf_caps : struct in which the autonomous selection register value is returned. | |
1500 | */ | |
1501 | int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |
1502 | { | |
1503 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | |
1504 | struct cpc_register_resource *auto_sel_reg; | |
1505 | u64 auto_sel; | |
1506 | ||
1507 | if (!cpc_desc) { | |
1508 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); | |
1509 | return -ENODEV; | |
1510 | } | |
1511 | ||
1512 | auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; | |
1513 | ||
1514 | if (!CPC_SUPPORTED(auto_sel_reg)) | |
1515 | pr_warn_once("Autonomous mode is not supported!\n"); | |
1516 | ||
1517 | if (CPC_IN_PCC(auto_sel_reg)) { | |
1518 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); | |
1519 | struct cppc_pcc_data *pcc_ss_data = NULL; | |
1520 | int ret = 0; | |
1521 | ||
1522 | if (pcc_ss_id < 0) | |
1523 | return -ENODEV; | |
1524 | ||
1525 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
1526 | ||
1527 | down_write(&pcc_ss_data->pcc_lock); | |
1528 | ||
1529 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) { | |
1530 | cpc_read(cpunum, auto_sel_reg, &auto_sel); | |
1531 | perf_caps->auto_sel = (bool)auto_sel; | |
1532 | } else { | |
1533 | ret = -EIO; | |
1534 | } | |
1535 | ||
1536 | up_write(&pcc_ss_data->pcc_lock); | |
1537 | ||
1538 | return ret; | |
1539 | } | |
1540 | ||
1541 | return 0; | |
1542 | } | |
1543 | EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps); | |
1544 | ||
1545 | /** | |
1546 | * cppc_set_auto_sel - Write autonomous selection register. | |
1547 | * @cpu : CPU to which to write register. | |
1548 | * @enable : the desired value of the autonomous selection register to be written. | |
1549 | */ | |
1550 | int cppc_set_auto_sel(int cpu, bool enable) | |
1551 | { | |
1552 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); | |
1553 | struct cpc_register_resource *auto_sel_reg; | |
1554 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); | |
1555 | struct cppc_pcc_data *pcc_ss_data = NULL; | |
1556 | int ret = -EINVAL; | |
1557 | ||
1558 | if (!cpc_desc) { | |
1559 | pr_debug("No CPC descriptor for CPU:%d\n", cpu); | |
1560 | return -ENODEV; | |
1561 | } | |
1562 | ||
1563 | auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; | |
1564 | ||
1565 | if (CPC_IN_PCC(auto_sel_reg)) { | |
1566 | if (pcc_ss_id < 0) { | |
1567 | pr_debug("Invalid pcc_ss_id\n"); | |
1568 | return -ENODEV; | |
1569 | } | |
1570 | ||
1571 | if (CPC_SUPPORTED(auto_sel_reg)) { | |
1572 | ret = cpc_write(cpu, auto_sel_reg, enable); | |
1573 | if (ret) | |
1574 | return ret; | |
1575 | } | |
1576 | ||
1577 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
1578 | ||
1579 | down_write(&pcc_ss_data->pcc_lock); | |
1580 | /* after writing CPC, transfer the ownership of PCC to platform */ | |
1581 | ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); | |
1582 | up_write(&pcc_ss_data->pcc_lock); | |
1583 | } else { | |
1584 | ret = -ENOTSUPP; | |
1585 | pr_debug("_CPC in PCC is not supported\n"); | |
1586 | } | |
1587 | ||
1588 | return ret; | |
1589 | } | |
1590 | EXPORT_SYMBOL_GPL(cppc_set_auto_sel); | |
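/*
 * Illustrative sketch, not part of this file: querying autonomous
 * selection and enabling it only when it is currently off. Both calls may
 * go through PCC and can sleep. example_enable_autonomous() is
 * hypothetical.
 */
static int example_enable_autonomous(int cpu)
{
	struct cppc_perf_caps caps = { 0 };
	int ret;

	ret = cppc_get_auto_sel_caps(cpu, &caps);
	if (ret)
		return ret;

	if (caps.auto_sel)
		return 0;	/* already enabled */

	return cppc_set_auto_sel(cpu, true);
}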
1591 | ||
fb0b00af JS |
1592 | /** |
1593 | * cppc_set_enable - Enable or disable CPPC on the processor by writing the | |
1594 | * Collaborative Processor Performance Control package Enable register field. | |
1595 | * @cpu: CPU for which to enable CPPC register. | |
1596 | * @enable: 0 - disable, 1 - enable CPPC feature on the processor. | |
1597 | * | |
1598 | * Return: 0 for success, -ERRNO otherwise. | |
1599 | */ | |
1600 | int cppc_set_enable(int cpu, bool enable) | |
1601 | { | |
1602 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); | |
1603 | struct cpc_register_resource *enable_reg; | |
1604 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); | |
1605 | struct cppc_pcc_data *pcc_ss_data = NULL; | |
1606 | int ret = -EINVAL; | |
1607 | ||
1608 | if (!cpc_desc) { | |
1609 | pr_debug("No CPC descriptor for CPU:%d\n", cpu); | |
1610 | return -EINVAL; | |
1611 | } | |
1612 | ||
1613 | enable_reg = &cpc_desc->cpc_regs[ENABLE]; | |
1614 | ||
1615 | if (CPC_IN_PCC(enable_reg)) { | |
1617 | if (pcc_ss_id < 0) | |
1618 | return -EIO; | |
1619 | ||
1620 | ret = cpc_write(cpu, enable_reg, enable); | |
1621 | if (ret) | |
1622 | return ret; | |
1623 | ||
1624 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
1625 | ||
1626 | down_write(&pcc_ss_data->pcc_lock); | |
1627 | /* after writing CPC, transfer the ownership of PCC to platform */ | |
1628 | ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); | |
1629 | up_write(&pcc_ss_data->pcc_lock); | |
1630 | return ret; | |
1631 | } | |
1632 | ||
1633 | return cpc_write(cpu, enable_reg, enable); | |
1634 | } | |
1635 | EXPORT_SYMBOL_GPL(cppc_set_enable); | |
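/*
 * Illustrative sketch, not part of this file: a driver init path enabling
 * CPPC before issuing any performance requests, and logging (rather than
 * hiding) a failed Enable register write. example_cppc_init_cpu() is
 * hypothetical.
 */
static int example_cppc_init_cpu(int cpu)
{
	int ret = cppc_set_enable(cpu, true);

	if (ret)
		pr_debug("Could not enable CPPC on CPU%d (%d)\n", cpu, ret);

	return ret;
}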
1636 | ||
337aadff | 1637 | /** |
603fadf3 | 1638 | * cppc_set_perf - Set a CPU's performance controls. |
337aadff AC |
1639 | * @cpu: CPU for which to set performance controls. |
1640 | * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h | |
1641 | * | |
1642 | * Return: 0 for success, -ERRNO otherwise. | |
1643 | */ | |
1644 | int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |
1645 | { | |
1646 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); | |
76531df5 | 1647 | struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg; |
85b1407b | 1648 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
6fa12d58 | 1649 | struct cppc_pcc_data *pcc_ss_data = NULL; |
337aadff AC |
1650 | int ret = 0; |
1651 | ||
6fa12d58 | 1652 | if (!cpc_desc) { |
337aadff AC |
1653 | pr_debug("No CPC descriptor for CPU:%d\n", cpu); |
1654 | return -ENODEV; | |
1655 | } | |
1656 | ||
1657 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | |
76531df5 WK |
1658 | min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF]; |
1659 | max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF]; | |
337aadff | 1660 | |
80b8286a PP |
1661 | /* |
1662 | * This is Phase-I where we want to write to CPC registers | |
1663 | * -> We want all CPUs to be able to execute this phase in parallel | |
1664 | * | |
1665 | * Since read_lock can be acquired by multiple CPUs simultaneously we | |
1666 | * achieve that goal here | |
1667 | */ | |
76531df5 | 1668 | if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { |
6fa12d58 PP |
1669 | if (pcc_ss_id < 0) { |
1670 | pr_debug("Invalid pcc_ss_id\n"); | |
1671 | return -ENODEV; | |
1672 | } | |
1673 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
85b1407b GC |
1674 | down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */ |
1675 | if (pcc_ss_data->platform_owns_pcc) { | |
1676 | ret = check_pcc_chan(pcc_ss_id, false); | |
80b8286a | 1677 | if (ret) { |
85b1407b | 1678 | up_read(&pcc_ss_data->pcc_lock); |
80b8286a PP |
1679 | return ret; |
1680 | } | |
80b8286a | 1681 | } |
139aee73 PP |
1682 | /* |
1683 | * Update the pending_write to make sure a PCC CMD_READ will not | |
1684 | * arrive and steal the channel during the switch to write lock | |
1685 | */ | |
85b1407b GC |
1686 | pcc_ss_data->pending_pcc_write_cmd = true; |
1687 | cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt; | |
80b8286a | 1688 | cpc_desc->write_cmd_status = 0; |
ad62e1e6 AC |
1689 | } |
1690 | ||
76531df5 WK |
1691 | cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); |
1692 | ||
337aadff | 1693 | /* |
76531df5 WK |
1694 | * Only write min_perf and max_perf if they are non-zero. Some drivers | |
1695 | * pass zero for min and max perf not to request the value zero, but to | |
1696 | * indicate that those registers should be left untouched. | |
337aadff | 1697 | */ |
76531df5 WK |
1698 | if (perf_ctrls->min_perf) |
1699 | cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf); | |
1700 | if (perf_ctrls->max_perf) | |
1701 | cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf); | |
337aadff | 1702 | |
76531df5 | 1703 | if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) |
85b1407b | 1704 | up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ |
80b8286a PP |
1705 | /* |
1706 | * This is Phase-II where we transfer the ownership of PCC to Platform | |
1707 | * | |
1708 | * Short summary: consider a group of cppc_set_perf requests that | |
1709 | * happen in a short overlapping interval. The last CPU to come out | |
1710 | * of Phase-I will enter Phase-II and ring the doorbell. | |
1711 | * | |
1712 | * We have the following requirements for Phase-II: | |
1713 | * 1. We want to execute Phase-II only when there are no CPUs | |
1714 | * currently executing in Phase-I | |
1715 | * 2. Once we start Phase-II we want to avoid all other CPUs from | |
1716 | * entering Phase-I. | |
1717 | * 3. We want only one CPU among all those who went through Phase-I | |
1718 | * to run phase-II | |
1719 | * | |
1720 | * If write_trylock fails to get the lock and doesn't transfer the | |
1721 | * PCC ownership to the platform, then one of the following will be TRUE | |
1722 | * 1. There is at-least one CPU in Phase-I which will later execute | |
1723 | * write_trylock, so the CPUs in Phase-I will be responsible for | |
1724 | * executing the Phase-II. | |
1725 | * 2. Some other CPU has beaten this CPU to successfully execute the | |
1726 | * write_trylock and has already acquired the write_lock. We know for a | |
603fadf3 | 1727 | * fact it (other CPU acquiring the write_lock) couldn't have happened |
80b8286a PP |
1728 | * before this CPU's Phase-I as we held the read_lock. |
1729 | * 3. Some other CPU executing pcc CMD_READ has stolen the | |
1730 | * down_write, in which case, send_pcc_cmd will check for pending | |
1731 | * CMD_WRITE commands by checking the pending_pcc_write_cmd. | |
1732 | * So in all cases, this CPU knows that its request will be delivered | |
1733 | * by another CPU and can return. | |
1735 | * | |
1736 | * After getting the down_write we still need to check for | |
1737 | * pending_pcc_write_cmd to take care of the following scenario | |
1738 | * The thread running this code could be scheduled out between | |
1739 | * Phase-I and Phase-II. Before it is scheduled back on, another CPU | |
1740 | * could have delivered the request to Platform by triggering the | |
1741 | * doorbell and transferred the ownership of PCC to platform. So this | |
1742 | * avoids triggering an unnecessary doorbell and more importantly before | |
1743 | * triggering the doorbell it makes sure that the PCC channel ownership | |
1744 | * is still with OSPM. | |
1745 | * pending_pcc_write_cmd can also be cleared by a different CPU, if | |
1746 | * there was a pcc CMD_READ waiting on down_write and it steals the lock | |
935ab850 | 1747 | * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this |
80b8286a PP |
1748 | * case during a CMD_READ and if there are pending writes it delivers |
1749 | * the write command before servicing the read command | |
1750 | */ | |
76531df5 | 1751 | if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { |
85b1407b | 1752 | if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ |
80b8286a | 1753 | /* Update only if there are pending write commands */ |
85b1407b GC |
1754 | if (pcc_ss_data->pending_pcc_write_cmd) |
1755 | send_pcc_cmd(pcc_ss_id, CMD_WRITE); | |
1756 | up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */ | |
80b8286a PP |
1757 | } else |
1758 | /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ | |
85b1407b GC |
1759 | wait_event(pcc_ss_data->pcc_write_wait_q, |
1760 | cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt); | |
80b8286a PP |
1761 | |
1762 | /* send_pcc_cmd updates the status in case of failure */ | |
1763 | ret = cpc_desc->write_cmd_status; | |
337aadff | 1764 | } |
337aadff AC |
1765 | return ret; |
1766 | } | |
1767 | EXPORT_SYMBOL_GPL(cppc_set_perf); | |
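/*
 * Illustrative sketch, not part of this file: a cpufreq-style frequency
 * request built on cppc_set_perf(). The target in kHz is converted to an
 * abstract perf level via cppc_khz_to_perf() (declared in cppc_acpi.h and
 * defined below); min_perf/max_perf are left at zero so those registers
 * are not written. example_request_freq() and the caller-supplied caps
 * are hypothetical.
 */
static int example_request_freq(int cpu, struct cppc_perf_caps *caps,
				unsigned int target_khz)
{
	struct cppc_perf_ctrls ctrls = { 0 };

	ctrls.desired_perf = cppc_khz_to_perf(caps, target_khz);

	return cppc_set_perf(cpu, &ctrls);
}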
be8b88d7 PP |
1768 | |
1769 | /** | |
1770 | * cppc_get_transition_latency - returns frequency transition latency in ns | |
fda7be20 | 1771 | * @cpu_num: CPU number for per_cpu(). |
be8b88d7 | 1772 | * |
935ab850 TS |
1773 | * ACPI CPPC does not explicitly define how a platform reports the | |
1774 | * transition latency for performance change requests. The closest we have | |
be8b88d7 PP |
1775 | * is the timing information from the PCCT tables which provides the info |
1776 | * on the number and frequency of PCC commands the platform can handle. | |
6380b7b2 PG |
1777 | * |
1778 | * If desired_reg is in the SystemMemory or SystemIo ACPI address space, | |
1779 | * then assume there is no latency. | |
be8b88d7 PP |
1780 | */ |
1781 | unsigned int cppc_get_transition_latency(int cpu_num) | |
1782 | { | |
1783 | /* | |
1784 | * Expected transition latency is based on the PCCT timing values | |
1785 | * Below are the definitions from the ACPI spec: | |
1786 | * pcc_nominal - Expected latency to process a command, in microseconds | |
1787 | * pcc_mpar - The maximum number of periodic requests that the subspace | |
1788 | * channel can support, reported in commands per minute. 0 | |
1789 | * indicates no limitation. | |
1790 | * pcc_mrtt - The minimum amount of time that OSPM must wait after the | |
1791 | * completion of a command before issuing the next command, | |
1792 | * in microseconds. | |
1793 | */ | |
1794 | unsigned int latency_ns = 0; | |
1795 | struct cpc_desc *cpc_desc; | |
1796 | struct cpc_register_resource *desired_reg; | |
85b1407b | 1797 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num); |
1ecbd717 | 1798 | struct cppc_pcc_data *pcc_ss_data; |
be8b88d7 PP |
1799 | |
1800 | cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); | |
1801 | if (!cpc_desc) | |
1802 | return CPUFREQ_ETERNAL; | |
1803 | ||
1804 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | |
6380b7b2 PG |
1805 | if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg)) |
1806 | return 0; | |
1807 | else if (!CPC_IN_PCC(desired_reg)) | |
be8b88d7 PP |
1808 | return CPUFREQ_ETERNAL; |
1809 | ||
1ecbd717 GC |
1810 | if (pcc_ss_id < 0) |
1811 | return CPUFREQ_ETERNAL; | |
1812 | ||
1813 | pcc_ss_data = pcc_data[pcc_ss_id]; | |
85b1407b GC |
1814 | if (pcc_ss_data->pcc_mpar) |
1815 | latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar); | |
be8b88d7 | 1816 | |
85b1407b GC |
1817 | latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000); |
1818 | latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000); | |
be8b88d7 PP |
1819 | |
1820 | return latency_ns; | |
1821 | } | |
1822 | EXPORT_SYMBOL_GPL(cppc_get_transition_latency); | |
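/*
 * Worked example (illustrative, with made-up PCCT values): for
 * pcc_mpar = 1000 commands/minute, pcc_nominal = 1000 us and
 * pcc_mrtt = 60 us, the candidate latencies are
 *
 *   MPAR limit : 60 * (10^9 / 1000) = 60,000,000 ns (one command per 60 ms)
 *   nominal    : 1000 * 1000        =  1,000,000 ns
 *   MRTT       : 60 * 1000          =     60,000 ns
 *
 * and cppc_get_transition_latency() returns the largest, 60 ms.
 */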
50b813b1 VG |
1823 | |
1824 | /* Minimum struct length needed for the DMI processor entry we want */ | |
1825 | #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 | |
1826 | ||
1827 | /* Offset in the DMI processor structure for the max frequency */ | |
1828 | #define DMI_PROCESSOR_MAX_SPEED 0x14 | |
1829 | ||
1830 | /* Callback function used to retrieve the max frequency from DMI */ | |
1831 | static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) | |
1832 | { | |
1833 | const u8 *dmi_data = (const u8 *)dm; | |
1834 | u16 *mhz = (u16 *)private; | |
1835 | ||
1836 | if (dm->type == DMI_ENTRY_PROCESSOR && | |
1837 | dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { | |
1838 | u16 val = (u16)get_unaligned((const u16 *) | |
1839 | (dmi_data + DMI_PROCESSOR_MAX_SPEED)); | |
1840 | *mhz = val > *mhz ? val : *mhz; | |
1841 | } | |
1842 | } | |
1843 | ||
1844 | /* Look up the max frequency in DMI */ | |
1845 | static u64 cppc_get_dmi_max_khz(void) | |
1846 | { | |
1847 | u16 mhz = 0; | |
1848 | ||
1849 | dmi_walk(cppc_find_dmi_mhz, &mhz); | |
1850 | ||
1851 | /* | |
1852 | * Crude fallback value (1 MHz), in case DMI provides no actual | |
1853 | * value; this also keeps the divisor in cppc_khz_to_perf() non-zero. | |
1854 | */ | |
1855 | mhz = mhz ? mhz : 1; | |
1856 | ||
1857 | return KHZ_PER_MHZ * mhz; | |
1858 | } | |
1859 | ||
1860 | /* | |
1861 | * If CPPC lowest_freq and nominal_freq registers are exposed then we can | |
1862 | * use them to convert perf to freq and vice versa. The conversion is | |
1863 | * extrapolated as an affine function passing through the two points: | |
1864 | * - (Low perf, Low freq) | |
1865 | * - (Nominal perf, Nominal freq) | |
1866 | */ | |
1867 | unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf) | |
1868 | { | |
1869 | s64 retval, offset = 0; | |
1870 | static u64 max_khz; | |
1871 | u64 mul, div; | |
1872 | ||
1873 | if (caps->lowest_freq && caps->nominal_freq) { | |
1874 | mul = caps->nominal_freq - caps->lowest_freq; | |
1875 | mul *= KHZ_PER_MHZ; | |
1876 | div = caps->nominal_perf - caps->lowest_perf; | |
1877 | offset = caps->nominal_freq * KHZ_PER_MHZ - | |
1878 | div64_u64(caps->nominal_perf * mul, div); | |
1879 | } else { | |
1880 | if (!max_khz) | |
1881 | max_khz = cppc_get_dmi_max_khz(); | |
1882 | mul = max_khz; | |
1883 | div = caps->highest_perf; | |
1884 | } | |
1885 | ||
1886 | retval = offset + div64_u64(perf * mul, div); | |
1887 | if (retval >= 0) | |
1888 | return retval; | |
1889 | return 0; | |
1890 | } | |
1891 | EXPORT_SYMBOL_GPL(cppc_perf_to_khz); | |
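/*
 * Worked example (illustrative, with made-up capabilities): for
 * lowest = (perf 10, 400 MHz) and nominal = (perf 40, 1600 MHz):
 *
 *   mul    = (1600 - 400) * 1000 kHz          = 1,200,000
 *   div    = 40 - 10                          = 30
 *   offset = 1600 * 1000 - (40 * 1,200,000) / 30 = 0
 *
 * so perf 25 maps to 0 + (25 * 1,200,000) / 30 = 1,000,000 kHz (1 GHz).
 */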
1892 | ||
1893 | unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq) | |
1894 | { | |
1895 | s64 retval, offset = 0; | |
1896 | static u64 max_khz; | |
1897 | u64 mul, div; | |
1898 | ||
1899 | if (caps->lowest_freq && caps->nominal_freq) { | |
1900 | mul = caps->nominal_perf - caps->lowest_perf; | |
1901 | div = caps->nominal_freq - caps->lowest_freq; | |
1902 | /* | |
1903 | * We don't need to convert to kHz for computing offset and can | |
1904 | * directly use nominal_freq and lowest_freq as the div64_u64 | |
1905 | * will remove the frequency unit. | |
1906 | */ | |
1907 | offset = caps->nominal_perf - | |
1908 | div64_u64(caps->nominal_freq * mul, div); | |
1909 | /* But we need it for computing the perf level. */ | |
1910 | div *= KHZ_PER_MHZ; | |
1911 | } else { | |
1912 | if (!max_khz) | |
1913 | max_khz = cppc_get_dmi_max_khz(); | |
1914 | mul = caps->highest_perf; | |
1915 | div = max_khz; | |
1916 | } | |
1917 | ||
1918 | retval = offset + div64_u64(freq * mul, div); | |
1919 | if (retval >= 0) | |
1920 | return retval; | |
1921 | return 0; | |
1922 | } | |
1923 | EXPORT_SYMBOL_GPL(cppc_khz_to_perf); |
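/*
 * Worked example (illustrative), inverting the case above: with the same
 * capabilities, mul = 40 - 10 = 30 and div = 1600 - 400 = 1200, so
 * offset = 40 - (1600 * 30) / 1200 = 0. After the KHZ_PER_MHZ scaling
 * div becomes 1,200,000, and 1,000,000 kHz maps back to
 * (1,000,000 * 30) / 1,200,000 = 25 - the two conversions round-trip.
 */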