// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_types.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK		REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the efficient
 * and performant use of the GT when GuC submission is enabled, including
 * frequency management, Render-C states management, and various algorithms
 * for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states management is also a GuC PC feature that is now enabled
 * in Xe for all platforms.
 */
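
/*
 * Illustrative lifecycle sketch (based on the functions in this file, not a
 * definitive driver sequence): xe_guc_pc_init() allocates the SLPC shared
 * data and registers cleanup, xe_guc_pc_start() resets SLPC and waits for it
 * to reach the RUNNING state, xe_guc_pc_set_min_freq() and
 * xe_guc_pc_set_max_freq() apply user bounds, and xe_guc_pc_stop() shuts
 * SLPC down.
 */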

static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = pc_to_guc(pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}

static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)
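
/*
 * Note: these accessors read/write fields of the GGTT-mapped struct
 * slpc_shared_data, e.g. slpc_shared_data_read(pc, header.global_state) as
 * used by wait_for_pc_state() below.
 */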

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

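/*
 * Illustrative example: SLPC_EVENT(SLPC_EVENT_RESET, 2) packs the event id
 * and the argument count into the second dword of a HOST2GUC_PC_SLPC_REQUEST
 * message, as used by the pc_action_*() helpers below.
 */
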
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

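	/*
	 * Poll with exponential backoff: sleep 10us, then 20us, 40us, ...,
	 * clamping the last sleep so the total never exceeds timeout_us.
	 */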
	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_shutdown(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	u32 action[] = {
		XE_GUC_ACTION_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
			ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
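
/*
 * Worked example (illustrative): hardware ratios are in units of
 * GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER = 50/3 MHz (~16.67 MHz),
 * so decode_freq(18) = 18 * 50 / 3 = 300 MHz, and encode_freq(300) = 18.
 */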

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(gt, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the RPn-RP0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate the minimum frequency to the efficient
	 * level. Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the RPn-RP0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as the approximation
	 * for RPe. For platforms other than PVC we get the resolved RPe
	 * directly from PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust PCODE and use that as
	 * our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get the actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	xe_device_mem_access_get(gt_to_xe(gt));

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	xe_device_mem_access_put(gt_to_xe(gt));

	return freq;
}

/**
 * xe_guc_pc_get_cur_freq - Get the current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_mem_access_get(gt_to_xe(gt));
	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	xe_device_mem_access_put(gt_to_xe(gt));
	return ret;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	xe_device_mem_access_get(xe);
	pc_update_rp_values(pc);
	xe_device_mem_access_put(xe);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a GT reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a GT reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get the maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a GT reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a GT reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

/**
 * xe_guc_pc_c_status - Get the current GT C state
 * @pc: Xe_GuC_PC instance
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	xe_device_mem_access_get(gt_to_xe(gt));

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	xe_device_mem_access_put(gt_to_xe(gt));

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - RC6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, GT_GFX_RC6);
	xe_device_mem_access_put(gt_to_xe(gt));

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - MC6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
	xe_device_mem_access_put(gt_to_xe(gt));

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

/**
 * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
	pc_set_cur_freq(pc, pc->rp0_freq);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq)
		pc_set_max_freq(pc, pc->rp0_freq);

	/*
	 * The same thing happens on server platforms, where min is listed
	 * as RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		pc_set_min_freq(pc, pc->rp0_freq);

	return 0;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		goto out;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}
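
/*
 * Worked example (illustrative): the PCODE min freq table is programmed in
 * GT_FREQUENCY_MULTIPLIER (50 MHz) units here, so an rpn_freq of 300 MHz is
 * passed as DIV_ROUND_CLOSEST(300, 50) = 6.
 */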

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out_fail_force_wake;

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out_fail_force_wake:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		ret = 0;
		goto out;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	ret = pc_action_shutdown(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
		ret = -EIO;
	}

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

/**
 * xe_guc_pc_fini - Finalize GuC's Power Conservation component
 * @drm: DRM device
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_device_mem_access_get(xe);
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		xe_device_mem_access_put(xe);
		return;
	}

	xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
	XE_WARN_ON(xe_guc_pc_stop(pc));
	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
					  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
	if (err)
		return err;

	return 0;
}