drm/xe: move disable_c6 call
[linux-2.6-block.git] / drivers / gpu / drm / xe / xe_guc_pc.c
CommitLineData
dd08ebf6
MB
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2022 Intel Corporation
4 */
5
ea9f879d
LDM
6#include "xe_guc_pc.h"
7
8#include <linux/delay.h>
9
dd08ebf6 10#include <drm/drm_managed.h>
ea9f879d 11
b67cb798
MW
12#include "abi/guc_actions_abi.h"
13#include "abi/guc_actions_slpc_abi.h"
226bfec8 14#include "regs/xe_gt_regs.h"
c5841481 15#include "regs/xe_regs.h"
dd08ebf6
MB
16#include "xe_bo.h"
17#include "xe_device.h"
18#include "xe_gt.h"
975e4a37 19#include "xe_gt_idle.h"
dd08ebf6 20#include "xe_gt_sysfs.h"
ea9f879d 21#include "xe_gt_types.h"
dd08ebf6
MB
22#include "xe_guc_ct.h"
23#include "xe_map.h"
24#include "xe_mmio.h"
25#include "xe_pcode.h"
0f06dc10 26
5ec15f83 27#define MCHBAR_MIRROR_BASE_SNB 0x140000
dd08ebf6 28
0bc519d2 29#define RP_STATE_CAP XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
3512a78a
LDM
30#define RP0_MASK REG_GENMASK(7, 0)
31#define RP1_MASK REG_GENMASK(15, 8)
32#define RPN_MASK REG_GENMASK(23, 16)
dd08ebf6 33
0bc519d2 34#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
dd08ebf6
MB
35#define RPE_MASK REG_GENMASK(15, 8)
36
2846d103 37#define GT_PERF_STATUS XE_REG(0x1381b4)
0bc519d2 38#define CAGF_MASK REG_GENMASK(19, 11)
dd08ebf6
MB
39
40#define GT_FREQUENCY_MULTIPLIER 50
0bc519d2 41#define GT_FREQUENCY_SCALER 3
dd08ebf6
MB
42
43/**
44 * DOC: GuC Power Conservation (PC)
45 *
46 * GuC Power Conservation (PC) supports multiple features for the most
47 * efficient and performing use of the GT when GuC submission is enabled,
48 * including frequency management, Render-C states management, and various
49 * algorithms for power balancing.
50 *
51 * Single Loop Power Conservation (SLPC) is the name given to the suite of
52 * connected power conservation features in the GuC firmware. The firmware
53 * exposes a programming interface to the host for the control of SLPC.
54 *
55 * Frequency management:
56 * =====================
57 *
58 * Xe driver enables SLPC with all of its defaults features and frequency
59 * selection, which varies per platform.
dd08ebf6
MB
60 *
61 * Render-C States:
62 * ================
63 *
64 * Render-C states is also a GuC PC feature that is now enabled in Xe for
65 * all platforms.
dd08ebf6 66 *
dd08ebf6
MB
67 */
68
69static struct xe_guc *
70pc_to_guc(struct xe_guc_pc *pc)
71{
72 return container_of(pc, struct xe_guc, pc);
73}
74
75static struct xe_device *
76pc_to_xe(struct xe_guc_pc *pc)
77{
78 struct xe_guc *guc = pc_to_guc(pc);
79 struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
80
81 return gt_to_xe(gt);
82}
83
84static struct xe_gt *
85pc_to_gt(struct xe_guc_pc *pc)
86{
87 return container_of(pc, struct xe_gt, uc.guc.pc);
88}
89
dd08ebf6
MB
90static struct iosys_map *
91pc_to_maps(struct xe_guc_pc *pc)
92{
93 return &pc->bo->vmap;
94}
95
/* Read/write a field of the SLPC shared data structure in the pinned BO. */
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

/* Encode an SLPC event id and argument count into H2G message DW1. */
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
b6f468b8
RV
108static int wait_for_pc_state(struct xe_guc_pc *pc,
109 enum slpc_global_state state)
dd08ebf6 110{
b6f468b8
RV
111 int timeout_us = 5000; /* rought 5ms, but no need for precision */
112 int slept, wait = 10;
113
dd08ebf6 114 xe_device_assert_mem_access(pc_to_xe(pc));
b6f468b8
RV
115
116 for (slept = 0; slept < timeout_us;) {
117 if (slpc_shared_data_read(pc, header.global_state) == state)
118 return 0;
119
120 usleep_range(wait, wait << 1);
121 slept += wait;
122 wait <<= 1;
123 if (slept + wait > timeout_us)
124 wait = timeout_us - slept;
125 }
126
127 return -ETIMEDOUT;
dd08ebf6
MB
128}
129
130static int pc_action_reset(struct xe_guc_pc *pc)
131{
132 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
133 int ret;
134 u32 action[] = {
135 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
136 SLPC_EVENT(SLPC_EVENT_RESET, 2),
137 xe_bo_ggtt_addr(pc->bo),
138 0,
139 };
140
141 ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
142 if (ret)
143 drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));
144
145 return ret;
146}
147
dd08ebf6
MB
148static int pc_action_query_task_state(struct xe_guc_pc *pc)
149{
150 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
151 int ret;
152 u32 action[] = {
153 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
154 SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
155 xe_bo_ggtt_addr(pc->bo),
156 0,
157 };
158
b6f468b8 159 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
dd08ebf6
MB
160 return -EAGAIN;
161
162 /* Blocking here to ensure the results are ready before reading them */
163 ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
164 if (ret)
165 drm_err(&pc_to_xe(pc)->drm,
166 "GuC PC query task state failed: %pe", ERR_PTR(ret));
167
168 return ret;
169}
170
171static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
172{
173 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
174 int ret;
175 u32 action[] = {
176 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
177 SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
178 id,
179 value,
180 };
181
b6f468b8 182 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
dd08ebf6
MB
183 return -EAGAIN;
184
185 ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
186 if (ret)
187 drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
188 ERR_PTR(ret));
189
190 return ret;
191}
192
193static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
194{
195 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
196 u32 action[] = {
197 XE_GUC_ACTION_SETUP_PC_GUCRC,
198 mode,
199 };
200 int ret;
201
202 ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
203 if (ret)
204 drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
205 ERR_PTR(ret));
206 return ret;
207}
208
209static u32 decode_freq(u32 raw)
210{
211 return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
0bc519d2 212 GT_FREQUENCY_SCALER);
dd08ebf6
MB
213}
214
43efd3ba
VB
215static u32 encode_freq(u32 freq)
216{
0bc519d2 217 return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
43efd3ba
VB
218 GT_FREQUENCY_MULTIPLIER);
219}
220
dd08ebf6
MB
221static u32 pc_get_min_freq(struct xe_guc_pc *pc)
222{
223 u32 freq;
224
225 freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
226 slpc_shared_data_read(pc, task_state_data.freq));
227
228 return decode_freq(freq);
229}
230
43efd3ba
VB
231static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
232{
233 struct xe_gt *gt = pc_to_gt(pc);
234 u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
235
236 /* Allow/Disallow punit to process software freq requests */
237 xe_mmio_write32(gt, RP_CONTROL, state);
238}
239
240static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
241{
242 struct xe_gt *gt = pc_to_gt(pc);
243 u32 rpnswreq;
244
245 pc_set_manual_rp_ctrl(pc, true);
246
247 /* Req freq is in units of 16.66 Mhz */
248 rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
249 xe_mmio_write32(gt, RPNSWREQ, rpnswreq);
250
251 /* Sleep for a small time to allow pcode to respond */
252 usleep_range(100, 300);
253
254 pc_set_manual_rp_ctrl(pc, false);
255}
256
dd08ebf6
MB
257static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
258{
259 /*
260 * Let's only check for the rpn-rp0 range. If max < min,
261 * min becomes a fixed request.
262 */
263 if (freq < pc->rpn_freq || freq > pc->rp0_freq)
264 return -EINVAL;
265
266 /*
267 * GuC policy is to elevate minimum frequency to the efficient levels
268 * Our goal is to have the admin choices respected.
269 */
270 pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
271 freq < pc->rpe_freq);
272
273 return pc_action_set_param(pc,
274 SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
275 freq);
276}
277
278static int pc_get_max_freq(struct xe_guc_pc *pc)
279{
280 u32 freq;
281
282 freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
283 slpc_shared_data_read(pc, task_state_data.freq));
284
285 return decode_freq(freq);
286}
287
288static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
289{
290 /*
291 * Let's only check for the rpn-rp0 range. If max < min,
292 * min becomes a fixed request.
293 * Also, overclocking is not supported.
294 */
295 if (freq < pc->rpn_freq || freq > pc->rp0_freq)
296 return -EINVAL;
297
298 return pc_action_set_param(pc,
299 SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
300 freq);
301}
302
b3ab1b91
RV
303static void mtl_update_rpe_value(struct xe_guc_pc *pc)
304{
305 struct xe_gt *gt = pc_to_gt(pc);
306 u32 reg;
307
308 if (xe_gt_is_media_type(gt))
ce8bf5bd 309 reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
b3ab1b91 310 else
ce8bf5bd 311 reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);
b3ab1b91 312
effc560d 313 pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
b3ab1b91
RV
314}
315
316static void tgl_update_rpe_value(struct xe_guc_pc *pc)
dd08ebf6
MB
317{
318 struct xe_gt *gt = pc_to_gt(pc);
319 struct xe_device *xe = gt_to_xe(gt);
320 u32 reg;
321
322 /*
323 * For PVC we still need to use fused RP1 as the approximation for RPe
324 * For other platforms than PVC we get the resolved RPe directly from
325 * PCODE at a different register
326 */
327 if (xe->info.platform == XE_PVC)
ce8bf5bd 328 reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
dd08ebf6 329 else
0bc519d2 330 reg = xe_mmio_read32(gt, FREQ_INFO_REC);
dd08ebf6
MB
331
332 pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
b3ab1b91
RV
333}
334
335static void pc_update_rp_values(struct xe_guc_pc *pc)
336{
337 struct xe_gt *gt = pc_to_gt(pc);
338 struct xe_device *xe = gt_to_xe(gt);
339
8a93b0b4 340 if (GRAPHICS_VERx100(xe) >= 1270)
b3ab1b91
RV
341 mtl_update_rpe_value(pc);
342 else
343 tgl_update_rpe_value(pc);
dd08ebf6
MB
344
345 /*
346 * RPe is decided at runtime by PCODE. In the rare case where that's
347 * smaller than the fused min, we will trust the PCODE and use that
348 * as our minimum one.
349 */
350 pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
351}
352
bef52b5c
RV
353/**
354 * xe_guc_pc_get_act_freq - Get Actual running frequency
355 * @pc: The GuC PC
356 *
357 * Returns: The Actual running frequency. Which might be 0 if GT is in Render-C sleep state (RC6).
358 */
359u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
dd08ebf6 360{
bef52b5c 361 struct xe_gt *gt = pc_to_gt(pc);
b3ab1b91 362 struct xe_device *xe = gt_to_xe(gt);
dd08ebf6 363 u32 freq;
dd08ebf6 364
2846d103 365 /* When in RC6, actual frequency reported will be 0. */
8a93b0b4 366 if (GRAPHICS_VERx100(xe) >= 1270) {
ce8bf5bd 367 freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
b3ab1b91
RV
368 freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
369 } else {
2846d103 370 freq = xe_mmio_read32(gt, GT_PERF_STATUS);
0bc519d2 371 freq = REG_FIELD_GET(CAGF_MASK, freq);
b3ab1b91
RV
372 }
373
bef52b5c 374 freq = decode_freq(freq);
dd08ebf6 375
bef52b5c 376 return freq;
dd08ebf6 377}
dd08ebf6 378
bef52b5c
RV
379/**
380 * xe_guc_pc_get_cur_freq - Get Current requested frequency
381 * @pc: The GuC PC
382 * @freq: A pointer to a u32 where the freq value will be returned
383 *
384 * Returns: 0 on success,
385 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
386 */
387int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
dd08ebf6 388{
bef52b5c
RV
389 struct xe_gt *gt = pc_to_gt(pc);
390 int ret;
dd08ebf6
MB
391
392 /*
393 * GuC SLPC plays with cur freq request when GuCRC is enabled
394 * Block RC6 for a more reliable read.
395 */
396 ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
397 if (ret)
1e941c98 398 return ret;
dd08ebf6 399
bef52b5c 400 *freq = xe_mmio_read32(gt, RPNSWREQ);
dd08ebf6 401
bef52b5c
RV
402 *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
403 *freq = decode_freq(*freq);
dd08ebf6
MB
404
405 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
1e941c98 406 return 0;
dd08ebf6 407}
dd08ebf6 408
bef52b5c
RV
409/**
410 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
411 * @pc: The GuC PC
412 *
413 * Returns: RP0 freq.
414 */
415u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
dd08ebf6 416{
bef52b5c 417 return pc->rp0_freq;
dd08ebf6 418}
dd08ebf6 419
bef52b5c
RV
420/**
421 * xe_guc_pc_get_rpe_freq - Get the RPe freq
422 * @pc: The GuC PC
423 *
424 * Returns: RPe freq.
425 */
426u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
dd08ebf6 427{
dd08ebf6 428 pc_update_rp_values(pc);
bef52b5c
RV
429
430 return pc->rpe_freq;
dd08ebf6 431}
dd08ebf6 432
bef52b5c
RV
433/**
434 * xe_guc_pc_get_rpn_freq - Get the RPn freq
435 * @pc: The GuC PC
436 *
437 * Returns: RPn freq.
438 */
439u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
dd08ebf6 440{
bef52b5c 441 return pc->rpn_freq;
dd08ebf6 442}
dd08ebf6 443
bef52b5c
RV
444/**
445 * xe_guc_pc_get_min_freq - Get the min operational frequency
446 * @pc: The GuC PC
447 * @freq: A pointer to a u32 where the freq value will be returned
448 *
449 * Returns: 0 on success,
450 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
451 */
452int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
dd08ebf6 453{
dd08ebf6 454 struct xe_gt *gt = pc_to_gt(pc);
bef52b5c 455 int ret;
dd08ebf6 456
dd08ebf6
MB
457 mutex_lock(&pc->freq_lock);
458 if (!pc->freq_ready) {
459 /* Might be in the middle of a gt reset */
460 ret = -EAGAIN;
461 goto out;
462 }
463
464 /*
465 * GuC SLPC plays with min freq request when GuCRC is enabled
466 * Block RC6 for a more reliable read.
467 */
468 ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
469 if (ret)
470 goto out;
471
472 ret = pc_action_query_task_state(pc);
473 if (ret)
474 goto fw;
475
bef52b5c 476 *freq = pc_get_min_freq(pc);
dd08ebf6
MB
477
478fw:
479 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
480out:
481 mutex_unlock(&pc->freq_lock);
dd08ebf6
MB
482 return ret;
483}
484
bef52b5c
RV
485/**
486 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
487 * @pc: The GuC PC
488 * @freq: The selected minimal frequency
489 *
490 * Returns: 0 on success,
491 * -EAGAIN if GuC PC not ready (likely in middle of a reset),
492 * -EINVAL if value out of bounds.
493 */
494int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
dd08ebf6 495{
bef52b5c 496 int ret;
dd08ebf6 497
dd08ebf6
MB
498 mutex_lock(&pc->freq_lock);
499 if (!pc->freq_ready) {
500 /* Might be in the middle of a gt reset */
501 ret = -EAGAIN;
502 goto out;
503 }
504
505 ret = pc_set_min_freq(pc, freq);
506 if (ret)
507 goto out;
508
509 pc->user_requested_min = freq;
510
511out:
512 mutex_unlock(&pc->freq_lock);
bef52b5c 513 return ret;
dd08ebf6 514}
dd08ebf6 515
bef52b5c
RV
516/**
517 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
518 * @pc: The GuC PC
519 * @freq: A pointer to a u32 where the freq value will be returned
520 *
521 * Returns: 0 on success,
522 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
523 */
524int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
dd08ebf6 525{
bef52b5c 526 int ret;
dd08ebf6 527
dd08ebf6
MB
528 mutex_lock(&pc->freq_lock);
529 if (!pc->freq_ready) {
530 /* Might be in the middle of a gt reset */
531 ret = -EAGAIN;
532 goto out;
533 }
534
535 ret = pc_action_query_task_state(pc);
536 if (ret)
537 goto out;
538
bef52b5c 539 *freq = pc_get_max_freq(pc);
dd08ebf6
MB
540
541out:
542 mutex_unlock(&pc->freq_lock);
dd08ebf6
MB
543 return ret;
544}
545
bef52b5c
RV
546/**
547 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
548 * @pc: The GuC PC
549 * @freq: The selected maximum frequency value
550 *
551 * Returns: 0 on success,
552 * -EAGAIN if GuC PC not ready (likely in middle of a reset),
553 * -EINVAL if value out of bounds.
554 */
555int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
dd08ebf6 556{
bef52b5c 557 int ret;
dd08ebf6 558
dd08ebf6
MB
559 mutex_lock(&pc->freq_lock);
560 if (!pc->freq_ready) {
561 /* Might be in the middle of a gt reset */
562 ret = -EAGAIN;
563 goto out;
564 }
565
566 ret = pc_set_max_freq(pc, freq);
567 if (ret)
568 goto out;
569
570 pc->user_requested_max = freq;
571
572out:
573 mutex_unlock(&pc->freq_lock);
bef52b5c 574 return ret;
dd08ebf6 575}
dd08ebf6 576
1c2097bb 577/**
7b076d14 578 * xe_guc_pc_c_status - get the current GT C state
1c2097bb
RT
579 * @pc: XE_GuC_PC instance
580 */
7b076d14 581enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
dd08ebf6 582{
dd08ebf6 583 struct xe_gt *gt = pc_to_gt(pc);
7b076d14 584 u32 reg, gt_c_state;
dd08ebf6 585
7b076d14
BN
586 if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
587 reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
588 gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
589 } else {
590 reg = xe_mmio_read32(gt, GT_CORE_STATUS);
591 gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
592 }
593
7b076d14
BN
594 switch (gt_c_state) {
595 case GT_C6:
1c2097bb 596 return GT_IDLE_C6;
7b076d14 597 case GT_C0:
1c2097bb 598 return GT_IDLE_C0;
dd08ebf6 599 default:
1c2097bb 600 return GT_IDLE_UNKNOWN;
dd08ebf6
MB
601 }
602}
dd08ebf6 603
1c2097bb
RT
604/**
605 * xe_guc_pc_rc6_residency - rc6 residency counter
606 * @pc: Xe_GuC_PC instance
607 */
608u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
dd08ebf6 609{
dd08ebf6
MB
610 struct xe_gt *gt = pc_to_gt(pc);
611 u32 reg;
dd08ebf6 612
ce8bf5bd 613 reg = xe_mmio_read32(gt, GT_GFX_RC6);
dd08ebf6 614
1c2097bb 615 return reg;
dd08ebf6 616}
dd08ebf6 617
7b076d14
BN
618/**
619 * xe_guc_pc_mc6_residency - mc6 residency counter
620 * @pc: Xe_GuC_PC instance
621 */
622u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
623{
624 struct xe_gt *gt = pc_to_gt(pc);
625 u64 reg;
626
7b076d14 627 reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
7b076d14
BN
628
629 return reg;
630}
631
b3ab1b91
RV
632static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
633{
634 struct xe_gt *gt = pc_to_gt(pc);
635 u32 reg;
636
637 xe_device_assert_mem_access(pc_to_xe(pc));
638
639 if (xe_gt_is_media_type(gt))
ce8bf5bd 640 reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
b3ab1b91 641 else
ce8bf5bd 642 reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);
effc560d
BN
643
644 pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
645
646 pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
b3ab1b91
RV
647}
648
649static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
dd08ebf6
MB
650{
651 struct xe_gt *gt = pc_to_gt(pc);
652 struct xe_device *xe = gt_to_xe(gt);
653 u32 reg;
654
655 xe_device_assert_mem_access(pc_to_xe(pc));
656
657 if (xe->info.platform == XE_PVC)
ce8bf5bd 658 reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
dd08ebf6 659 else
0bc519d2 660 reg = xe_mmio_read32(gt, RP_STATE_CAP);
dd08ebf6
MB
661 pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
662 pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
663}
664
b3ab1b91
RV
/* Dispatch to the per-platform fused RP0/RPn initialization. */
static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}
43efd3ba
VB
675
676/**
677 * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
678 * frequency to allow faster GuC load times
679 * @pc: Xe_GuC_PC instance
680 */
681void xe_guc_pc_init_early(struct xe_guc_pc *pc)
682{
683 struct xe_gt *gt = pc_to_gt(pc);
684
685 xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
686 pc_init_fused_rp_values(pc);
687 pc_set_cur_freq(pc, pc->rp0_freq);
688}
689
dd08ebf6
MB
690static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
691{
692 int ret;
693
694 lockdep_assert_held(&pc->freq_lock);
695
696 ret = pc_action_query_task_state(pc);
697 if (ret)
c04b8aae 698 goto out;
dd08ebf6
MB
699
700 /*
701 * GuC defaults to some RPmax that is not actually achievable without
702 * overclocking. Let's adjust it to the Hardware RP0, which is the
703 * regular maximum
704 */
c04b8aae
VB
705 if (pc_get_max_freq(pc) > pc->rp0_freq) {
706 ret = pc_set_max_freq(pc, pc->rp0_freq);
707 if (ret)
708 goto out;
709 }
dd08ebf6
MB
710
711 /*
712 * Same thing happens for Server platforms where min is listed as
713 * RPMax
714 */
715 if (pc_get_min_freq(pc) > pc->rp0_freq)
c04b8aae 716 ret = pc_set_min_freq(pc, pc->rp0_freq);
dd08ebf6 717
c04b8aae
VB
718out:
719 return ret;
dd08ebf6
MB
720}
721
722static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
723{
724 int ret = 0;
725
726 lockdep_assert_held(&pc->freq_lock);
727
728 if (pc->user_requested_min != 0) {
729 ret = pc_set_min_freq(pc, pc->user_requested_min);
730 if (ret)
731 return ret;
732 }
733
734 if (pc->user_requested_max != 0) {
735 ret = pc_set_max_freq(pc, pc->user_requested_max);
736 if (ret)
737 return ret;
738 }
739
740 return ret;
741}
742
1737785a
RT
743/**
744 * xe_guc_pc_gucrc_disable - Disable GuC RC
745 * @pc: Xe_GuC_PC instance
746 *
747 * Disables GuC RC by taking control of RC6 back from GuC.
748 *
749 * Return: 0 on success, negative error code on error.
750 */
751int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
dd08ebf6 752{
68661c69 753 struct xe_device *xe = pc_to_xe(pc);
dd08ebf6 754 struct xe_gt *gt = pc_to_gt(pc);
0c005429 755 int ret = 0;
dd08ebf6 756
68661c69
VB
757 if (xe->info.skip_guc_pc)
758 return 0;
759
dd08ebf6
MB
760 ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
761 if (ret)
1e941c98 762 return ret;
dd08ebf6
MB
763
764 ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
765 if (ret)
1e941c98 766 return ret;
dd08ebf6 767
68661c69 768 xe_gt_idle_disable_c6(gt);
dd08ebf6
MB
769
770 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
0c005429 771
1e941c98 772 return 0;
dd08ebf6
MB
773}
774
775static void pc_init_pcode_freq(struct xe_guc_pc *pc)
776{
777 u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
778 u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
779
780 XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
781}
782
783static int pc_init_freqs(struct xe_guc_pc *pc)
784{
785 int ret;
786
787 mutex_lock(&pc->freq_lock);
788
789 ret = pc_adjust_freq_bounds(pc);
790 if (ret)
791 goto out;
792
793 ret = pc_adjust_requested_freq(pc);
794 if (ret)
795 goto out;
796
797 pc_update_rp_values(pc);
798
799 pc_init_pcode_freq(pc);
800
801 /*
802 * The frequencies are really ready for use only after the user
803 * requested ones got restored.
804 */
805 pc->freq_ready = true;
806
807out:
808 mutex_unlock(&pc->freq_lock);
809 return ret;
810}
811
812/**
813 * xe_guc_pc_start - Start GuC's Power Conservation component
814 * @pc: Xe_GuC_PC instance
815 */
816int xe_guc_pc_start(struct xe_guc_pc *pc)
817{
818 struct xe_device *xe = pc_to_xe(pc);
819 struct xe_gt *gt = pc_to_gt(pc);
820 u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
821 int ret;
822
c4991ee0 823 xe_gt_assert(gt, xe_device_uc_enabled(xe));
dd08ebf6 824
dd08ebf6
MB
825 ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
826 if (ret)
1e941c98 827 return ret;
dd08ebf6 828
975e4a37
VB
829 if (xe->info.skip_guc_pc) {
830 if (xe->info.platform != XE_PVC)
831 xe_gt_idle_enable_c6(gt);
832
833 /* Request max possible since dynamic freq mgmt is not enabled */
834 pc_set_cur_freq(pc, UINT_MAX);
835
836 ret = 0;
837 goto out;
838 }
839
840 memset(pc->bo->vmap.vaddr, 0, size);
841 slpc_shared_data_write(pc, header.size, size);
842
dd08ebf6
MB
843 ret = pc_action_reset(pc);
844 if (ret)
845 goto out;
846
b6f468b8 847 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
dd08ebf6
MB
848 drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
849 ret = -EIO;
850 goto out;
851 }
852
853 ret = pc_init_freqs(pc);
854 if (ret)
855 goto out;
856
857 if (xe->info.platform == XE_PVC) {
1737785a 858 xe_guc_pc_gucrc_disable(pc);
dd08ebf6
MB
859 ret = 0;
860 goto out;
861 }
862
863 ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);
864
865out:
dd08ebf6
MB
866 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
867 return ret;
868}
869
870/**
871 * xe_guc_pc_stop - Stop GuC's Power Conservation component
872 * @pc: Xe_GuC_PC instance
873 */
874int xe_guc_pc_stop(struct xe_guc_pc *pc)
875{
975e4a37 876 struct xe_device *xe = pc_to_xe(pc);
dd08ebf6 877
975e4a37
VB
878 if (xe->info.skip_guc_pc) {
879 xe_gt_idle_disable_c6(pc_to_gt(pc));
1e941c98 880 return 0;
975e4a37
VB
881 }
882
dd08ebf6
MB
883 mutex_lock(&pc->freq_lock);
884 pc->freq_ready = false;
885 mutex_unlock(&pc->freq_lock);
886
1e941c98 887 return 0;
dd08ebf6
MB
888}
889
bef52b5c
RV
890/**
891 * xe_guc_pc_fini - Finalize GuC's Power Conservation component
8a4587ef
MW
892 * @drm: DRM device
893 * @arg: opaque pointer that should point to Xe_GuC_PC instance
bef52b5c 894 */
8a4587ef 895static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
dd08ebf6 896{
8a4587ef 897 struct xe_guc_pc *pc = arg;
975e4a37 898
649a125a 899 XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
1737785a 900 XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
dd08ebf6 901 XE_WARN_ON(xe_guc_pc_stop(pc));
8a4587ef 902 xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
dd08ebf6
MB
903}
904
905/**
906 * xe_guc_pc_init - Initialize GuC's Power Conservation component
907 * @pc: Xe_GuC_PC instance
908 */
909int xe_guc_pc_init(struct xe_guc_pc *pc)
910{
911 struct xe_gt *gt = pc_to_gt(pc);
876611c2 912 struct xe_tile *tile = gt_to_tile(gt);
dd08ebf6
MB
913 struct xe_device *xe = gt_to_xe(gt);
914 struct xe_bo *bo;
915 u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
8a4587ef 916 int err;
dd08ebf6 917
975e4a37
VB
918 if (xe->info.skip_guc_pc)
919 return 0;
920
8a4587ef
MW
921 err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
922 if (err)
923 return err;
dd08ebf6 924
0e1a47fc 925 bo = xe_managed_bo_create_pin_map(xe, tile, size,
62742d12
LDM
926 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
927 XE_BO_FLAG_GGTT |
928 XE_BO_FLAG_GGTT_INVALIDATE);
dd08ebf6
MB
929 if (IS_ERR(bo))
930 return PTR_ERR(bo);
931
932 pc->bo = bo;
8a4587ef 933
5a73dd61 934 return drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
dd08ebf6 935}