// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"
/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
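
/*
 * For illustration, enable_guc is a bitmask selecting the operations above
 * (the values here assume the upstream i915.enable_guc semantics, where bit 0
 * requests GuC submission and bit 1 requests HuC loading):
 *
 *	modprobe i915 enable_guc=2	# load GuC only to authenticate the HuC
 *	modprobe i915 enable_guc=3	# GuC submission plus HuC loading
 */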
void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}
static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
		     gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}
static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_ENABLE, events);
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_MASK, ~events);
	spin_unlock_irq(&gt->irq_lock);
}
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}
void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}
void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
			(GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}
static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}
static u32 guc_ctl_wa_flags(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	/* Wa_22012773006:gen11,gen12 < XeHP */
	if (GRAPHICS_VER(gt->i915) >= 11 &&
	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
		flags |= GUC_WA_POLLCS;

	/* Wa_16011759253:dg2_g10:a0 */
	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
		flags |= GUC_WA_GAM_CREDITS;

	/* Wa_14014475959:dg2 */
	if (IS_DG2(gt->i915))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	/*
	 * Wa_14012197797:dg2_g10:a0,dg2_g11:a0
	 * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
	 *
	 * The same WA bit is used for both and 22011391025 is applicable to
	 * all DG2.
	 */
	if (IS_DG2(gt->i915))
		flags |= GUC_WA_DUAL_QUEUE;

	/* Wa_22011802037: graphics version 11/12 */
	if (IS_GRAPHICS_VER(gt->i915, 11, 12))
		flags |= GUC_WA_PRE_PARSER;

	/* Wa_16011777198:dg2 */
	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
		flags |= GUC_WA_RCS_RESET_BEFORE_RC6;

	/*
	 * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
	 * Wa_22012727685:dg2_g11[a0..)
	 */
	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	/* Wa_16015675438 */
	if (!RCS_MASK(gt))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	return flags;
}
static u32 guc_ctl_devid(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}
/*
 * Write the GuC parameter block to the hardware before starting the
 * firmware transfer. These parameters are read by the firmware on
 * startup and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	intel_wakeref_t wakeref;
	u32 stamp = 0;
	u64 ktime;

	intel_device_info_print_runtime(RUNTIME_INFO(gt->i915), p);

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
	ktime = ktime_get_boottime_ns();

	drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
	drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
	drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
}
int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_capture_init(guc);
	if (ret)
		goto err_log;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_capture;

	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_capture:
	intel_guc_capture_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	i915_probe_error(gt->i915, "failed with %d\n", ret);
	return ret;
}
void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_capture_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}
/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
				       GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
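
/*
 * A minimal usage sketch for intel_guc_send_mmio() (illustrative only;
 * "action" stands in for any H2G action code and the request carries no
 * payload). The caller encodes an HXG REQUEST header in dword 0 and the
 * reply comes back through the same scratch registers:
 *
 *	u32 request[] = {
 *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
 *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
 *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action),
 *	};
 *
 *	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
 */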
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
		drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");

	return 0;
}
/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This H2G MMIO command tears down the GuC in two steps. First it will
		 * generate a G2H CTB for every active context indicating a reset. In
		 * practice the i915 shouldn't ever get a G2H as suspend should only be
		 * called when the GPU is idle. Next, it tears down the CTBs and this
		 * H2G MMIO command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and do the
		 * clean up in sanitize() and re-initialisation on resume and hopefully
		 * the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}
/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is later added here, it must support doing nothing
	 * if submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}
/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
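
/*
 * A sketch of the address check implied by the diagram above, modelled on the
 * intel_guc_ggtt_offset() helper in intel_guc.h (shown here for illustration;
 * the header holds the authoritative version): any GGTT offset handed to the
 * GuC must lie in [ggtt_pin_bias, GUC_GGTT_TOP).
 *
 *	u32 offset = i915_ggtt_offset(vma);
 *
 *	GEM_BUG_ON(offset < guc->ggtt_pin_bias);
 *	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
 */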
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GuC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}
/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 * @out_vma: return variable for the allocated vma pointer
 * @out_vaddr: return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return: 0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(guc_to_gt(guc)->i915,
									vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
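
/*
 * Typical usage sketch (hypothetical caller; the size and release flags are
 * illustrative): allocate a GuC-visible buffer, use the CPU mapping, then
 * drop both the mapping and the pin in one call.
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *
 *	... write into vaddr ...
 *
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */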
static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
	};
	int ret;

	GEM_BUG_ON(len > 2);
	GEM_BUG_ON(len == 1 && upper_32_bits(value));

	/* Self config must go over MMIO */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}
static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int err = __guc_action_self_cfg(guc, key, len, value);

	if (unlikely(err))
		i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
				 ERR_PTR(err), key, value);
	return err;
}
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
{
	return __guc_self_cfg(guc, key, 1, value);
}

int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
{
	return __guc_self_cfg(guc, key, 2, value);
}
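
/*
 * Usage sketch (the key name below is hypothetical): self-config values are
 * KLV (key/length/value) pairs that the GuC latches at initialization, and
 * they must travel over MMIO because the CTBs they often describe may not be
 * usable yet.
 *
 *	err = intel_guc_self_cfg64(guc, SOME_SELF_CFG_KLV_KEY, ggtt_addr);
 *	if (err)
 *		return err;
 */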
/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}
void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/*
		 * Ensure intel_uncore_write_fw can be used rather than
		 * intel_uncore_write.
		 */
		GEM_BUG_ON(guc->send_regs.fw_domains);

		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will have to be chosen. This function is also used
		 * to enforce ordering of a work queue item write and an update
		 * to the process descriptor. When a work queue is being used,
		 * CTBs are also the only mechanism of communication.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() sufficient for a barrier if in smem */
		wmb();
	}
}
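
/*
 * Ordering sketch for the barrier above (hypothetical caller, modelled on how
 * CTB writers behave): payload writes must be visible to the GuC before the
 * descriptor tail that publishes them.
 *
 *	memcpy(cmds + tail, msg, len);		// write the message payload
 *	intel_guc_write_barrier(guc);		// order payload vs. tail update
 *	WRITE_ONCE(desc->tail, new_tail);	// then publish the new tail
 */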