/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0
/*
 * GPU GFX IP block helper functions.
 */
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}
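
/*
 * Worked example (illustrative values, not read from any particular ASIC):
 * with 4 pipes per MEC and 8 queues per pipe, (mec 1, pipe 2, queue 3)
 * maps to bit 1 * 4 * 8 + 2 * 8 + 3 = 51, and
 * amdgpu_queue_mask_bit_to_mec_queue() inverts 51 back to (1, 2, 3).
 */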
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
	      / adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}
/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader-array CU disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}
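
/*
 * Example (assuming the standard module parameter spelling,
 * amdgpu.disable_cu): booting with amdgpu.disable_cu=0.0.4,1.0.7 disables
 * CU 4 in SE0/SH0 and CU 7 in SE1/SH0; entries are comma-separated
 * se.sh.cu triples, as parsed above.
 */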
static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
		return true;

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as the high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use the first queue as the high priority compute queue if
	 * we have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: spread the queues evenly across all pipes on MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
				adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
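
/*
 * Illustrative result of the multipipe policy (example numbers): with
 * 4 pipes per MEC and 8 compute rings, the loop above sets bits for
 * (pipe0,q0) (pipe1,q0) (pipe2,q0) (pipe3,q0) (pipe0,q1) ... so queues
 * are striped across pipes before a second queue on any pipe is used.
 */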
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
				adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* originally, the KIQ MQD was put in the GTT domain, but for
		 * SRIOV the VRAM domain is a must; otherwise the hypervisor
		 * triggers SAVE_VF failures after the driver is unloaded,
		 * which means the MQD was deallocated and gart_unbind was
		 * called. To avoid that divergence, we use the VRAM domain
		 * for the KIQ MQD on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}
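
/*
 * The mqd_backup buffers allocated above hold CPU-side copies of each MQD,
 * which the per-generation gfx code can use to restore queue state (e.g.
 * around a GPU reset); allocation failure is only a warning because the
 * rings still function without that fallback.
 */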
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&adev->gfx.kiq.ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_compute_rings)) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	return r;
}
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}
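
/*
 * Note the fixed 4-pipes-per-MEC / 8-queues-per-pipe layout of the
 * SET_RESOURCES mask computed above: e.g. the driver queue bit for
 * (mec 1, pipe 0, queue 2) becomes mask bit 1 * 32 + 0 * 8 + 2 = 34,
 * independent of the ASIC's actual pipe/queue counts.
 */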
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}
/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. gfx off is enabled by the gfx ip after gfx CG/PG is enabled.
 * 2. other clients can send requests to disable gfx off, and such requests should be honored.
 * 3. other clients can cancel their requests to disable gfx off.
 * 4. other clients should not send an enable request before having sent a disable request.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}
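
/*
 * Typical caller pattern (illustrative, not a caller in this file): code
 * that must run with GFXOFF disallowed brackets its work as
 *
 *	amdgpu_gfx_off_ctrl(adev, false);
 *	...touch GFX registers / hardware...
 *	amdgpu_gfx_off_ctrl(adev, true);
 *
 * so gfx_off_req_count is bumped and later dropped; GFXOFF is only
 * re-armed (after GFX_OFF_DELAY_ENABLE) once all such requests balance.
 */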
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore in the gpu reset case, because waiting here may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore in the gpu reset case, because waiting here may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_wreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_wreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}
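
/*
 * Example (assuming the standard module parameter spelling,
 * amdgpu.num_kcq): amdgpu.num_kcq=2 limits the driver to two kernel
 * compute queues, -1 (the default) selects 8, and out-of-range values
 * fall back to 8 with the warning above.
 */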
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		break;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}