drm/amdgpu: move more defines into amdgpu_irq.h
Author: Christian König <christian.koenig@amd.com>
Date: Mon, 17 Sep 2018 13:29:28 +0000 (15:29 +0200)
Committer: Alex Deucher <alexander.deucher@amd.com>
Commit date: Thu, 27 Sep 2018 02:09:22 +0000 (21:09 -0500)
Everything that isn't related to the IH ring.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
35 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik_ih.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_ih.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_ih.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/si_ih.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c

index fd2bbaa20ab4f6c7924c72d2092900abe0ff23aa..9ce8c93ec19bf9422e8e1a8c9f60f1a58291bc87 100644 (file)
 #ifndef __AMDGPU_IH_H__
 #define __AMDGPU_IH_H__
 
-#include "soc15_ih_clientid.h"
-
 struct amdgpu_device;
-
-#define AMDGPU_IH_CLIENTID_LEGACY 0
-#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+struct amdgpu_iv_entry;
 
 /*
  * R6xx+ IH ring
@@ -51,22 +47,6 @@ struct amdgpu_ih_ring {
        dma_addr_t              rb_dma_addr; /* only used when use_bus_addr = true */
 };
 
-#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
-
-struct amdgpu_iv_entry {
-       unsigned client_id;
-       unsigned src_id;
-       unsigned ring_id;
-       unsigned vmid;
-       unsigned vmid_src;
-       uint64_t timestamp;
-       unsigned timestamp_src;
-       unsigned pasid;
-       unsigned pasid_src;
-       unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
-       const uint32_t *iv_entry;
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
        /* ring read/write ptr handling, called from interrupt context */
index 2fca08e130b6ee0da5c121ca4f2878f778674d0f..52c17f6219a706d2793d999eef39979342c5ab28 100644 (file)
@@ -124,7 +124,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
        int r;
 
        spin_lock_irqsave(&adev->irq.lock, irqflags);
-       for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+       for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;
 
@@ -302,7 +302,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
                cancel_work_sync(&adev->reset_work);
        }
 
-       for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+       for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;
 
@@ -342,7 +342,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source)
 {
-       if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+       if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
                return -EINVAL;
 
        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
@@ -396,7 +396,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 
        trace_amdgpu_iv(entry);
 
-       if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+       if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
                return;
        }
@@ -469,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
        int i, j, k;
 
-       for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+       for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;
 
index 3375ad778edce4723ad6b09ee5410933d91fe2db..f6ce171cb8aa76a3ba38ba396ee0ad2d7b5b153d 100644 (file)
 #define __AMDGPU_IRQ_H__
 
 #include <linux/irqdomain.h>
+#include "soc15_ih_clientid.h"
 #include "amdgpu_ih.h"
 
-#define AMDGPU_MAX_IRQ_SRC_ID  0x100
+#define AMDGPU_MAX_IRQ_SRC_ID          0x100
 #define AMDGPU_MAX_IRQ_CLIENT_ID       0x100
 
+#define AMDGPU_IRQ_CLIENTID_LEGACY     0
+#define AMDGPU_IRQ_CLIENTID_MAX                SOC15_IH_CLIENTID_MAX
+
+#define AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW        4
+
 struct amdgpu_device;
-struct amdgpu_iv_entry;
 
 enum amdgpu_interrupt_state {
        AMDGPU_IRQ_STATE_DISABLE,
        AMDGPU_IRQ_STATE_ENABLE,
 };
 
+struct amdgpu_iv_entry {
+       unsigned client_id;
+       unsigned src_id;
+       unsigned ring_id;
+       unsigned vmid;
+       unsigned vmid_src;
+       uint64_t timestamp;
+       unsigned timestamp_src;
+       unsigned pasid;
+       unsigned pasid_src;
+       unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
+       const uint32_t *iv_entry;
+};
+
 struct amdgpu_irq_src {
        unsigned                                num_types;
        atomic_t                                *enabled_types;
@@ -63,7 +82,7 @@ struct amdgpu_irq {
        bool                            installed;
        spinlock_t                      lock;
        /* interrupt sources */
-       struct amdgpu_irq_client        client[AMDGPU_IH_CLIENTID_MAX];
+       struct amdgpu_irq_client        client[AMDGPU_IRQ_CLIENTID_MAX];
 
        /* status, etc. */
        bool                            msi_enabled; /* msi enabled */
index d2469453dca26e8d4848fa98037d44bdea92cd8d..79220a91abe3d48c32ef68c007efc07ee98b1638 100644 (file)
@@ -6277,12 +6277,12 @@ static int ci_dpm_sw_init(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
-       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
index c37c4b76e7e9f74f13e066b9aa0c5b1fb3399cba..b5775c6a857ba63148d21df914509263383326d0 100644 (file)
@@ -276,7 +276,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
-       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+       entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index ee9d5c92edb10ad2a4bbdfaac059f736b6d32500..b918c8886b75c4104d2fc5b03c7863bf9a4d6e41 100644 (file)
@@ -970,19 +970,19 @@ static int cik_sdma_sw_init(void *handle)
        }
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
index 306e0bd154fa1a958185c930286f688bca0ede70..df5ac4d85a00a5767011c398904a92ab11edce37 100644 (file)
@@ -255,7 +255,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
-       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+       entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 89c09c396fe6450f4ff39da30fdc266a85749869..4cfecdce29a3c86dc30e509007ecec3a22eda338 100644 (file)
@@ -2746,19 +2746,19 @@ static int dce_v10_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r)
                return r;
 
index cf6faaa05dbbf8d15a607deca29197f7f6f39077..7c868916d90f83a4493ffb27db4e6286430c75f5 100644 (file)
@@ -2867,19 +2867,19 @@ static int dce_v11_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r)
                return r;
 
index 371aa05bf537c2ef5cf80c31a4dd0b2c3b4cfdda..17eaaba3601706ce4f8ed9010c9460a5b074e619 100644 (file)
@@ -2616,19 +2616,19 @@ static int dce_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = 8; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
        if (r)
                return r;
 
index 30e76f2407c23e953e91d4db97a850979f9ac768..8c0576978d36220d305e1b0231764d8b78b2b8ea 100644 (file)
@@ -2643,19 +2643,19 @@ static int dce_v8_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = 8; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
        if (r)
                return r;
 
index 2cc480d653941bb87d30068feab4c5fe4d478d70..fdace004544d4ff814f43d2c5e894f8cd0dca51a 100644 (file)
@@ -372,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
        if (r)
                return r;
 
index 95d916ff099e31538673e84aadc6554011b22492..d76eb27945dc897230640e1727cc2b8def35f1cb 100644 (file)
@@ -3094,15 +3094,15 @@ static int gfx_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
        if (r)
                return r;
 
index 1c9ede0ba77ff45816e53bfe9e3a1da2ddbb1730..0e72bc09939aca1415320b027d9f57380e6eebc4 100644 (file)
@@ -4516,18 +4516,18 @@ static int gfx_v7_0_sw_init(void *handle)
        adev->gfx.mec.num_queue_per_pipe = 8;
 
        /* EOP Event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
        if (r)
                return r;
 
        /* Privileged reg */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
                              &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
        /* Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
                              &adev->gfx.priv_inst_irq);
        if (r)
                return r;
index 463d07e186d40e52afa9b00916e60cd6855ecce5..2aeef2bb93a465633a6224e0d3e52a95bb3f9799 100644 (file)
@@ -2049,35 +2049,35 @@ static int gfx_v8_0_sw_init(void *handle)
        adev->gfx.mec.num_queue_per_pipe = 8;
 
        /* KIQ event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
        if (r)
                return r;
 
        /* EOP Event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
        if (r)
                return r;
 
        /* Privileged reg */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
                              &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
        /* Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
                              &adev->gfx.priv_inst_irq);
        if (r)
                return r;
 
        /* Add CP EDC/ECC irq  */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
                              &adev->gfx.cp_ecc_error_irq);
        if (r)
                return r;
 
        /* SQ interrupts. */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
                              &adev->gfx.sq_irq);
        if (r) {
                DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
index 3b8ac4442f06280c1346248e9de870091fd47e64..e1c2b4e9c7b23a10ac3b1b2b5375d2bf84eae9c2 100644 (file)
@@ -859,11 +859,11 @@ static int gmc_v6_0_sw_init(void *handle)
                adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
        if (r)
                return r;
 
index 899634ce42386b0395b4dd9b7c63d1f63a51ecd5..910c4ce19cb3b329e49217e9901c1da5840a829d 100644 (file)
@@ -991,11 +991,11 @@ static int gmc_v7_0_sw_init(void *handle)
                adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
index 79143ca7cfac1558919065dffd6cf95ea4d01c1f..1d3265c97b704b5a403cca7721818ac91dad6c4c 100644 (file)
@@ -1095,11 +1095,11 @@ static int gmc_v8_0_sw_init(void *handle)
                adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
index 9005deeec612a22fbffeca016826a9f6f99653d0..cf0fc61aebe6d5f2de6f9217126dbb1fa94fe05b 100644 (file)
@@ -255,7 +255,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
-       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+       entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index cb79a93c2eb73a5f23fb008cee50e80325ada627..d0e478f434434b633be26692138042e896459714 100644 (file)
@@ -2995,12 +2995,12 @@ static int kv_dpm_sw_init(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
-       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
index 842567b53df56d1824b6ba9e696caa436cdfafad..64e875d528dd858bf2686368a77b95a57372bac9 100644 (file)
@@ -580,11 +580,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
 {
        int r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
index 0c5a576dee13196c0080c62b0bc55a1af76cb820..cd781abc4953217cbd87ac33ea8724486dae6c09 100644 (file)
@@ -898,19 +898,19 @@ static int sdma_v2_4_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
index 2587b8de918a1fdf8c424732d3c79cd733fccb67..6d5c8ac64874f80e86812387f3a2a4731aafc775 100644 (file)
@@ -1177,19 +1177,19 @@ static int sdma_v3_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
index c3510a703f9ff4c4c66d8575b5c1c85f81d5b514..d4ceaf440f26a67ebf40efa85f1318b44334609e 100644 (file)
@@ -502,12 +502,12 @@ static int si_dma_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* DMA0 trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* DMA1 trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
        if (r)
                return r;
 
index 1de96995e6900c934c91cf610160768b6c08cd37..da58040fdbdc6f27c1cc3eee7c1b822fcce21baf 100644 (file)
@@ -7687,11 +7687,11 @@ static int si_dpm_sw_init(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
-       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
index acdf6075957aa8f87b83ca3ca447c130fc563dfb..b3d7d9f83202d8a31379ef42288e2a2dc7f5f6af 100644 (file)
@@ -142,7 +142,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
-       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+       entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 83fdf810ffc7757eda7ccc0057c8463640d3b8bd..3abffd06b5c785488795a952ece19680e4d559d1 100644 (file)
@@ -266,7 +266,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
-       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+       entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 8a926d1df939a43a2531f2a3d5c9fbf550dff1d9..1fc17bf39fed710f77c8ff94d741af579965639d 100644 (file)
@@ -108,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
        int r;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
        if (r)
                return r;
 
index 50248059412e78353d2c653819f8b3b311c8214e..fde6ad5ac9ab3ff8dc640a5a73cfb32dd99976e6 100644 (file)
@@ -105,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
        int r;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
        if (r)
                return r;
 
index 6ae82cc2e55e007cd8b4af958f6e0104510455ae..8ef4a5392112463f151fe9acefba40a3682e4fa0 100644 (file)
@@ -393,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
        if (r)
                return r;
 
        /* UVD ENC TRAP */
        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-                       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
+                       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
                        if (r)
                                return r;
                }
index 7eaa54ba016b7848fc3c5f09697407599eac2fd8..ea28828360d3b3c1d181d0214134b33f8dba7704 100644 (file)
@@ -417,7 +417,7 @@ static int vce_v2_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCE */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;
 
index c8390f9adfd6ce750e1deb179692d0a5cdc3867c..6dbd39730070a30132f7841a1e7dc18a8e54a35a 100644 (file)
@@ -423,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
        int r, i;
 
        /* VCE */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
        if (r)
                return r;
 
index 70111c5fb7105753d2ce4a237983d326d511a435..715422bb30db6fb1f3dd89c8fa55cd8688378b92 100644 (file)
@@ -1204,7 +1204,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
-       unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+       unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA12 ||
index 0bfb3b4025caf3525b96bd3500cb089f5747e57b..6c99cbf51c08fd035fa3da585c06b8b7074bdb29 100644 (file)
@@ -4106,17 +4106,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
        source->funcs = &smu7_irq_funcs;
 
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
-                       AMDGPU_IH_CLIENTID_LEGACY,
+                       AMDGPU_IRQ_CLIENTID_LEGACY,
                        VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
                        source);
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
-                       AMDGPU_IH_CLIENTID_LEGACY,
+                       AMDGPU_IRQ_CLIENTID_LEGACY,
                        VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
                        source);
 
        /* Register CTF(GPIO_19) interrupt */
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
-                       AMDGPU_IH_CLIENTID_LEGACY,
+                       AMDGPU_IRQ_CLIENTID_LEGACY,
                        VISLANDS30_IV_SRCID_GPIO_19,
                        source);
 
index 2aab1b4759459fb421443b30d563a1a4e1860e3a..8ad4e6960efd0727310ff037d465b3398f142080 100644 (file)
@@ -545,7 +545,7 @@ int phm_irq_process(struct amdgpu_device *adev,
        uint32_t client_id = entry->client_id;
        uint32_t src_id = entry->src_id;
 
-       if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
+       if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
                if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
                        pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
                                                PCI_BUS_NUM(adev->pdev->devfn),