drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"

#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
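
/*
 * Layout of the VCE BO as programmed in vce_v2_0_mc_resume(): the
 * firmware image, the stack and the per-handle data segment sit back
 * to back and are mapped to VCPU cache regions 0, 1 and 2.
 */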

static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v2_0_wait_for_idle(void *handle);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
        else
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int i, j, r;

        vce_v2_0_mc_resume(adev);

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, ring->wptr);
        WREG32(mmVCE_RB_WPTR, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, ring->wptr);
        WREG32(mmVCE_RB_WPTR2, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);

        WREG32_P(mmVCE_SOFT_RESET,
                 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

        mdelay(100);

        WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

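        /*
         * Poll VCE_STATUS for the firmware to come up; if it doesn't,
         * retry with an ECPU soft reset, giving up after 10 attempts.
         */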
        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmVCE_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

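/**
 * vce_v2_0_early_init - set ring and irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Hook up the VCE 2.0 specific ring and interrupt functions.
 */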
static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

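/**
 * vce_v2_0_sw_init - sw init for VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the VCE interrupt source, initialize the common VCE code
 * and set up the two VCE rings.
 */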
static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        ring = &adev->vce.ring[0];
        sprintf(ring->name, "vce0");
        r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        ring = &adev->vce.ring[1];
        sprintf(ring->name, "vce1");
        r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        return r;
}

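/**
 * vce_v2_0_sw_fini - sw fini for VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the common VCE code and tear down its software state.
 */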
static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vce_sw_fini(adev);
        if (r)
                return r;

        return r;
}

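/**
 * vce_v2_0_hw_init - start and test the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Start VCE and run a ring test on both rings; a ring is marked
 * ready only if its test passes.
 */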
static int vce_v2_0_hw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_start(adev);
        if (r)
                return r;

        ring = &adev->vce.ring[0];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        ring = &adev->vce.ring[1];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

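/**
 * vce_v2_0_suspend - suspend the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Run hw fini and the common VCE suspend path.
 */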
static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return r;
}

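/**
 * vce_v2_0_resume - resume the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Run the common VCE resume path and re-init the hardware.
 */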
static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        r = vce_v2_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

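/*
 * Software clock gating: statically force the gating bits in the VCE
 * clock gating registers on or off.
 */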
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

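/*
 * Dynamic clock gating: halt the VCPU while the gating bits are
 * reprogrammed, then release the ECPU soft reset.
 */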
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        u32 orig, tmp;

        if (gated) {
                if (vce_v2_0_wait_for_idle(adev)) {
                        DRM_INFO("VCE is busy, can't set clock gating\n");
                        return;
                }
                WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
                WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(100);
                WREG32(mmVCE_STATUS, 0);
        } else {
                WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                         ~VCE_VCPU_CNTL__CLK_EN_MASK);
                WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(100);
        }

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp &= ~0x00060006;
        if (gated) {
                tmp |= 0xe10000;
        } else {
                tmp |= 0xe1;
                tmp &= ~0xe10000;
        }
        WREG32(mmVCE_CLOCK_GATING_B, tmp);

        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0x1fe000;
        tmp &= ~0xff000000;
        if (tmp != orig)
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        if (tmp != orig)
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
        bool sw_cg = false;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

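/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Tell the VCE memory controller where the firmware, stack and data
 * segments live, and enable the system interrupt.
 */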
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vce.gpu_addr;
        uint32_t size;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        addr += AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

        WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
                 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

        vce_v2_0_init_cg(adev);
}

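/**
 * vce_v2_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if VCE reports idle in SRBM_STATUS2.
 */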
static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

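/**
 * vce_v2_0_wait_for_idle - wait for VCE to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll SRBM_STATUS2 until the VCE busy bit clears, or return
 * -ETIMEDOUT after adev->usec_timeout iterations.
 */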
static int vce_v2_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

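/**
 * vce_v2_0_soft_reset - soft reset the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Assert the SRBM soft reset for VCE, then restart the block.
 */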
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
        mdelay(5);

        return vce_v2_0_start(adev);
}

static void vce_v2_0_print_status(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "VCE 2.0 registers\n");
        dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
                 RREG32(mmVCE_STATUS));
        dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
                 RREG32(mmVCE_VCPU_CNTL));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_SIZE0));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_SIZE1));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_SIZE2));
        dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
                 RREG32(mmVCE_SOFT_RESET));
        dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_LO2));
        dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_HI2));
        dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
                 RREG32(mmVCE_RB_SIZE2));
        dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
                 RREG32(mmVCE_RB_RPTR2));
        dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
                 RREG32(mmVCE_RB_WPTR2));
        dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_LO));
        dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_HI));
        dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
                 RREG32(mmVCE_RB_SIZE));
        dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
                 RREG32(mmVCE_RB_RPTR));
        dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
                 RREG32(mmVCE_RB_WPTR));
        dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
                 RREG32(mmVCE_CLOCK_GATING_A));
        dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
                 RREG32(mmVCE_CLOCK_GATING_B));
        dev_info(adev->dev, "  VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
                 RREG32(mmVCE_CGTT_CLK_OVERRIDE));
        dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
                 RREG32(mmVCE_UENC_CLOCK_GATING));
        dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
                 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
        dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
                 RREG32(mmVCE_SYS_INT_EN));
        dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
                 RREG32(mmVCE_LMI_CTRL2));
        dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
                 RREG32(mmVCE_LMI_CTRL));
        dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
                 RREG32(mmVCE_LMI_VM_CTRL));
        dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
                 RREG32(mmVCE_LMI_SWAP_CNTL));
        dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
                 RREG32(mmVCE_LMI_SWAP_CNTL1));
        dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
                 RREG32(mmVCE_LMI_CACHE_CTRL));
}

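/*
 * Enable or disable the VCE system interrupt trap according to the
 * requested interrupt state.
 */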
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

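/*
 * Process a VCE interrupt: run fence processing on the ring selected
 * by the IV entry's source data.
 */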
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data) {
        case 0:
                amdgpu_fence_process(&adev->vce.ring[0]);
                break;
        case 1:
                amdgpu_fence_process(&adev->vce.ring[1]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data);
                break;
        }

        return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE)
                gate = true;

        vce_v2_0_enable_mgcg(adev, gate);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks.
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
                return 0;

        if (state == AMD_PG_STATE_GATE)
                /* XXX do we need a vce_v2_0_stop()? */
                return 0;
        else
                return vce_v2_0_start(adev);
}

const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .print_status = vce_v2_0_print_status,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
        adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}