/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

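/*
 * The golden register tables below are {register, and_mask, or_mask}
 * triples: amdgpu_program_register_sequence() reads each register,
 * clears the bits in and_mask, ORs in or_mask and writes the result
 * back (or writes or_mask directly when and_mask is 0xffffffff).
 */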
static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	/* XXX check if swapping is necessary on BE */
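	/* the writeback slot mirrors the RPTR register, which counts in
	 * bytes; shift down by 2 to get a dword index
	 */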
	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;

	return rptr;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
	}
}

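/*
 * On firmware with feature_version >= 20 (burst_nop set above in
 * sdma_v3_0_init_microcode), the first NOP packet carries a COUNT
 * field covering the whole pad so the engine skips the remaining
 * filler dwords in one packet; otherwise each dword is its own NOP.
 */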
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib)
{
	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

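	/* precompute where the rptr will land once the NOP padding and
	 * the 6-dword INDIRECT_BUFFER packet (which must end on an
	 * 8-dword boundary) have been emitted; that predicted value is
	 * written to the next_rptr writeback location below
	 */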
	while ((next_rptr & 7) != 2)
		next_rptr++;
	next_rptr += 6;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on a 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

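	/* POLL_REGMEM with HDP_FLUSH set writes ref_and_mask to
	 * GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE until the
	 * masked value equals the reference
	 */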
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
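		/* consecutive writes to the UCODE_DATA port auto-increment
		 * the ucode address set above
		 */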
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r, i;

	if (!adev->pp_enabled) {
		if (!adev->firmware.smu_load) {
			r = sdma_v3_0_load_microcode(adev);
			if (r)
				return r;
		} else {
			for (i = 0; i < adev->sdma.num_instances; i++) {
				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
						(i == 0) ?
						AMDGPU_UCODE_ID_SDMA0 :
						AMDGPU_UCODE_ID_SDMA1);
				if (r)
					return -EINVAL;
			}
		}
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

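	/* grab a writeback slot, seed it with 0xCAFEDEAD, then have the
	 * engine overwrite it with 0xDEADBEEF and poll from the CPU
	 */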
	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_lock(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
				   uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

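	/* each 64-bit PTE occupies two dwords in the WRITE_LINEAR
	 * payload, hence ndw = count * 2 below
	 */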
	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm: amdgpu_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
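	/* page table base registers for contexts 0-7 and 8-15 live in
	 * two separate register banks; pd_addr is programmed in units of
	 * 4KB pages, hence the >> 12
	 */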
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (i == 0) ?
			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 256 * 1024,
				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
			SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void sdma_v3_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VI SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < adev->sdma.num_instances; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}

static int sdma_v3_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		sdma_v3_0_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		sdma_v3_0_print_status((void *)adev);
	}

	return 0;
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

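/*
 * Medium grain clock gating works via soft-override bits: clearing the
 * SOFT_OVERRIDE bits lets the hardware gate the clocks when idle
 * (gating enabled), while setting them forces the clocks on.
 */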
static void fiji_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	if (enable) {
		temp = data = RREG32(mmSDMA0_CLK_CTRL);
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (data != temp)
			WREG32(mmSDMA0_CLK_CTRL, data);

		temp = data = RREG32(mmSDMA1_CLK_CTRL);
		data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);

		if (data != temp)
			WREG32(mmSDMA1_CLK_CTRL, data);
	} else {
		temp = data = RREG32(mmSDMA0_CLK_CTRL);
		data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;

		if (data != temp)
			WREG32(mmSDMA0_CLK_CTRL, data);

		temp = data = RREG32(mmSDMA1_CLK_CTRL);
		data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;

		if (data != temp)
			WREG32(mmSDMA1_CLK_CTRL, data);
	}
}

static void fiji_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	if (enable) {
		temp = data = RREG32(mmSDMA0_POWER_CNTL);
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA0_POWER_CNTL, data);

		temp = data = RREG32(mmSDMA1_POWER_CNTL);
		data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA1_POWER_CNTL, data);
	} else {
		temp = data = RREG32(mmSDMA0_POWER_CNTL);
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA0_POWER_CNTL, data);

		temp = data = RREG32(mmSDMA1_POWER_CNTL);
		data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA1_POWER_CNTL, data);
	}
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		fiji_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.soft_reset = sdma_v3_0_soft_reset,
	.print_status = sdma_v3_0_print_status,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte = sdma_v3_0_vm_copy_pte,
	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
	.pad_ib = sdma_v3_0_vm_pad_ib,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
	}
}