/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

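/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Return the current read pointer from the write-back slot that the
 * engine updates in system memory (SI).
 */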
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs>>2];
}

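/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Read the write pointer back from the DMA_RB_WPTR register; the
 * register holds a byte offset in bits [17:2], so shift right by 2
 * to convert it to dwords (SI).
 */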
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

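/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the dword write pointer to DMA_RB_WPTR as a byte offset
 * (wptr << 2) so the engine starts fetching the new commands (SI).
 */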
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

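/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 * @vmid: vmid to run the IB under
 * @ctx_switch: unused by this function
 *
 * Emit an indirect buffer packet referencing @ib (SI).
 */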
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_ib *ib,
				unsigned vmid, bool ctx_switch)
{
	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring.  Pad as necessary with NOPs.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address at which to write the fence seq number
 * @seq: fence seq number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_64BIT selects a 64-bit write)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

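/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the ring buffers and mark the rings not ready (SI).
 */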
static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* disable the ring buffer */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
		ring->sched.ready = false;
	}
}

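/**
 * si_dma_start - set up and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA ring buffers, enable them, and run a ring test on
 * each instance (SI).
 * Returns 0 for success, error for failure.
 */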
static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout for the fence wait, in jiffies
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

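/**
 * si_dma_ring_emit_wreg - emit a register write from the ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit an SRBM write packet so the DMA engine updates @reg with @val (SI).
 */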
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

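/* Enable or disable the trap interrupt (raised by DMA_PACKET_TRAP) for one
 * DMA instance by toggling TRAP_ENABLE in its DMA_CNTL register. */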
static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

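/* Route a trap interrupt to fence processing on the instance that raised it:
 * src_id 224 is DMA0, 244 is DMA1 (matching the ids registered in sw_init). */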
static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

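/*
 * Medium-grain clock gating: when gating is requested and supported, clear
 * MEM_POWER_OVERRIDE and program DMA_CLK_CTRL for gating; otherwise force
 * MEM_POWER_OVERRIDE on and leave the DMA clocks ungated.
 */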
static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

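/* Kick the DMA power-gating state machine via the PGFSM registers; the
 * programmed values are hardware-specific magic numbers. */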
static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

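/*
 * Worked example (illustrative only): with copy_max_bytes = 0xffff8 from
 * si_dma_buffer_funcs below, a 1 MiB move submitted through the buffer
 * funcs is split into DIV_ROUND_UP(0x100000, 0xffff8) = 2 copy packets of
 * 5 dwords each, so the caller reserves 2 * copy_num_dw = 10 dwords of IB
 * space for it.
 */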
/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		sched = &adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_rqs[i] =
			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	}
	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};