/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

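/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware writeback buffer (SI).
 */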
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs>>2];
}

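/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (SI).
 */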
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

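/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (SI).
 */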
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

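/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (SI).
 */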
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write the fence
 * seq number and a DMA trap packet to generate an
 * interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

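/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the async dma engines by disabling their ring
 * buffers (SI).
 */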
static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* disable the ring buffer */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
	}
}

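/**
 * si_dma_start - set up the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA ring buffers, enable them and run a
 * ring test on each instance (SI).
 * Returns 0 for success, error for failure.
 */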
static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to
 * memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (SI).
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

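/**
 * si_dma_ring_emit_wreg - emit an SRBM register write
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Write a register value over the ring using an SRBM_WRITE packet (SI).
 */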
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

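/**
 * si_dma_early_init - set SDMA instance count and function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set the number of SDMA instances and hook up the ring,
 * buffer, VM PTE and interrupt functions (SI).
 */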
static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

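/**
 * si_dma_sw_init - software init for the SDMA IP block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the DMA trap interrupt sources and initialize
 * the rings for both SDMA instances (SI).
 * Returns 0 for success, error for failure.
 */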
static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

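/**
 * si_dma_set_trap_irq_state - enable/disable DMA trap interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: SDMA instance the state applies to
 * @state: interrupt state (enable or disable)
 *
 * Toggle the TRAP_ENABLE bit in DMA_CNTL for the selected
 * SDMA instance (SI).
 */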
static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

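/**
 * si_dma_process_trap_irq - route a DMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Kick fence processing on the SDMA instance that raised the
 * trap (src_id 224 for DMA0, 244 for DMA1) (SI).
 */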
static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

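/**
 * si_dma_set_clockgating_state - toggle SDMA clock gating
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Enable or disable the memory power override and clock
 * gating for both DMA instances, depending on whether MGCG
 * is supported and requested (SI).
 */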
static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure operation
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count,
				    bool tmz)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to fill
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};