/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "nid.h"

u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine. The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */

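/*
 * A rough sketch of the async DMA packet header encoding, paraphrasing
 * the DMA_PACKET()/DMA_IB_PACKET() helpers in nid.h (bit positions are
 * illustrative, not authoritative; always use the macros):
 *
 *   [31:28] cmd   packet type (WRITE, COPY, INDIRECT_BUFFER, NOP, ...)
 *   [23]    t     tiling select, packet dependent
 *   [22]    s     sub-opcode select, packet dependent
 *   [19:0]  n     dword count or packet specific data
 *
 * e.g. DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1) builds the header of a
 * write packet that carries a single data dword.
 */
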
/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

        if (rdev->wb.enabled) {
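                /* Predict the rptr value once this IB completes: skip past
                 * this 4 dword write packet, advance to the 8 dword aligned
                 * slot where the 3 dword IB packet will land (matching the
                 * NOP padding below), then step over the IB packet itself.
                 */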
                u32 next_rptr = ring->wptr + 4;
                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
        u32 rb_cntl;

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        /* dma0 */
        rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

        /* dma1 */
        rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cayman_dma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffers and enable them (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;

        /* Reset dma */
        WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
        RREG32(SRBM_SOFT_RESET);
        udelay(50);
        WREG32(SRBM_SOFT_RESET, 0);

        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        reg_offset = DMA0_REGISTER_OFFSET;
                        wb_offset = R600_WB_DMA_RPTR_OFFSET;
                } else {
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                        reg_offset = DMA1_REGISTER_OFFSET;
                        wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
                }

                WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
                WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(DMA_RB_RPTR + reg_offset, 0);
                WREG32(DMA_RB_WPTR + reg_offset, 0);

                /* set the wb address whether it's enabled or not */
                WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
                WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                if (rdev->wb.enabled)
                        rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

                WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

                /* enable DMA IBs */
                ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
                ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
                WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

                dma_cntl = RREG32(DMA_CNTL + reg_offset);
                dma_cntl &= ~CTXEMPTY_INT_ENABLE;
                WREG32(DMA_CNTL + reg_offset, dma_cntl);

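                /* ring->rptr and ring->wptr are dword indices, while the
                 * RPTR/WPTR registers hold byte offsets, hence the shifts
                 * by 2 below.
                 */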
                ring->wptr = 0;
                WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

                ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;

                WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

                ring->ready = true;

                r = radeon_ring_test(rdev, ring->idx, ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }
        }

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}

/**
 * cayman_dma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (cayman-SI).
 */
void cayman_dma_fini(struct radeon_device *rdev)
{
        cayman_dma_stop(rdev);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
        radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}

/**
 * cayman_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activity, then test whether the engine still makes progress */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cayman_dma_vm_set_page - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (cayman/TN).
 */
void cayman_dma_vm_set_page(struct radeon_device *rdev,
                            struct radeon_ib *ib,
                            uint64_t pe,
                            uint64_t addr, unsigned count,
                            uint32_t incr, uint32_t flags)
{
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
        uint64_t value;
        unsigned ndw;

        if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
                while (count) {
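                        /* each PTE takes two dwords and the packet count
                         * field is 20 bits wide, so clamp to the largest
                         * even value that fits (0xFFFFE)
                         */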
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        /* for non-physically contiguous pages (system) */
                        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
                        ib->ptr[ib->length_dw++] = pe;
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                                if (flags & RADEON_VM_PAGE_SYSTEM) {
                                        value = radeon_vm_map_gart(rdev, addr);
                                        value &= 0xFFFFFFFFFFFFF000ULL;
                                } else if (flags & RADEON_VM_PAGE_VALID) {
                                        value = addr;
                                } else {
                                        value = 0;
                                }
                                addr += incr;
                                value |= r600_flags;
                                ib->ptr[ib->length_dw++] = value;
                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        }
                }
        } else {
                while (count) {
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        if (flags & RADEON_VM_PAGE_VALID)
                                value = addr;
                        else
                                value = 0;
                        /* for physically contiguous pages (vram) */
                        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                        ib->ptr[ib->length_dw++] = pe; /* dst addr */
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = value; /* value */
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        ib->ptr[ib->length_dw++] = incr; /* increment size */
                        ib->ptr[ib->length_dw++] = 0;
                        pe += ndw * 4;
                        addr += (ndw / 2) * incr;
                        count -= ndw / 2;
                }
        }
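        /* the async DMA engine requires IBs to be a multiple of 8 dwords
         * long, so pad with NOPs as needed (mirroring the ring padding in
         * cayman_dma_ring_ib_execute() above)
         */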
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}

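/**
 * cayman_dma_vm_flush - flush the VM TLB using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the DMA ring to emit on
 * @vm: radeon_vm pointer
 *
 * Update the page table base address for the given VM id, flush the
 * HDP cache and invalidate the VM's TLB, all via SRBM register writes
 * emitted on the DMA ring (cayman/TN).
 */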
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
        struct radeon_ring *ring = &rdev->ring[ridx];

        if (vm == NULL)
                return;

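        /* An SRBM register write on the DMA ring is three dwords: the
         * packet header, a dword combining the byte enable mask (0xf << 16)
         * with the dword aligned register offset, and the value to write.
         * The first write below updates this VM's page table base address.
         */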
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
        radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

        /* flush hdp cache */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);

        /* bits 0-7 are the VM contexts 0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm->id);
}