/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
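/*
 * Illustrative sketch only (not part of the driver): the typical
 * kernel-internal lifecycle of an IB, roughly as the per-ring IB tests
 * use it.  The 256 byte size and the packet contents are placeholder
 * assumptions; a real caller writes ring-specific packets.
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 *	if (r)
 *		return r;
 *	... write packets to ib.ptr[] and set ib.length_dw ...
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 *	if (!r)
 *		dma_fence_wait_timeout(f, false, AMDGPU_IB_TEST_TIMEOUT);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */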
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: VM the IB will be used with, or NULL if none
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the SA bo must wait on before the IB's memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @last_vm_update: fence of the last VM update, if any
 * @job: job the IBs belong to, or NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring:
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
		       struct amdgpu_job *job, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;

	unsigned i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->fence_ctx;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vm_id) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
		num_ibs * amdgpu_ring_get_emit_ib_size(ring);

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (vm) {
		r = amdgpu_vm_flush(ring, job);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	skip_preamble = ring->current_ctx == fence_ctx;
	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
			continue;

		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_hdp_invalidate)
		amdgpu_ring_emit_hdp_invalidate(ring);

	r = amdgpu_fence_emit(ring, f);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vm_id)
			amdgpu_vm_reset_id(adev, job->vm_id);
		amdgpu_ring_undo(ring);
		return r;
	}

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}

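/*
 * Hypothetical sketch (not part of the driver) of the CE + DE
 * submission pattern described in the kernel-doc above, as it would
 * reach amdgpu_ib_schedule() from a command submission: two IBs in
 * one submission, the CONST_IB first.  The flag value assumes
 * AMDGPU_IB_FLAG_CE from amdgpu_drm.h; job and f come from the caller.
 *
 *	struct amdgpu_ib ibs[2];
 *
 *	ibs[0].flags = AMDGPU_IB_FLAG_CE;	(CONST_IB, primes the caches)
 *	ibs[1].flags = 0;			(DE IB, does the actual draw)
 *	r = amdgpu_ib_schedule(ring, 2, ibs, NULL, job, &f);
 */
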
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready)
		return 0;

	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r)
		return r;

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	return 0;
}

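/*
 * Sketch of the expected call ordering (an assumption based on how
 * device bring-up is commonly structured, simplified here): the pool
 * must exist before any IB can be allocated, and the ring tests below
 * rely on both the pool and the rings being ready.
 *
 *	r = amdgpu_ib_pool_init(adev);
 *	if (r)
 *		goto failed;
 *	r = amdgpu_ib_ring_tests(adev);
 *	...
 *	amdgpu_ib_pool_fini(adev);	(at teardown)
 */
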
/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
				ret = r;
			}
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}