drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
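
/*
 * Typical submission flow against this interface (a minimal sketch, not
 * taken from the original file; the packet words and the 16-dword size
 * are illustrative):
 *
 *	r = amdgpu_ring_alloc(ring, 16);	// reserve space on the ring
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet_header);	// fill the reservation
 *	amdgpu_ring_write(ring, packet_payload);
 *	amdgpu_ring_commit(ring);	// pad, then bump wptr for the GPU
 *
 * On an error before the commit, amdgpu_ring_undo() rolls the wptr back
 * instead.
 */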

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit()
	 * can pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

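/* Worked example of the round-up in amdgpu_ring_alloc() (illustrative
 * numbers, not from the original source): with align_mask = 0x7 (an
 * 8-dword boundary), a request of ndw = 10 becomes
 * (10 + 7) & ~7 = 16 dwords, so the padding later emitted by
 * amdgpu_ring_commit() can never overrun the reservation.
 */
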
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

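/* Worked example of the padding loop above (illustrative numbers): with
 * align_mask = 0x7 and ib->length_dw = 13, the loop appends NOPs until
 * length_dw reaches 16, the next multiple of 8.
 */
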
/**
 * amdgpu_ring_commit - tell the GPU to execute the new commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

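/* Worked example of the pad computation in amdgpu_ring_commit()
 * (illustrative numbers): with align_mask = 7 (8-dword fetch size) and
 * wptr & 7 == 5, count = (8 - 5) % 8 = 3 NOPs are inserted; with the
 * wptr already aligned (wptr & 7 == 0), count = 8 % 8 = 0 and no
 * padding is written.
 */
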
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dwords for a single ring allocation
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
						  sched_score);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		return r;
	}
	ring->trail_fence_gpu_addr =
		adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
	ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

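/* Worked example of the ring sizing in amdgpu_ring_init() (illustrative
 * numbers; amdgpu_sched_hw_submission defaulting to 2 is an assumption
 * of this sketch): with max_dw = 1024 and sched_hw_submission = 2,
 * ring_size = roundup_pow_of_two(1024 * 4 * 2) = 8192 bytes, giving
 * buf_mask = (8192 / 4) - 1 = 0x7ff.
 */
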
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	ring->sched.ready = false;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed-out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* The file layout is a 12-byte header consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n words of ring data
 */
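
/* Example of reading this file from userspace (a sketch; the debugfs
 * mount point, DRI minor number and ring name are assumptions):
 *
 *	# first 12 bytes: rptr, wptr, driver wptr (three LE 32-bit words)
 *	xxd -l 12 /sys/kernel/debug/dri/0/amdgpu_ring_gfx
 */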
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12) / 4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (!ent)
		return -ENOMEM;

	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}

c66ed765
AG
439/**
440 * amdgpu_ring_test_helper - tests ring and set sched readiness status
441 *
442 * @ring: ring to try the recovery on
443 *
444 * Tests ring and set sched readiness status
445 *
446 * Returns 0 on success, error on failure.
447 */
448int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
449{
dc9eeff8 450 struct amdgpu_device *adev = ring->adev;
c66ed765
AG
451 int r;
452
453 r = amdgpu_ring_test_ring(ring);
dc9eeff8
CK
454 if (r)
455 DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
456 ring->name, r);
457 else
458 DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
459 ring->name);
c66ed765
AG
460
461 ring->sched.ready = !r;
c66ed765
AG
462 return r;
463}