/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

/*
 * GPU GFX IP block helper functions.
 */

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}
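
/*
 * Illustrative example (hypothetical values, not tied to a specific ASIC):
 * with num_pipe_per_mec = 4 and num_queue_per_pipe = 8,
 * amdgpu_gfx_mec_queue_to_bit(adev, 1, 2, 3) returns
 * 1 * (4 * 8) + 2 * 8 + 3 = 51; amdgpu_queue_mask_bit_to_mec_queue() below
 * performs the inverse mapping from a bit index back to (mec, pipe, queue).
 */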

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}
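
/*
 * Typical usage (sketch only): a ring test allocates a scratch register
 * with amdgpu_gfx_scratch_get(), writes a known value to it through the
 * ring, polls the register until the value shows up, and then releases it
 * with amdgpu_gfx_scratch_free().
 */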

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}
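
/*
 * The disable_cu string is a comma-separated list of se.sh.cu triples.
 * For example (hypothetical values), amdgpu.disable_cu=1.0.3,1.0.4 disables
 * CUs 3 and 4 in shader array 0 of shader engine 1.
 */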

static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

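/*
 * Select which compute (MEC) queues amdgpu will own by setting bits in
 * adev->gfx.mec.queue_bitmap.  With the multipipe policy the queues are
 * spread round-robin across the pipes of MEC1; for example (illustrative
 * values only), with 4 pipes per MEC and 8 compute rings this covers
 * queues 0 and 1 of every pipe.  Without it, amdgpu simply takes the first
 * max_queues_per_mec queue slots.
 */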
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: make queues evenly cross all pipes on MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
				adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, me;

	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
		queue = i % adev->gfx.me.num_queue_per_pipe;
		me = (i / adev->gfx.me.num_queue_per_pipe)
			/ adev->gfx.me.num_pipe_per_me;

		if (me >= adev->gfx.me.num_me)
			break;
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * will extend to multiple queues per pipe later */
		if (me == 0 && queue < 1)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (queue_bit-- >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

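/*
 * Set up the KIQ (Kernel Interface Queue) ring.  The KIQ is the compute
 * queue the driver uses to submit CP packets on its own behalf, e.g. to
 * map/unmap the other queues and to access registers indirectly.
 */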
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

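/*
 * An MQD ("memory queue descriptor") is the per-queue structure the CP reads
 * to (re)initialize a queue; the backup copies allocated below are used to
 * restore queue state, e.g. across a GPU reset.
 */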
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* originally the KIQ MQD was put in the GTT domain, but for SRIOV
		 * the VRAM domain is a must, otherwise the hypervisor triggers a
		 * SAVE_VF failure after the driver is unloaded, which means the MQD
		 * has been deallocated and gart_unbind called.  To be safe, use the
		 * VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

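/*
 * Unmap all kernel compute queues (KCQs) by submitting unmap_queues packets
 * through the KIQ, e.g. on suspend or before a GPU reset.
 */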
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&adev->gfx.kiq.ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_compute_rings)) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);
	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	return r;
}

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}
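
/*
 * The SET_RESOURCES queue mask assumes a fixed layout of 4 pipes per MEC and
 * 8 queues per pipe, hence the mec * 4 * 8 + pipe * 8 + queue re-encoding
 * above.  Illustrative example: the driver queue bit for mec 1, pipe 1,
 * queue 2 becomes resource bit 1 * 32 + 1 * 8 + 2 = 42.
 */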

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature before disabling it.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
			schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}
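
/*
 * Typical caller pattern (sketch only): a client that needs the GFX block
 * kept powered calls amdgpu_gfx_off_ctrl(adev, false) before touching GFX
 * registers and amdgpu_gfx_off_ctrl(adev, true) afterwards.  The disable
 * requests are reference counted; GFXOFF is only re-armed (after
 * GFX_OFF_DELAY_ENABLE) once the last request has been dropped.
 */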

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = smu_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_gfx_process_ras_data_cb,
	};

	if (!adev->gfx.ras_if) {
		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gfx.ras_if)
			return -ENOMEM;
		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gfx.ras_if->sub_block_index = 0;
		strcpy(adev->gfx.ras_if->name, "gfx");
	}
	fs_info.head = ih_info.head = *adev->gfx.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		/* free gfx ras_if if ras is not supported */
		r = 0;
		goto free;
	}

	return 0;
late_fini:
	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
	kfree(adev->gfx.ras_if);
	adev->gfx.ras_if = NULL;
	return r;
}

void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
	    adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_gfx_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->query_ras_error_count)
			adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

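/*
 * Register access through the KIQ: amdgpu_kiq_rreg()/amdgpu_kiq_wreg() emit
 * read/write packets on the KIQ ring and poll a fence for completion.  This
 * path is used when direct MMIO access is not possible, e.g. for certain
 * registers under SR-IOV virtualization.
 */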
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait any longer in the gpu reset case, because waiting here may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait any longer in IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait any longer in the gpu reset case, because waiting here may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_wreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_wreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait any longer in IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {

		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

/* amdgpu_gfx_state_change_set - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 *
 */
void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}