/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE		msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

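/**
 * amdgpu_gfx_mec_queue_to_bit - Flatten a MEC queue triple to a bit index
 *
 * Maps (mec, pipe, queue) to a single bit position in the per-XCC compute
 * queue bitmap: bit = (mec * pipes_per_mec + pipe) * queues_per_pipe + queue.
 */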
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

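/**
 * amdgpu_queue_mask_bit_to_mec_queue - Unflatten a bit index to a MEC queue
 *
 * Inverse of amdgpu_gfx_mec_queue_to_bit(): recovers (mec, pipe, queue)
 * from a flat bit position in the compute queue bitmap.
 */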
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int xcc_id, int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh]. The parameter is a comma-separated
 * list of "se.sh.cu" triples; for example, disable_cu=2.1.3 disables CU 3 in
 * SE 2, SH 1.
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
	unsigned int se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
		return true;

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

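/**
 * amdgpu_gfx_compute_queue_acquire - Select the compute queues the driver owns
 *
 * Fills the per-XCC MEC queue bitmap according to the multipipe policy:
 * either spread the queues evenly across all pipes of MEC1, or take every
 * queue of the given pipe.
 */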
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, j, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;

	if (multipipe_policy) {
		/* policy: spread queues evenly across all pipes on MEC1 only;
		 * for multiple XCCs, just reuse this policy for simplicity */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; i++) {
				pipe = i % adev->gfx.mec.num_pipe_per_mec;
				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
					adev->gfx.mec.num_queue_per_pipe;

				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec_bitmap[j].queue_bitmap);
			}
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; ++i)
				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
		}
	}

	for (j = 0; j < num_xcc; j++) {
		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
	}
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
				adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will extend to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

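/**
 * amdgpu_gfx_kiq_acquire - Pick a free MEC queue for the KIQ ring
 *
 * Scans the compute queue bitmap from the top for a queue the driver does
 * not already own and assigns it to the KIQ ring, subject to the hardware
 * constraints noted in the function body.
 */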
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring, int xcc_id)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_irq_src *irq = &kiq->irq;
	struct amdgpu_ring *ring = &kiq->ring;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->xcc_id = xcc_id;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	ring->doorbell_index =
		(adev->doorbell_index.kiq +
		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
		<< 1;

	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	snprintf(ring->name, sizeof(ring->name), "kiq_%d.%d.%d.%d",
		 xcc_id, ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

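/**
 * amdgpu_gfx_kiq_init - Allocate the KIQ EOP buffer
 *
 * Creates a cleared, GTT-backed buffer object of hpd_size bytes that the
 * KIQ uses as its EOP (end of pipe) storage.
 */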
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned int hpd_size, int xcc_id)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned int mqd_size, int xcc_id)
{
	int r, i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;
	u32 domain = AMDGPU_GEM_DOMAIN_GTT;

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
		domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif

	/* create MQD for KIQ */
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* originally the KIQ MQD was put in the GTT domain, but for SRIOV
		 * the VRAM domain is a must, otherwise the hypervisor triggers a
		 * SAVE_VF failure after the driver is unloaded, by which time the
		 * MQD has been deallocated and gart_unbind called. To avoid this
		 * divergence we use the VRAM domain for the KIQ MQD on both SRIOV
		 * and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->mqd_obj,
					    &ring->mqd_gpu_addr,
					    &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
		if (!kiq->mqd_backup) {
			dev_warn(adev->dev,
				 "no memory to create MQD backup for ring %s\n", ring->name);
			return -ENOMEM;
		}
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    domain, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				ring->mqd_size = mqd_size;
				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i]) {
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
					return -ENOMEM;
				}
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    domain, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			ring->mqd_size = mqd_size;
			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j]) {
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		kfree(adev->gfx.mec.mqd_backup[j]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &kiq->ring;
	kfree(kiq->mqd_backup);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

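/**
 * amdgpu_gfx_disable_kcq - Unmap the kernel compute queues via the KIQ
 *
 * Issues an unmap_queues packet for every compute ring of the given XCC and
 * then tests the KIQ ring to confirm the packets were processed.
 */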
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_hive_info *hive;
	struct amdgpu_ras *ras;
	int hive_ras_recovery = 0;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_compute_rings)) {
		spin_unlock(&kiq->ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_unmap_queues(kiq_ring,
					   &adev->gfx.compute_ring[j],
					   RESET_QUEUES, 0, 0);
	}

	/* This is a workaround: only skip the kiq_ring test
	 * during RAS recovery in the suspend stage for gfx9.4.3
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	ras = amdgpu_ras_get_context(adev);
	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
	    ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) {
		spin_unlock(&kiq->ring_lock);
		return 0;
	}

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
				      adev->gfx.num_gfx_rings)) {
			spin_unlock(&kiq->ring_lock);
			return -ENOMEM;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_unmap_queues(kiq_ring,
						   &adev->gfx.gfx_ring[j],
						   PREEMPT_QUEUES, 0, 0);
		}
	}

	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

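/**
 * amdgpu_gfx_enable_kcq - Map the kernel compute queues via the KIQ
 *
 * Builds the SET_RESOURCES queue mask from the acquired compute queues, then
 * issues a map_queues packet for every compute ring of the given XCC.
 */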
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	uint64_t queue_mask = 0;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&kiq->ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_map_queues(kiq_ring,
					 &adev->gfx.compute_ring[j]);
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	/* No need to map kgq on the slave */
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
				      adev->gfx.num_gfx_rings);
		if (r) {
			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
			spin_unlock(&kiq->ring_lock);
			return r;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_map_queues(kiq_ring,
						 &adev->gfx.gfx_ring[j]);
		}
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KGQ enable failed\n");

	return r;
}

/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG and PG
 *    are enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the
 *    request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature
 *    before disabling it.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

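/*
 * A minimal usage sketch for the request counting above (hypothetical
 * caller): a client that must keep the GFX block powered pairs a disable
 * request with a later enable request, e.g.
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// veto GFXOFF
 *	... access GFX registers ...
 *	amdgpu_gfx_off_ctrl(adev, true);	// drop the veto
 */
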
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

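/**
 * amdgpu_gfx_ras_late_init - Late RAS init for the GFX block
 *
 * If RAS is supported for the block, performs the common RAS late init and
 * enables the CP ECC error interrupt; otherwise RAS is disabled for the
 * block on boot.
 */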
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		if (adev->gfx.cp_ecc_error_irq.funcs) {
			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
			if (r)
				goto late_fini;
		}
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_gfx_ras *ras = NULL;

	/* adev->gfx.ras being NULL means gfx does not
	 * support RAS, so do nothing here.
	 */
	if (!adev->gfx.ras)
		return 0;

	ras = adev->gfx.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register gfx ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "gfx");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gfx.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the gfx default */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;

	return 0;
}

int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
		return adev->gfx.ras->poison_consumption_handler(adev, entry);

	return 0;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
			       void *ras_error_status,
			       void (*func)(struct amdgpu_device *adev, void *ras_error_status,
					    int xcc_id))
{
	int i;
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	if (err_data) {
		err_data->ue_count = 0;
		err_data->ce_count = 0;
	}

	for_each_inst(i, xcc_mask)
		func(adev, ras_error_status, i);
}

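/**
 * amdgpu_kiq_rreg - Read a register via the KIQ
 *
 * Used when direct MMIO access is not possible (e.g. under SR-IOV): emits an
 * rreg packet on the KIQ ring and polls a writeback slot for the value.
 * Returns the register value, or ~0 on failure.
 */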
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for the gpu reset case, because this way may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

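/**
 * amdgpu_kiq_wreg - Write a register via the KIQ
 *
 * Counterpart of amdgpu_kiq_rreg(): emits a wreg packet on the KIQ ring and
 * polls the fence to confirm the write was processed.
 */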
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for the gpu reset case, because this way may
	 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

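/**
 * amdgpu_gfx_cp_init_microcode - Parse a CP firmware image
 *
 * Reads the firmware header for the given ucode id, records the ucode and
 * feature versions, and registers the image with the PSP front-door loading
 * list when PSP firmware loading is in use.
 */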
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		/* ucode_fw and fw_size would be used uninitialized below;
		 * bail out for unhandled ucode ids */
		return;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}

bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
{
	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
			   adev->gfx.num_xcc_per_xcp : 1));
}

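/*
 * sysfs interface for compute partitioning. Assuming the default device
 * sysfs path, the attributes defined below can be exercised from user
 * space, e.g.:
 *
 *	cat /sys/bus/pci/devices/<bdf>/available_compute_partition
 *	echo CPX > /sys/bus/pci/devices/<bdf>/current_compute_partition
 */
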
static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int mode;

	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
					       AMDGPU_XCP_FL_NONE);

	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
}

static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
						struct device_attribute *addr,
						const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_gfx_partition mode;
	int ret = 0, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (num_xcc % 2 != 0)
		return -EINVAL;

	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
		mode = AMDGPU_SPX_PARTITION_MODE;
	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
		/*
		 * DPX mode needs the number of AIDs to be a multiple of 2.
		 * Each AID connects 2 XCCs.
		 */
		if (num_xcc % 4)
			return -EINVAL;
		mode = AMDGPU_DPX_PARTITION_MODE;
	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
		if (num_xcc != 6)
			return -EINVAL;
		mode = AMDGPU_TPX_PARTITION_MODE;
	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
		if (num_xcc != 8)
			return -EINVAL;
		mode = AMDGPU_QPX_PARTITION_MODE;
	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
		mode = AMDGPU_CPX_PARTITION_MODE;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);

	if (ret)
		return ret;

	return count;
}

static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *supported_partition;

	/* TBD */
	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		supported_partition = "SPX, DPX, QPX, CPX";
		break;
	case 6:
		supported_partition = "SPX, TPX, CPX";
		break;
	case 4:
		supported_partition = "SPX, DPX, CPX";
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		supported_partition = "SPX, CPX";
		break;
	default:
		supported_partition = "Not supported";
		break;
	}

	return sysfs_emit(buf, "%s\n", supported_partition);
}

static DEVICE_ATTR(current_compute_partition, 0644,
		   amdgpu_gfx_get_current_compute_partition,
		   amdgpu_gfx_set_compute_partition);

static DEVICE_ATTR(available_compute_partition, 0444,
		   amdgpu_gfx_get_available_compute_partition, NULL);

int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
{
	int r;

	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
	if (r)
		return r;

	r = device_create_file(adev->dev, &dev_attr_available_compute_partition);

	return r;
}

void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
	device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}