/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution.
 * Create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

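/*
 * Protocol sketch (see kgd2kfd_suspend()/kgd2kfd_resume() below): the first
 * suspender takes kfd_locked from 0 to 1 and evicts all KFD processes; each
 * matching resume decrements it, and kfd_is_locked() reports a non-zero
 * count while the driver is locked.
 */
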
#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;

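/*
 * Per-ASIC properties consumed throughout this file: supports_cwsr gates
 * kfd_cwsr_init(), needs_pci_atomics (together with no_atomic_fw_version)
 * feeds the PCIe atomics check in kgd2kfd_device_init(), and the
 * num_sdma_engines/num_xgmi_sdma_engines pair mirrors the PCIe/XGMI split
 * described above kfd_get_num_sdma_engines() at the bottom of this file.
 */
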
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_name = "kaveri",
	.gfx_target_version = 70000,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_name = "carrizo",
	.gfx_target_version = 80001,
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_name = "raven",
	.gfx_target_version = 90002,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};
#endif

#ifdef CONFIG_DRM_AMDGPU_CIK
static const struct kfd_device_info hawaii_device_info = {
	.asic_name = "hawaii",
	.gfx_target_version = 70001,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};
#endif

static const struct kfd_device_info tonga_device_info = {
	.asic_name = "tonga",
	.gfx_target_version = 80002,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_name = "fiji",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_name = "fiji",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_name = "polaris10",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_name = "polaris10",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_name = "polaris11",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris12_device_info = {
	.asic_name = "polaris12",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vegam_device_info = {
	.asic_name = "vegam",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_name = "vega10",
	.gfx_target_version = 90000,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_name = "vega10",
	.gfx_target_version = 90000,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega12_device_info = {
	.asic_name = "vega12",
	.gfx_target_version = 90004,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega20_device_info = {
	.asic_name = "vega20",
	.gfx_target_version = 90006,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info arcturus_device_info = {
	.asic_name = "arcturus",
	.gfx_target_version = 90008,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 6,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info aldebaran_device_info = {
	.asic_name = "aldebaran",
	.gfx_target_version = 90010,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 3,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info renoir_device_info = {
	.asic_name = "renoir",
	.gfx_target_version = 90012,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info navi10_device_info = {
	.asic_name = "navi10",
	.gfx_target_version = 100100,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 145,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi12_device_info = {
	.asic_name = "navi12",
	.gfx_target_version = 100101,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 145,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi14_device_info = {
	.asic_name = "navi14",
	.gfx_target_version = 100102,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 145,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info sienna_cichlid_device_info = {
	.asic_name = "sienna_cichlid",
	.gfx_target_version = 100300,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 92,
	.num_sdma_engines = 4,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navy_flounder_device_info = {
	.asic_name = "navy_flounder",
	.gfx_target_version = 100301,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 92,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info vangogh_device_info = {
	.asic_name = "vangogh",
	.gfx_target_version = 100303,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 92,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info dimgrey_cavefish_device_info = {
	.asic_name = "dimgrey_cavefish",
	.gfx_target_version = 100302,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 92,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info beige_goby_device_info = {
	.asic_name = "beige_goby",
	.gfx_target_version = 100304,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 92,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info yellow_carp_device_info = {
	.asic_name = "yellow_carp",
	.gfx_target_version = 100305,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.no_atomic_fw_version = 92,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info cyan_skillfish_device_info = {
	.asic_name = "cyan_skillfish",
	.gfx_target_version = 100103,
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

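/*
 * Device matching is two-level: pre-GFX9 parts are selected on
 * adev->asic_type, while everything newer falls through to the default
 * branch and is selected on the GC IP version instead. Both a device_info
 * table and a kfd2kgd callback set must resolve or the probe is rejected.
 */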
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd;
	const struct kfd_device_info *device_info;
	const struct kfd2kgd_calls *f2g;
	struct pci_dev *pdev = adev->pdev;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		if (vf)
			device_info = NULL;
		else
			device_info = &kaveri_device_info;
		f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		if (vf)
			device_info = NULL;
		else
			device_info = &carrizo_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		if (vf)
			device_info = NULL;
		else
			device_info = &hawaii_device_info;
		f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		if (vf)
			device_info = NULL;
		else
			device_info = &tonga_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
		if (vf)
			device_info = &fiji_vf_device_info;
		else
			device_info = &fiji_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS10:
		if (vf)
			device_info = &polaris10_vf_device_info;
		else
			device_info = &polaris10_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
		if (vf)
			device_info = NULL;
		else
			device_info = &polaris11_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS12:
		if (vf)
			device_info = NULL;
		else
			device_info = &polaris12_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_VEGAM:
		if (vf)
			device_info = NULL;
		else
			device_info = &vegam_device_info;
		f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		case IP_VERSION(9, 0, 1):
			if (vf)
				device_info = &vega10_vf_device_info;
			else
				device_info = &vega10_device_info;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			if (vf)
				device_info = NULL;
			else
				device_info = &raven_device_info;
			f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		case IP_VERSION(9, 2, 1):
			if (vf)
				device_info = NULL;
			else
				device_info = &vega12_device_info;
			f2g = &gfx_v9_kfd2kgd;
			break;
		case IP_VERSION(9, 3, 0):
			if (vf)
				device_info = NULL;
			else
				device_info = &renoir_device_info;
			f2g = &gfx_v9_kfd2kgd;
			break;
		case IP_VERSION(9, 4, 0):
			if (vf)
				device_info = NULL;
			else
				device_info = &vega20_device_info;
			f2g = &gfx_v9_kfd2kgd;
			break;
		case IP_VERSION(9, 4, 1):
			device_info = &arcturus_device_info;
			f2g = &arcturus_kfd2kgd;
			break;
		case IP_VERSION(9, 4, 2):
			device_info = &aldebaran_device_info;
			f2g = &aldebaran_kfd2kgd;
			break;
		case IP_VERSION(10, 1, 10):
			if (vf)
				device_info = NULL;
			else
				device_info = &navi10_device_info;
			f2g = &gfx_v10_kfd2kgd;
			break;
		case IP_VERSION(10, 1, 2):
			device_info = &navi12_device_info;
			f2g = &gfx_v10_kfd2kgd;
			break;
		case IP_VERSION(10, 1, 1):
			if (vf)
				device_info = NULL;
			else
				device_info = &navi14_device_info;
			f2g = &gfx_v10_kfd2kgd;
			break;
		case IP_VERSION(10, 1, 3):
			if (vf)
				device_info = NULL;
			else
				device_info = &cyan_skillfish_device_info;
			f2g = &gfx_v10_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 0):
			device_info = &sienna_cichlid_device_info;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 2):
			device_info = &navy_flounder_device_info;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 1):
			if (vf)
				device_info = NULL;
			else
				device_info = &vangogh_device_info;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 4):
			device_info = &dimgrey_cavefish_device_info;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 5):
			device_info = &beige_goby_device_info;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 3):
			if (vf)
				device_info = NULL;
			else
				device_info = &yellow_carp_device_info;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		default:
			device_info = NULL;
			f2g = NULL;
			break;
		}
	}

	if (!device_info || !f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
	       sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}

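/*
 * Pick the CWSR (compute wave save/restore) trap handler that matches the
 * GC version. Each *_hex blob is a precompiled ISA image that must fit in
 * a single page, which is what the BUILD_BUG_ON checks below enforce.
 */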
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

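/*
 * GWS (global wave sync) is only used with the HW scheduler and, unless
 * forced on via the hws_gws_support module parameter, only on SOC15 parts
 * whose MEC2 firmware is new enough, per the version thresholds below.
 */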
static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3) ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30) ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28))))
		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
				kfd->adev->gds.gws_size, &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

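/*
 * Bring-up order: reserve a GTT buffer and hand it to the sub-allocator,
 * then doorbells, interrupts, the device queue manager, GWS, IOMMU, CWSR
 * and SVM migration, and finally resume the DQM and register the node with
 * the topology. The goto chain at the bottom unwinds in reverse on failure.
 */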
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info->needs_pci_atomics &&
	    (!kfd->device_info->no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->pdev->vendor, kfd->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info->no_atomic_fw_version);
		return false;
	}

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	kfd->noretry = kfd->adev->gmc.noretry;

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			kfd->adev->gds.gws_size);
		goto gws_error;
	}

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init(kfd->adev);

	if (kgd2kfd_resume_iommu(kfd))
		goto device_iommu_error;

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * Fix me. KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to terminate.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* For first KFD device suspend all the KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
	return err;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);

	return err;
}

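/*
 * Spread interrupt work across CPUs: starting from the current CPU, walk
 * the online mask and pick the next CPU on the local NUMA node, falling
 * back to the current CPU if the scan wraps all the way around.
 */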
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	/* Round the chunk count up to whole longs for the bitmap */
	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

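/*
 * Chunks are addressed by bit number in both views: chunk N lives at
 * start + N * chunk_size on the GPU and CPU sides alike. E.g. with the
 * 512-byte chunks set up in kgd2kfd_device_init(), bit 3 maps to byte
 * offset 1536.
 */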
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones)
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

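/*
 * compute_profile is a refcount: the 0 -> 1 transition switches the GPU
 * out of the "compute idle" power profile and the drop back to 0 restores
 * it. Callers must pair inc/dec, as the WARN_ONCE below checks.
 */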
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
 * When the device has more than two engines, we reserve two for PCIe to
 * enable full-duplex and the rest are used as XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!kdev->adev->gmc.xgmi.supported)
		return kdev->adev->sdma.num_instances;

	return min(kdev->adev->sdma.num_instances, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
{
	/* After reserved for PCIe, the rest of engines are XGMI */
	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
}

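/*
 * Example from the tables above: Arcturus exposes 8 SDMA instances and
 * reserves 2 for PCIe, so these helpers return 2 and 6 respectively.
 */
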
#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to HIQ to hang the HWS
 * which will trigger a GPU reset and bring the HWS back to normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif