/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"

#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16
/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies which GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT 62
#define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
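/*
 * Illustrative helpers (not part of the upstream interface): they only
 * restate the bit layout above as compilable code so the encoding is easy
 * to follow. The function names are made up for this sketch;
 * kfd_mmap_get_type() mirrors KFD_MMAP_GET_GPU_ID() above.
 */
static inline uint64_t kfd_mmap_get_type(uint64_t offset)
{
        /* BITS[63:62]: one of the KFD_MMAP_TYPE_* values above */
        return offset & KFD_MMAP_TYPE_MASK;
}

static inline uint64_t kfd_mmap_make_offset(uint64_t type, uint32_t gpu_id)
{
        /* e.g. kfd_mmap_make_offset(KFD_MMAP_TYPE_DOORBELL, gpu_id) */
        return type | KFD_MMAP_GPU_ID(gpu_id);
}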
/*
 * When working with cp scheduler we should assign the HIQ manually or via
 * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in cp scheduling; with that in mind we set the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0
/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct) \
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
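/*
 * Usage sketch (illustrative only, not upstream code): the macro derives
 * both the cast and the allocation size from the pointer it is assigned
 * to, so a typical call site looks like:
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	mem_obj = kfd_alloc_struct(mem_obj);
 *	if (!mem_obj)
 *		return NULL;
 */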
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
	(KFD_MAX_NUM_OF_PROCESSES * \
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS (4000)
/*
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * A 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * This kernel module parameter is used to simulate a large-bar machine on
 * non-large-bar machines.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/* Enable eviction debug messages */
extern bool debug_evictions;
enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
	enum amd_asic_type asic_family;
	const char *asic_name;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	unsigned int num_sdma_engines;
	unsigned int num_xgmi_sdma_engines;
	unsigned int num_sdma_queues_per_engine;
};

/* Chunk of GTT memory managed by the GTT sub-allocator */
struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};
struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};
struct kfd_dev {
	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;
	struct drm_device *ddev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;

	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	struct kfd_dbgmgr *dbgmgr;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR trap handler ISA */
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	atomic_t sram_ecc_flag;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	/* Global GWS resource shared between processes */
	void *gws;

	/* Clients watching SMI events */
	struct list_head smi_clients;

	uint32_t reset_seq_num;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;
};
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};
/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 *						running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 *						the specified process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};
enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to 15, where 15 is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field defines that the queue is non active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of a new packet written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue means
 * that the queue can access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * VMID assigned to the queue.
 *
 * This structure represents the queue properties for each queue no matter if
 * it's user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint32_t queue_percent;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;

	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;

	/* Relevant for CU */
	uint32_t cu_mask_count; /* Must be a multiple of 32 */
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
 * that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,	/* for hiq */
	KFD_MQD_TYPE_CP,	/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,	/* for sdma queues */
	KFD_MQD_TYPE_DIQ,	/* for diq */
};
enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};
struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};
struct process_queue_manager {
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};
struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;

	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;

	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t sh_hidden_private_base;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};
/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10
/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
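/*
 * Illustrative example (not upstream code) of packing and unpacking a
 * handle; the concrete values are made up:
 *
 *	uint64_t handle = MAKE_HANDLE(0x2f60, 7);  // 0x00002f6000000007
 *	uint32_t gpu_id = GET_GPU_ID(handle);      // 0x2f60
 *	uint32_t idr    = GET_IDR_HANDLE(handle);  // 7
 */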
#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR 100
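/*
 * Worked example (illustrative only): a raw SDMA activity counter of
 * 250000 ticks at 100MHz corresponds to
 * 250000 / SDMA_ACTIVITY_DIVISOR = 2500 microseconds of SDMA activity
 * as reported through sysfs.
 */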
/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell if the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct work_struct release_work;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;

	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
};
bool kfd_dev_is_large_bar(struct kfd_dev *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
				struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				unsigned int *doorbell_index);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				unsigned int doorbell_index);
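/*
 * Kernel doorbell usage sketch (illustrative only, error handling and
 * locking omitted): a kernel queue typically grabs one doorbell slot and
 * rings it after advancing its write pointer, using only the helpers
 * declared above:
 *
 *	unsigned int db_off;
 *	u32 __iomem *db = kfd_get_kernel_doorbell(kfd, &db_off);
 *
 *	// ... submit packets, compute new_wptr ...
 *	write_kernel_doorbell(db, new_wptr);
 *
 *	kfd_release_kernel_doorbell(kfd, db);
 */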
/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
		uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid,
			uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
			unsigned int qid,
			void __user *ctl_stack,
			u32 *ctl_stack_used_size,
			u32 *save_area_used_size);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
			      unsigned int fence_value,
			      unsigned int timeout_ms);
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	int map_process_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
			    unsigned int pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);
void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = kfd->ddev;

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif /* KFD_PRIV_H_INCLUDED */