/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"
#define KFD_SYSFS_FILE_MODE 0444

#define KFD_MMAP_DOORBELL_MASK 0x8000000000000ull
#define KFD_MMAP_EVENTS_MASK 0x4000000000000ull
#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000ull
/*
 * When working with the cp scheduler we should assign the HIQ manually, or
 * via the radeon driver, to a fixed hqd slot. Here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in cp scheduling, so we place the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0
/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
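/*
 * Illustrative use of kfd_alloc_struct() (example only, not part of the
 * original interface): the allocation size is derived from the pointee type,
 * so the struct name is never repeated:
 *
 *	struct kfd_process *process = kfd_alloc_struct(process);
 *
 *	if (!process)
 *		return ERR_PTR(-ENOMEM);
 */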
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
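/*
 * Illustrative CWSR buffer layout derived from the two defines above
 * (sketch; the cwsr_base variable is hypothetical):
 *
 *	tba_addr = cwsr_base;				first page, trap handler code (TBA)
 *	tma_addr = cwsr_base + KFD_CWSR_TMA_OFFSET;	second page, trap memory area (TMA)
 */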
/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048
/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;
/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;
/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;
/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;
/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode the firmware schedules the user mode
 * queues and the kernel queues such as HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration and the list
 * of currently running user mode queues to the cp.
 * The DIQ is a debugging queue that dispatches debugging commands to the HW.
 * In this scheduling mode the user mode queue over-subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: "No H/W scheduling" is a mode in which the driver
 * sets the command processor registers and programs the queues "manually".
 * This mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
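/*
 * Illustrative check (example only): the sched_policy module parameter
 * declared above carries one of these values, e.g.
 *
 *	if (sched_policy == KFD_SCHED_POLICY_NO_HWS)
 *		pr_debug("HW scheduling disabled, programming queues directly\n");
 */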
enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};
struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
	void (*interrupt_wq)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
};
struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
};
struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};
struct kfd_dev {
	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	/* GTT sub-allocator */
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR trap handler ISA */
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;
};
/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};
/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 * a specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field means that the queue is not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update it after updating write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
};
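/*
 * Illustrative initialization of queue_properties for a user mode compute
 * queue (sketch only; args is assumed to be the create-queue ioctl argument
 * struct and the specific values are for illustration):
 *
 *	struct queue_properties q_properties = {
 *		.type = KFD_QUEUE_TYPE_COMPUTE,
 *		.format = KFD_QUEUE_FORMAT_AQL,
 *		.priority = 15,
 *		.queue_percent = 100,
 *		.queue_address = args->ring_base_address,
 *		.queue_size = args->ring_size,
 *	};
 */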
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
 * that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;
	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;
	unsigned int sdma_id;
	struct kfd_process *process;
	struct kfd_dev *device;
};
/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
};
struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};
struct process_queue_manager {
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};
struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t sh_hidden_private_base;
};

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};
/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* Flag used to tell the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
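/*
 * Illustrative use of qpd_to_pdd() (example only): code that only has a
 * qcm_process_device pointer can recover the owning kfd_process_device:
 *
 *	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 *	struct kfd_process *p = pdd->process;
 */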
/* Process data */
struct kfd_process {
	/*
	 * kfd_process structures are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Used for delayed freeing of the kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;
};
/*
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
};
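/*
 * Illustrative handler shape (sketch; the handler name and args struct below
 * are hypothetical). The chardev dispatcher copies the user argument into a
 * kernel buffer before calling an amdkfd_ioctl_t, so a handler only casts
 * the buffer and acts on behalf of the process:
 *
 *	static int kfd_ioctl_example(struct file *filep, struct kfd_process *p,
 *				     void *data)
 *	{
 *		struct kfd_ioctl_example_args *args = data;
 *
 *		...
 *		return 0;
 *	}
 */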
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
void kfd_unref_process(struct kfd_process *p);

struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
int kfd_bind_processes_to_device(struct kfd_dev *dev);
void kfd_unbind_processes_from_device(struct kfd_dev *dev);
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma);
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);
/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
/* Doorbells */
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);
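/*
 * Illustrative kernel doorbell usage (sketch based on the declarations
 * above; new_wptr is a hypothetical write pointer value):
 *
 *	unsigned int doorbell_off;
 *	u32 __iomem *db = kfd_get_kernel_doorbell(kfd, &doorbell_off);
 *
 *	if (db) {
 *		write_kernel_doorbell(db, new_wptr);
 *		...
 *		kfd_release_kernel_doorbell(kfd, db);
 *	}
 */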
/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
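/*
 * Illustrative GTT sub-allocation pattern (sketch based on the two
 * declarations above):
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(kfd, size, &mem_obj))
 *		return -ENOMEM;
 *	...
 *	kfd_gtt_sa_free(kfd, mem_obj);
 */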
extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
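/*
 * Illustrative fence handshake (sketch of how the two values above can be
 * combined with pm_send_query_status() and amdkfd_fence_wait_timeout();
 * the fence_addr/fence_gpu_addr variables are assumptions):
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	ret = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					timeout_ms);
 */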
struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
};
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
			    unsigned int pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}