/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);
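
/* Lookups in kfd_processes_table run under kfd_processes_srcu; the removal
 * path (see kfd_process_notifier_release below) calls synchronize_srcu()
 * after hash_del_rcu(), so a reader can never see a kfd_process that is
 * being freed.
 */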
/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;
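
/* Note: an ordered workqueue executes at most one work item at a time, in
 * queueing order, which is what serializes the restore work items here.
 */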
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
                                          struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
struct kfd_procfs_tree {
        struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
{
        int val = 0;

        if (strcmp(attr->name, "pasid") == 0) {
                struct kfd_process *p = container_of(attr, struct kfd_process,
                                                     attr_pasid);
                val = p->pasid;
        } else {
                pr_err("Invalid attribute");
                return -EINVAL;
        }

        return snprintf(buffer, PAGE_SIZE, "%d\n", val);
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
        kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
        .show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
        .release = kfd_procfs_kobj_release,
        .sysfs_ops = &kfd_procfs_ops,
};
void kfd_procfs_init(void)
{
        int ret = 0;

        procfs.kobj = kfd_alloc_struct(procfs.kobj);
        if (!procfs.kobj)
                return;

        ret = kobject_init_and_add(procfs.kobj, &procfs_type,
                                   &kfd_device->kobj, "proc");
        if (ret) {
                pr_warn("Could not create procfs proc folder");
                /* If we fail to create the procfs, clean up */
                kfd_procfs_shutdown();
        }
}

void kfd_procfs_shutdown(void)
{
        if (procfs.kobj) {
                kobject_del(procfs.kobj);
                kobject_put(procfs.kobj);
                procfs.kobj = NULL;
        }
}
int kfd_process_create_wq(void)
{
        if (!kfd_process_wq)
                kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
        if (!kfd_restore_wq)
                kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

        if (!kfd_process_wq || !kfd_restore_wq) {
                kfd_process_destroy_wq();
                return -ENOMEM;
        }

        return 0;
}

void kfd_process_destroy_wq(void)
{
        if (kfd_process_wq) {
                destroy_workqueue(kfd_process_wq);
                kfd_process_wq = NULL;
        }
        if (kfd_restore_wq) {
                destroy_workqueue(kfd_restore_wq);
                kfd_restore_wq = NULL;
        }
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
                                   struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;

        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *      This function should only be called right after the process
 *      is created, while kfd_processes_mutex is still being held,
 *      to avoid concurrency. Because of that exclusiveness, we do
 *      not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
                                   uint64_t gpu_va, uint32_t size,
                                   uint32_t flags, void **kptr)
{
        struct kfd_dev *kdev = pdd->dev;
        struct kgd_mem *mem = NULL;
        int handle;
        int err;

        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
                                                      pdd->vm, &mem, NULL,
                                                      flags);
        if (err)
                goto err_alloc_mem;

        err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
        if (err)
                goto err_map_mem;

        err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
        }

        /* Create an obj handle so kfd_process_device_remove_obj_handle
         * will take care of the bo removal when the process finishes.
         * We do not need to take p->mutex, because the process is just
         * created and the ioctls have not had the chance to run.
         */
        handle = kfd_process_device_create_obj_handle(pdd, mem);

        if (handle < 0) {
                err = handle;
                goto free_gpuvm;
        }

        if (kptr) {
                err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
                                (struct kgd_mem *)mem, kptr, NULL);
                if (err) {
                        pr_debug("Map GTT BO to kernel failed\n");
                        goto free_obj_handle;
                }
        }

        return err;

free_obj_handle:
        kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
        kfd_process_free_gpuvm(mem, pdd);
        return err;

err_map_mem:
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
        /* kptr is optional; only clear it if the caller provided one */
        if (kptr)
                *kptr = NULL;
        return err;
}
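
/* kfd_process_alloc_gpuvm() is used below by
 * kfd_process_device_reserve_ib_mem() and
 * kfd_process_device_init_cwsr_dgpu() to place per-process buffers at
 * fixed GPU virtual addresses (qpd->ib_base and qpd->cwsr_base).
 */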
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *      process for IB usage. The reserved memory is used by KFD to
 *      submit IBs to AMDGPU from the kernel. If the memory is reserved
 *      successfully, ib_kaddr will have the CPU/kernel
 *      address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                         ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
                         ALLOC_MEM_FLAGS_WRITABLE |
                         ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (qpd->ib_kaddr || !qpd->ib_base)
                return 0;

        /* ib_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
                                      &kaddr);
        if (ret)
                return ret;

        qpd->ib_kaddr = kaddr;

        return 0;
}
struct kfd_process *kfd_create_process(struct file *filep)
{
        struct kfd_process *process;
        struct task_struct *thread = current;
        int ret;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        /*
         * take kfd processes mutex before starting of process creation
         * so there won't be a case where two threads of the same process
         * create two kfd_process structures
         */
        mutex_lock(&kfd_processes_mutex);

        /* A prior open of /dev/kfd could have already created the process. */
        process = find_process(thread);
        if (process) {
                pr_debug("Process already found\n");
        } else {
                process = create_process(thread, filep);
                if (IS_ERR(process))
                        goto out;

                if (!procfs.kobj)
                        goto out;

                process->kobj = kfd_alloc_struct(process->kobj);
                if (!process->kobj) {
                        pr_warn("Creating procfs kobject failed");
                        goto out;
                }
                ret = kobject_init_and_add(process->kobj, &procfs_type,
                                           procfs.kobj, "%d",
                                           (int)process->lead_thread->pid);
                if (ret) {
                        pr_warn("Creating procfs pid directory failed");
                        goto out;
                }

                process->attr_pasid.name = "pasid";
                process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
                sysfs_attr_init(&process->attr_pasid);
                ret = sysfs_create_file(process->kobj, &process->attr_pasid);
                if (ret)
                        pr_warn("Creating pasid for pid %d failed",
                                (int)process->lead_thread->pid);
        }
out:
        mutex_unlock(&kfd_processes_mutex);

        return process;
}
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
        struct kfd_process *process;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        process = find_process(thread);
        if (!process)
                return ERR_PTR(-EINVAL);

        return process;
}
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *process;

        hash_for_each_possible_rcu(kfd_processes_table, process,
                                   kfd_processes, (uintptr_t)mm)
                if (process->mm == mm)
                        return process;

        return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
        struct kfd_process *p;
        int idx;

        idx = srcu_read_lock(&kfd_processes_srcu);
        p = find_process_by_mm(thread->mm);
        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}
void kfd_unref_process(struct kfd_process *p)
{
        kref_put(&p->ref, kfd_process_ref_release);
}
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
        struct kfd_process *p = pdd->process;
        void *mem;
        int id;

        /*
         * Remove all handles from idr and release appropriate
         * local memory object
         */
        idr_for_each_entry(&pdd->alloc_idr, mem, id) {
                struct kfd_process_device *peer_pdd;

                list_for_each_entry(peer_pdd, &p->per_device_data,
                                    per_device_list) {
                        if (!peer_pdd->vm)
                                continue;
                        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }

                amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                kfd_process_device_free_bos(pdd);
}
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
        struct kfd_process_device *pdd, *temp;

        list_for_each_entry_safe(pdd, temp, &p->per_device_data,
                                 per_device_list) {
                pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
                         pdd->dev->id, p->pasid);

                if (pdd->drm_file) {
                        amdgpu_amdkfd_gpuvm_release_process_vm(
                                pdd->dev->kgd, pdd->vm);
                        fput(pdd->drm_file);
                } else if (pdd->vm) {
                        amdgpu_amdkfd_gpuvm_destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);
                }

                list_del(&pdd->per_device_list);

                if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
                        free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
                                   get_order(KFD_CWSR_TBA_TMA_SIZE));

                kfree(pdd->qpd.doorbell_bitmap);
                idr_destroy(&pdd->alloc_idr);

                kfree(pdd);
        }
}
/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);

        /* Remove the procfs files */
        if (p->kobj) {
                sysfs_remove_file(p->kobj, &p->attr_pasid);
                kobject_del(p->kobj);
                kobject_put(p->kobj);
                p->kobj = NULL;
        }

        kfd_iommu_unbind_process(p);

        kfd_process_free_outstanding_kfd_bos(p);

        kfd_process_destroy_pdds(p);
        dma_fence_put(p->ef);

        kfd_event_free_process(p);

        kfd_pasid_free(p->pasid);
        kfd_free_process_doorbells(p);

        mutex_destroy(&p->mutex);

        put_task_struct(p->lead_thread);

        kfree(p);
}
static void kfd_process_ref_release(struct kref *ref)
{
        struct kfd_process *p = container_of(ref, struct kfd_process, ref);

        INIT_WORK(&p->release_work, kfd_process_wq_release);
        queue_work(kfd_process_wq, &p->release_work);
}
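
/* The actual teardown is deferred to a workqueue so that
 * kfd_unref_process() stays safe to call from contexts that cannot
 * sleep; kfd_process_wq_release() takes mutexes and frees BOs.
 */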
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
        struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

        kfd_unref_process(p);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                         struct mm_struct *mm)
{
        struct kfd_process *p;
        struct kfd_process_device *pdd = NULL;

        /*
         * The kfd_process structure cannot be freed because the
         * mmu_notifier srcu is read-locked
         */
        p = container_of(mn, struct kfd_process, mmu_notifier);
        if (WARN_ON(p->mm != mm))
                return;

        mutex_lock(&kfd_processes_mutex);
        hash_del_rcu(&p->kfd_processes);
        mutex_unlock(&kfd_processes_mutex);
        synchronize_srcu(&kfd_processes_srcu);

        cancel_delayed_work_sync(&p->eviction_work);
        cancel_delayed_work_sync(&p->restore_work);

        mutex_lock(&p->mutex);

        /* Iterate over all process device data structures and if the
         * pdd is in debug mode, we should first force unregistration,
         * then we will be able to destroy the queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;

                mutex_lock(kfd_get_dbgmgr_mutex());
                if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
                        if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
                                kfd_dbgmgr_destroy(dev->dbgmgr);
                                dev->dbgmgr = NULL;
                        }
                }
                mutex_unlock(kfd_get_dbgmgr_mutex());
        }

        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);

        /* Indicate to other users that MM is no longer valid */
        p->mm = NULL;

        mutex_unlock(&p->mutex);

        mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
        mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
        .release = kfd_process_notifier_release,
};
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
        unsigned long offset;
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;
                struct qcm_process_device *qpd = &pdd->qpd;

                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;

                offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
                        << PAGE_SHIFT;
                qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
                        KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
                        MAP_SHARED, offset);

                if (IS_ERR_VALUE(qpd->tba_addr)) {
                        int err = qpd->tba_addr;

                        pr_err("Failure to set tba address. error %d.\n", err);
                        qpd->tba_addr = 0;
                        qpd->cwsr_kaddr = NULL;
                        return err;
                }

                memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

                qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
                pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                         qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
        }

        return 0;
}
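
/* For reference: CWSR is compute wave save/restore. TBA/TMA are the trap
 * handler's trap base address and trap memory area; the cwsr_isa trap
 * handler code is copied to the TBA in the two functions around this
 * comment (APU path above, dGPU path below).
 */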
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                         ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
                         ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
                return 0;

        /* cwsr_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
                                      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
        if (ret)
                return ret;

        qpd->cwsr_kaddr = kaddr;
        qpd->tba_addr = qpd->cwsr_base;

        memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

        qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
        pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

        return 0;
}
static struct kfd_process *create_process(const struct task_struct *thread,
                                          struct file *filep)
{
        struct kfd_process *process;
        int err = -ENOMEM;

        process = kzalloc(sizeof(*process), GFP_KERNEL);

        if (!process)
                goto err_alloc_process;

        process->pasid = kfd_pasid_alloc();
        if (process->pasid == 0)
                goto err_alloc_pasid;

        if (kfd_alloc_process_doorbells(process) < 0)
                goto err_alloc_doorbells;

        kref_init(&process->ref);

        mutex_init(&process->mutex);

        process->mm = thread->mm;

        /* register notifier */
        process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
        err = mmu_notifier_register(&process->mmu_notifier, process->mm);
        if (err)
                goto err_mmu_notifier;

        hash_add_rcu(kfd_processes_table, &process->kfd_processes,
                     (uintptr_t)process->mm);

        process->lead_thread = thread->group_leader;
        get_task_struct(process->lead_thread);

        INIT_LIST_HEAD(&process->per_device_data);

        kfd_event_init_process(process);

        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;

        /* init process apertures */
        process->is_32bit_user_mode = in_compat_syscall();
        err = kfd_init_apertures(process);
        if (err != 0)
                goto err_init_apertures;

        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();

        err = kfd_process_init_cwsr_apu(process, filep);
        if (err)
                goto err_init_cwsr;

        return process;

err_init_cwsr:
        kfd_process_free_outstanding_kfd_bos(process);
        kfd_process_destroy_pdds(process);
err_init_apertures:
        pqm_uninit(&process->pqm);
err_process_pqm_init:
        hash_del_rcu(&process->kfd_processes);
        synchronize_rcu();
        mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
        mutex_destroy(&process->mutex);
        kfd_free_process_doorbells(process);
err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
err_alloc_pasid:
        kfree(process);
err_alloc_process:
        return ERR_PTR(err);
}
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
                                struct kfd_dev *dev)
{
        unsigned int i;

        if (!KFD_IS_SOC15(dev->device_info->asic_family))
                return 0;

        qpd->doorbell_bitmap =
                kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                     BITS_PER_BYTE), GFP_KERNEL);
        if (!qpd->doorbell_bitmap)
                return -ENOMEM;

        /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
        for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
                if (i >= dev->shared_resources.non_cp_doorbells_start
                    && i <= dev->shared_resources.non_cp_doorbells_end) {
                        set_bit(i, qpd->doorbell_bitmap);
                        set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
                                qpd->doorbell_bitmap);
                        pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
                                 i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
                }
        }

        return 0;
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                       struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                if (pdd->dev == dev)
                        return pdd;

        return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                          struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;

        if (init_doorbell_bitmap(&pdd->qpd, dev)) {
                pr_err("Failed to init doorbell for process\n");
                kfree(pdd);
                return NULL;
        }

        pdd->dev = dev;
        INIT_LIST_HEAD(&pdd->qpd.queues_list);
        INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        list_add(&pdd->per_device_list, &p->per_device_data);

        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);

        return pdd;
}
/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file)
{
        struct kfd_process *p;
        struct kfd_dev *dev;
        int ret;

        if (pdd->vm)
                return drm_file ? -EBUSY : 0;

        p = pdd->process;
        dev = pdd->dev;

        if (drm_file)
                ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
                        dev->kgd, drm_file, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        else
                ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
        }

        amdgpu_vm_set_task_info(pdd->vm);

        ret = kfd_process_device_reserve_ib_mem(pdd);
        if (ret)
                goto err_reserve_ib_mem;
        ret = kfd_process_device_init_cwsr_dgpu(pdd);
        if (ret)
                goto err_init_cwsr;

        pdd->drm_file = drm_file;

        return 0;

err_init_cwsr:
err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
        if (!drm_file)
                amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
        pdd->vm = NULL;

        return ret;
}
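
/* Two callers exercise the two paths above: kfd_bind_process_to_device()
 * below passes a NULL drm_file to create a new VM, while the acquire-VM
 * ioctl path (AMDKFD_IOC_ACQUIRE_VM) passes a DRM render-node file so the
 * process shares one VM between its DRM and KFD usage.
 */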
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                                                      struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int err;

        pdd = kfd_get_process_device_data(dev, p);
        if (!pdd) {
                pr_err("Process device data doesn't exist\n");
                return ERR_PTR(-ENOMEM);
        }

        err = kfd_iommu_bind_process_to_device(pdd);
        if (err)
                return ERR_PTR(err);

        err = kfd_process_device_init_vm(pdd, NULL);
        if (err)
                return ERR_PTR(err);

        return pdd;
}
struct kfd_process_device *kfd_get_first_process_device_data(
                                                struct kfd_process *p)
{
        return list_first_entry(&p->per_device_data,
                                struct kfd_process_device,
                                per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
                                                struct kfd_process *p,
                                                struct kfd_process_device *pdd)
{
        if (list_is_last(&pdd->per_device_list, &p->per_device_data))
                return NULL;
        return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
        return !(list_empty(&p->per_device_data));
}
/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                         void *mem)
{
        return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
                                          int handle)
{
        if (handle < 0)
                return NULL;

        return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                          int handle)
{
        if (handle >= 0)
                idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
        struct kfd_process *p, *ret_p = NULL;
        unsigned int temp;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (p->pasid == pasid) {
                        kref_get(&p->ref);
                        ret_p = p;
                        break;
                }
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *p;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        p = find_process_by_mm(mm);
        if (p)
                kref_get(&p->ref);

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}
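
/* Callers of the two lookup functions above must drop the reference they
 * got with kfd_unref_process() when they are done with the process.
 */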
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r = 0;
        unsigned int n_evicted = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
                        pr_err("Failed to evict process queues\n");
                        goto fail;
                }
                n_evicted++;
        }

        return r;

fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd))
                        pr_err("Failed to restore queues\n");

                n_evicted--;
        }

        return r;
}
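
/* Evictions can come from more than one source, e.g. the eviction fence
 * (evict_process_worker below) and suspend (kfd_suspend_all_processes);
 * the per-device eviction count in qpd->evicted, maintained by the DQM's
 * evict/restore ops, is what lets these nest without restoring queues
 * prematurely.
 */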
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r, ret = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
                        pr_err("Failed to restore process queues\n");
                        if (!ret)
                                ret = r;
                }
        }

        return ret;
}
static void evict_process_worker(struct work_struct *work)
{
        int ret;
        struct kfd_process *p;
        struct delayed_work *dwork;

        dwork = to_delayed_work(work);

        /* Process termination destroys this worker thread. So during the
         * lifetime of this thread, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, eviction_work);
        WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
                  "Eviction fence mismatch\n");

        /* A narrow window of overlap between restore and evict work
         * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
         * unreserves KFD BOs, it is possible to be evicted again. But
         * restore has a few more steps to finish. So let's wait for any
         * previous restore work to complete.
         */
        flush_delayed_work(&p->restore_work);

        pr_debug("Started evicting pasid %d\n", p->pasid);
        ret = kfd_process_evict_queues(p);
        if (!ret) {
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
                queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

                pr_debug("Finished evicting pasid %d\n", p->pasid);
        } else
                pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct kfd_process *p;
        struct kfd_process_device *pdd;
        int ret = 0;

        dwork = to_delayed_work(work);

        /* Process termination destroys this worker thread. So during the
         * lifetime of this thread, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, restore_work);

        /* Call restore_process_bos on the first KGD device. This function
         * takes care of restoring the whole process including other devices.
         * Restore can fail if enough memory is not available. If so,
         * reschedule again.
         */
        pdd = list_first_entry(&p->per_device_data,
                               struct kfd_process_device,
                               per_device_list);

        pr_debug("Started restoring pasid %d\n", p->pasid);

        /* Set last_restore_timestamp before successful restoration.
         * Otherwise this would have to be set by KGD (restore_process_bos)
         * before KFD BOs are unreserved. If not, the process can be evicted
         * again before the timestamp is set.
         * If restore fails, the timestamp will be set again in the next
         * attempt. This would mean that the minimum GPU quanta would be
         * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
         * functions)
         */

        p->last_restore_timestamp = get_jiffies_64();
        ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
                                                      &p->ef);
        if (ret) {
                pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
                         p->pasid, PROCESS_BACK_OFF_TIME_MS);
                ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
                WARN(!ret, "reschedule restore work failed\n");
                return;
        }

        ret = kfd_process_restore_queues(p);
        if (!ret)
                pr_debug("Finished restoring pasid %d\n", p->pasid);
        else
                pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}
void kfd_suspend_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                cancel_delayed_work_sync(&p->eviction_work);
                cancel_delayed_work_sync(&p->restore_work);

                if (kfd_process_evict_queues(p))
                        pr_err("Failed to suspend process %d\n", p->pasid);
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
}
int kfd_resume_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
                        pr_err("Restore process %d failed during resume\n",
                               p->pasid);
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
        return ret;
}
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                          struct vm_area_struct *vma)
{
        struct kfd_process_device *pdd;
        struct qcm_process_device *qpd;

        if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
                pr_err("Incorrect CWSR mapping size.\n");
                return -EINVAL;
        }

        pdd = kfd_get_process_device_data(dev, process);
        if (!pdd)
                return -EINVAL;
        qpd = &pdd->qpd;

        qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(KFD_CWSR_TBA_TMA_SIZE));
        if (!qpd->cwsr_kaddr) {
                pr_err("Error allocating per process CWSR buffer.\n");
                return -ENOMEM;
        }

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
                | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
        /* Mapping pages to user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
                               KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
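
/* This is the APU counterpart of kfd_process_device_init_cwsr_dgpu():
 * the vm_mmap() call in kfd_process_init_cwsr_apu() uses a
 * KFD_MMAP_TYPE_RESERVED_MEM offset that is dispatched here, allocating
 * the CWSR pages and mapping them into the user process.
 */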
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

        if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
                /* Nothing to flush until a VMID is assigned, which
                 * only happens when the first queue is created.
                 */
                if (pdd->qpd.vmid)
                        f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
        } else {
                f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
        }
}
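
/* With HWS (the default), invalidation is keyed by PASID and the firmware
 * scheduler resolves the VMID; without HWS the driver owns the static
 * PASID-to-VMID mapping, so it invalidates the VMID directly.
 */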
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
        struct kfd_process *p;
        unsigned int temp;
        int r = 0;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                seq_printf(m, "Process %d PASID %d:\n",
                           p->lead_thread->tgid, p->pasid);

                mutex_lock(&p->mutex);
                r = pqm_debugfs_mqds(m, &p->pqm);
                mutex_unlock(&p->mutex);

                if (r)
                        break;
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return r;
}

#endif