mm: replace vma->vm_flags direct modifications with modifier calls
[linux-block.git] / drivers/gpu/drm/amd/amdkfd/kfd_process.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count counter.
	 *    It's possible that, during this step, a few SDMA queue nodes got deleted
	 *    from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would already be captured in the
	 *    SDMA past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}
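
/*
 * Worked example of the accounting above (illustrative, not driver code):
 * suppose pdd->sdma_past_activity_counter is 1000 and two SDMA queues report
 * current counters of 300 and 200 in step 2, giving 1000 + 300 + 200 = 1500.
 * If the second queue is destroyed before the second pass, its activity has
 * already been folded into the past activity counter by the destroy path, so
 * step 3 subtracts its cached sdma_val to avoid double counting:
 * 1500 - 200 = 1300.
 */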

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by current process. Translates the acquired wave count into the number of
 * compute units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The attribute
 * handle encapsulates the GPU device it is associated with, thereby allowing
 * collection of waves in flight, etc.
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
				       &max_waves_per_cu);

	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
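
/*
 * Worked example of the rounding above (illustrative): with wave_cnt = 96 and
 * max_waves_per_cu = 32, cu_cnt = (96 + 31) / 32 = 3. A partially occupied CU
 * counts as occupied: wave_cnt = 97 gives cu_cnt = (97 + 31) / 32 = 4.
 */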

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}
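
/*
 * Example (illustrative; the path prefix depends on how kfd_device is
 * registered on a given system): with the kfd device under
 * /sys/class/kfd/kfd, the files served by this show function can be read
 * from user space, e.g.
 *
 *	cat /sys/class/kfd/kfd/proc/<pid>/pasid
 *	cat /sys/class/kfd/kfd/proc/<pid>/vram_<gpuid>
 *	cat /sys/class/kfd/kfd/proc/<pid>/sdma_<gpuid>
 */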

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}
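
/*
 * Resulting per-queue layout (illustrative) under the process directory once
 * queues exist:
 *
 *	proc/<pid>/queues/<queue id>/size
 *	proc/<pid>/queues/<queue id>/type
 *	proc/<pid>/queues/<queue id>/gpuid
 */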

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd, void **kptr)
{
	struct kfd_dev *dev = pdd->dev;

	if (kptr && *kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		*kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 * This function should be called only right after the process
 * is created and while kfd_processes_mutex is still being held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						      pdd->drm_priv, mem, NULL,
						      flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
						    pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from the kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation,
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p = NULL;

	if (!pid) {
		task = current;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}
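
/*
 * Usage sketch (illustrative): lookups that take a reference must be paired
 * with kfd_unref_process() once the caller is done with the process:
 *
 *	struct kfd_process *p = kfd_lookup_process_by_pid(pid);
 *
 *	if (p) {
 *		... use p ...
 *		kfd_unref_process(p);
 *	}
 */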

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos()
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_dev *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
				pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		bitmap_free(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		if (pdd->dev->shared_resources.enable_mes)
			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
						   pdd->proc_ctx_bo);
		/*
		 * Before destroying pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}
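
/*
 * TMA layout sketch (illustrative): when the KFD CWSR trap handler is bound,
 * the first two 64-bit words of the first-level TMA hold the user's
 * second-level handler addresses:
 *
 *	cwsr_kaddr + KFD_CWSR_TMA_OFFSET + 0: second-level TBA
 *	cwsr_kaddr + KFD_CWSR_TMA_OFFSET + 8: second-level TMA
 *
 * The first-level trap handler chains to these addresses on a trap.
 */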

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}
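
/*
 * Example (illustrative): on a system with one Aldebaran (IP version 9.4.2)
 * and one GFXv10 GPU, kfd_process_xnack_mode(p, true) returns false because
 * the GFXv10 device cannot preempt shaders during page faults. With only the
 * Aldebaran present it returns true, as that chip supports per-process XNACK
 * mode selection.
 */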

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	err = kfd_event_init_process(process);
	if (err)
		goto err_event_init;
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0) {
		err = -ENOSPC;
		goto err_alloc_pasid;
	}

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* Avoid free_notifier to start kfd_process_wq_release if
	 * mmu_notifier_get failed because of pending signal.
	 */
	kref_get(&process->ref);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	kfd_unref_process(process);
	get_task_struct(process->lead_thread);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfd_event_free_process(process);
err_event_init:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev))
		return 0;

	qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					     GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
		 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
		 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			__set_bit(i, qpd->doorbell_bitmap);
			__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				  qpd->doorbell_bitmap);
		}
	}

	return 0;
}
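
/*
 * Example (illustrative; the actual range is provided by the GPU driver in
 * shared_resources): if non_cp_doorbells covers 0x060-0x07f, the loop above
 * marks bits 0x060-0x07f, plus the same range shifted by
 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET, as used, so the doorbell allocator never
 * hands those slots to user queues.
 */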

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;
	int retval = 0;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);

	if (dev->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						     AMDGPU_MES_PROC_CTX_SIZE,
						     &pdd->proc_ctx_bo,
						     &pdd->proc_ctx_gpu_addr,
						     &pdd->proc_ctx_cpu_ptr,
						     false);
		if (retval) {
			pr_err("failed to allocate process context bo\n");
			goto err_free_pdd;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	p->pdds[p->n_pdds++] = pdd;

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Pointer to a DRM file descriptor (required)
 *
 * The VM is acquired from the given DRM file descriptor. If successful,
 * the @pdd takes ownership of the file descriptor.
 *
 * If @drm_file is NULL, -EINVAL is returned.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
						     &p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;
	atomic64_set(&pdd->tlb_seq, 0);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
	if (ret)
		goto err_set_pasid;

	pdd->drm_file = drm_file;

	return 0;

err_set_pasid:
	kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
	kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
	pdd->drm_priv = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once device pdd is created until
	 * pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime_usage counter is incremented just once
	 * per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
	}

	return ERR_PTR(err);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}
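
/*
 * Usage sketch (illustrative): the allocation path stores a BO in the
 * per-device idr and later resolves or drops the handle, with the process
 * lock held:
 *
 *	int handle = kfd_process_device_create_obj_handle(pdd, mem);
 *
 *	if (handle >= 0) {
 *		void *m = kfd_process_device_translate_handle(pdd, handle);
 *		... use m ...
 *		kfd_process_device_remove_obj_handle(pdd, handle);
 *	}
 */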

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
1737}
1738
0aeaaf64 1739/* kfd_process_evict_queues - Evict all user queues of a process
26103436
FK
1740 *
1741 * Eviction is reference-counted per process-device. This means multiple
1742 * evictions from different sources can be nested safely.
1743 */
c7f21978 1744int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
26103436 1745{
26103436 1746 int r = 0;
6ae27841 1747 int i;
26103436
FK
1748 unsigned int n_evicted = 0;
1749
6ae27841
AS
1750 for (i = 0; i < p->n_pdds; i++) {
1751 struct kfd_process_device *pdd = p->pdds[i];
1752
c7f21978
PY
1753 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1754 trigger);
1755
26103436
FK
1756 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1757 &pdd->qpd);
b8c20c74 1758 /* evict return -EIO if HWS is hang or asic is resetting, in this case
1759 * we would like to set all the queues to be in evicted state to prevent
1760 * them been add back since they actually not be saved right now.
1761 */
1762 if (r && r != -EIO) {
26103436
FK
1763 pr_err("Failed to evict process queues\n");
1764 goto fail;
1765 }
1766 n_evicted++;
1767 }
1768
1769 return r;
1770
1771fail:
1772 /* To keep state consistent, roll back partial eviction by
1773 * restoring queues
1774 */
6ae27841
AS
1775 for (i = 0; i < p->n_pdds; i++) {
1776 struct kfd_process_device *pdd = p->pdds[i];
1777
26103436
FK
1778 if (n_evicted == 0)
1779 break;
c7f21978
PY
1780
1781 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1782
26103436
FK
1783 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1784 &pdd->qpd))
1785 pr_err("Failed to restore queues\n");
1786
1787 n_evicted--;
1788 }
1789
1790 return r;
1791}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}
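
/*
 * Sketch of the nesting guarantee documented above: eviction is
 * reference-counted per process-device, so independent evictions may
 * overlap as long as each one is paired with its own restore, and the
 * queues only actually run again after the last restore. Hypothetical
 * example, with error handling elided.
 */
static void __maybe_unused example_nested_eviction(struct kfd_process *p)
{
	kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND);

	kfd_process_restore_queues(p);	/* still evicted after this */
	kfd_process_restore_queues(p);	/* queues resume here */
}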

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
			uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, the process can be evicted again while
	 * the restore still has a few steps left to finish. So wait for
	 * any previous restore work to complete first.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before attempting the restore.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved; if not, the process could be
	 * evicted again before the timestamp is set.
	 * If the restore fails, the timestamp is simply set again on the
	 * next attempt. This means the minimum GPU quantum is
	 * PROCESS_ACTIVE_TIME_MS minus the time taken to execute the two
	 * functions below.
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
					 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}
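
/*
 * A minimal caller-side sketch, assuming a hypothetical quiesce path
 * (the real entry points live elsewhere in the driver): every
 * kfd_suspend_all_processes() must eventually be matched by a
 * kfd_resume_all_processes(), or user queues stay evicted.
 */
static int __maybe_unused example_quiesce_and_resume(void)
{
	int ret;

	kfd_suspend_all_processes();	/* evict all user queues */

	/* ... hypothetical hardware reset or suspend happens here ... */

	ret = kfd_resume_all_processes();	/* reschedule restore work */
	if (ret)
		pr_err("example: resuming KFD processes failed (%d)\n", ret);
	return ret;
}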

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
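
/*
 * The vm_flags_set() call above replaces what used to be a direct
 * "vma->vm_flags |= ..." modification. A minimal sketch of the modifier
 * call in a hypothetical mmap handler; vm_flags_set() is expected to be
 * called with the mmap lock held for writing, which an mmap file
 * operation guarantees.
 */
static int __maybe_unused example_mmap(struct file *filp,
				       struct vm_area_struct *vma)
{
	/* Old style, no longer allowed: vma->vm_flags |= VM_DONTCOPY; */
	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
	return 0;
}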

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	struct kfd_dev *dev = pdd->dev;

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more, which is harmless.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							 pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
						  pdd->process->pasid, type);
	}
}
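
/*
 * Standalone sketch of the sequence-number deduplication used in
 * kfd_flush_tlb() above, assuming an atomic64_t that caches the last
 * value flushed. The xchg reads the old value and publishes the new one
 * in a single atomic step, so racing callers at worst flush one extra
 * time. example_need_flush() is hypothetical.
 */
static bool __maybe_unused example_need_flush(atomic64_t *cached_seq,
					      uint64_t new_seq)
{
	/* True only when new_seq differs from the previously cached value */
	return atomic64_xchg(cached_seq, new_seq) != new_seq;
}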

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}
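
/*
 * Round-trip sketch for the two ID spaces handled above: user_gpu_id is
 * the ID the process knows (it can differ from the hardware ID, e.g.
 * after checkpoint/restore), while pdd->dev->id identifies the device
 * actually backing it. example_gpu_id_round_trips() is hypothetical.
 */
static bool __maybe_unused
example_gpu_id_round_trips(struct kfd_process *p, uint32_t gpu_id)
{
	struct kfd_process_device *pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (!pdd)
		return false;

	/* Mapping the actual device ID back must yield the user ID */
	return kfd_process_get_user_gpu_id(p, pdd->dev->id) == gpu_id;
}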

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif