drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
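
/*
 * Example usage (illustrative sketch, not part of this file's call
 * paths): a caller that can handle 16-bit PASIDs might do
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 *
 * Because the loop above retries with a smaller width on -ENOSPC, a
 * 16-bit request is first served from the range [1 << 15, 1 << 16)
 * and only falls back to shorter PASIDs when that range is exhausted,
 * keeping the small IDs available for devices that support fewer
 * PASID bits.
 */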

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = dma_resv_get_fences(resv, true, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

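		/* On success dma_fence_array_create() takes ownership of
		 * the fences array and of the references we hold on each
		 * fence, which is why the array is only freed here on
		 * failure.
		 */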
		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
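
/*
 * Example usage (illustrative sketch): when tearing down a VM whose
 * PASID may still be referenced by in-flight work, a caller would hand
 * in a reservation object instead of freeing the PASID immediately:
 *
 *	amdgpu_pasid_free_delayed(root_bo->tbo.base.resv, vm->pasid);
 *
 * "root_bo" stands in for the VM's root page-directory buffer object;
 * the exact source of the reservation object depends on the caller.
 */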

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
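
/*
 * Example check (illustrative sketch): a caller would typically treat
 * a reset as "all cached VMID state is stale" and force a full flush
 * on the next submission:
 *
 *	if (amdgpu_vmid_had_gpu_reset(adev, id))
 *		job->vm_needs_flush = true;
 */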

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait on to
 * the sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

	if (!dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(sync, ring->vmid_wait);

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait until one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

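		/* Signal-on-any array: it signals as soon as any of the
		 * busy VMIDs retires its oldest fence, i.e. as soon as
		 * one VMID becomes idle again.
		 */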
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(sync, &array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	bool needs_flush = vm->use_cpu_for_update;
	int r = 0;

	*id = vm->reserved_vmid[vmhub];
	if (updates && (*id)->flushed_updates &&
	    updates->context == (*id)->flushed_updates->context &&
	    !dma_fence_is_later(updates, (*id)->flushed_updates))
		updates = NULL;

	if ((*id)->owner != vm->immediate.fence_context ||
	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
	    updates || !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context from being starved by another */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			r = amdgpu_sync_fence(sync, tmp);
			return r;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, fence);
	if (r)
		return r;

	if (updates) {
		dma_fence_put((*id)->flushed_updates);
		(*id)->flushed_updates = dma_fence_get(updates);
	}
	job->vm_needs_flush = needs_flush;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;
		struct dma_fence *flushed;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		flushed = (*id)->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active, fence);
		if (r)
			return r;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put((*id)->flushed_updates);
			(*id)->flushed_updates = dma_fence_get(updates);
		}

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			struct dma_fence *updates = sync->last_vm_update;

			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active, fence);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
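
/*
 * Example usage (illustrative sketch): grabbing a VMID is part of the
 * job preparation done before a command submission is pushed to a
 * ring, roughly along these lines:
 *
 *	r = amdgpu_vmid_grab(vm, ring, &job->sync, fence, job);
 *	if (r)
 *		return r;
 *	// job->vmid and job->vm_needs_flush are now valid
 *
 * The surrounding structure names are simplified here; the real call
 * sits in the VM handling of the job submission path.
 */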

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limit of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first (least recently used) VMID from the LRU list */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			&id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}
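
/*
 * Example usage (illustrative sketch): a VM that wants a fixed VMID on
 * the GFX hub for its lifetime would bracket it like this, assuming
 * AMDGPU_GFXHUB_0 as the vmhub index:
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB_0);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB_0);
 *
 * In the driver this is reachable from userspace through the
 * AMDGPU_VM_OP_RESERVE_VMID operation of the VM ioctl, e.g. for
 * debugging use cases.
 */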

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

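		/* VMID 0 is skipped: it is the system VM and is never
		 * handed out by the manager.
		 */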
		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VMID manager structures.
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - clean up the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Clean up the VMID manager and free its resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}