drm/amdgpu: rename vm_id to vmid
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
        int pasid = -EINVAL;

        for (bits = min(bits, 31U); bits > 0; bits--) {
                pasid = ida_simple_get(&amdgpu_pasid_ida,
                                       1U << (bits - 1), 1U << bits,
                                       GFP_KERNEL);
                if (pasid != -ENOSPC)
                        break;
        }

        return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
        ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
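
/*
 * Illustrative sketch, not part of the driver: how a caller might pair
 * amdgpu_pasid_alloc() with amdgpu_pasid_free(). The function name and the
 * 16-bit width below are made-up example values; only the two PASID calls
 * are real.
 */
#if 0
static int amdgpu_pasid_usage_example(void)
{
        int pasid;

        /* Ask for a PASID up to 16 bits wide. The allocator tries the
         * widest free range first so that narrow PASIDs stay available
         * for devices that need them.
         */
        pasid = amdgpu_pasid_alloc(16);
        if (pasid < 0)
                return pasid;

        /* ... hand the PASID to the IOMMU and GPU VM here ... */

        amdgpu_pasid_free(pasid);
        return 0;
}
#endif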

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id)
{
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}

/* id_mgr->lock must be held */
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
                                            struct amdgpu_ring *ring,
                                            struct amdgpu_sync *sync,
                                            struct dma_fence *fence,
                                            struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct dma_fence *updates = sync->last_vm_update;
        int r = 0;
        struct dma_fence *flushed, *tmp;
        bool needs_flush = vm->use_cpu_for_update;

        flushed = id->flushed_updates;
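        /* The reserved ID can be reused without a flush only if no GPU
         * reset happened since its last use, this VM still owns it, the
         * page directory address is unchanged, every page table update it
         * has seen is flushed, and the last flush on this ring signaled.
         */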
        if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
            (atomic64_read(&id->owner) != vm->client_id) ||
            (job->vm_pd_addr != id->pd_gpu_addr) ||
            (updates && (!flushed || updates->context != flushed->context ||
                        dma_fence_is_later(updates, flushed))) ||
            (!id->last_flush || (id->last_flush->context != fence_context &&
                                 !dma_fence_is_signaled(id->last_flush)))) {
                needs_flush = true;
                /* to prevent one context from being starved by another */
                id->pd_gpu_addr = 0;
                tmp = amdgpu_sync_peek_fence(&id->active, ring);
                if (tmp) {
                        r = amdgpu_sync_fence(adev, sync, tmp, false);
                        return r;
                }
        }

        /* Good, we can use this VMID. Remember this submission as
         * user of the VMID.
         */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
        if (r)
                goto out;

        if (updates && (!flushed || updates->context != flushed->context ||
                        dma_fence_is_later(updates, flushed))) {
                dma_fence_put(id->flushed_updates);
                id->flushed_updates = dma_fence_get(updates);
        }
        id->pd_gpu_addr = job->vm_pd_addr;
        atomic64_set(&id->owner, vm->client_id);
        job->vm_needs_flush = needs_flush;
        if (needs_flush) {
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
        job->vmid = id - id_mgr->ids;
        trace_amdgpu_vm_grab_id(vm, ring, job);
out:
        return r;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_sync *sync, struct dma_fence *fence,
                     struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        struct amdgpu_vmid *id, *idle;
        struct dma_fence **fences;
        unsigned i;
        int r = 0;

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
                mutex_unlock(&id_mgr->lock);
                return r;
        }
        fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
        if (!fences) {
                mutex_unlock(&id_mgr->lock);
                return -ENOMEM;
        }
        /* Check if we have an idle VMID */
        i = 0;
        list_for_each_entry(idle, &id_mgr->ids_lru, list) {
                fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
                if (!fences[i])
                        break;
                ++i;
        }

        /* If we can't find an idle VMID to use, wait till one becomes available */
        if (&idle->list == &id_mgr->ids_lru) {
                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
                struct dma_fence_array *array;
                unsigned j;

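                /* Build a signal-on-any fence array from the first active
                 * fence of every VMID: it signals as soon as one VMID
                 * becomes idle again, and the grab is then retried from
                 * the job's dependency handling.
                 */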
                for (j = 0; j < i; ++j)
                        dma_fence_get(fences[j]);

                array = dma_fence_array_create(i, fences, fence_context,
                                               seqno, true);
                if (!array) {
                        for (j = 0; j < i; ++j)
                                dma_fence_put(fences[j]);
                        kfree(fences);
                        r = -ENOMEM;
                        goto error;
                }

                r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
                dma_fence_put(&array->base);
                if (r)
                        goto error;

                mutex_unlock(&id_mgr->lock);
                return 0;
        }
        kfree(fences);

        job->vm_needs_flush = vm->use_cpu_for_update;
        /* Check if we can use a VMID already assigned to this VM */
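        /* Walk the LRU in reverse: the most recently used IDs sit at the
         * tail and are the most likely to still hold this VM's page
         * directory and flushed updates.
         */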
        list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
                struct dma_fence *flushed;
                bool needs_flush = vm->use_cpu_for_update;

                /* Check all the prerequisites to using this VMID */
                if (amdgpu_vmid_had_gpu_reset(adev, id))
                        continue;

                if (atomic64_read(&id->owner) != vm->client_id)
                        continue;

                if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;

                if (!id->last_flush ||
                    (id->last_flush->context != fence_context &&
                     !dma_fence_is_signaled(id->last_flush)))
                        needs_flush = true;

                flushed = id->flushed_updates;
                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
                        needs_flush = true;

                /* Concurrent flushes are only possible starting with Vega10 */
                if (adev->asic_type < CHIP_VEGA10 && needs_flush)
                        continue;

                /* Good, we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
                r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
                if (r)
                        goto error;

                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
                        dma_fence_put(id->flushed_updates);
                        id->flushed_updates = dma_fence_get(updates);
                }

                if (needs_flush)
                        goto needs_flush;
                else
                        goto no_flush_needed;
        }

        /* Still no ID to use? Then use the idle one found earlier */
        id = idle;

        /* Remember this submission as user of the VMID */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
        if (r)
                goto error;

        id->pd_gpu_addr = job->vm_pd_addr;
        dma_fence_put(id->flushed_updates);
        id->flushed_updates = dma_fence_get(updates);
        atomic64_set(&id->owner, vm->client_id);

needs_flush:
        job->vm_needs_flush = true;
        dma_fence_put(id->last_flush);
        id->last_flush = NULL;

no_flush_needed:
        list_move_tail(&id->list, &id_mgr->ids_lru);

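        /* The hardware VMID is simply the offset of the ID within the
         * per-hub ID array.
         */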
        job->vmid = id - id_mgr->ids;
        trace_amdgpu_vm_grab_id(vm, ring, job);

error:
        mutex_unlock(&id_mgr->lock);
        return r;
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr;
        struct amdgpu_vmid *idle;
        int r = 0;

        id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
            AMDGPU_VM_MAX_RESERVED_VMID) {
                DRM_ERROR("Over limit of reserved VMIDs\n");
                atomic_dec(&id_mgr->reserved_vmid_num);
                r = -EINVAL;
                goto unlock;
        }
        /* Select the first (least recently used) VMID */
        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
        list_del_init(&idle->list);
        vm->reserved_vmid[vmhub] = idle;
        mutex_unlock(&id_mgr->lock);

        return 0;
unlock:
        mutex_unlock(&id_mgr->lock);
        return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                list_add(&vm->reserved_vmid[vmhub]->list,
                        &id_mgr->ids_lru);
                vm->reserved_vmid[vmhub] = NULL;
                atomic_dec(&id_mgr->reserved_vmid_num);
        }
        mutex_unlock(&id_mgr->lock);
}
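
/*
 * Illustrative sketch, not part of the driver: pairing
 * amdgpu_vmid_alloc_reserved() with amdgpu_vmid_free_reserved(). The
 * function name is made up, and AMDGPU_GFXHUB is assumed here as the
 * graphics hub index.
 */
#if 0
static int amdgpu_vmid_reserve_example(struct amdgpu_device *adev,
                                       struct amdgpu_vm *vm)
{
        int r;

        /* Pin one VMID to this VM so amdgpu_vmid_grab() always takes the
         * reserved path for it on the GFX hub.
         */
        r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
        if (r)
                return r;

        /* ... submit jobs using the VM ... */

        amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
        return 0;
}
#endif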

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                       unsigned vmid)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[vmid];

        atomic64_set(&id->owner, 0);
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                for (j = 1; j < id_mgr->num_ids; ++j)
                        amdgpu_vmid_reset(adev, i, j);
        }
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VMID manager structures.
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
                atomic_set(&id_mgr->reserved_vmid_num, 0);

                /* skip over VMID 0, since it is the system VM */
                for (j = 1; j < id_mgr->num_ids; ++j) {
                        amdgpu_vmid_reset(adev, i, j);
                        amdgpu_sync_create(&id_mgr->ids[j].active);
                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
                }
        }

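        /* One fence context per ring, used by amdgpu_vmid_grab() for the
         * "wait for an idle VMID" fence arrays.
         */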
        adev->vm_manager.fence_context =
                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
}

/**
 * amdgpu_vmid_mgr_fini - cleanup the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VMID manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_destroy(&id_mgr->lock);
                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
                        struct amdgpu_vmid *id = &id_mgr->ids[j];

                        amdgpu_sync_free(&id->active);
                        dma_fence_put(id->flushed_updates);
                        dma_fence_put(id->last_flush);
                }
        }
}