drivers/gpu/drm/ttm/ttm_bo.c
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/dma-resv.h>
44
45 #include "ttm_module.h"
46
47 /* default destructor */
48 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
49 {
50         kfree(bo);
51 }
52
53 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
54                                         struct ttm_placement *placement)
55 {
56         struct drm_printer p = drm_debug_printer(TTM_PFX);
57         struct ttm_resource_manager *man;
58         int i, mem_type;
59
60         drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
61                    bo, bo->resource->num_pages, bo->base.size >> 10,
62                    bo->base.size >> 20);
63         for (i = 0; i < placement->num_placement; i++) {
64                 mem_type = placement->placement[i].mem_type;
65                 drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
66                            i, placement->placement[i].flags, mem_type);
67                 man = ttm_manager_type(bo->bdev, mem_type);
68                 ttm_resource_manager_debug(man, &p);
69         }
70 }
71
72 /**
73  * ttm_bo_move_to_lru_tail
74  *
75  * @bo: The buffer object.
76  *
77  * Move this BO to the tail of all lru lists used to lookup and reserve an
78  * object. This function must be called with struct ttm_device::lru_lock
79  * held, and is used to make a BO less likely to be considered for eviction.
80  */
81 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
82 {
83         dma_resv_assert_held(bo->base.resv);
84
85         if (bo->resource)
86                 ttm_resource_move_to_lru_tail(bo->resource);
87 }
88 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
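/*
 * Example usage (an illustrative sketch, not code from any particular
 * driver): a driver would typically bump a BO on the LRU right after command
 * submission, with the reservation and the device LRU lock held:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 *	dma_resv_unlock(bo->base.resv);
 */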
89
90 /**
91  * ttm_bo_set_bulk_move - update the BO's bulk move object
92  *
93  * @bo: The buffer object.
94  *
95  * Update the BO's bulk move object, making sure that resources are added and
96  * removed as well. A bulk move allows moving many resources on the LRU at once,
97  * resulting in much less overhead of maintaining the LRU.
98  * The only requirement is that the resources stay together on the LRU and are
99  * never separated. This is enforced by setting the bulk_move structure on a BO.
100  * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
101  * their LRU list.
102  */
103 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
104                           struct ttm_lru_bulk_move *bulk)
105 {
106         dma_resv_assert_held(bo->base.resv);
107
108         if (bo->bulk_move == bulk)
109                 return;
110
111         spin_lock(&bo->bdev->lru_lock);
112         if (bo->bulk_move && bo->resource)
113                 ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
114         bo->bulk_move = bulk;
115         if (bo->bulk_move && bo->resource)
116                 ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
117         spin_unlock(&bo->bdev->lru_lock);
118 }
119 EXPORT_SYMBOL(ttm_bo_set_bulk_move);
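/*
 * Example usage (an illustrative sketch, assuming the ttm_lru_bulk_move_init()
 * helper from ttm_resource.c): a driver that keeps a group of BOs on one
 * shared reservation object can track them in a single ttm_lru_bulk_move and
 * bump the whole group with constant overhead:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *
 *	For each BO in the group, with its reservation held:
 *
 *	ttm_bo_set_bulk_move(bo, &bulk);
 *
 *	Later, e.g. after command submission, move the whole group at once:
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bo->bdev->lru_lock);
 */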
120
121 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
122                                   struct ttm_resource *mem, bool evict,
123                                   struct ttm_operation_ctx *ctx,
124                                   struct ttm_place *hop)
125 {
126         struct ttm_resource_manager *old_man, *new_man;
127         struct ttm_device *bdev = bo->bdev;
128         int ret;
129
130         old_man = ttm_manager_type(bdev, bo->resource->mem_type);
131         new_man = ttm_manager_type(bdev, mem->mem_type);
132
133         ttm_bo_unmap_virtual(bo);
134
135         /*
136          * Create and bind a ttm if required.
137          */
138
139         if (new_man->use_tt) {
140                 /* Zero init the new TTM structure if the old location should
141                  * have used one as well.
142                  */
143                 ret = ttm_tt_create(bo, old_man->use_tt);
144                 if (ret)
145                         goto out_err;
146
147                 if (mem->mem_type != TTM_PL_SYSTEM) {
148                         ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
149                         if (ret)
150                                 goto out_err;
151                 }
152         }
153
154         ret = dma_resv_reserve_fences(bo->base.resv, 1);
155         if (ret)
156                 goto out_err;
157
158         ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
159         if (ret) {
160                 if (ret == -EMULTIHOP)
161                         return ret;
162                 goto out_err;
163         }
164
165         ctx->bytes_moved += bo->base.size;
166         return 0;
167
168 out_err:
169         new_man = ttm_manager_type(bdev, bo->resource->mem_type);
170         if (!new_man->use_tt)
171                 ttm_bo_tt_destroy(bo);
172
173         return ret;
174 }
175
176 /*
177  * Called with bo::resv held.
178  * Releases GPU memory type usage on destruction.
179  * This is the place to put in driver specific hooks to release
180  * driver private resources.
181  * The caller is responsible for releasing the bo::resv lock afterwards.
182  */
183
184 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
185 {
186         if (bo->bdev->funcs->delete_mem_notify)
187                 bo->bdev->funcs->delete_mem_notify(bo);
188
189         ttm_bo_tt_destroy(bo);
190         ttm_resource_free(bo, &bo->resource);
191 }
192
193 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
194 {
195         int r;
196
197         if (bo->base.resv == &bo->base._resv)
198                 return 0;
199
200         BUG_ON(!dma_resv_trylock(&bo->base._resv));
201
202         r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
203         dma_resv_unlock(&bo->base._resv);
204         if (r)
205                 return r;
206
207         if (bo->type != ttm_bo_type_sg) {
208                 /* This works because the BO is about to be destroyed and nobody
209          * references it anymore. The only tricky case is the trylock on
210                  * the resv object while holding the lru_lock.
211                  */
212                 spin_lock(&bo->bdev->lru_lock);
213                 bo->base.resv = &bo->base._resv;
214                 spin_unlock(&bo->bdev->lru_lock);
215         }
216
217         return r;
218 }
219
220 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
221 {
222         struct dma_resv *resv = &bo->base._resv;
223         struct dma_resv_iter cursor;
224         struct dma_fence *fence;
225
226         dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
227         dma_resv_for_each_fence_unlocked(&cursor, fence) {
228                 if (!fence->ops->signaled)
229                         dma_fence_enable_sw_signaling(fence);
230         }
231         dma_resv_iter_end(&cursor);
232 }
233
234 /**
235  * ttm_bo_cleanup_refs
236  * If the bo is idle, remove it from the lru lists and unref it.
237  * If not idle, block if possible.
238  *
239  * Must be called with lru_lock and reservation held, this function
240  * will drop the lru lock and optionally the reservation lock before returning.
241  *
242  * @bo:                    The buffer object to clean-up
243  * @interruptible:         Any sleeps should occur interruptibly.
244  * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
245  * @unlock_resv:           Unlock the reservation lock as well.
246  */
247
248 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
249                                bool interruptible, bool no_wait_gpu,
250                                bool unlock_resv)
251 {
252         struct dma_resv *resv = &bo->base._resv;
253         int ret;
254
255         if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
256                 ret = 0;
257         else
258                 ret = -EBUSY;
259
260         if (ret && !no_wait_gpu) {
261                 long lret;
262
263                 if (unlock_resv)
264                         dma_resv_unlock(bo->base.resv);
265                 spin_unlock(&bo->bdev->lru_lock);
266
267                 lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
268                                              interruptible,
269                                              30 * HZ);
270
271                 if (lret < 0)
272                         return lret;
273                 else if (lret == 0)
274                         return -EBUSY;
275
276                 spin_lock(&bo->bdev->lru_lock);
277                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
278                         /*
279                          * We raced and lost: someone else holds the reservation now
280                          * and is probably busy in ttm_bo_cleanup_memtype_use.
281                          *
282                          * Even if that's not the case, any delayed destruction would
283                          * succeed because we finished waiting, so just return success
284                          * here.
285                          */
286                         spin_unlock(&bo->bdev->lru_lock);
287                         return 0;
288                 }
289                 ret = 0;
290         }
291
292         if (ret || unlikely(list_empty(&bo->ddestroy))) {
293                 if (unlock_resv)
294                         dma_resv_unlock(bo->base.resv);
295                 spin_unlock(&bo->bdev->lru_lock);
296                 return ret;
297         }
298
299         list_del_init(&bo->ddestroy);
300         spin_unlock(&bo->bdev->lru_lock);
301         ttm_bo_cleanup_memtype_use(bo);
302
303         if (unlock_resv)
304                 dma_resv_unlock(bo->base.resv);
305
306         ttm_bo_put(bo);
307
308         return 0;
309 }
310
311 /*
312  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
313  * encountered buffers.
314  */
315 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
316 {
317         struct list_head removed;
318         bool empty;
319
320         INIT_LIST_HEAD(&removed);
321
322         spin_lock(&bdev->lru_lock);
323         while (!list_empty(&bdev->ddestroy)) {
324                 struct ttm_buffer_object *bo;
325
326                 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
327                                       ddestroy);
328                 list_move_tail(&bo->ddestroy, &removed);
329                 if (!ttm_bo_get_unless_zero(bo))
330                         continue;
331
332                 if (remove_all || bo->base.resv != &bo->base._resv) {
333                         spin_unlock(&bdev->lru_lock);
334                         dma_resv_lock(bo->base.resv, NULL);
335
336                         spin_lock(&bdev->lru_lock);
337                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
338
339                 } else if (dma_resv_trylock(bo->base.resv)) {
340                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
341                 } else {
342                         spin_unlock(&bdev->lru_lock);
343                 }
344
345                 ttm_bo_put(bo);
346                 spin_lock(&bdev->lru_lock);
347         }
348         list_splice_tail(&removed, &bdev->ddestroy);
349         empty = list_empty(&bdev->ddestroy);
350         spin_unlock(&bdev->lru_lock);
351
352         return empty;
353 }
354
355 static void ttm_bo_release(struct kref *kref)
356 {
357         struct ttm_buffer_object *bo =
358             container_of(kref, struct ttm_buffer_object, kref);
359         struct ttm_device *bdev = bo->bdev;
360         int ret;
361
362         WARN_ON_ONCE(bo->pin_count);
363         WARN_ON_ONCE(bo->bulk_move);
364
365         if (!bo->deleted) {
366                 ret = ttm_bo_individualize_resv(bo);
367                 if (ret) {
368                         /* Last resort: if we fail to allocate memory for the
369                          * fences, block for the BO to become idle
370                          */
371                         dma_resv_wait_timeout(bo->base.resv,
372                                               DMA_RESV_USAGE_BOOKKEEP, false,
373                                               30 * HZ);
374                 }
375
376                 if (bo->bdev->funcs->release_notify)
377                         bo->bdev->funcs->release_notify(bo);
378
379                 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
380                 ttm_mem_io_free(bdev, bo->resource);
381         }
382
383         if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
384             !dma_resv_trylock(bo->base.resv)) {
385                 /* The BO is not idle, resurrect it for delayed destroy */
386                 ttm_bo_flush_all_fences(bo);
387                 bo->deleted = true;
388
389                 spin_lock(&bo->bdev->lru_lock);
390
391                 /*
392                  * Make pinned bos immediately available to
393                  * shrinkers, now that they are queued for
394                  * destruction.
395                  *
396                  * FIXME: QXL is triggering this. Can be removed when the
397                  * driver is fixed.
398                  */
399                 if (bo->pin_count) {
400                         bo->pin_count = 0;
401                         ttm_resource_move_to_lru_tail(bo->resource);
402                 }
403
404                 kref_init(&bo->kref);
405                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
406                 spin_unlock(&bo->bdev->lru_lock);
407
408                 schedule_delayed_work(&bdev->wq,
409                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
410                 return;
411         }
412
413         spin_lock(&bo->bdev->lru_lock);
414         list_del(&bo->ddestroy);
415         spin_unlock(&bo->bdev->lru_lock);
416
417         ttm_bo_cleanup_memtype_use(bo);
418         dma_resv_unlock(bo->base.resv);
419
420         atomic_dec(&ttm_glob.bo_count);
421         bo->destroy(bo);
422 }
423
424 void ttm_bo_put(struct ttm_buffer_object *bo)
425 {
426         kref_put(&bo->kref, ttm_bo_release);
427 }
428 EXPORT_SYMBOL(ttm_bo_put);
429
430 int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
431 {
432         return cancel_delayed_work_sync(&bdev->wq);
433 }
434 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
435
436 void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
437 {
438         if (resched)
439                 schedule_delayed_work(&bdev->wq,
440                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
441 }
442 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
443
444 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
445                                      struct ttm_resource **mem,
446                                      struct ttm_operation_ctx *ctx,
447                                      struct ttm_place *hop)
448 {
449         struct ttm_placement hop_placement;
450         struct ttm_resource *hop_mem;
451         int ret;
452
453         hop_placement.num_placement = hop_placement.num_busy_placement = 1;
454         hop_placement.placement = hop_placement.busy_placement = hop;
455
456         /* find space in the bounce domain */
457         ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
458         if (ret)
459                 return ret;
460         /* move to the bounce domain */
461         ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
462         if (ret) {
463                 ttm_resource_free(bo, &hop_mem);
464                 return ret;
465         }
466         return 0;
467 }
468
469 static int ttm_bo_evict(struct ttm_buffer_object *bo,
470                         struct ttm_operation_ctx *ctx)
471 {
472         struct ttm_device *bdev = bo->bdev;
473         struct ttm_resource *evict_mem;
474         struct ttm_placement placement;
475         struct ttm_place hop;
476         int ret = 0;
477
478         memset(&hop, 0, sizeof(hop));
479
480         dma_resv_assert_held(bo->base.resv);
481
482         placement.num_placement = 0;
483         placement.num_busy_placement = 0;
484         bdev->funcs->evict_flags(bo, &placement);
485
486         if (!placement.num_placement && !placement.num_busy_placement) {
487                 ret = ttm_bo_wait(bo, true, false);
488                 if (ret)
489                         return ret;
490
491                 /*
492                  * Since we've already synced, this frees backing store
493                  * immediately.
494                  */
495                 return ttm_bo_pipeline_gutting(bo);
496         }
497
498         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
499         if (ret) {
500                 if (ret != -ERESTARTSYS) {
501                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
502                                bo);
503                         ttm_bo_mem_space_debug(bo, &placement);
504                 }
505                 goto out;
506         }
507
508 bounce:
509         ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
510         if (ret == -EMULTIHOP) {
511                 ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
512                 if (ret) {
513                         pr_err("Buffer eviction failed\n");
514                         ttm_resource_free(bo, &evict_mem);
515                         goto out;
516                 }
517                 /* try and move to final place now. */
518                 goto bounce;
519         }
520 out:
521         return ret;
522 }
523
524 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
525                               const struct ttm_place *place)
526 {
527         dma_resv_assert_held(bo->base.resv);
528         if (bo->resource->mem_type == TTM_PL_SYSTEM)
529                 return true;
530
531         /* Don't evict this BO if it's outside of the
532          * requested placement range
533          */
534         if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
535             (place->lpfn && place->lpfn <= bo->resource->start))
536                 return false;
537
538         return true;
539 }
540 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
541
542 /*
543  * Check whether the target bo is allowed to be evicted or swapped out:
544  *
545  * a. if it shares the reservation object with ctx->resv, assume that the
546  * reservation object is already locked, so don't lock it again; return true
547  * directly when either the operation allows reserved eviction or the
548  * target bo is already on the delayed free list;
549  *
550  * b. otherwise, trylock it.
551  */
552 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
553                                            struct ttm_operation_ctx *ctx,
554                                            const struct ttm_place *place,
555                                            bool *locked, bool *busy)
556 {
557         bool ret = false;
558
559         if (bo->base.resv == ctx->resv) {
560                 dma_resv_assert_held(bo->base.resv);
561                 if (ctx->allow_res_evict)
562                         ret = true;
563                 *locked = false;
564                 if (busy)
565                         *busy = false;
566         } else {
567                 ret = dma_resv_trylock(bo->base.resv);
568                 *locked = ret;
569                 if (busy)
570                         *busy = !ret;
571         }
572
573         if (ret && place && (bo->resource->mem_type != place->mem_type ||
574                 !bo->bdev->funcs->eviction_valuable(bo, place))) {
575                 ret = false;
576                 if (*locked) {
577                         dma_resv_unlock(bo->base.resv);
578                         *locked = false;
579                 }
580         }
581
582         return ret;
583 }
584
585 /**
586  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
587  *
588  * @busy_bo: BO which couldn't be locked with trylock
589  * @ctx: operation context
590  * @ticket: acquire ticket
591  *
592  * Try to lock a busy buffer object to avoid failing eviction.
593  */
594 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
595                                    struct ttm_operation_ctx *ctx,
596                                    struct ww_acquire_ctx *ticket)
597 {
598         int r;
599
600         if (!busy_bo || !ticket)
601                 return -EBUSY;
602
603         if (ctx->interruptible)
604                 r = dma_resv_lock_interruptible(busy_bo->base.resv,
605                                                           ticket);
606         else
607                 r = dma_resv_lock(busy_bo->base.resv, ticket);
608
609         /*
610          * TODO: It would be better to keep the BO locked until allocation is at
611          * least tried one more time, but that would mean a much larger rework
612          * of TTM.
613          */
614         if (!r)
615                 dma_resv_unlock(busy_bo->base.resv);
616
617         return r == -EDEADLK ? -EBUSY : r;
618 }
619
620 int ttm_mem_evict_first(struct ttm_device *bdev,
621                         struct ttm_resource_manager *man,
622                         const struct ttm_place *place,
623                         struct ttm_operation_ctx *ctx,
624                         struct ww_acquire_ctx *ticket)
625 {
626         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
627         struct ttm_resource_cursor cursor;
628         struct ttm_resource *res;
629         bool locked = false;
630         int ret;
631
632         spin_lock(&bdev->lru_lock);
633         ttm_resource_manager_for_each_res(man, &cursor, res) {
634                 bool busy;
635
636                 if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
637                                                     &locked, &busy)) {
638                         if (busy && !busy_bo && ticket !=
639                             dma_resv_locking_ctx(res->bo->base.resv))
640                                 busy_bo = res->bo;
641                         continue;
642                 }
643
644                 if (ttm_bo_get_unless_zero(res->bo)) {
645                         bo = res->bo;
646                         break;
647                 }
648                 if (locked)
649                         dma_resv_unlock(res->bo->base.resv);
650         }
651
652         if (!bo) {
653                 if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
654                         busy_bo = NULL;
655                 spin_unlock(&bdev->lru_lock);
656                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
657                 if (busy_bo)
658                         ttm_bo_put(busy_bo);
659                 return ret;
660         }
661
662         if (bo->deleted) {
663                 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
664                                           ctx->no_wait_gpu, locked);
665                 ttm_bo_put(bo);
666                 return ret;
667         }
668
669         spin_unlock(&bdev->lru_lock);
670
671         ret = ttm_bo_evict(bo, ctx);
672         if (locked)
673                 ttm_bo_unreserve(bo);
674         else
675                 ttm_bo_move_to_lru_tail_unlocked(bo);
676
677         ttm_bo_put(bo);
678         return ret;
679 }
680
681 /**
682  * ttm_bo_pin - Pin the buffer object.
683  * @bo: The buffer object to pin
684  *
685  * Make sure the buffer is not evicted any more during memory pressure.
686  * @bo must be unpinned again by calling ttm_bo_unpin().
687  */
688 void ttm_bo_pin(struct ttm_buffer_object *bo)
689 {
690         dma_resv_assert_held(bo->base.resv);
691         WARN_ON_ONCE(!kref_read(&bo->kref));
692         if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
693                 ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
694 }
695 EXPORT_SYMBOL(ttm_bo_pin);
696
697 /**
698  * ttm_bo_unpin - Unpin the buffer object.
699  * @bo: The buffer object to unpin
700  *
701  * Allows the buffer object to be evicted again during memory pressure.
702  */
703 void ttm_bo_unpin(struct ttm_buffer_object *bo)
704 {
705         dma_resv_assert_held(bo->base.resv);
706         WARN_ON_ONCE(!kref_read(&bo->kref));
707         if (WARN_ON_ONCE(!bo->pin_count))
708                 return;
709
710         if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
711                 ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
712 }
713 EXPORT_SYMBOL(ttm_bo_unpin);
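/*
 * Example usage (an illustrative sketch): pinning a BO around a hardware
 * access, taking the reservation for each call as the lockdep assertions
 * above require:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ttm_bo_pin(bo);
 *	ttm_bo_unreserve(bo);
 *
 *	The buffer is now safe from eviction; once the hardware no longer
 *	needs it:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ttm_bo_unpin(bo);
 *	ttm_bo_unreserve(bo);
 */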
714
715 /*
716  * Add the last move fence to the BO as a kernel dependency and reserve a new
717  * fence slot.
718  */
719 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
720                                  struct ttm_resource_manager *man,
721                                  struct ttm_resource *mem,
722                                  bool no_wait_gpu)
723 {
724         struct dma_fence *fence;
725         int ret;
726
727         spin_lock(&man->move_lock);
728         fence = dma_fence_get(man->move);
729         spin_unlock(&man->move_lock);
730
731         if (!fence)
732                 return 0;
733
734         if (no_wait_gpu) {
735                 ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
736                 dma_fence_put(fence);
737                 return ret;
738         }
739
740         dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
741
742         ret = dma_resv_reserve_fences(bo->base.resv, 1);
743         dma_fence_put(fence);
744         return ret;
745 }
746
747 /*
748  * Repeatedly evict memory from the LRU for @place until we create enough
749  * space, or we've evicted everything and there isn't enough space.
750  */
751 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
752                                   const struct ttm_place *place,
753                                   struct ttm_resource **mem,
754                                   struct ttm_operation_ctx *ctx)
755 {
756         struct ttm_device *bdev = bo->bdev;
757         struct ttm_resource_manager *man;
758         struct ww_acquire_ctx *ticket;
759         int ret;
760
761         man = ttm_manager_type(bdev, place->mem_type);
762         ticket = dma_resv_locking_ctx(bo->base.resv);
763         do {
764                 ret = ttm_resource_alloc(bo, place, mem);
765                 if (likely(!ret))
766                         break;
767                 if (unlikely(ret != -ENOSPC))
768                         return ret;
769                 ret = ttm_mem_evict_first(bdev, man, place, ctx,
770                                           ticket);
771                 if (unlikely(ret != 0))
772                         return ret;
773         } while (1);
774
775         return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
776 }
777
778 /*
779  * Creates space for memory region @mem according to its type.
780  *
781  * This function first searches for free space in compatible memory types in
782  * the priority order defined by the driver.  If free space isn't found, then
783  * ttm_bo_mem_force_space is attempted in priority order to evict and find
784  * space.
785  */
786 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
787                         struct ttm_placement *placement,
788                         struct ttm_resource **mem,
789                         struct ttm_operation_ctx *ctx)
790 {
791         struct ttm_device *bdev = bo->bdev;
792         bool type_found = false;
793         int i, ret;
794
795         ret = dma_resv_reserve_fences(bo->base.resv, 1);
796         if (unlikely(ret))
797                 return ret;
798
799         for (i = 0; i < placement->num_placement; ++i) {
800                 const struct ttm_place *place = &placement->placement[i];
801                 struct ttm_resource_manager *man;
802
803                 man = ttm_manager_type(bdev, place->mem_type);
804                 if (!man || !ttm_resource_manager_used(man))
805                         continue;
806
807                 type_found = true;
808                 ret = ttm_resource_alloc(bo, place, mem);
809                 if (ret == -ENOSPC)
810                         continue;
811                 if (unlikely(ret))
812                         goto error;
813
814                 ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
815                 if (unlikely(ret)) {
816                         ttm_resource_free(bo, mem);
817                         if (ret == -EBUSY)
818                                 continue;
819
820                         goto error;
821                 }
822                 return 0;
823         }
824
825         for (i = 0; i < placement->num_busy_placement; ++i) {
826                 const struct ttm_place *place = &placement->busy_placement[i];
827                 struct ttm_resource_manager *man;
828
829                 man = ttm_manager_type(bdev, place->mem_type);
830                 if (!man || !ttm_resource_manager_used(man))
831                         continue;
832
833                 type_found = true;
834                 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
835                 if (likely(!ret))
836                         return 0;
837
838                 if (ret && ret != -EBUSY)
839                         goto error;
840         }
841
842         ret = -ENOMEM;
843         if (!type_found) {
844                 pr_err(TTM_PFX "No compatible memory type found\n");
845                 ret = -EINVAL;
846         }
847
848 error:
849         return ret;
850 }
851 EXPORT_SYMBOL(ttm_bo_mem_space);
852
853 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
854                               struct ttm_placement *placement,
855                               struct ttm_operation_ctx *ctx)
856 {
857         struct ttm_resource *mem;
858         struct ttm_place hop;
859         int ret;
860
861         dma_resv_assert_held(bo->base.resv);
862
863         /*
864          * Determine where to move the buffer.
865          *
866          * If the driver determines the move is going to need
867          * an extra step then it will return -EMULTIHOP
868          * and the buffer will be moved to the temporary
869          * stop and the driver will be called to make
870          * the second hop.
871          */
872         ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
873         if (ret)
874                 return ret;
875 bounce:
876         ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
877         if (ret == -EMULTIHOP) {
878                 ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
879                 if (ret)
880                         goto out;
881                 /* try and move to final place now. */
882                 goto bounce;
883         }
884 out:
885         if (ret)
886                 ttm_resource_free(bo, &mem);
887         return ret;
888 }
889
890 int ttm_bo_validate(struct ttm_buffer_object *bo,
891                     struct ttm_placement *placement,
892                     struct ttm_operation_ctx *ctx)
893 {
894         int ret;
895
896         dma_resv_assert_held(bo->base.resv);
897
898         /*
899          * Remove the backing store if no placement is given.
900          */
901         if (!placement->num_placement && !placement->num_busy_placement)
902                 return ttm_bo_pipeline_gutting(bo);
903
904         /*
905          * Check whether we need to move buffer.
906          */
907         if (!ttm_resource_compat(bo->resource, placement)) {
908                 ret = ttm_bo_move_buffer(bo, placement, ctx);
909                 if (ret)
910                         return ret;
911         }
912         /*
913          * We might need to add a TTM.
914          */
915         if (bo->resource->mem_type == TTM_PL_SYSTEM) {
916                 ret = ttm_tt_create(bo, true);
917                 if (ret)
918                         return ret;
919         }
920         return 0;
921 }
922 EXPORT_SYMBOL(ttm_bo_validate);
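/*
 * Example usage (an illustrative sketch; the VRAM placement is an arbitrary
 * choice, not a requirement): validating an already reserved BO into a
 * desired memory domain before GPU use:
 *
 *	struct ttm_place requested = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &requested,
 *		.num_busy_placement = 1,
 *		.busy_placement = &requested,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	dma_resv_assert_held(bo->base.resv);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	if (ret)
 *		return ret;
 */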
923
924 int ttm_bo_init_reserved(struct ttm_device *bdev,
925                          struct ttm_buffer_object *bo,
926                          size_t size,
927                          enum ttm_bo_type type,
928                          struct ttm_placement *placement,
929                          uint32_t page_alignment,
930                          struct ttm_operation_ctx *ctx,
931                          struct sg_table *sg,
932                          struct dma_resv *resv,
933                          void (*destroy) (struct ttm_buffer_object *))
934 {
935         static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
936         bool locked;
937         int ret;
938
939         bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
940
941         kref_init(&bo->kref);
942         INIT_LIST_HEAD(&bo->ddestroy);
943         bo->bdev = bdev;
944         bo->type = type;
945         bo->page_alignment = page_alignment;
946         bo->pin_count = 0;
947         bo->sg = sg;
948         bo->bulk_move = NULL;
949         if (resv) {
950                 bo->base.resv = resv;
951                 dma_resv_assert_held(bo->base.resv);
952         } else {
953                 bo->base.resv = &bo->base._resv;
954         }
955         atomic_inc(&ttm_glob.bo_count);
956
957         ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
958         if (unlikely(ret)) {
959                 ttm_bo_put(bo);
960                 return ret;
961         }
962
963         /*
964          * For ttm_bo_type_device buffers, allocate
965          * address space from the device.
966          */
967         if (bo->type == ttm_bo_type_device ||
968             bo->type == ttm_bo_type_sg)
969                 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
970                                          bo->resource->num_pages);
971
972         /* passed reservation objects should already be locked,
973          * since otherwise lockdep will be angered in radeon.
974          */
975         if (!resv) {
976                 locked = dma_resv_trylock(bo->base.resv);
977                 WARN_ON(!locked);
978         }
979
980         if (likely(!ret))
981                 ret = ttm_bo_validate(bo, placement, ctx);
982
983         if (unlikely(ret)) {
984                 if (!resv)
985                         ttm_bo_unreserve(bo);
986
987                 ttm_bo_put(bo);
988                 return ret;
989         }
990
991         return ret;
992 }
993 EXPORT_SYMBOL(ttm_bo_init_reserved);
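/*
 * Example usage (an illustrative sketch; "my_bo", "ddev", "sys_placement" and
 * my_bo_destroy() are assumptions, not part of TTM): a driver embedding
 * struct ttm_buffer_object in its own object, initializing the GEM base first
 * so that base.size and the reservation object are valid:
 *
 *	struct my_bo *ubo = kzalloc(sizeof(*ubo), GFP_KERNEL);
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	if (!ubo)
 *		return -ENOMEM;
 *	drm_gem_private_object_init(ddev, &ubo->tbo.base, size);
 *	ret = ttm_bo_init_reserved(bdev, &ubo->tbo, size, ttm_bo_type_device,
 *				   &sys_placement, 0, &ctx, NULL, NULL,
 *				   my_bo_destroy);
 *	if (ret)
 *		return ret;
 *
 *	On success the BO comes back reserved (and my_bo_destroy() frees it on
 *	failure); drop the reservation once the driver side setup is done:
 *
 *	ttm_bo_unreserve(&ubo->tbo);
 */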
994
995 int ttm_bo_init(struct ttm_device *bdev,
996                 struct ttm_buffer_object *bo,
997                 size_t size,
998                 enum ttm_bo_type type,
999                 struct ttm_placement *placement,
1000                 uint32_t page_alignment,
1001                 bool interruptible,
1002                 struct sg_table *sg,
1003                 struct dma_resv *resv,
1004                 void (*destroy) (struct ttm_buffer_object *))
1005 {
1006         struct ttm_operation_ctx ctx = { interruptible, false };
1007         int ret;
1008
1009         ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1010                                    page_alignment, &ctx, sg, resv, destroy);
1011         if (ret)
1012                 return ret;
1013
1014         if (!resv)
1015                 ttm_bo_unreserve(bo);
1016
1017         return 0;
1018 }
1019 EXPORT_SYMBOL(ttm_bo_init);
1020
1021 /*
1022  * buffer object vm functions.
1023  */
1024
1025 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1026 {
1027         struct ttm_device *bdev = bo->bdev;
1028
1029         drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1030         ttm_mem_io_free(bdev, bo->resource);
1031 }
1032 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1033
1034 int ttm_bo_wait(struct ttm_buffer_object *bo,
1035                 bool interruptible, bool no_wait)
1036 {
1037         long timeout = 15 * HZ;
1038
1039         if (no_wait) {
1040                 if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
1041                         return 0;
1042                 else
1043                         return -EBUSY;
1044         }
1045
1046         timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
1047                                         interruptible, timeout);
1048         if (timeout < 0)
1049                 return timeout;
1050
1051         if (timeout == 0)
1052                 return -EBUSY;
1053
1054         return 0;
1055 }
1056 EXPORT_SYMBOL(ttm_bo_wait);
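/*
 * Example usage (an illustrative sketch): waiting for all bookkeeping fences
 * before allowing CPU access to the buffer:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_wait(bo, true, false);
 *	ttm_bo_unreserve(bo);
 *	if (ret)
 *		return ret;
 */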
1057
1058 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
1059                    gfp_t gfp_flags)
1060 {
1061         struct ttm_place place;
1062         bool locked;
1063         int ret;
1064
1065         /*
1066          * While the bo may already reside in SYSTEM placement, set
1067  * SYSTEM as the new placement to also cover the move further below.
1068          * The driver may use the fact that we're moving from SYSTEM
1069          * as an indication that we're about to swap out.
1070          */
1071         memset(&place, 0, sizeof(place));
1072         place.mem_type = bo->resource->mem_type;
1073         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
1074                 return -EBUSY;
1075
1076         if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
1077             bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
1078             bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
1079             !ttm_bo_get_unless_zero(bo)) {
1080                 if (locked)
1081                         dma_resv_unlock(bo->base.resv);
1082                 return -EBUSY;
1083         }
1084
1085         if (bo->deleted) {
1086                 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1087                 ttm_bo_put(bo);
1088                 return ret == -EBUSY ? -ENOSPC : ret;
1089         }
1090
1091         /* TODO: Cleanup the locking */
1092         spin_unlock(&bo->bdev->lru_lock);
1093
1094         /*
1095          * Move to system cached
1096          */
1097         if (bo->resource->mem_type != TTM_PL_SYSTEM) {
1098                 struct ttm_operation_ctx ctx = { false, false };
1099                 struct ttm_resource *evict_mem;
1100                 struct ttm_place hop;
1101
1102                 memset(&hop, 0, sizeof(hop));
1103                 place.mem_type = TTM_PL_SYSTEM;
1104                 ret = ttm_resource_alloc(bo, &place, &evict_mem);
1105                 if (unlikely(ret))
1106                         goto out;
1107
1108                 ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
1109                 if (unlikely(ret != 0)) {
1110                         WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1111                         goto out;
1112                 }
1113         }
1114
1115         /*
1116          * Make sure BO is idle.
1117          */
1118         ret = ttm_bo_wait(bo, false, false);
1119         if (unlikely(ret != 0))
1120                 goto out;
1121
1122         ttm_bo_unmap_virtual(bo);
1123
1124         /*
1125          * Swap out. Buffer will be swapped in again as soon as
1126          * anyone tries to access a ttm page.
1127          */
1128         if (bo->bdev->funcs->swap_notify)
1129                 bo->bdev->funcs->swap_notify(bo);
1130
1131         if (ttm_tt_is_populated(bo->ttm))
1132                 ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
1133 out:
1134
1135         /*
1136          * Unreserve without putting on LRU to avoid swapping out an
1137          * already swapped buffer.
1138          */
1139         if (locked)
1140                 dma_resv_unlock(bo->base.resv);
1141         ttm_bo_put(bo);
1142         return ret == -EBUSY ? -ENOSPC : ret;
1143 }
1144
1145 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1146 {
1147         if (bo->ttm == NULL)
1148                 return;
1149
1150         ttm_tt_unpopulate(bo->bdev, bo->ttm);
1151         ttm_tt_destroy(bo->bdev, bo->ttm);
1152         bo->ttm = NULL;
1153 }