// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2022 Intel Corporation
 * Copyright (C) 2021-2022 Red Hat
 */

#include <drm/drm_managed.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_res_cursor.h"
#include "xe_ttm_vram_mgr.h"

static inline struct drm_buddy_block *
xe_ttm_vram_mgr_first_block(struct list_head *list)
{
	return list_first_entry_or_null(list, struct drm_buddy_block, link);
}
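
/*
 * Check whether the buddy blocks on @head form one physically contiguous
 * range: walk the list in order and require that each block starts exactly
 * where the previous one ends.
 */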
static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm,
						    struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 start, size;

	block = xe_ttm_vram_mgr_first_block(head);
	if (!block)
		return false;

	while (head != block->link.next) {
		start = drm_buddy_block_offset(block);
		size = drm_buddy_block_size(mm, block);

		block = list_entry(block->link.next, struct drm_buddy_block,
				   link);
		if (start + size != drm_buddy_block_offset(block))
			return false;
	}

	return true;
}
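
/*
 * Allocation callback: carve the request out of the manager's drm_buddy
 * pool, honouring range/top-down placement flags and the CPU-visible
 * budget, splitting anything larger than 2GiB to respect the sg-table
 * limit, and recording how much of the result lies in the visible window.
 */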
static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct xe_ttm_vram_mgr_resource *vres;
	struct drm_buddy *mm = &mgr->mm;
	u64 size, remaining_size, min_page_size;
	unsigned long lpfn;
	int err;

	lpfn = place->lpfn;
	if (!lpfn || lpfn > man->size >> PAGE_SHIFT)
		lpfn = man->size >> PAGE_SHIFT;

	if (tbo->base.size >> PAGE_SHIFT > (lpfn - place->fpfn))
		return -E2BIG; /* don't trigger eviction for the impossible */

	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
	if (!vres)
		return -ENOMEM;

	ttm_resource_init(tbo, place, &vres->base);

	/* bail out quickly if there's likely not enough VRAM for this BO */
	if (ttm_resource_manager_usage(man) > man->size) {
		err = -ENOSPC;
		goto error_fini;
	}

	INIT_LIST_HEAD(&vres->blocks);

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->fpfn || lpfn != man->size >> PAGE_SHIFT)
		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	if (WARN_ON(!vres->base.size)) {
		err = -EINVAL;
		goto error_fini;
	}
	size = vres->base.size;

	min_page_size = mgr->default_page_size;
	if (tbo->page_alignment)
		min_page_size = tbo->page_alignment << PAGE_SHIFT;

	if (WARN_ON(min_page_size < mm->chunk_size)) {
		err = -EINVAL;
		goto error_fini;
	}

	if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
		err = -EINVAL;
		goto error_fini;
	}

	if (WARN_ON((size > SZ_2G &&
		     (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
		err = -EINVAL;
		goto error_fini;
	}

	if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
		err = -EINVAL;
		goto error_fini;
	}

	mutex_lock(&mgr->lock);
	if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
		mutex_unlock(&mgr->lock);
		err = -ENOSPC;
		goto error_fini;
	}

	if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_page_size = size;

		lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
	}

	remaining_size = size;
	do {
		/*
		 * Limit maximum size to 2GiB due to SG table limitations.
		 * FIXME: Should maybe be handled as part of sg construction.
		 */
		u64 alloc_size = min_t(u64, remaining_size, SZ_2G);

		err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
					     (u64)lpfn << PAGE_SHIFT,
					     alloc_size,
					     min_page_size,
					     &vres->blocks,
					     vres->flags);
		if (err)
			goto error_free_blocks;

		remaining_size -= alloc_size;
	} while (remaining_size);

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks))
			size = vres->base.size;
	}

	if (lpfn <= mgr->visible_size >> PAGE_SHIFT) {
		vres->used_visible_size = size;
	} else {
		struct drm_buddy_block *block;

		list_for_each_entry(block, &vres->blocks, link) {
			u64 start = drm_buddy_block_offset(block);

			if (start < mgr->visible_size) {
				u64 end = start + drm_buddy_block_size(mm, block);

				vres->used_visible_size +=
					min(end, mgr->visible_size) - start;
			}
		}
	}

	mgr->visible_avail -= vres->used_visible_size;
	mutex_unlock(&mgr->lock);

	if (!(vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) &&
	    xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	/*
	 * For some kernel objects we still rely on the start when io mapping
	 * the object.
	 */
	if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) {
		struct drm_buddy_block *block = list_first_entry(&vres->blocks,
								 typeof(*block),
								 link);

		vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT;
	} else {
		vres->base.start = XE_BO_INVALID_OFFSET;
	}

	*res = &vres->base;
	return 0;

error_free_blocks:
	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);
error_fini:
	ttm_resource_fini(man, &vres->base);
	kfree(vres);

	return err;
}
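
/*
 * Free callback: hand the buddy blocks back to the pool and return the
 * CPU-visible budget that the resource was charged.
 */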
static void xe_ttm_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(res);
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;

	mutex_lock(&mgr->lock);
	drm_buddy_free_list(mm, &vres->blocks);
	mgr->visible_avail += vres->used_visible_size;
	mutex_unlock(&mgr->lock);

	ttm_resource_fini(man, res);

	kfree(vres);
}
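
/* Dump page size, CPU-visible budget and buddy allocator state to @printer. */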
static void xe_ttm_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;

	mutex_lock(&mgr->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   mgr->default_page_size >> 10);
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)mgr->visible_avail >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)mgr->visible_size >> 20);

	drm_buddy_print(mm, printer);
	mutex_unlock(&mgr->lock);
	drm_printf(printer, "man size:%llu\n", man->size);
}
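
/*
 * Tell TTM whether @res overlaps the page range described by @place,
 * treating a zero fpfn/lpfn pair as "anywhere".
 */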
static bool xe_ttm_vram_mgr_intersects(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(res);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;

	if (!place->fpfn && !place->lpfn)
		return true;

	if (!place->fpfn && place->lpfn == mgr->visible_size >> PAGE_SHIFT)
		return vres->used_visible_size > 0;

	list_for_each_entry(block, &vres->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		if (place->fpfn < lpfn && place->lpfn > fpfn)
			return true;
	}

	return false;
}
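
/*
 * Tell TTM whether @res already lies entirely within the page range
 * described by @place, so the buffer does not need to be moved.
 */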
static bool xe_ttm_vram_mgr_compatible(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(res);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;

	if (!place->fpfn && !place->lpfn)
		return true;

	if (!place->fpfn && place->lpfn == mgr->visible_size >> PAGE_SHIFT)
		return vres->used_visible_size == size;

	list_for_each_entry(block, &vres->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		if (fpfn < place->fpfn || lpfn > place->lpfn)
			return false;
	}

	return true;
}

static const struct ttm_resource_manager_func xe_ttm_vram_mgr_func = {
	.alloc	= xe_ttm_vram_mgr_new,
	.free	= xe_ttm_vram_mgr_del,
	.intersects = xe_ttm_vram_mgr_intersects,
	.compatible = xe_ttm_vram_mgr_compatible,
	.debug	= xe_ttm_vram_mgr_debug
};
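
/*
 * Managed cleanup action, registered with drmm: evict everything still
 * resident and tear the manager and its buddy pool down when the DRM
 * device goes away.
 */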
static void ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_ttm_vram_mgr *mgr = arg;
	struct ttm_resource_manager *man = &mgr->manager;

	ttm_resource_manager_set_used(man, false);

	if (ttm_resource_manager_evict_all(&xe->ttm, man))
		return;

	WARN_ON_ONCE(mgr->visible_avail != mgr->visible_size);

	drm_buddy_fini(&mgr->mm);

	ttm_resource_manager_cleanup(&mgr->manager);

	ttm_set_driver_manager(&xe->ttm, mgr->mem_type, NULL);

	mutex_destroy(&mgr->lock);
}
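
/*
 * Common setup for a VRAM manager: initialize the TTM resource manager
 * and its backing drm_buddy pool, register both with TTM and tie the
 * teardown to the DRM device lifetime.
 */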
int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
			   u32 mem_type, u64 size, u64 io_size,
			   u64 default_page_size)
{
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	man->func = &xe_ttm_vram_mgr_func;
	mgr->mem_type = mem_type;
	mutex_init(&mgr->lock);
	mgr->default_page_size = default_page_size;
	mgr->visible_size = io_size;
	mgr->visible_avail = io_size;

	ttm_resource_manager_init(man, &xe->ttm, size);
	err = drm_buddy_init(&mgr->mm, man->size, default_page_size);
	if (err)
		return err;

	ttm_set_driver_manager(&xe->ttm, mem_type, &mgr->manager);
	ttm_resource_manager_set_used(&mgr->manager, true);

	return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
}
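
/* Per-tile entry point: manage the tile's usable VRAM as XE_PL_VRAM0 + tile->id. */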
int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mem_region *vram = &tile->mem.vram;

	mgr->vram = vram;
	return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
				      vram->usable_size, vram->io_size,
				      PAGE_SIZE);
}
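
/*
 * Build a DMA-mapped sg_table covering @length bytes of @res starting at
 * @offset, one scatterlist entry per physically contiguous chunk, so that
 * another device can access the VRAM. Only resources that sit entirely in
 * the CPU-visible window can be exported.
 */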
int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct xe_tile *tile = &xe->tiles[res->mem_type - XE_PL_VRAM0];
	struct xe_ttm_vram_mgr_resource *vres = to_xe_ttm_vram_mgr_resource(res);
	struct xe_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	if (vres->used_visible_size < res->size)
		return -EOPNOTSUPP;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_BUDDY blocks to export */
	xe_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		xe_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Initialize scatterlist nodes of sg_table */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk the DRM_BUDDY blocks to populate the scatterlist nodes.
	 * @note: Use the iterator API to get the first DRM_BUDDY block and
	 * the number of bytes from it, then access the following DRM_BUDDY
	 * block(s) if more of the buffer needs to be exported.
	 */
	xe_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		xe_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
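
/*
 * Undo xe_ttm_vram_mgr_alloc_sgt(): unmap every scatterlist entry and free
 * the table. A sketch of a hypothetical caller pairing the two (everything
 * except the two exported functions is illustrative):
 *
 *	struct sg_table *sgt;
 *	int err;
 *
 *	err = xe_ttm_vram_mgr_alloc_sgt(xe, res, 0, res->size, dev,
 *					DMA_BIDIRECTIONAL, &sgt);
 *	if (err)
 *		return err;
 *	(issue DMA using sgt->sgl)
 *	xe_ttm_vram_mgr_free_sgt(dev, DMA_BIDIRECTIONAL, sgt);
 */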
void xe_ttm_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
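
/* Size of the CPU-visible (io_size) portion of this manager's VRAM. */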
u64 xe_ttm_vram_get_cpu_visible_size(struct ttm_resource_manager *man)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);

	return mgr->visible_size;
}
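
/* Snapshot total VRAM usage and the CPU-visible share of it, in bytes. */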
void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
			  u64 *used, u64 *used_visible)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);

	mutex_lock(&mgr->lock);
	*used = mgr->mm.size - mgr->mm.avail;
	*used_visible = mgr->visible_size - mgr->visible_avail;
	mutex_unlock(&mgr->lock);
}