/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than was physically installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a bonus.
 */
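
/*
 * The helpers below hand out carve-outs from the stolen range using a drm_mm
 * allocator protected by i915->mm.stolen_lock. As an illustrative sketch (not
 * code from this file), a caller such as the FBC code reserves space for its
 * compressed buffer roughly like so:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, size, 4096);
 *	if (err)
 *		return err;
 *	...
 *	i915_gem_stolen_remove_node(i915, &node);
 */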
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
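
/*
 * A zero DSM start address is normally a sign that stolen was never set up,
 * but on platforms with BAR2-exposed stolen (MTL-style) the driver
 * deliberately uses dsm_base = 0, so that case is allowed through here.
 */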
static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_BAR2_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}

static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, on IGFX devices the stolen memory is exposed via
	 * BAR2 and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_BAR2_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}
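
/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED register into a base address and size for the reserved
 * (WOPCM) portion at the top of stolen. They leave *base and *size untouched
 * when the reservation is disabled or looks inconsistent.
 */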
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_BAR2_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm_reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range at the top of DSM that is reserved, not to
 * be used by the driver, so it must be excluded from the region passed to the
 * allocator later. In the spec this is also referred to as WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as it has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm_reserved.start - 1;
	mem->io_size = min(mem->io_size, resource_size(&mem->region));

	i915->stolen_usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		(u64)i915->stolen_usable_size >> 10);

	if (i915->stolen_usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}
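
/*
 * In CONFIG_DRM_I915_DEBUG_GEM builds, dbg_poison() scribbles a poison byte
 * over a stolen range by mapping it one page at a time through the GGTT
 * error-capture scratch PTE, which makes stale reuse of stolen backing easier
 * to spot in error captures. It is a no-op in regular builds.
 */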
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
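
/*
 * A stolen object has no struct pages and no shmem backing; it is simply a
 * drm_mm_node carved out of the stolen range, wrapped in a GEM object whose
 * "pages" are the fake single-entry scatterlist built by
 * i915_pages_create_for_stolen() above.
 */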
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture there is no
	 * possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}
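
/*
 * i915_gem_object_create_stolen - allocate a GEM object backed by stolen
 * memory from the stolen-system region.
 *
 * A minimal usage sketch (illustrative only, error handling trimmed):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */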
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (mem->io_size &&
	    !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
		goto err_cleanup;

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);
	drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (mem->io_size)
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* check GGMS, should be fixed 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* return valid GMS value, -EIO if invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}
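
/*
 * Worked example for the decoding above: a GMS field of 0xf2 yields
 * (0xf2 - 0xf0 + 1) * 4 = 12, i.e. a 12 MB data stolen memory size, which
 * i915_gem_stolen_lmem_setup() below converts to bytes with SZ_1M.
 */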
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_BAR2_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_BAR2_SMEM_STOLEN(i915)) {
		/*
		 * On MTL the DSM size comes from the GGC register. MTL also
		 * uses an offset from DSMBASE in its PTEs, so i915 uses
		 * dsm_base = 0 to set up the stolen region.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = 0;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = lmem_size - dsm_base;
	}

	io_size = dsm_size;
	if (pci_resource_len(pdev, GEN12_LMEM_BAR) < dsm_size) {
		io_start = 0;
		io_size = 0;
	} else if (HAS_BAR2_SMEM_STOLEN(i915)) {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}
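
/*
 * The stolen-system variant below relies on intel_graphics_stolen_res, the
 * stolen range discovered earlier in boot by the x86 graphics stolen memory
 * quirks (outside this file), rather than on any BAR, so it only needs to
 * register the region with the ops above.
 */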
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}