/* drivers/gpu/drm/i915/gt/intel_region_lmem.c — from commit "drm/i915/gt: Use to_gt() helper for GGTT accesses" */
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "i915_drv.h"
7#include "intel_memory_region.h"
d1487389
TH
8#include "intel_region_lmem.h"
9#include "intel_region_ttm.h"
b908be54
MA
10#include "gem/i915_gem_lmem.h"
11#include "gem/i915_gem_region.h"
213d5092 12#include "gem/i915_gem_ttm.h"
3ffe82d7 13#include "gt/intel_gt.h"
b908be54 14
16292243
MA
15static int init_fake_lmem_bar(struct intel_memory_region *mem)
16{
17 struct drm_i915_private *i915 = mem->i915;
848915c3 18 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
16292243
MA
19 unsigned long n;
20 int ret;
21
22 /* We want to 1:1 map the mappable aperture to our reserved region */
23
24 mem->fake_mappable.start = 0;
25 mem->fake_mappable.size = resource_size(&mem->region);
26 mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
27
28 ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
29 if (ret)
30 return ret;
31
8ff5446a 32 mem->remap_addr = dma_map_resource(i915->drm.dev,
16292243
MA
33 mem->region.start,
34 mem->fake_mappable.size,
c4f61203 35 DMA_BIDIRECTIONAL,
16292243 36 DMA_ATTR_FORCE_CONTIGUOUS);
8ff5446a 37 if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
16292243
MA
38 drm_mm_remove_node(&mem->fake_mappable);
39 return -EINVAL;
40 }
41
42 for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
43 ggtt->vm.insert_page(&ggtt->vm,
44 mem->remap_addr + (n << PAGE_SHIFT),
45 n << PAGE_SHIFT,
46 I915_CACHE_NONE, 0);
47 }
48
49 mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
50 mem->fake_mappable.size);
51
52 return 0;
53}
54
55static void release_fake_lmem_bar(struct intel_memory_region *mem)
56{
cddb85dc
MA
57 if (!drm_mm_node_allocated(&mem->fake_mappable))
58 return;
59
60 drm_mm_remove_node(&mem->fake_mappable);
16292243 61
8ff5446a 62 dma_unmap_resource(mem->i915->drm.dev,
16292243
MA
63 mem->remap_addr,
64 mem->fake_mappable.size,
c4f61203 65 DMA_BIDIRECTIONAL,
16292243
MA
66 DMA_ATTR_FORCE_CONTIGUOUS);
67}
68
8b1f7f92 69static int
cb6d2467
AJ
70region_lmem_release(struct intel_memory_region *mem)
71{
8b1f7f92
TH
72 int ret;
73
74 ret = intel_region_ttm_fini(mem);
cb6d2467 75 io_mapping_fini(&mem->iomap);
d1487389 76 release_fake_lmem_bar(mem);
8b1f7f92
TH
77
78 return ret;
cb6d2467
AJ
79}
80
81static int
82region_lmem_init(struct intel_memory_region *mem)
83{
84 int ret;
85
8a25c4be 86 if (mem->i915->params.fake_lmem_start) {
16292243
MA
87 ret = init_fake_lmem_bar(mem);
88 GEM_BUG_ON(ret);
89 }
90
cb6d2467
AJ
91 if (!io_mapping_init_wc(&mem->iomap,
92 mem->io_start,
d1487389
TH
93 resource_size(&mem->region))) {
94 ret = -EIO;
95 goto out_no_io;
96 }
cb6d2467 97
d1487389 98 ret = intel_region_ttm_init(mem);
cb6d2467 99 if (ret)
d1487389
TH
100 goto out_no_buddy;
101
102 return 0;
103
104out_no_buddy:
105 io_mapping_fini(&mem->iomap);
106out_no_io:
107 release_fake_lmem_bar(mem);
cb6d2467
AJ
108
109 return ret;
110}
111
/* Region hooks for device-local memory; objects are TTM-backed. */
static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};
16292243
MA
117
118struct intel_memory_region *
2dfcc7f4 119intel_gt_setup_fake_lmem(struct intel_gt *gt)
16292243 120{
2dfcc7f4 121 struct drm_i915_private *i915 = gt->i915;
8ff5446a 122 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
16292243
MA
123 struct intel_memory_region *mem;
124 resource_size_t mappable_end;
125 resource_size_t io_start;
126 resource_size_t start;
127
2dfcc7f4
MA
128 if (!HAS_LMEM(i915))
129 return ERR_PTR(-ENODEV);
130
131 if (!i915->params.fake_lmem_start)
132 return ERR_PTR(-ENODEV);
133
848915c3 134 GEM_BUG_ON(i915_ggtt_has_aperture(to_gt(i915)->ggtt));
16292243
MA
135
136 /* Your mappable aperture belongs to me now! */
137 mappable_end = pci_resource_len(pdev, 2);
70b0f077 138 io_start = pci_resource_start(pdev, 2);
8a25c4be 139 start = i915->params.fake_lmem_start;
16292243
MA
140
141 mem = intel_memory_region_create(i915,
142 start,
143 mappable_end,
144 PAGE_SIZE,
145 io_start,
d1487389
TH
146 INTEL_MEMORY_LOCAL,
147 0,
16292243
MA
148 &intel_region_lmem_ops);
149 if (!IS_ERR(mem)) {
d5cf720f
WK
150 drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
151 &mem->region);
152 drm_info(&i915->drm,
153 "Intel graphics fake LMEM IO start: %llx\n",
154 (u64)mem->io_start);
155 drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
16292243
MA
156 (u64)resource_size(&mem->region));
157 }
158
159 return mem;
160}
a50ca39f 161
7c5cc941
ID
162static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
163 u64 *start, u32 *size)
164{
c1f110ee 165 if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
7c5cc941
ID
166 return false;
167
168 *start = 0;
169 *size = SZ_1M;
170
171 drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
172 *start, *start + *size);
173
174 return true;
175}
176
177static int reserve_lowmem_region(struct intel_uncore *uncore,
178 struct intel_memory_region *mem)
179{
180 u64 reserve_start;
181 u32 reserve_size;
182 int ret;
183
184 if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
185 return 0;
186
187 ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
188 if (ret)
189 drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
190
191 return ret;
192}
193
a50ca39f
MA
194static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
195{
196 struct drm_i915_private *i915 = gt->i915;
7f2aa5b3 197 struct intel_uncore *uncore = gt->uncore;
97c463b2 198 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
a50ca39f 199 struct intel_memory_region *mem;
ca921624 200 resource_size_t min_page_size;
a50ca39f 201 resource_size_t io_start;
7f2aa5b3 202 resource_size_t lmem_size;
7c5cc941 203 int err;
a50ca39f
MA
204
205 if (!IS_DGFX(i915))
206 return ERR_PTR(-ENODEV);
207
7f2aa5b3
CT
208 /* Stolen starts from GSMBASE on DG1 */
209 lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);
210
a50ca39f 211 io_start = pci_resource_start(pdev, 2);
7f2aa5b3
CT
212 if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
213 return ERR_PTR(-ENODEV);
a50ca39f 214
ca921624
MA
215 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
216 I915_GTT_PAGE_SIZE_4K;
a50ca39f
MA
217 mem = intel_memory_region_create(i915,
218 0,
7f2aa5b3 219 lmem_size,
ca921624 220 min_page_size,
a50ca39f 221 io_start,
d1487389
TH
222 INTEL_MEMORY_LOCAL,
223 0,
a50ca39f
MA
224 &intel_region_lmem_ops);
225 if (IS_ERR(mem))
226 return mem;
227
7c5cc941
ID
228 err = reserve_lowmem_region(uncore, mem);
229 if (err)
230 goto err_region_put;
231
a50ca39f
MA
232 drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
233 drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
234 &mem->io_start);
7f2aa5b3
CT
235 drm_info(&i915->drm, "Local memory available: %pa\n",
236 &lmem_size);
a50ca39f
MA
237
238 return mem;
7c5cc941
ID
239
240err_region_put:
8b1f7f92 241 intel_memory_region_destroy(mem);
7c5cc941 242 return ERR_PTR(err);
a50ca39f
MA
243}
244
/* Public entry point: probe and create the LMEM region for @gt. */
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}