Commit | Line | Data |
---|---|---|
9797fbfb | 1 | /* |
10be98a7 | 2 | * SPDX-License-Identifier: MIT |
9797fbfb | 3 | * |
10be98a7 | 4 | * Copyright © 2008-2012 Intel Corporation |
9797fbfb CW |
5 | */ |
6 | ||
10be98a7 CW |
7 | #include <linux/errno.h> |
8 | #include <linux/mutex.h> | |
9 | ||
10 | #include <drm/drm_mm.h> | |
760285e7 | 11 | #include <drm/i915_drm.h> |
10be98a7 | 12 | |
72405c3d | 13 | #include "gem/i915_gem_region.h" |
9797fbfb | 14 | #include "i915_drv.h" |
6401fafb | 15 | #include "i915_gem_stolen.h" |
9e859eb9 | 16 | #include "i915_vgpu.h" |
9797fbfb CW |
17 | |
18 | /* | |
19 | * The BIOS typically reserves some of the system's memory for the exclusive | |
20 | * use of the integrated graphics. This memory is no longer available for | |
21 | * use by the OS and so the user finds that his system has less memory | |
22 | * available than he put in. We refer to this memory as stolen. | |
23 | * | |
24 | * The BIOS will allocate its framebuffer from the stolen memory. Our | |
25 | * goal is try to reuse that object for our own fbcon which must always | |
26 | * be available for panics. Anything else we can reuse the stolen memory | |
27 | * for is a boon. | |
28 | */ | |
29 | ||
/*
 * Reserve a chunk of the stolen address space within [start, end).
 *
 * Returns 0 on success, -ENODEV if stolen memory was never set up, or the
 * error from drm_mm_insert_node_in_range() (e.g. -ENOSPC) on failure.
 * On success @node describes the reserved range; release it with
 * i915_gem_stolen_remove_node().
 */
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	/* stolen_lock serialises all allocations from the stolen drm_mm */
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}
51 | ||
bdce2bea | 52 | int i915_gem_stolen_insert_node(struct drm_i915_private *i915, |
a9da512b PZ |
53 | struct drm_mm_node *node, u64 size, |
54 | unsigned alignment) | |
55 | { | |
bdce2bea | 56 | return i915_gem_stolen_insert_node_in_range(i915, node, size, |
3c6b29b2 | 57 | alignment, 0, U64_MAX); |
a9da512b PZ |
58 | } |
59 | ||
/*
 * Return a previously reserved stolen range (from one of the insert_node
 * helpers) back to the stolen allocator. Takes stolen_lock to pair with
 * the insertion path.
 */
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
67 | ||
/*
 * Sanitise the BIOS-reported stolen region before use:
 *  - reject an obviously bogus region (zero start or non-positive size);
 *  - on old platforms where the GTT may live inside stolen, trim the GTT
 *    out of @dsm so we never allocate over it;
 *  - claim the physical range via the resource tree so conflicts with
 *    other users (e.g. PCI BARs) are detected.
 *
 * Returns 0 on success; -EINVAL for a bogus region, -EBUSY on an
 * unresolvable resource conflict. On success @dsm may have been shrunk.
 */
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		/*
		 * stolen[0] keeps everything below the GTT, stolen[1]
		 * everything above it; both start as the full region.
		 */
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			/* gen4 splits the GTT base across lo/hi fields */
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		/* Each GTT entry is 4 bytes on these platforms */
		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		/* If the chunks differ, the GTT really was inside stolen */
		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}
159 | ||
bdce2bea | 160 | static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) |
9797fbfb | 161 | { |
bdce2bea | 162 | if (!drm_mm_initialized(&i915->mm.stolen)) |
446f8d81 DV |
163 | return; |
164 | ||
bdce2bea | 165 | drm_mm_takedown(&i915->mm.stolen); |
9797fbfb CW |
166 | } |
167 | ||
/*
 * Decode the BIOS-reserved portion of stolen memory on G4x-class hardware
 * (CTG/ELK register variants). On success *base/*size describe the reserved
 * range; both are left untouched when no reservation is enabled, so callers
 * must pre-initialise them.
 */
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, IS_GEN(i915, 5),
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	/* ADDR1 should never place the reservation below ADDR2's base */
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* Reservation runs from its base to the top of stolen */
	*size = stolen_top - *base;
}
202 | ||
/*
 * Decode GEN6_STOLEN_RESERVED for gen6: base comes straight from the
 * register, size from a 2-bit encoded field. Outputs untouched if the
 * reservation is not enabled.
 */
static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		/* Unknown encoding: assume the largest size and complain */
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}
235 | ||
/*
 * Decode the reserved stolen range on Valleyview. VLV leaves the address
 * field zero, so the base is derived as (top of stolen - size) instead of
 * being read from the register.
 */
static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
264 | ||
/*
 * Decode GEN6_STOLEN_RESERVED for gen7 (non-VLV): gen7 address mask and
 * a 1M/256K size encoding. Outputs untouched if not enabled.
 */
static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		/* Unknown encoding: assume 1M and complain */
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}
291 | ||
/*
 * Decode GEN6_STOLEN_RESERVED on Cherryview (and other gen8+ LP parts):
 * gen6 address mask with the gen8 1M..8M size encoding. Outputs untouched
 * if not enabled.
 */
static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		/* Unknown encoding: assume the largest size and complain */
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
324 | ||
/*
 * Decode the reserved range on Broadwell-style hardware: the register only
 * carries the base; the reservation always extends to the top of stolen.
 * Outputs untouched when disabled or when the address field reads zero
 * (treated as a bogus/unreadable register).
 */
static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}
344 | ||
/*
 * Decode the reserved stolen range on Icelake and later: the register is
 * 64-bit here, with a wider address mask but the gen8 size encoding.
 *
 * NOTE(review): unlike the older decoders, no ENABLE bit is checked before
 * writing *base/*size — presumably the reservation is always valid on
 * gen11+; confirm against Bspec before relying on that.
 */
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		/* Unknown encoding: assume the largest size and complain */
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
374 | ||
/*
 * Discover, sanitise and publish the stolen memory range, then initialise
 * the drm_mm allocator over the usable (non-reserved) portion.
 *
 * Stolen is silently disabled (returning 0) when running under vGPU, when
 * DMAR is active on pre-gen8, when firmware reported no stolen region, or
 * when the region fails validation — the driver works without stolen, just
 * without its benefits. A non-zero return would abort driver load; this
 * function currently never does that.
 */
static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	/* Defaults describe an empty reservation at the very top of stolen */
	reserved_base = stolen_top;
	reserved_size = 0;

	/* Per-platform decode of the BIOS-reserved portion of stolen */
	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		/* Fall back to "no reservation" rather than failing */
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}
0104fdbb CW |
497 | |
/*
 * Build a single-entry sg_table whose dma address points directly into the
 * stolen region at @offset. There are no struct pages backing stolen, so
 * this fakes a dma mapping over the contiguous physical range.
 *
 * Returns the new table or ERR_PTR(-ENOMEM). Caller owns the table and
 * frees it via sg_free_table()/kfree() (see put_pages_stolen).
 */
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	/* dma address is simply the physical start of stolen plus offset */
	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
531 | ||
/*
 * get_pages hook for stolen objects: wrap the object's stolen range in a
 * fake sg_table and publish it. Page size is the full contiguous chunk.
 */
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}
545 | ||
/*
 * put_pages hook for stolen objects: only the fake sg_table is freed; the
 * stolen range itself is returned in i915_gem_object_release_stolen().
 */
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}
553 | ||
/*
 * Final release for a stolen object: detach it from its memory region and
 * return its range to the stolen allocator. obj->stolen is cleared first
 * (fetch_and_zero) so the node cannot be released twice.
 */
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}
6288c79e | 567 | |
/* GEM object vfuncs for objects backed by stolen memory. */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
574 | ||
/*
 * Wrap an already-reserved stolen range @stolen in a GEM object.
 *
 * On success the object owns @stolen (released via its release hook) and
 * has its pages pinned. On failure the object is freed and an ERR_PTR is
 * returned; the caller still owns @stolen and must release it.
 */
static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	/* Stolen has no backing store to fault in; pin the fake sg now */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}
609 | ||
/*
 * Memory-region create_object hook for stolen: carve a fresh 4K-aligned
 * range out of the stolen allocator and wrap it in a GEM object.
 *
 * Returns the object or ERR_PTR (-ENODEV if stolen is unavailable,
 * -EINVAL for a zero size, -ENOMEM/-ENOSPC on allocation failure).
 */
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}
648 | ||
/*
 * Public entry point: allocate a stolen-backed GEM object through the
 * stolen memory region. Stolen is always physically contiguous, hence
 * I915_BO_ALLOC_CONTIGUOUS.
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}
656 | ||
/* Memory-region init hook: name the region and set up the allocator. */
static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}
667 | ||
/* Memory-region release hook: tear down the stolen allocator. */
static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}
672 | ||
/* intel_memory_region vfuncs for the stolen region. */
static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};
678 | ||
/*
 * Register stolen memory as an intel_memory_region, sized from the
 * firmware-reported intel_graphics_stolen_res. The region's init hook
 * (init_stolen) performs the actual validation and allocator setup.
 */
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}
687 | ||
/*
 * Wrap a specific, firmware-preallocated range of stolen memory (e.g. the
 * BIOS framebuffer) in a GEM object. Unlike the normal create path, the
 * exact [stolen_offset, stolen_offset + size) range is reserved via
 * drm_mm_reserve_node() rather than allocated.
 *
 * Both offset and size must be page-aligned and non-zero. Returns the
 * object or ERR_PTR on failure (-ENODEV, -EINVAL, -ENOMEM, or the
 * drm_mm_reserve_node() error, e.g. -ENOSPC if the range is taken).
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	/* Reserve the exact range the firmware handed us */
	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_stolen;

	/* Preallocated (scanout) buffers are treated as uncached */
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}