Commit | Line | Data |
---|---|---|
9797fbfb CW |
1 | /* |
2 | * Copyright © 2008-2012 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eric Anholt <eric@anholt.net> | |
25 | * Chris Wilson <chris@chris-wilson.co.uk> | |
26 | * | |
27 | */ | |
28 | ||
760285e7 DH |
29 | #include <drm/drmP.h> |
30 | #include <drm/i915_drm.h> | |
9797fbfb CW |
31 | #include "i915_drv.h" |
32 | ||
0ad98c74 VS |
33 | #define KB(x) ((x) * 1024) |
34 | #define MB(x) (KB(x) * 1024) | |
35 | ||
9797fbfb CW |
36 | /* |
37 | * The BIOS typically reserves some of the system's memory for the exclusive | |
38 | * use of the integrated graphics. This memory is no longer available for | |
39 | * use by the OS and so the user finds that his system has less memory | |
40 | * available than he put in. We refer to this memory as stolen. | |
41 | * | |
42 | * The BIOS will allocate its framebuffer from the stolen memory. Our | |
43 | * goal is try to reuse that object for our own fbcon which must always | |
44 | * be available for panics. Anything else we can reuse the stolen memory | |
45 | * for is a boon. | |
46 | */ | |
47 | ||
a9da512b PZ |
48 | int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, |
49 | struct drm_mm_node *node, u64 size, | |
50 | unsigned alignment, u64 start, u64 end) | |
d713fd49 | 51 | { |
92e97d2f PZ |
52 | int ret; |
53 | ||
d713fd49 PZ |
54 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
55 | return -ENODEV; | |
56 | ||
92e97d2f | 57 | mutex_lock(&dev_priv->mm.stolen_lock); |
4e64e553 CW |
58 | ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, |
59 | size, alignment, 0, | |
60 | start, end, DRM_MM_INSERT_BEST); | |
92e97d2f PZ |
61 | mutex_unlock(&dev_priv->mm.stolen_lock); |
62 | ||
63 | return ret; | |
d713fd49 PZ |
64 | } |
65 | ||
a9da512b PZ |
66 | int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, |
67 | struct drm_mm_node *node, u64 size, | |
68 | unsigned alignment) | |
69 | { | |
70 | return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, | |
3c6b29b2 | 71 | alignment, 0, U64_MAX); |
a9da512b PZ |
72 | } |
73 | ||
d713fd49 PZ |
74 | void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, |
75 | struct drm_mm_node *node) | |
76 | { | |
92e97d2f | 77 | mutex_lock(&dev_priv->mm.stolen_lock); |
d713fd49 | 78 | drm_mm_remove_node(node); |
92e97d2f | 79 | mutex_unlock(&dev_priv->mm.stolen_lock); |
d713fd49 PZ |
80 | } |
81 | ||
/*
 * Discover the physical (DMA) base address of the BIOS-reserved "stolen"
 * memory for this device, reserve it against the rest of the kernel, and
 * return it. Returns 0 when no usable stolen base could be determined
 * (unknown platform, overflow, or an unresolvable resource conflict).
 *
 * May shrink ggtt->stolen_size as a side effect if the GTT itself turns
 * out to live inside the stolen range (pre-gen5 quirk below).
 */
static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	dma_addr_t base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 *
	 */
	base = 0;
	if (INTEL_GEN(dev_priv) >= 3) {
		u32 bsm;

		pci_read_config_dword(pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev_priv)) {
		/* 865: stolen starts right above TSEG, which sits at TOUD. */
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		/* TOUD is in 64KB units; add TSEG to land on stolen base. */
		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev_priv)) {
		/* 85x: TSEG, when enabled, is always 1MiB on this chipset. */
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		/* DRB3 gives Top Of Memory in 32MiB rows (devfn 0,1 here). */
		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I845G(dev_priv)) {
		/* 845: TSEG size is selectable (512K or 1M). */
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev_priv)) {
		/* 830: TSEG size bit selects 1M vs 512K when enabled. */
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	/* Unknown platform, or base+size would wrap the address space. */
	if (base == 0 || add_overflows(base, ggtt->stolen_size))
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		/* Two candidate sub-ranges: below the GTT and above it. */
		struct {
			dma_addr_t start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		/* Each GTT entry is 4 bytes on these platforms. */
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		/* Only log if the GTT actually carved a piece out. */
		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			dma_addr_t end = base + ggtt->stolen_size - 1;

			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
				      &base, &end);
		}
	}


	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
					    ggtt->stolen_size - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			dma_addr_t end = base + ggtt->stolen_size;

			DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
				  &base, &end);
			base = 0;
		}
	}

	return base;
}
277 | ||
9797fbfb CW |
278 | void i915_gem_cleanup_stolen(struct drm_device *dev) |
279 | { | |
fac5e23e | 280 | struct drm_i915_private *dev_priv = to_i915(dev); |
4d7bb011 | 281 | |
446f8d81 DV |
282 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
283 | return; | |
284 | ||
4d7bb011 | 285 | drm_mm_takedown(&dev_priv->mm.stolen); |
9797fbfb CW |
286 | } |
287 | ||
/*
 * Read the reserved-for-hardware portion of stolen memory on G4x
 * (GM45 vs Eaglelake use different config registers for the same info).
 * Writes the reserved range start into *base and its length into *size;
 * *size is 0 when nothing is reserved.
 */
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	/* Sanity: the low address field should never be below the high one. */
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}
310 | ||
3774eb50 | 311 | static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, |
c8847387 | 312 | dma_addr_t *base, u32 *size) |
3774eb50 PZ |
313 | { |
314 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | |
315 | ||
316 | *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; | |
317 | ||
318 | switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { | |
319 | case GEN6_STOLEN_RESERVED_1M: | |
320 | *size = 1024 * 1024; | |
321 | break; | |
322 | case GEN6_STOLEN_RESERVED_512K: | |
323 | *size = 512 * 1024; | |
324 | break; | |
325 | case GEN6_STOLEN_RESERVED_256K: | |
326 | *size = 256 * 1024; | |
327 | break; | |
328 | case GEN6_STOLEN_RESERVED_128K: | |
329 | *size = 128 * 1024; | |
330 | break; | |
331 | default: | |
332 | *size = 1024 * 1024; | |
333 | MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); | |
334 | } | |
335 | } | |
336 | ||
337 | static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, | |
c8847387 | 338 | dma_addr_t *base, u32 *size) |
3774eb50 PZ |
339 | { |
340 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | |
341 | ||
342 | *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; | |
343 | ||
344 | switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { | |
345 | case GEN7_STOLEN_RESERVED_1M: | |
346 | *size = 1024 * 1024; | |
347 | break; | |
348 | case GEN7_STOLEN_RESERVED_256K: | |
349 | *size = 256 * 1024; | |
350 | break; | |
351 | default: | |
352 | *size = 1024 * 1024; | |
353 | MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); | |
354 | } | |
355 | } | |
356 | ||
9244f858 | 357 | static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, |
c8847387 | 358 | dma_addr_t *base, u32 *size) |
3774eb50 PZ |
359 | { |
360 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | |
361 | ||
362 | *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; | |
363 | ||
364 | switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { | |
365 | case GEN8_STOLEN_RESERVED_1M: | |
366 | *size = 1024 * 1024; | |
367 | break; | |
368 | case GEN8_STOLEN_RESERVED_2M: | |
369 | *size = 2 * 1024 * 1024; | |
370 | break; | |
371 | case GEN8_STOLEN_RESERVED_4M: | |
372 | *size = 4 * 1024 * 1024; | |
373 | break; | |
374 | case GEN8_STOLEN_RESERVED_8M: | |
375 | *size = 8 * 1024 * 1024; | |
376 | break; | |
377 | default: | |
378 | *size = 8 * 1024 * 1024; | |
379 | MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); | |
380 | } | |
381 | } | |
382 | ||
/*
 * Broadwell variant: the register only carries the reserved base; the
 * reserved region always extends to the top of stolen memory, so derive
 * the size from that. *size is 0 when nothing is reserved.
 */
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	dma_addr_t stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}
403 | ||
/*
 * One-time setup of the stolen-memory allocator at driver load.
 *
 * Locates the stolen range, subtracts the hardware-reserved portion at its
 * top, and initializes dev_priv->mm.stolen to hand out the remainder.
 * Returns 0 in all cases: a zero return with an uninitialized allocator
 * simply means stolen memory is unusable on this system (vGPU, DMAR,
 * unknown base, or inconsistent reserved range).
 */
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t reserved_base, stolen_top;
	u32 reserved_total, reserved_size;
	u32 stolen_usable_start;

	mutex_init(&dev_priv->mm.stolen_lock);

	/* Under a hypervisor the stolen range is not ours to manage. */
	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	/* Pre-gen8, the IOMMU cannot map stolen memory for us. */
	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
	reserved_base = 0;
	reserved_size = 0;

	/* Per-generation decode of the hardware-reserved slice of stolen. */
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev_priv))
			g4x_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	/* Reject a reserved range that does not sit inside stolen itself. */
	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		dma_addr_t reserved_top = reserved_base + reserved_size;
		DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
			      &reserved_base, &reserved_top,
			      &dev_priv->mm.stolen_base, &stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

	ggtt->stolen_usable_size =
		ggtt->stolen_size - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    ggtt->stolen_usable_size);

	return 0;
}
0104fdbb CW |
507 | |
508 | static struct sg_table * | |
509 | i915_pages_create_for_stolen(struct drm_device *dev, | |
510 | u32 offset, u32 size) | |
511 | { | |
72e96d64 | 512 | struct drm_i915_private *dev_priv = to_i915(dev); |
0104fdbb CW |
513 | struct sg_table *st; |
514 | struct scatterlist *sg; | |
515 | ||
e8f9ae9b | 516 | GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size)); |
0104fdbb CW |
517 | |
518 | /* We hide that we have no struct page backing our stolen object | |
519 | * by wrapping the contiguous physical allocation with a fake | |
520 | * dma mapping in a single scatterlist. | |
521 | */ | |
522 | ||
523 | st = kmalloc(sizeof(*st), GFP_KERNEL); | |
524 | if (st == NULL) | |
43e157fa | 525 | return ERR_PTR(-ENOMEM); |
0104fdbb CW |
526 | |
527 | if (sg_alloc_table(st, 1, GFP_KERNEL)) { | |
528 | kfree(st); | |
43e157fa | 529 | return ERR_PTR(-ENOMEM); |
0104fdbb CW |
530 | } |
531 | ||
532 | sg = st->sgl; | |
ec14ba47 | 533 | sg->offset = 0; |
ed23abdd | 534 | sg->length = size; |
0104fdbb CW |
535 | |
536 | sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; | |
537 | sg_dma_len(sg) = size; | |
538 | ||
539 | return st; | |
540 | } | |
541 | ||
b91b09ee | 542 | static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) |
0104fdbb | 543 | { |
b91b09ee MA |
544 | struct sg_table *pages = |
545 | i915_pages_create_for_stolen(obj->base.dev, | |
546 | obj->stolen->start, | |
547 | obj->stolen->size); | |
548 | if (IS_ERR(pages)) | |
549 | return PTR_ERR(pages); | |
550 | ||
a5c08166 | 551 | __i915_gem_object_set_pages(obj, pages, obj->stolen->size); |
b91b09ee MA |
552 | |
553 | return 0; | |
0104fdbb CW |
554 | } |
555 | ||
03ac84f1 CW |
556 | static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, |
557 | struct sg_table *pages) | |
0104fdbb | 558 | { |
6288c79e | 559 | /* Should only be called from i915_gem_object_release_stolen() */ |
03ac84f1 CW |
560 | sg_free_table(pages); |
561 | kfree(pages); | |
0104fdbb CW |
562 | } |
563 | ||
ef0cf27c CW |
564 | static void |
565 | i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) | |
566 | { | |
fac5e23e | 567 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
6288c79e CW |
568 | struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); |
569 | ||
570 | GEM_BUG_ON(!stolen); | |
d713fd49 | 571 | |
a4f5ea64 CW |
572 | __i915_gem_object_unpin_pages(obj); |
573 | ||
6288c79e CW |
574 | i915_gem_stolen_remove_node(dev_priv, stolen); |
575 | kfree(stolen); | |
ef0cf27c | 576 | } |
6288c79e | 577 | |
0104fdbb CW |
578 | static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { |
579 | .get_pages = i915_gem_object_get_pages_stolen, | |
580 | .put_pages = i915_gem_object_put_pages_stolen, | |
ef0cf27c | 581 | .release = i915_gem_object_release_stolen, |
0104fdbb CW |
582 | }; |
583 | ||
/*
 * Wrap an already-reserved stolen node in a GEM object. On success the
 * object holds a pages pin (released in i915_gem_object_release_stolen)
 * and takes ownership of @stolen. Returns NULL on failure, in which case
 * the caller still owns @stolen and must clean it up.
 */
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	/* Stolen is not CPU-snooped on non-LLC parts. */
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	/* NOTE(review): bare free (not put) on this path — the object was
	 * never exposed, so no release hook should run; confirm intent. */
	i915_gem_object_free(obj);
	return NULL;
}
612 | ||
613 | struct drm_i915_gem_object * | |
187685cb | 614 | i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size) |
0104fdbb | 615 | { |
0104fdbb CW |
616 | struct drm_i915_gem_object *obj; |
617 | struct drm_mm_node *stolen; | |
06e78edf | 618 | int ret; |
0104fdbb | 619 | |
446f8d81 | 620 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
0104fdbb CW |
621 | return NULL; |
622 | ||
0104fdbb CW |
623 | if (size == 0) |
624 | return NULL; | |
625 | ||
06e78edf DH |
626 | stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); |
627 | if (!stolen) | |
0104fdbb CW |
628 | return NULL; |
629 | ||
d713fd49 | 630 | ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); |
06e78edf DH |
631 | if (ret) { |
632 | kfree(stolen); | |
633 | return NULL; | |
634 | } | |
635 | ||
187685cb | 636 | obj = _i915_gem_object_create_stolen(dev_priv, stolen); |
0104fdbb CW |
637 | if (obj) |
638 | return obj; | |
639 | ||
d713fd49 | 640 | i915_gem_stolen_remove_node(dev_priv, stolen); |
06e78edf | 641 | kfree(stolen); |
0104fdbb CW |
642 | return NULL; |
643 | } | |
644 | ||
/*
 * Create a GEM object over a specific, BIOS/firmware-preallocated range of
 * stolen memory (e.g. the pre-existing framebuffer), optionally binding it
 * at a fixed GGTT offset.
 *
 * @stolen_offset: byte offset of the range within stolen memory.
 * @gtt_offset: fixed GGTT address to bind at, or I915_GTT_OFFSET_NONE for
 *              a physical-only object with no GGTT binding.
 * @size: length of the range in bytes (must be page-aligned).
 *
 * Caller must hold struct_mutex. Returns the object or NULL on failure.
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	/* Reserve the exact caller-specified range (no search). */
	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Mark the vma as permanently bound in the global GTT. */
	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);

	/* Account the new binding on the bound-object list. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}