Commit | Line | Data |
---|---|---|
94b4f3ba CW |
1 | /* |
2 | * Copyright © 2016 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | */ | |
24 | ||
a8c9b849 MW |
25 | #include <drm/drm_print.h> |
26 | ||
b978520d | 27 | #include "intel_device_info.h" |
94b4f3ba CW |
28 | #include "i915_drv.h" |
29 | ||
2e0d26f8 JN |
30 | #define PLATFORM_NAME(x) [INTEL_##x] = #x |
31 | static const char * const platform_names[] = { | |
32 | PLATFORM_NAME(I830), | |
33 | PLATFORM_NAME(I845G), | |
34 | PLATFORM_NAME(I85X), | |
35 | PLATFORM_NAME(I865G), | |
36 | PLATFORM_NAME(I915G), | |
37 | PLATFORM_NAME(I915GM), | |
38 | PLATFORM_NAME(I945G), | |
39 | PLATFORM_NAME(I945GM), | |
40 | PLATFORM_NAME(G33), | |
41 | PLATFORM_NAME(PINEVIEW), | |
c0f86832 JN |
42 | PLATFORM_NAME(I965G), |
43 | PLATFORM_NAME(I965GM), | |
f69c11ae JN |
44 | PLATFORM_NAME(G45), |
45 | PLATFORM_NAME(GM45), | |
2e0d26f8 JN |
46 | PLATFORM_NAME(IRONLAKE), |
47 | PLATFORM_NAME(SANDYBRIDGE), | |
48 | PLATFORM_NAME(IVYBRIDGE), | |
49 | PLATFORM_NAME(VALLEYVIEW), | |
50 | PLATFORM_NAME(HASWELL), | |
51 | PLATFORM_NAME(BROADWELL), | |
52 | PLATFORM_NAME(CHERRYVIEW), | |
53 | PLATFORM_NAME(SKYLAKE), | |
54 | PLATFORM_NAME(BROXTON), | |
55 | PLATFORM_NAME(KABYLAKE), | |
56 | PLATFORM_NAME(GEMINILAKE), | |
71851fa8 | 57 | PLATFORM_NAME(COFFEELAKE), |
413f3c19 | 58 | PLATFORM_NAME(CANNONLAKE), |
41231001 | 59 | PLATFORM_NAME(ICELAKE), |
897f2961 | 60 | PLATFORM_NAME(ELKHARTLAKE), |
abd3a0fe | 61 | PLATFORM_NAME(TIGERLAKE), |
2e0d26f8 JN |
62 | }; |
63 | #undef PLATFORM_NAME | |
64 | ||
65 | const char *intel_platform_name(enum intel_platform platform) | |
66 | { | |
9160095c JN |
67 | BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS); |
68 | ||
2e0d26f8 JN |
69 | if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) || |
70 | platform_names[platform] == NULL)) | |
71 | return "<unknown>"; | |
72 | ||
73 | return platform_names[platform]; | |
74 | } | |
75 | ||
a8c9b849 MW |
76 | void intel_device_info_dump_flags(const struct intel_device_info *info, |
77 | struct drm_printer *p) | |
78 | { | |
79 | #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); | |
80 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); | |
81 | #undef PRINT_FLAG | |
d53db442 JRS |
82 | |
83 | #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name)); | |
84 | DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); | |
85 | #undef PRINT_FLAG | |
a8c9b849 MW |
86 | } |
87 | ||
5fbbe8d4 MW |
88 | static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) |
89 | { | |
8cc76693 LL |
90 | int s; |
91 | ||
0ef904bb TU |
92 | drm_printf(p, "slice total: %u, mask=%04x\n", |
93 | hweight8(sseu->slice_mask), sseu->slice_mask); | |
0040fd19 | 94 | drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); |
0ef904bb | 95 | for (s = 0; s < sseu->max_slices; s++) { |
100f5f7f | 96 | drm_printf(p, "slice%d: %u subslices, mask=%08x\n", |
b5ab1abe | 97 | s, intel_sseu_subslices_per_slice(sseu, s), |
100f5f7f | 98 | intel_sseu_get_subslices(sseu, s)); |
8cc76693 | 99 | } |
5fbbe8d4 MW |
100 | drm_printf(p, "EU total: %u\n", sseu->eu_total); |
101 | drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); | |
102 | drm_printf(p, "has slice power gating: %s\n", | |
103 | yesno(sseu->has_slice_pg)); | |
104 | drm_printf(p, "has subslice power gating: %s\n", | |
105 | yesno(sseu->has_subslice_pg)); | |
106 | drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); | |
107 | } | |
108 | ||
0258404f | 109 | void intel_device_info_dump_runtime(const struct intel_runtime_info *info, |
5fbbe8d4 MW |
110 | struct drm_printer *p) |
111 | { | |
112 | sseu_dump(&info->sseu, p); | |
113 | ||
114 | drm_printf(p, "CS timestamp frequency: %u kHz\n", | |
115 | info->cs_timestamp_frequency_khz); | |
116 | } | |
117 | ||
0040fd19 SS |
118 | static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, |
119 | int subslice) | |
120 | { | |
49610c37 | 121 | int slice_stride = sseu->max_subslices * sseu->eu_stride; |
0040fd19 | 122 | |
49610c37 | 123 | return slice * slice_stride + subslice * sseu->eu_stride; |
0040fd19 SS |
124 | } |
125 | ||
126 | static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, | |
127 | int subslice) | |
128 | { | |
129 | int i, offset = sseu_eu_idx(sseu, slice, subslice); | |
130 | u16 eu_mask = 0; | |
131 | ||
49610c37 | 132 | for (i = 0; i < sseu->eu_stride; i++) { |
0040fd19 SS |
133 | eu_mask |= ((u16)sseu->eu_mask[offset + i]) << |
134 | (i * BITS_PER_BYTE); | |
135 | } | |
136 | ||
137 | return eu_mask; | |
138 | } | |
139 | ||
140 | static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, | |
141 | u16 eu_mask) | |
142 | { | |
143 | int i, offset = sseu_eu_idx(sseu, slice, subslice); | |
144 | ||
49610c37 | 145 | for (i = 0; i < sseu->eu_stride; i++) { |
0040fd19 SS |
146 | sseu->eu_mask[offset + i] = |
147 | (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; | |
148 | } | |
149 | } | |
150 | ||
79e9cd5f LL |
151 | void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, |
152 | struct drm_printer *p) | |
153 | { | |
154 | int s, ss; | |
155 | ||
156 | if (sseu->max_slices == 0) { | |
157 | drm_printf(p, "Unavailable\n"); | |
158 | return; | |
159 | } | |
160 | ||
161 | for (s = 0; s < sseu->max_slices; s++) { | |
100f5f7f | 162 | drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", |
b5ab1abe | 163 | s, intel_sseu_subslices_per_slice(sseu, s), |
100f5f7f | 164 | intel_sseu_get_subslices(sseu, s)); |
79e9cd5f LL |
165 | |
166 | for (ss = 0; ss < sseu->max_subslices; ss++) { | |
167 | u16 enabled_eus = sseu_get_eus(sseu, s, ss); | |
168 | ||
169 | drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n", | |
170 | ss, hweight16(enabled_eus), enabled_eus); | |
171 | } | |
172 | } | |
173 | } | |
174 | ||
8cc76693 LL |
175 | static u16 compute_eu_total(const struct sseu_dev_info *sseu) |
176 | { | |
177 | u16 i, total = 0; | |
178 | ||
179 | for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++) | |
180 | total += hweight8(sseu->eu_mask[i]); | |
181 | ||
182 | return total; | |
183 | } | |
184 | ||
8b5eb5e2 KG |
185 | static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) |
186 | { | |
0258404f | 187 | struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
8b5eb5e2 KG |
188 | u8 s_en; |
189 | u32 ss_en, ss_en_mask; | |
190 | u8 eu_en; | |
191 | int s; | |
192 | ||
8b355db9 SS |
193 | if (IS_ELKHARTLAKE(dev_priv)) |
194 | intel_sseu_set_info(sseu, 1, 4, 8); | |
195 | else | |
196 | intel_sseu_set_info(sseu, 1, 8, 8); | |
8b5eb5e2 KG |
197 | |
198 | s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; | |
199 | ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); | |
200 | ss_en_mask = BIT(sseu->max_subslices) - 1; | |
201 | eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); | |
202 | ||
203 | for (s = 0; s < sseu->max_slices; s++) { | |
204 | if (s_en & BIT(s)) { | |
a10f361d | 205 | int ss_idx = sseu->max_subslices * s; |
8b5eb5e2 KG |
206 | int ss; |
207 | ||
208 | sseu->slice_mask |= BIT(s); | |
9e8a135e SS |
209 | |
210 | intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) & | |
211 | ss_en_mask); | |
212 | ||
e1210bbf SS |
213 | for (ss = 0; ss < sseu->max_subslices; ss++) |
214 | if (intel_sseu_has_subslice(sseu, s, ss)) | |
8b5eb5e2 | 215 | sseu_set_eus(sseu, s, ss, eu_en); |
8b5eb5e2 KG |
216 | } |
217 | } | |
218 | sseu->eu_per_subslice = hweight8(eu_en); | |
219 | sseu->eu_total = compute_eu_total(sseu); | |
220 | ||
221 | /* ICL has no power gating restrictions. */ | |
222 | sseu->has_slice_pg = 1; | |
223 | sseu->has_subslice_pg = 1; | |
224 | sseu->has_eu_pg = 1; | |
225 | } | |
226 | ||
4e9767bc BW |
227 | static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) |
228 | { | |
0258404f | 229 | struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
4e9767bc | 230 | const u32 fuse2 = I915_READ(GEN8_FUSE2); |
8cc76693 LL |
231 | int s, ss; |
232 | const int eu_mask = 0xff; | |
233 | u32 subslice_mask, eu_en; | |
4e9767bc | 234 | |
8b355db9 SS |
235 | intel_sseu_set_info(sseu, 6, 4, 8); |
236 | ||
4e9767bc BW |
237 | sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> |
238 | GEN10_F2_S_ENA_SHIFT; | |
a10f361d | 239 | |
8cc76693 LL |
240 | /* Slice0 */ |
241 | eu_en = ~I915_READ(GEN8_EU_DISABLE0); | |
242 | for (ss = 0; ss < sseu->max_subslices; ss++) | |
243 | sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask); | |
244 | /* Slice1 */ | |
245 | sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask); | |
246 | eu_en = ~I915_READ(GEN8_EU_DISABLE1); | |
247 | sseu_set_eus(sseu, 1, 1, eu_en & eu_mask); | |
248 | /* Slice2 */ | |
249 | sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask); | |
250 | sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask); | |
251 | /* Slice3 */ | |
252 | sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask); | |
253 | eu_en = ~I915_READ(GEN8_EU_DISABLE2); | |
254 | sseu_set_eus(sseu, 3, 1, eu_en & eu_mask); | |
255 | /* Slice4 */ | |
256 | sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask); | |
257 | sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask); | |
258 | /* Slice5 */ | |
259 | sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask); | |
260 | eu_en = ~I915_READ(GEN10_EU_DISABLE3); | |
261 | sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); | |
262 | ||
33ee9e86 SS |
263 | subslice_mask = (1 << 4) - 1; |
264 | subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> | |
265 | GEN10_F2_SS_DIS_SHIFT); | |
266 | ||
8cc76693 | 267 | for (s = 0; s < sseu->max_slices; s++) { |
33ee9e86 SS |
268 | u32 subslice_mask_with_eus = subslice_mask; |
269 | ||
8cc76693 LL |
270 | for (ss = 0; ss < sseu->max_subslices; ss++) { |
271 | if (sseu_get_eus(sseu, s, ss) == 0) | |
33ee9e86 | 272 | subslice_mask_with_eus &= ~BIT(ss); |
8cc76693 | 273 | } |
33ee9e86 SS |
274 | |
275 | /* | |
276 | * Slice0 can have up to 3 subslices, but there are only 2 in | |
277 | * slice1/2. | |
278 | */ | |
9e8a135e SS |
279 | intel_sseu_set_subslices(sseu, s, s == 0 ? |
280 | subslice_mask_with_eus : | |
281 | subslice_mask_with_eus & 0x3); | |
8cc76693 LL |
282 | } |
283 | ||
284 | sseu->eu_total = compute_eu_total(sseu); | |
4e9767bc BW |
285 | |
286 | /* | |
287 | * CNL is expected to always have a uniform distribution | |
288 | * of EU across subslices with the exception that any one | |
289 | * EU in any one subslice may be fused off for die | |
290 | * recovery. | |
291 | */ | |
0040fd19 | 292 | sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? |
4e9767bc | 293 | DIV_ROUND_UP(sseu->eu_total, |
0040fd19 SS |
294 | intel_sseu_subslice_total(sseu)) : |
295 | 0; | |
4e9767bc BW |
296 | |
297 | /* No restrictions on Power Gating */ | |
298 | sseu->has_slice_pg = 1; | |
299 | sseu->has_subslice_pg = 1; | |
300 | sseu->has_eu_pg = 1; | |
301 | } | |
302 | ||
94b4f3ba CW |
303 | static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) |
304 | { | |
0258404f | 305 | struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
8cc76693 | 306 | u32 fuse; |
33ee9e86 | 307 | u8 subslice_mask = 0; |
94b4f3ba CW |
308 | |
309 | fuse = I915_READ(CHV_FUSE_GT); | |
310 | ||
f08a0c92 | 311 | sseu->slice_mask = BIT(0); |
8b355db9 | 312 | intel_sseu_set_info(sseu, 1, 2, 8); |
94b4f3ba CW |
313 | |
314 | if (!(fuse & CHV_FGT_DISABLE_SS0)) { | |
8cc76693 LL |
315 | u8 disabled_mask = |
316 | ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >> | |
317 | CHV_FGT_EU_DIS_SS0_R0_SHIFT) | | |
318 | (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> | |
319 | CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); | |
320 | ||
33ee9e86 | 321 | subslice_mask |= BIT(0); |
8cc76693 | 322 | sseu_set_eus(sseu, 0, 0, ~disabled_mask); |
94b4f3ba CW |
323 | } |
324 | ||
325 | if (!(fuse & CHV_FGT_DISABLE_SS1)) { | |
8cc76693 LL |
326 | u8 disabled_mask = |
327 | ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >> | |
328 | CHV_FGT_EU_DIS_SS1_R0_SHIFT) | | |
329 | (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> | |
330 | CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); | |
331 | ||
33ee9e86 | 332 | subslice_mask |= BIT(1); |
8cc76693 | 333 | sseu_set_eus(sseu, 0, 1, ~disabled_mask); |
94b4f3ba CW |
334 | } |
335 | ||
9e8a135e | 336 | intel_sseu_set_subslices(sseu, 0, subslice_mask); |
33ee9e86 | 337 | |
8cc76693 LL |
338 | sseu->eu_total = compute_eu_total(sseu); |
339 | ||
94b4f3ba CW |
340 | /* |
341 | * CHV expected to always have a uniform distribution of EU | |
342 | * across subslices. | |
343 | */ | |
0040fd19 SS |
344 | sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? |
345 | sseu->eu_total / | |
346 | intel_sseu_subslice_total(sseu) : | |
94b4f3ba CW |
347 | 0; |
348 | /* | |
349 | * CHV supports subslice power gating on devices with more than | |
350 | * one subslice, and supports EU power gating on devices with | |
351 | * more than one EU pair per subslice. | |
352 | */ | |
43b67998 | 353 | sseu->has_slice_pg = 0; |
0040fd19 | 354 | sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1; |
43b67998 | 355 | sseu->has_eu_pg = (sseu->eu_per_subslice > 2); |
94b4f3ba CW |
356 | } |
357 | ||
358 | static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) | |
359 | { | |
360 | struct intel_device_info *info = mkwrite_device_info(dev_priv); | |
0258404f | 361 | struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
94b4f3ba | 362 | int s, ss; |
8cc76693 LL |
363 | u32 fuse2, eu_disable, subslice_mask; |
364 | const u8 eu_mask = 0xff; | |
94b4f3ba CW |
365 | |
366 | fuse2 = I915_READ(GEN8_FUSE2); | |
f08a0c92 | 367 | sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; |
94b4f3ba | 368 | |
8cc76693 | 369 | /* BXT has a single slice and at most 3 subslices. */ |
8b355db9 SS |
370 | intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, |
371 | IS_GEN9_LP(dev_priv) ? 3 : 4, 8); | |
8cc76693 | 372 | |
94b4f3ba CW |
373 | /* |
374 | * The subslice disable field is global, i.e. it applies | |
375 | * to each of the enabled slices. | |
376 | */ | |
8cc76693 LL |
377 | subslice_mask = (1 << sseu->max_subslices) - 1; |
378 | subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >> | |
379 | GEN9_F2_SS_DIS_SHIFT); | |
94b4f3ba CW |
380 | |
381 | /* | |
382 | * Iterate through enabled slices and subslices to | |
383 | * count the total enabled EU. | |
384 | */ | |
8cc76693 | 385 | for (s = 0; s < sseu->max_slices; s++) { |
f08a0c92 | 386 | if (!(sseu->slice_mask & BIT(s))) |
94b4f3ba CW |
387 | /* skip disabled slice */ |
388 | continue; | |
389 | ||
9e8a135e | 390 | intel_sseu_set_subslices(sseu, s, subslice_mask); |
8cc76693 | 391 | |
94b4f3ba | 392 | eu_disable = I915_READ(GEN9_EU_DISABLE(s)); |
8cc76693 | 393 | for (ss = 0; ss < sseu->max_subslices; ss++) { |
94b4f3ba | 394 | int eu_per_ss; |
8cc76693 | 395 | u8 eu_disabled_mask; |
94b4f3ba | 396 | |
e1210bbf | 397 | if (!intel_sseu_has_subslice(sseu, s, ss)) |
94b4f3ba CW |
398 | /* skip disabled subslice */ |
399 | continue; | |
400 | ||
b3e7f866 | 401 | eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask; |
8cc76693 LL |
402 | |
403 | sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); | |
404 | ||
405 | eu_per_ss = sseu->max_eus_per_subslice - | |
406 | hweight8(eu_disabled_mask); | |
94b4f3ba CW |
407 | |
408 | /* | |
409 | * Record which subslice(s) has(have) 7 EUs. we | |
410 | * can tune the hash used to spread work among | |
411 | * subslices if they are unbalanced. | |
412 | */ | |
413 | if (eu_per_ss == 7) | |
43b67998 | 414 | sseu->subslice_7eu[s] |= BIT(ss); |
94b4f3ba CW |
415 | } |
416 | } | |
417 | ||
8cc76693 LL |
418 | sseu->eu_total = compute_eu_total(sseu); |
419 | ||
94b4f3ba CW |
420 | /* |
421 | * SKL is expected to always have a uniform distribution | |
422 | * of EU across subslices with the exception that any one | |
423 | * EU in any one subslice may be fused off for die | |
424 | * recovery. BXT is expected to be perfectly uniform in EU | |
425 | * distribution. | |
426 | */ | |
0040fd19 | 427 | sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? |
43b67998 | 428 | DIV_ROUND_UP(sseu->eu_total, |
0040fd19 SS |
429 | intel_sseu_subslice_total(sseu)) : |
430 | 0; | |
94b4f3ba | 431 | /* |
c7ae7e9a | 432 | * SKL+ supports slice power gating on devices with more than |
94b4f3ba | 433 | * one slice, and supports EU power gating on devices with |
c7ae7e9a | 434 | * more than one EU pair per subslice. BXT+ supports subslice |
94b4f3ba CW |
435 | * power gating on devices with more than one subslice, and |
436 | * supports EU power gating on devices with more than one EU | |
437 | * pair per subslice. | |
438 | */ | |
43b67998 | 439 | sseu->has_slice_pg = |
c7ae7e9a | 440 | !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1; |
43b67998 | 441 | sseu->has_subslice_pg = |
0040fd19 | 442 | IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1; |
43b67998 | 443 | sseu->has_eu_pg = sseu->eu_per_subslice > 2; |
94b4f3ba | 444 | |
234516af | 445 | if (IS_GEN9_LP(dev_priv)) { |
8cc76693 LL |
446 | #define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss))) |
447 | info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3; | |
234516af | 448 | |
43b67998 | 449 | sseu->min_eu_in_pool = 0; |
94b4f3ba | 450 | if (info->has_pooled_eu) { |
57ec171e | 451 | if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0)) |
43b67998 | 452 | sseu->min_eu_in_pool = 3; |
57ec171e | 453 | else if (IS_SS_DISABLED(1)) |
43b67998 | 454 | sseu->min_eu_in_pool = 6; |
94b4f3ba | 455 | else |
43b67998 | 456 | sseu->min_eu_in_pool = 9; |
94b4f3ba CW |
457 | } |
458 | #undef IS_SS_DISABLED | |
459 | } | |
460 | } | |
461 | ||
462 | static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) | |
463 | { | |
0258404f | 464 | struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
94b4f3ba | 465 | int s, ss; |
8cc76693 | 466 | u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ |
94b4f3ba CW |
467 | |
468 | fuse2 = I915_READ(GEN8_FUSE2); | |
f08a0c92 | 469 | sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; |
8b355db9 | 470 | intel_sseu_set_info(sseu, 3, 3, 8); |
8cc76693 | 471 | |
57ec171e ID |
472 | /* |
473 | * The subslice disable field is global, i.e. it applies | |
474 | * to each of the enabled slices. | |
475 | */ | |
8cc76693 LL |
476 | subslice_mask = GENMASK(sseu->max_subslices - 1, 0); |
477 | subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> | |
478 | GEN8_F2_SS_DIS_SHIFT); | |
94b4f3ba CW |
479 | |
480 | eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; | |
481 | eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | | |
482 | ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << | |
483 | (32 - GEN8_EU_DIS0_S1_SHIFT)); | |
484 | eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | | |
485 | ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << | |
486 | (32 - GEN8_EU_DIS1_S2_SHIFT)); | |
487 | ||
94b4f3ba CW |
488 | /* |
489 | * Iterate through enabled slices and subslices to | |
490 | * count the total enabled EU. | |
491 | */ | |
8cc76693 | 492 | for (s = 0; s < sseu->max_slices; s++) { |
f08a0c92 | 493 | if (!(sseu->slice_mask & BIT(s))) |
94b4f3ba CW |
494 | /* skip disabled slice */ |
495 | continue; | |
496 | ||
9e8a135e | 497 | intel_sseu_set_subslices(sseu, s, subslice_mask); |
8cc76693 LL |
498 | |
499 | for (ss = 0; ss < sseu->max_subslices; ss++) { | |
500 | u8 eu_disabled_mask; | |
94b4f3ba CW |
501 | u32 n_disabled; |
502 | ||
e1210bbf | 503 | if (!intel_sseu_has_subslice(sseu, s, ss)) |
94b4f3ba CW |
504 | /* skip disabled subslice */ |
505 | continue; | |
506 | ||
8cc76693 | 507 | eu_disabled_mask = |
a10f361d | 508 | eu_disable[s] >> (ss * sseu->max_eus_per_subslice); |
8cc76693 LL |
509 | |
510 | sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); | |
511 | ||
512 | n_disabled = hweight8(eu_disabled_mask); | |
94b4f3ba CW |
513 | |
514 | /* | |
515 | * Record which subslices have 7 EUs. | |
516 | */ | |
8cc76693 | 517 | if (sseu->max_eus_per_subslice - n_disabled == 7) |
43b67998 | 518 | sseu->subslice_7eu[s] |= 1 << ss; |
94b4f3ba CW |
519 | } |
520 | } | |
521 | ||
8cc76693 LL |
522 | sseu->eu_total = compute_eu_total(sseu); |
523 | ||
94b4f3ba CW |
524 | /* |
525 | * BDW is expected to always have a uniform distribution of EU across | |
526 | * subslices with the exception that any one EU in any one subslice may | |
527 | * be fused off for die recovery. | |
528 | */ | |
0040fd19 | 529 | sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? |
57ec171e | 530 | DIV_ROUND_UP(sseu->eu_total, |
0040fd19 SS |
531 | intel_sseu_subslice_total(sseu)) : |
532 | 0; | |
94b4f3ba CW |
533 | |
534 | /* | |
535 | * BDW supports slice power gating on devices with more than | |
536 | * one slice. | |
537 | */ | |
f08a0c92 | 538 | sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1; |
43b67998 ID |
539 | sseu->has_subslice_pg = 0; |
540 | sseu->has_eu_pg = 0; | |
94b4f3ba CW |
541 | } |
542 | ||
b8ec759e LL |
543 | static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) |
544 | { | |
0258404f | 545 | struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
b8ec759e | 546 | u32 fuse1; |
33ee9e86 | 547 | u8 subslice_mask = 0; |
8cc76693 | 548 | int s, ss; |
b8ec759e LL |
549 | |
550 | /* | |
551 | * There isn't a register to tell us how many slices/subslices. We | |
552 | * work off the PCI-ids here. | |
553 | */ | |
0258404f | 554 | switch (INTEL_INFO(dev_priv)->gt) { |
b8ec759e | 555 | default: |
0258404f | 556 | MISSING_CASE(INTEL_INFO(dev_priv)->gt); |
b8ec759e LL |
557 | /* fall through */ |
558 | case 1: | |
559 | sseu->slice_mask = BIT(0); | |
33ee9e86 | 560 | subslice_mask = BIT(0); |
b8ec759e LL |
561 | break; |
562 | case 2: | |
563 | sseu->slice_mask = BIT(0); | |
33ee9e86 | 564 | subslice_mask = BIT(0) | BIT(1); |
b8ec759e LL |
565 | break; |
566 | case 3: | |
567 | sseu->slice_mask = BIT(0) | BIT(1); | |
33ee9e86 | 568 | subslice_mask = BIT(0) | BIT(1); |
b8ec759e LL |
569 | break; |
570 | } | |
571 | ||
572 | fuse1 = I915_READ(HSW_PAVP_FUSE1); | |
573 | switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { | |
574 | default: | |
575 | MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >> | |
576 | HSW_F1_EU_DIS_SHIFT); | |
577 | /* fall through */ | |
578 | case HSW_F1_EU_DIS_10EUS: | |
579 | sseu->eu_per_subslice = 10; | |
580 | break; | |
581 | case HSW_F1_EU_DIS_8EUS: | |
582 | sseu->eu_per_subslice = 8; | |
583 | break; | |
584 | case HSW_F1_EU_DIS_6EUS: | |
585 | sseu->eu_per_subslice = 6; | |
586 | break; | |
587 | } | |
8b355db9 SS |
588 | |
589 | intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), | |
33ee9e86 | 590 | hweight8(subslice_mask), |
8b355db9 | 591 | sseu->eu_per_subslice); |
8cc76693 LL |
592 | |
593 | for (s = 0; s < sseu->max_slices; s++) { | |
9e8a135e | 594 | intel_sseu_set_subslices(sseu, s, subslice_mask); |
33ee9e86 | 595 | |
8cc76693 LL |
596 | for (ss = 0; ss < sseu->max_subslices; ss++) { |
597 | sseu_set_eus(sseu, s, ss, | |
598 | (1UL << sseu->eu_per_subslice) - 1); | |
599 | } | |
600 | } | |
b8ec759e | 601 | |
8cc76693 | 602 | sseu->eu_total = compute_eu_total(sseu); |
b8ec759e LL |
603 | |
604 | /* No powergating for you. */ | |
605 | sseu->has_slice_pg = 0; | |
606 | sseu->has_subslice_pg = 0; | |
607 | sseu->has_eu_pg = 0; | |
608 | } | |
609 | ||
f577a03b | 610 | static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv) |
dab91783 LL |
611 | { |
612 | u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE); | |
f577a03b | 613 | u32 base_freq, frac_freq; |
dab91783 LL |
614 | |
615 | base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >> | |
616 | GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1; | |
f577a03b | 617 | base_freq *= 1000; |
dab91783 LL |
618 | |
619 | frac_freq = ((ts_override & | |
620 | GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >> | |
621 | GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT); | |
f577a03b | 622 | frac_freq = 1000 / (frac_freq + 1); |
dab91783 LL |
623 | |
624 | return base_freq + frac_freq; | |
625 | } | |
626 | ||
d775a7b1 PZ |
627 | static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv, |
628 | u32 rpm_config_reg) | |
629 | { | |
630 | u32 f19_2_mhz = 19200; | |
631 | u32 f24_mhz = 24000; | |
632 | u32 crystal_clock = (rpm_config_reg & | |
633 | GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> | |
634 | GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; | |
635 | ||
636 | switch (crystal_clock) { | |
637 | case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: | |
638 | return f19_2_mhz; | |
639 | case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: | |
640 | return f24_mhz; | |
641 | default: | |
642 | MISSING_CASE(crystal_clock); | |
643 | return 0; | |
644 | } | |
645 | } | |
646 | ||
647 | static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv, | |
648 | u32 rpm_config_reg) | |
649 | { | |
650 | u32 f19_2_mhz = 19200; | |
651 | u32 f24_mhz = 24000; | |
652 | u32 f25_mhz = 25000; | |
653 | u32 f38_4_mhz = 38400; | |
654 | u32 crystal_clock = (rpm_config_reg & | |
655 | GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> | |
656 | GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; | |
657 | ||
658 | switch (crystal_clock) { | |
659 | case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: | |
660 | return f24_mhz; | |
661 | case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: | |
662 | return f19_2_mhz; | |
663 | case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: | |
664 | return f38_4_mhz; | |
665 | case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: | |
666 | return f25_mhz; | |
667 | default: | |
668 | MISSING_CASE(crystal_clock); | |
669 | return 0; | |
670 | } | |
671 | } | |
672 | ||
f577a03b | 673 | static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) |
dab91783 | 674 | { |
f577a03b LL |
675 | u32 f12_5_mhz = 12500; |
676 | u32 f19_2_mhz = 19200; | |
677 | u32 f24_mhz = 24000; | |
dab91783 LL |
678 | |
679 | if (INTEL_GEN(dev_priv) <= 4) { | |
680 | /* PRMs say: | |
681 | * | |
682 | * "The value in this register increments once every 16 | |
683 | * hclks." (through the “Clocking Configuration” | |
684 | * (“CLKCFG”) MCHBAR register) | |
685 | */ | |
f577a03b | 686 | return dev_priv->rawclk_freq / 16; |
dab91783 LL |
687 | } else if (INTEL_GEN(dev_priv) <= 8) { |
688 | /* PRMs say: | |
689 | * | |
690 | * "The PCU TSC counts 10ns increments; this timestamp | |
691 | * reflects bits 38:3 of the TSC (i.e. 80ns granularity, | |
692 | * rolling over every 1.5 hours). | |
693 | */ | |
694 | return f12_5_mhz; | |
695 | } else if (INTEL_GEN(dev_priv) <= 9) { | |
696 | u32 ctc_reg = I915_READ(CTC_MODE); | |
f577a03b | 697 | u32 freq = 0; |
dab91783 LL |
698 | |
699 | if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { | |
700 | freq = read_reference_ts_freq(dev_priv); | |
701 | } else { | |
702 | freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz; | |
703 | ||
704 | /* Now figure out how the command stream's timestamp | |
705 | * register increments from this frequency (it might | |
706 | * increment only every few clock cycle). | |
707 | */ | |
708 | freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> | |
709 | CTC_SHIFT_PARAMETER_SHIFT); | |
710 | } | |
711 | ||
712 | return freq; | |
465242ee | 713 | } else if (INTEL_GEN(dev_priv) <= 12) { |
dab91783 | 714 | u32 ctc_reg = I915_READ(CTC_MODE); |
f577a03b | 715 | u32 freq = 0; |
dab91783 LL |
716 | |
717 | /* First figure out the reference frequency. There are 2 ways | |
718 | * we can compute the frequency, either through the | |
719 | * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE | |
720 | * tells us which one we should use. | |
721 | */ | |
722 | if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { | |
723 | freq = read_reference_ts_freq(dev_priv); | |
724 | } else { | |
d775a7b1 PZ |
725 | u32 rpm_config_reg = I915_READ(RPM_CONFIG0); |
726 | ||
727 | if (INTEL_GEN(dev_priv) <= 10) | |
728 | freq = gen10_get_crystal_clock_freq(dev_priv, | |
729 | rpm_config_reg); | |
730 | else | |
731 | freq = gen11_get_crystal_clock_freq(dev_priv, | |
732 | rpm_config_reg); | |
dab91783 | 733 | |
53ff2641 LL |
734 | /* Now figure out how the command stream's timestamp |
735 | * register increments from this frequency (it might | |
736 | * increment only every few clock cycle). | |
737 | */ | |
738 | freq >>= 3 - ((rpm_config_reg & | |
739 | GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> | |
740 | GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); | |
741 | } | |
dab91783 LL |
742 | |
743 | return freq; | |
744 | } | |
745 | ||
fe66e928 | 746 | MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n"); |
dab91783 LL |
747 | return 0; |
748 | } | |
749 | ||
805446c8 TU |
750 | #undef INTEL_VGA_DEVICE |
751 | #define INTEL_VGA_DEVICE(id, info) (id) | |
752 | ||
753 | static const u16 subplatform_ult_ids[] = { | |
754 | INTEL_HSW_ULT_GT1_IDS(0), | |
755 | INTEL_HSW_ULT_GT2_IDS(0), | |
756 | INTEL_HSW_ULT_GT3_IDS(0), | |
757 | INTEL_BDW_ULT_GT1_IDS(0), | |
758 | INTEL_BDW_ULT_GT2_IDS(0), | |
759 | INTEL_BDW_ULT_GT3_IDS(0), | |
760 | INTEL_BDW_ULT_RSVD_IDS(0), | |
761 | INTEL_SKL_ULT_GT1_IDS(0), | |
762 | INTEL_SKL_ULT_GT2_IDS(0), | |
763 | INTEL_SKL_ULT_GT3_IDS(0), | |
764 | INTEL_KBL_ULT_GT1_IDS(0), | |
765 | INTEL_KBL_ULT_GT2_IDS(0), | |
766 | INTEL_KBL_ULT_GT3_IDS(0), | |
767 | INTEL_CFL_U_GT2_IDS(0), | |
768 | INTEL_CFL_U_GT3_IDS(0), | |
769 | INTEL_WHL_U_GT1_IDS(0), | |
770 | INTEL_WHL_U_GT2_IDS(0), | |
c3ad8d29 | 771 | INTEL_WHL_U_GT3_IDS(0), |
805446c8 TU |
772 | }; |
773 | ||
774 | static const u16 subplatform_ulx_ids[] = { | |
775 | INTEL_HSW_ULX_GT1_IDS(0), | |
776 | INTEL_HSW_ULX_GT2_IDS(0), | |
777 | INTEL_BDW_ULX_GT1_IDS(0), | |
778 | INTEL_BDW_ULX_GT2_IDS(0), | |
779 | INTEL_BDW_ULX_GT3_IDS(0), | |
780 | INTEL_BDW_ULX_RSVD_IDS(0), | |
781 | INTEL_SKL_ULX_GT1_IDS(0), | |
782 | INTEL_SKL_ULX_GT2_IDS(0), | |
783 | INTEL_KBL_ULX_GT1_IDS(0), | |
c3ad8d29 | 784 | INTEL_KBL_ULX_GT2_IDS(0), |
805446c8 | 785 | INTEL_AML_KBL_GT2_IDS(0), |
c3ad8d29 | 786 | INTEL_AML_CFL_GT2_IDS(0), |
805446c8 TU |
787 | }; |
788 | ||
789 | static const u16 subplatform_portf_ids[] = { | |
790 | INTEL_CNL_PORT_F_IDS(0), | |
c3ad8d29 | 791 | INTEL_ICL_PORT_F_IDS(0), |
805446c8 TU |
792 | }; |
793 | ||
794 | static bool find_devid(u16 id, const u16 *p, unsigned int num) | |
795 | { | |
796 | for (; num; num--, p++) { | |
797 | if (*p == id) | |
798 | return true; | |
799 | } | |
800 | ||
801 | return false; | |
802 | } | |
803 | ||
804 | void intel_device_info_subplatform_init(struct drm_i915_private *i915) | |
805 | { | |
806 | const struct intel_device_info *info = INTEL_INFO(i915); | |
807 | const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915); | |
808 | const unsigned int pi = __platform_mask_index(rinfo, info->platform); | |
809 | const unsigned int pb = __platform_mask_bit(rinfo, info->platform); | |
810 | u16 devid = INTEL_DEVID(i915); | |
640cde65 | 811 | u32 mask = 0; |
805446c8 TU |
812 | |
813 | /* Make sure IS_<platform> checks are working. */ | |
814 | RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb); | |
815 | ||
816 | /* Find and mark subplatform bits based on the PCI device id. */ | |
817 | if (find_devid(devid, subplatform_ult_ids, | |
818 | ARRAY_SIZE(subplatform_ult_ids))) { | |
819 | mask = BIT(INTEL_SUBPLATFORM_ULT); | |
820 | } else if (find_devid(devid, subplatform_ulx_ids, | |
821 | ARRAY_SIZE(subplatform_ulx_ids))) { | |
822 | mask = BIT(INTEL_SUBPLATFORM_ULX); | |
823 | if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { | |
824 | /* ULX machines are also considered ULT. */ | |
825 | mask |= BIT(INTEL_SUBPLATFORM_ULT); | |
826 | } | |
805446c8 TU |
827 | } else if (find_devid(devid, subplatform_portf_ids, |
828 | ARRAY_SIZE(subplatform_portf_ids))) { | |
829 | mask = BIT(INTEL_SUBPLATFORM_PORTF); | |
830 | } | |
831 | ||
832 | GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS); | |
833 | ||
834 | RUNTIME_INFO(i915)->platform_mask[pi] |= mask; | |
835 | } | |
836 | ||
6a7e51f3 MW |
/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	/*
	 * Per-pipe scaler counts: gen10+ has two scalers on every pipe;
	 * gen9 has two on pipes A/B but only one on pipe C.
	 */
	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	/* The engine mask type must be wide enough to hold every engine. */
	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	/* Per-pipe sprite plane counts, from newest platforms to oldest. */
	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	/*
	 * Clear or prune pipe_mask when display is disabled by module
	 * parameter or fused off in hardware.
	 */
	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->pipe_mask = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->pipe_mask = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->pipe_mask &= ~BIT(PIPE_C);
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		/* Gen9+: read per-pipe fuse bits from SKL_DFSM. */
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 enabled_mask = info->pipe_mask;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			enabled_mask &= ~BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			enabled_mask &= ~BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			enabled_mask &= ~BIT(PIPE_C);
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE))
			enabled_mask &= ~BIT(PIPE_D);

		/*
		 * At least one pipe should be enabled and if there are
		 * disabled pipes, they should be the last ones, with no holes
		 * in the mask.
		 */
		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
				  enabled_mask);
		else
			info->pipe_mask = enabled_mask;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	/* Gen6 with active VT-d: disable ppGTT entirely. */
	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
3fed1808 CW |
973 | |
974 | void intel_driver_caps_print(const struct intel_driver_caps *caps, | |
975 | struct drm_printer *p) | |
976 | { | |
481827b4 CW |
977 | drm_printf(p, "Has logical contexts? %s\n", |
978 | yesno(caps->has_logical_contexts)); | |
3fed1808 CW |
979 | drm_printf(p, "scheduler: %x\n", caps->scheduler); |
980 | } | |
26376a7e OM |
981 | |
/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	/* Media engine fusing only applies from gen11 onwards. */
	if (INTEL_GEN(dev_priv) < 11)
		return;

	/* Register holds "disable" bits; invert to get "enabled" bits. */
	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		/* Ignore fuse bits for VCS engines this SKU never had. */
		if (!HAS_ENGINE(dev_priv, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		/* Fused-off engine: drop it from the engine mask. */
		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	/* The pruned engine mask must now agree with the fuse mask. */
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		/* Ignore fuse bits for VECS engines this SKU never had. */
		if (!HAS_ENGINE(dev_priv, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		/* Fused-off engine: drop it from the engine mask. */
		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}