Commit | Line | Data |
---|---|---|
24f90d66 | 1 | // SPDX-License-Identifier: MIT |
750e76b4 | 2 | /* |
750e76b4 CW |
3 | * Copyright © 2019 Intel Corporation |
4 | */ | |
5 | ||
6 | #include <linux/list.h> | |
7 | #include <linux/list_sort.h> | |
8 | #include <linux/llist.h> | |
9 | ||
10 | #include "i915_drv.h" | |
11 | #include "intel_engine.h" | |
12 | #include "intel_engine_user.h" | |
b761a7b4 | 13 | #include "intel_gt.h" |
ee242ca7 | 14 | #include "uc/intel_guc_submission.h" |
750e76b4 CW |
15 | |
/*
 * intel_engine_lookup_user - find the engine registered under a uabi
 * (class, instance) pair.
 * @i915: device whose uabi engine rb-tree is searched
 * @class: uabi engine class (I915_ENGINE_CLASS_*)
 * @instance: uabi instance within that class
 *
 * The rb-tree is keyed by (uabi_class, uabi_instance) in ascending order,
 * as built by intel_engines_driver_register(). Returns the matching
 * engine, or NULL if none is registered.
 */
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		/* Primary sort key is class, secondary key is instance. */
		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			/*
			 * Either a larger class, or — since the smaller-class
			 * case was taken above — the same class with a larger
			 * instance: both descend to the right.
			 */
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}
38 | ||
/*
 * intel_engine_add_user - queue an engine for later uabi registration.
 *
 * Engines are first collected on a lock-free llist: the storage of the
 * rb-tree node (engine->uabi_node) and of the rb-root
 * (i915->uabi_engines) is type-punned to serve as llist_node/llist_head
 * until intel_engines_driver_register() drains, sorts and links the
 * engines into the final rb-tree.
 */
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}
44 | ||
/*
 * Map from the internal engine class (RENDER_CLASS etc.) to the class
 * reported to userspace (I915_ENGINE_CLASS_*). Engines that are never
 * exposed (e.g. GSC) are filtered out before this table is consulted —
 * see intel_engines_driver_register().
 */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
};
52 | ||
4f0f586b ST |
53 | static int engine_cmp(void *priv, const struct list_head *A, |
54 | const struct list_head *B) | |
750e76b4 CW |
55 | { |
56 | const struct intel_engine_cs *a = | |
57 | container_of((struct rb_node *)A, typeof(*a), uabi_node); | |
58 | const struct intel_engine_cs *b = | |
59 | container_of((struct rb_node *)B, typeof(*b), uabi_node); | |
60 | ||
61 | if (uabi_classes[a->class] < uabi_classes[b->class]) | |
62 | return -1; | |
63 | if (uabi_classes[a->class] > uabi_classes[b->class]) | |
64 | return 1; | |
65 | ||
66 | if (a->instance < b->instance) | |
67 | return -1; | |
68 | if (a->instance > b->instance) | |
69 | return 1; | |
70 | ||
71 | return 0; | |
72 | } | |
73 | ||
/*
 * Atomically take ownership of every engine queued so far by
 * intel_engine_add_user(), leaving the llist empty.
 */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}
78 | ||
/*
 * Drain the llist of freshly added engines onto @engines and sort it
 * into (uabi class, instance) order, ready for in-order rb-tree
 * construction. The uabi_node storage is reinterpreted once more, from
 * llist_node to list_head, for the benefit of list_sort().
 */
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}
92 | ||
/*
 * Compute the scheduler capabilities advertised to userspace
 * (i915->caps.scheduler). A capability is reported only if every uabi
 * engine supports it: per-engine support is accumulated into @enabled,
 * any lack of support into @disabled, and the final mask is
 * enabled & ~disabled. If basic scheduling (CAP_ENABLED) does not
 * survive that intersection, no capabilities are reported at all.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
		/* Pair an engine flag bit with the scheduler cap bit it implies. */
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		/* Advertise the static priority map when using GuC submission. */
		if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
135 | ||
2edda80d CW |
136 | const char *intel_engine_class_repr(u8 class) |
137 | { | |
138 | static const char * const uabi_names[] = { | |
139 | [RENDER_CLASS] = "rcs", | |
140 | [COPY_ENGINE_CLASS] = "bcs", | |
141 | [VIDEO_DECODE_CLASS] = "vcs", | |
142 | [VIDEO_ENHANCEMENT_CLASS] = "vecs", | |
5fd974d1 | 143 | [OTHER_CLASS] = "other", |
944823c9 | 144 | [COMPUTE_CLASS] = "ccs", |
2edda80d CW |
145 | }; |
146 | ||
147 | if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class]) | |
148 | return "xxx"; | |
149 | ||
150 | return uabi_names[class]; | |
151 | } | |
152 | ||
f1c4d157 CW |
/*
 * Cursor used while walking the sorted engine list to assign legacy
 * ring indices: tracks the current gt and engine class, plus the next
 * instance number to hand out within that class.
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};
158 | ||
/*
 * Translate a (class, instance) cursor into the legacy global ring
 * index (RCS0, VCS0, ...) used by the default execbuf mapping.
 * Returns INVALID_ENGINE for classes without a legacy ring or for
 * instances beyond the legacy per-class maximum.
 */
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max; /* first legacy index, number of legacy slots */
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}
179 | ||
/*
 * Assign @engine its legacy ring index, restarting the instance count
 * whenever the walk crosses into a new gt or engine class (the caller
 * visits engines in sorted order). Engines without a legacy slot keep
 * INVALID_ENGINE and do not consume an instance number.
 */
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}
193 | ||
194babe2 DCS |
/* Rebuild engine->name as "<name><instance>", logging the previous name. */
static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
	char old[sizeof(engine->name)];

	/* Keep a copy of the outgoing name purely for the debug message. */
	memcpy(old, engine->name, sizeof(engine->name));
	scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
	drm_dbg(&engine->i915->drm, "renamed %s to %s\n", old, engine->name);
}
202 | ||
750e76b4 CW |
/*
 * intel_engines_driver_register - publish the uabi engine list.
 *
 * Drains the engines queued by intel_engine_add_user(), sorts them into
 * (uabi class, instance) order and links them into the rb-tree consumed
 * by intel_engine_lookup_user(). Engines on a gt with an unrecoverable
 * error and the GSC engine are not exposed. Under selftest/debug builds
 * the resulting mapping is cross-checked before the scheduler caps are
 * derived from the surviving engines.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		/*
		 * We don't want to expose the GSC engine to the users, but we
		 * still rename it so it is easier to identify in the debug logs
		 */
		if (engine->id == GSC0) {
			engine_rename(engine, "gsc", 0);
			continue;
		}

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		/* Instances number off sequentially within each uabi class. */
		GEM_BUG_ON(engine->uabi_class >=
			   ARRAY_SIZE(i915->engine_uabi_class_count));
		engine->uabi_instance =
			i915->engine_uabi_class_count[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		engine_rename(engine,
			      intel_engine_class_repr(engine->class),
			      engine->uabi_instance);

		/*
		 * The list is already sorted, so each node can be linked at
		 * the rightmost position (tracked via prev/p) without
		 * searching; rb_insert_color() rebalances as needed.
		 */
		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every (class, instance) pair counted above must be reachable. */
		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, withdraw the whole uabi engine tree. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
312 | ||
313 | unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915) | |
314 | { | |
315 | struct intel_engine_cs *engine; | |
316 | unsigned int which; | |
317 | ||
318 | which = 0; | |
319 | for_each_uabi_engine(engine, i915) | |
320 | if (engine->default_state) | |
321 | which |= BIT(engine->uabi_class); | |
322 | ||
323 | return which; | |
324 | } |