drm/i915: Expose logical engine instance to user
drivers/gpu/drm/i915/i915_query.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

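/*
 * Common helper for all query callbacks: a query item with length == 0 is
 * a size probe, so return the total number of bytes the query would write;
 * a non-zero length smaller than that total is rejected with -EINVAL;
 * otherwise copy the fixed-size header in from userspace so the caller can
 * validate its flags/rsvd fields before writing the result back.
 */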
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

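/*
 * DRM_I915_QUERY_TOPOLOGY_INFO: the result is a drm_i915_query_topology_info
 * header immediately followed by three bitmask arrays (slice mask,
 * subslice masks, EU masks); the offsets and strides written into the
 * header tell userspace where each array starts and how it is laid out.
 */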
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) + slice_length),
			 sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) +
					 slice_length + subslice_length),
			 sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

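/*
 * DRM_I915_QUERY_ENGINE_INFO: report one drm_i915_engine_info per
 * user-visible engine.  As of this change the logical instance (derived
 * from the engine's logical_mask) is exposed as well, advertised through
 * the I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE flag.
 *
 * A minimal userspace sketch of the usual two-call pattern, assuming an
 * open DRM fd "fd" and omitting error handling:
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_ENGINE_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	struct drm_i915_query_engine_info *engines;
 *	unsigned int i;
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// size probe, sets item.length
 *	engines = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)engines;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// fills engines->engines[]
 *
 *	for (i = 0; i < engines->num_engines; i++) {
 *		const struct drm_i915_engine_info *info = &engines->engines[i];
 *
 *		if (info->flags & I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE)
 *			printf("class %u instance %u -> logical %u\n",
 *			       info->engine.engine_class,
 *			       info->engine.engine_instance,
 *			       info->logical_instance);
 *	}
 */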
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

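/*
 * Helpers for DRM_I915_QUERY_PERF_CONFIG: a user register count of zero
 * means "only report how many registers there are"; otherwise the user
 * buffer must have room for at least kernel_n_regs entries and each
 * register is written out as an (mmio offset, value) pair of u32s.
 */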
static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just return the number of registers, and won't copy the
	 * registers themselves.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}

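/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID / _FOR_ID: look up an OA
 * configuration either by UUID string or by numeric id and fill in the
 * drm_i915_perf_oa_config that userspace placed immediately after the
 * drm_i915_query_perf_config header.  Register arrays are only copied if
 * the caller supplied buffers large enough to hold them.
 */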
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

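/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: write the number of known OA configs
 * into the header's config field, followed by an array of u64 config ids.
 * Id 1 is always reserved for the kernel's test config, and the metrics
 * idr is re-walked until the id array allocation is large enough.
 */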
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

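/*
 * DRM_I915_QUERY_MEMORY_REGIONS: report one drm_i915_memory_region_info
 * per non-private memory region, giving the region's class/instance pair,
 * its probed size and how much of it is currently unallocated.
 */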
static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;
		info.unallocated_size = mr->avail;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}

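/*
 * Query ids are 1-based in the uapi: query_id N is dispatched to
 * i915_query_funcs[N - 1], with the index sanitised via
 * array_index_nospec() to avoid speculative out-of-bounds use.  Each
 * callback returns the number of bytes required (or written) on success,
 * or a negative error code, and that value is stored back into the item's
 * length field whenever it differs.
 */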
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}