dmaengine: idxd: fix opcap sysfs attribute output
drivers/dma/idxd/sysfs.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

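/*
 * sysfs configuration interface for idxd devices: the device and its
 * groups, engines, and work queues are registered as devices on the
 * dsa/iax bus types so user space can configure them before enabling.
 */
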
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_iax_dev(struct device *dev)
{
	return dev ? dev->type == &iax_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev) || is_iax_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

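/*
 * Driver-model callbacks for the config bus types: match() accepts idxd
 * devices and disabled wqs that are ready for configuration, probe()
 * writes the configuration to hardware and enables the device or wq,
 * and remove() tears it back down.
 */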
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		/* Shared WQ checks */
		if (wq_shared(wq)) {
			if (!device_swq_supported(idxd)) {
				dev_warn(dev,
					 "PASID not enabled and shared WQ.\n");
				mutex_unlock(&wq->wq_lock);
				return -ENXIO;
			}
			/*
			 * Shared wq with the threshold set to 0 means the user
			 * did not set the threshold or transitioned from a
			 * dedicated wq but did not set threshold. A value
			 * of 0 would effectively disable the shared wq. The
			 * driver does not allow a value of 0 to be set for
			 * threshold via sysfs.
			 */
			if (wq->threshold == 0) {
				dev_warn(dev,
					 "Shared WQ and threshold 0.\n");
				mutex_unlock(&wq->wq_lock);
				return -EINVAL;
			}
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claims on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

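/*
 * DSA and IAX devices live on separate bus types but share the same
 * match/probe/remove callbacks.
 */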
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

struct bus_type iax_bus_type = {
	.name = "iax",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type,
	&iax_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver iax_drv = {
	.drv = {
		.name = "iax",
		.bus = &iax_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv,
	&iax_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else if (idxd->type == IDXD_TYPE_IAX)
		return &iax_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

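/* Writing -1 detaches the engine from its current group. */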
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

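/*
 * Tokens are a shared device resource. Each group may reserve some for
 * its own use and cap how many it is allowed to consume; whatever is
 * not reserved stays in the device-wide free pool (idxd->nr_tokens).
 */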
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* don't index before buf when no engine is attached */
	if (!rc)
		return 0;

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* don't index before buf when no wq is attached */
	if (!rc)
		return 0;

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

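/*
 * The completion threshold only applies to shared wqs; the store below
 * rejects 0, values above the wq size, and dedicated wqs.
 */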
static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

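/* Parse a non-zero u64 from sysfs input and round it up to a power of two. */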
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

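/*
 * A typical wq configuration sequence from user space, with illustrative
 * values and a hypothetical wq0.0 instance:
 *
 *	echo 0 > /sys/bus/dsa/devices/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/wq0.0/size
 *	echo user > /sys/bus/dsa/devices/wq0.0/type
 *	echo app0 > /sys/bus/dsa/devices/wq0.0/name
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */
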
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

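/*
 * Emit the four 64-bit operation capability words. sysfs_emit_at()
 * bounds the output at PAGE_SIZE, which keeps this multi-word output
 * from overrunning the sysfs buffer.
 */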
static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

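/*
 * Reading device attributes from user space (illustrative paths,
 * assuming a DSA device enumerated as dsa0):
 *
 *	cat /sys/bus/dsa/devices/dsa0/op_cap
 *	cat /sys/bus/dsa/devices/dsa0/max_work_queues
 */
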
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}