dmaengine: idxd: add descriptor manipulation routines
linux-2.6-block.git: drivers/dma/idxd/sysfs.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
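
/*
 * Illustrative only: with the match rules above, a configured device or
 * wq binds to the "dsa" driver through the usual driver-core interface,
 * e.g. (hypothetical device name):
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *
 * Devices that are not yet ready for configuration and wqs that are
 * still enabled refuse to match.
 */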

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/*
			 * dev_lock was already released above; reacquire it
			 * around the disable instead of unlocking it twice.
			 */
			spin_lock_irqsave(&idxd->dev_lock, flags);
			rc = idxd_wq_disable(wq);
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
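
/*
 * A sketch of the wq probe ordering enforced above, assuming the wq has
 * already been placed in a group and given a name:
 *
 *	device enabled -> wq resources allocated -> wq config written ->
 *	wq enabled -> portal mapped
 *
 * Any failure drops the locks and propagates the error to the driver
 * core.
 */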

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients still have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}
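
/*
 * Illustrative counterpart to the probe path (hypothetical names):
 *
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/unbind
 *
 * Unbinding the device also releases the driver from any wqs that are
 * still active before disabling the device itself.
 */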

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

static struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

static struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* unwind only the drivers that registered successfully (0..i-1) */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = group;
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
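
/*
 * Example usage (hypothetical numbering): attach engine0.0 to group 0,
 * or write -1 to detach it again:
 *
 *	echo 0 > /sys/bus/dsa/devices/engine0.0/group_id
 *	echo -1 > /sys/bus/dsa/devices/engine0.0/group_id
 */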

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
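
/*
 * Worked example of the accounting above, with made-up numbers: if
 * max_tokens is 96 and two groups reserve 24 tokens each, the free pool
 * nr_tokens becomes 96 - (24 + 24) = 48.
 */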

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);
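
/*
 * Example usage (hypothetical numbering): reserve 24 tokens for
 * group0.0 while the device is disabled:
 *
 *	echo 24 > /sys/bus/dsa/devices/group0.0/tokens_reserved
 */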

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;
	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* avoid writing before buf when no engine matched */
	if (!rc)
		return sprintf(tmp, "\n");

	/* replace the trailing space with a newline */
	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* avoid writing before buf when no wq matched */
	if (!rc)
		return sprintf(tmp, "\n");

	/* replace the trailing space with a newline */
	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
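
/*
 * Example usage (hypothetical numbering): only dedicated mode is
 * accepted for now, anything else returns -EINVAL:
 *
 *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 */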

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else
		wq->type = IDXD_WQT_NONE;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
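
/*
 * Putting the wq attributes together, a minimal configuration sequence
 * might look like this (hypothetical names and sizes):
 *
 *	echo 0 > /sys/bus/dsa/devices/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/wq0.0/size
 *	echo kernel > /sys/bus/dsa/devices/wq0.0/type
 *	echo mywq > /sys/bus/dsa/devices/wq0.0/name
 *
 * All of these stores require the wq (and for some attributes the
 * device) to be disabled.
 */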

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
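
/*
 * Example usage (hypothetical value): cap the tokens the device hands
 * out, bounded by total_tokens from the group capability register:
 *
 *	echo 64 > /sys/bus/dsa/devices/dsa0/token_limit
 */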

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}
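
/*
 * Setup registers the device conf_dev first and then the wq, group and
 * engine conf_devs as its children; idxd_cleanup_sysfs() below
 * unregisters the children first and the parent device last.
 */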

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* unwind only the bus types that registered successfully (0..i-1) */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}