// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
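/*
 * sysfs configuration interface for the idxd driver: a "dsa" bus with one
 * conf_dev per device, group, engine and work queue, whose attributes are
 * read and written from user space to configure the hardware.
 */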
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
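/*
 * Bus match callback: a device (idxd device or wq) only matches the
 * configuration driver once it is in a state that allows configuration.
 */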
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
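/*
 * Bus probe callback: for an idxd device this writes the device configuration
 * and enables it; for a wq it validates the configuration, allocates
 * resources, enables the wq and registers the dmaengine channel or cdev.
 */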
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
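/*
 * Disable a wq: unregister its dmaengine channel or cdev, unmap the portal,
 * disable it in hardware and release its resources.
 */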
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
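/*
 * Bus remove callback: disables a single wq, or, for the idxd device itself,
 * releases any still-active wqs, unregisters the dmaengine device and
 * disables the device.
 */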
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}
static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};
static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */
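/* Recompute the number of unreserved tokens after a reservation change. */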
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);

	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);
static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;

	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;

	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;

	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;

	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;

	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;

	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;

	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);
static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');

	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
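/*
 * Parse a u64 from sysfs input and round it up to the next power of two;
 * shared by the wq max_transfer_size and max_batch_size attributes.
 */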
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);

	return 0;
}
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);
static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);
static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
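/*
 * Registration helpers: create one conf_dev on the dsa bus for each engine,
 * group and wq, plus one for the idxd device itself, and tear them all down
 * on failure.
 */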
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}
static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}
static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}
static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}
int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}