drivers/interconnect/core.c, as of commit "interconnect: qcom: Use the standard aggregate function" (linux-block.git)

// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
        if (!n)
                return;

        seq_printf(s, "%-30s %12u %12u\n",
                   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
        struct icc_provider *provider;

        seq_puts(s, " node                                  avg         peak\n");
        seq_puts(s, "--------------------------------------------------------\n");

        mutex_lock(&icc_lock);

        list_for_each_entry(provider, &icc_providers, provider_list) {
                struct icc_node *n;

                list_for_each_entry(n, &provider->nodes, node_list) {
                        struct icc_req *r;

                        icc_summary_show_one(s, n);
                        hlist_for_each_entry(r, &n->req_list, req_node) {
                                if (!r->dev)
                                        continue;

                                seq_printf(s, "    %-26s %12u %12u\n",
                                           dev_name(r->dev), r->avg_bw,
                                           r->peak_bw);
                        }
                }
        }

        mutex_unlock(&icc_lock);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static struct icc_node *node_find(const int id)
{
        return idr_find(&icc_idr, id);
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
                                  ssize_t num_nodes)
{
        struct icc_node *node = dst;
        struct icc_path *path;
        int i;

        path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
        if (!path)
                return ERR_PTR(-ENOMEM);

        path->num_nodes = num_nodes;

        for (i = num_nodes - 1; i >= 0; i--) {
                node->provider->users++;
                hlist_add_head(&path->reqs[i].req_node, &node->req_list);
                path->reqs[i].node = node;
                path->reqs[i].dev = dev;
                /* reference to previous node was saved during path traversal */
                node = node->reverse;
        }

        return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
                                  struct icc_node *dst)
{
        struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
        struct icc_node *n, *node = NULL;
        struct list_head traverse_list;
        struct list_head edge_list;
        struct list_head visited_list;
        size_t i, depth = 1;
        bool found = false;

        INIT_LIST_HEAD(&traverse_list);
        INIT_LIST_HEAD(&edge_list);
        INIT_LIST_HEAD(&visited_list);

        list_add(&src->search_list, &traverse_list);
        src->reverse = NULL;

        do {
                list_for_each_entry_safe(node, n, &traverse_list, search_list) {
                        if (node == dst) {
                                found = true;
                                list_splice_init(&edge_list, &visited_list);
                                list_splice_init(&traverse_list, &visited_list);
                                break;
                        }
                        for (i = 0; i < node->num_links; i++) {
                                struct icc_node *tmp = node->links[i];

                                if (!tmp) {
                                        path = ERR_PTR(-ENOENT);
                                        goto out;
                                }

                                if (tmp->is_traversed)
                                        continue;

                                tmp->is_traversed = true;
                                tmp->reverse = node;
                                list_add_tail(&tmp->search_list, &edge_list);
                        }
                }

                if (found)
                        break;

                list_splice_init(&traverse_list, &visited_list);
                list_splice_init(&edge_list, &traverse_list);

                /* count the hops including the source */
                depth++;

        } while (!list_empty(&traverse_list));

out:

        /* reset the traversed state */
        list_for_each_entry_reverse(n, &visited_list, search_list)
                n->is_traversed = false;

        if (found)
                path = path_init(dev, dst, depth);

        return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

static int aggregate_requests(struct icc_node *node)
{
        struct icc_provider *p = node->provider;
        struct icc_req *r;

        node->avg_bw = 0;
        node->peak_bw = 0;

        if (p->pre_aggregate)
                p->pre_aggregate(node);

        hlist_for_each_entry(r, &node->req_list, req_node)
                p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
                             &node->avg_bw, &node->peak_bw);

        return 0;
}

static int apply_constraints(struct icc_path *path)
{
        struct icc_node *next, *prev = NULL;
        int ret = -EINVAL;
        int i;

        for (i = 0; i < path->num_nodes; i++) {
                next = path->reqs[i].node;

                /*
                 * Both endpoints should be valid master-slave pairs of the
                 * same interconnect provider that will be configured.
                 */
                if (!prev || next->provider != prev->provider) {
                        prev = next;
                        continue;
                }

                /* set the constraints */
                ret = next->provider->set(prev, next);
                if (ret)
                        goto out;

                prev = next;
        }
out:
        return ret;
}

int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
                      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
        *agg_avg += avg_bw;
        *agg_peak = max(*agg_peak, peak_bw);

        return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);
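
/*
 * Example (illustrative sketch, not part of this file): a provider with no
 * special aggregation needs can plug the standard helper straight into its
 * icc_provider instead of open-coding the same sum/max logic, which is what
 * the qcom "Use the standard aggregate function" cleanup does. The names
 * my_provider and my_set below are hypothetical:
 *
 *      static struct icc_provider my_provider = {
 *              .set       = my_set,
 *              .aggregate = icc_std_aggregate,
 *              .xlate     = of_icc_xlate_onecell,
 *      };
 */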

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
                                      void *data)
{
        struct icc_onecell_data *icc_data = data;
        unsigned int idx = spec->args[0];

        if (idx >= icc_data->num_nodes) {
                pr_err("%s: invalid index %u\n", __func__, idx);
                return ERR_PTR(-EINVAL);
        }

        return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
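
/*
 * Example (illustrative sketch): a provider that indexes its nodes by a
 * single cell would typically allocate an icc_onecell_data at probe time,
 * fill in the node pointers as the nodes are created, and hand it to the
 * core through the provider's data pointer together with this xlate helper.
 * The variable names below are hypothetical:
 *
 *      struct icc_onecell_data *onecell;
 *
 *      onecell = devm_kzalloc(dev, struct_size(onecell, nodes, num_nodes),
 *                             GFP_KERNEL);
 *      if (!onecell)
 *              return -ENOMEM;
 *      onecell->num_nodes = num_nodes;
 *
 *      provider->xlate = of_icc_xlate_onecell;
 *      provider->data = onecell;
 */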

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node on success or ERR_PTR()
 * on failure.
 */
static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
        struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
        struct icc_provider *provider;

        if (!spec || spec->args_count != 1)
                return ERR_PTR(-EINVAL);

        mutex_lock(&icc_lock);
        list_for_each_entry(provider, &icc_providers, provider_list) {
                if (provider->dev->of_node == spec->np)
                        node = provider->xlate(spec, provider->data);
                if (!IS_ERR(node))
                        break;
        }
        mutex_unlock(&icc_lock);

        return node;
}

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
        struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
        struct icc_node *src_node, *dst_node;
        struct device_node *np = NULL;
        struct of_phandle_args src_args, dst_args;
        int idx = 0;
        int ret;

        if (!dev || !dev->of_node)
                return ERR_PTR(-ENODEV);

        np = dev->of_node;

        /*
         * When the consumer DT node does not have an "interconnects" property,
         * return a NULL path to skip setting constraints.
         */
        if (!of_find_property(np, "interconnects", NULL))
                return NULL;

        /*
         * We use a combination of phandle and specifier for endpoint. For now
         * let's support only global ids and extend this in the future if
         * needed without breaking DT compatibility.
         */
        if (name) {
                idx = of_property_match_string(np, "interconnect-names", name);
                if (idx < 0)
                        return ERR_PTR(idx);
        }

        ret = of_parse_phandle_with_args(np, "interconnects",
                                         "#interconnect-cells", idx * 2,
                                         &src_args);
        if (ret)
                return ERR_PTR(ret);

        of_node_put(src_args.np);

        ret = of_parse_phandle_with_args(np, "interconnects",
                                         "#interconnect-cells", idx * 2 + 1,
                                         &dst_args);
        if (ret)
                return ERR_PTR(ret);

        of_node_put(dst_args.np);

        src_node = of_icc_get_from_provider(&src_args);

        if (IS_ERR(src_node)) {
                if (PTR_ERR(src_node) != -EPROBE_DEFER)
                        dev_err(dev, "error finding src node: %ld\n",
                                PTR_ERR(src_node));
                return ERR_CAST(src_node);
        }

        dst_node = of_icc_get_from_provider(&dst_args);

        if (IS_ERR(dst_node)) {
                if (PTR_ERR(dst_node) != -EPROBE_DEFER)
                        dev_err(dev, "error finding dst node: %ld\n",
                                PTR_ERR(dst_node));
                return ERR_CAST(dst_node);
        }

        mutex_lock(&icc_lock);
        path = path_find(dev, src_node, dst_node);
        mutex_unlock(&icc_lock);
        if (IS_ERR(path)) {
                dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
                return path;
        }

        if (name)
                path->name = kstrdup_const(name, GFP_KERNEL);
        else
                path->name = kasprintf(GFP_KERNEL, "%s-%s",
                                       src_node->name, dst_node->name);

        return path;
}
EXPORT_SYMBOL_GPL(of_icc_get);
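
/*
 * Example (illustrative sketch): a consumer device with a single path could
 * describe its two endpoints in DT roughly like this (the labels and the
 * MASTER_FOO/SLAVE_BAR specifiers are made up for illustration):
 *
 *      interconnects = <&noc MASTER_FOO &noc SLAVE_BAR>;
 *      interconnect-names = "foo-bar";
 *
 * and its driver would then request the path by that name:
 *
 *      path = of_icc_get(dev, "foo-bar");
 *      if (IS_ERR(path))
 *              return PTR_ERR(path);
 */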

/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path we want to tag
 * @tag: the tag value
 *
 * This function allows consumers to append a tag to the requests associated
 * with a path, so that a different aggregation could be done based on this tag.
 */
void icc_set_tag(struct icc_path *path, u32 tag)
{
        int i;

        if (!path)
                return;

        mutex_lock(&icc_lock);

        for (i = 0; i < path->num_nodes; i++)
                path->reqs[i].tag = tag;

        mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);
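
/*
 * Example (illustrative sketch): tag values are interpreted by the platform
 * specific aggregate() callback, so the constant below is hypothetical. A
 * consumer would normally tag the path once, before making bandwidth
 * requests on it:
 *
 *      icc_set_tag(path, MY_PLATFORM_TAG_ACTIVE_ONLY);
 *      icc_set_bw(path, avg_kBps, peak_kBps);
 */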

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
        struct icc_node *node;
        u32 old_avg, old_peak;
        size_t i;
        int ret;

        if (!path || !path->num_nodes)
                return 0;

        mutex_lock(&icc_lock);

        old_avg = path->reqs[0].avg_bw;
        old_peak = path->reqs[0].peak_bw;

        for (i = 0; i < path->num_nodes; i++) {
                node = path->reqs[i].node;

                /* update the consumer request for this path */
                path->reqs[i].avg_bw = avg_bw;
                path->reqs[i].peak_bw = peak_bw;

                /* aggregate requests for this node */
                aggregate_requests(node);

                trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
        }

        ret = apply_constraints(path);
        if (ret) {
                pr_debug("interconnect: error applying constraints (%d)\n",
                         ret);

                for (i = 0; i < path->num_nodes; i++) {
                        node = path->reqs[i].node;
                        path->reqs[i].avg_bw = old_avg;
                        path->reqs[i].peak_bw = old_peak;
                        aggregate_requests(node);
                }
                apply_constraints(path);
        }

        mutex_unlock(&icc_lock);

        trace_icc_set_bw_end(path, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
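
/*
 * Example (illustrative sketch): both bandwidth arguments are in kilobytes
 * per second, so a request for roughly 100 MB/s average and 200 MB/s peak
 * on a previously acquired path looks like:
 *
 *      ret = icc_set_bw(path, 100000, 200000);
 *      if (ret)
 *              dev_err(dev, "failed to set interconnect bandwidth\n");
 *
 * Passing 0 for both values drops this consumer's request while keeping
 * the path handle around for later use.
 */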

/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
        struct icc_node *src, *dst;
        struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

        mutex_lock(&icc_lock);

        src = node_find(src_id);
        if (!src)
                goto out;

        dst = node_find(dst_id);
        if (!dst)
                goto out;

        path = path_find(dev, src, dst);
        if (IS_ERR(path)) {
                dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
                goto out;
        }

        path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
out:
        mutex_unlock(&icc_lock);
        return path;
}
EXPORT_SYMBOL_GPL(icc_get);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
        struct icc_node *node;
        size_t i;
        int ret;

        if (!path || WARN_ON(IS_ERR(path)))
                return;

        ret = icc_set_bw(path, 0, 0);
        if (ret)
                pr_err("%s: error (%d)\n", __func__, ret);

        mutex_lock(&icc_lock);
        for (i = 0; i < path->num_nodes; i++) {
                node = path->reqs[i].node;
                hlist_del(&path->reqs[i].req_node);
                if (!WARN_ON(!node->provider->users))
                        node->provider->users--;
        }
        mutex_unlock(&icc_lock);

        kfree_const(path->name);
        kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
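
/*
 * Example (illustrative sketch): a consumer normally releases its path in
 * the driver remove (or error) path; icc_put() already zeroes the request
 * before dropping it, so no explicit icc_set_bw(path, 0, 0) is required:
 *
 *      icc_put(path);
 *      path = NULL;
 */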

static struct icc_node *icc_node_create_nolock(int id)
{
        struct icc_node *node;

        /* check if node already exists */
        node = node_find(id);
        if (node)
                return node;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return ERR_PTR(-ENOMEM);

        id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
        if (id < 0) {
                WARN(1, "%s: couldn't get idr\n", __func__);
                kfree(node);
                return ERR_PTR(id);
        }

        node->id = id;

        return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
        struct icc_node *node;

        mutex_lock(&icc_lock);

        node = icc_node_create_nolock(id);

        mutex_unlock(&icc_lock);

        return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
        struct icc_node *node;

        mutex_lock(&icc_lock);

        node = node_find(id);
        if (node) {
                idr_remove(&icc_idr, node->id);
                WARN_ON(!hlist_empty(&node->req_list));
        }

        mutex_unlock(&icc_lock);

        kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: pointer to the source node
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and when the actual provider driver is probed, the rest of the node
 * data is filled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
        struct icc_node *dst;
        struct icc_node **new;
        int ret = 0;

        if (!node->provider)
                return -EINVAL;

        mutex_lock(&icc_lock);

        dst = node_find(dst_id);
        if (!dst) {
                dst = icc_node_create_nolock(dst_id);

                if (IS_ERR(dst)) {
                        ret = PTR_ERR(dst);
                        goto out;
                }
        }

        new = krealloc(node->links,
                       (node->num_links + 1) * sizeof(*node->links),
                       GFP_KERNEL);
        if (!new) {
                ret = -ENOMEM;
                goto out;
        }

        node->links = new;
        node->links[node->num_links++] = dst;

out:
        mutex_unlock(&icc_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);
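
/*
 * Example (illustrative sketch): a provider driver typically builds its
 * topology at probe time by creating a node, attaching it to the provider
 * and linking it to its peers. The MASTER_FOO/SLAVE_BAR ids and the node
 * name below are hypothetical:
 *
 *      node = icc_node_create(MASTER_FOO);
 *      if (IS_ERR(node))
 *              return PTR_ERR(node);
 *
 *      node->name = "mas_foo";
 *      icc_node_add(node, provider);
 *      icc_link_create(node, SLAVE_BAR);
 */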

/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
        struct icc_node **new;
        size_t slot;
        int ret = 0;

        if (IS_ERR_OR_NULL(src))
                return -EINVAL;

        if (IS_ERR_OR_NULL(dst))
                return -EINVAL;

        mutex_lock(&icc_lock);

        for (slot = 0; slot < src->num_links; slot++)
                if (src->links[slot] == dst)
                        break;

        if (WARN_ON(slot == src->num_links)) {
                ret = -ENXIO;
                goto out;
        }

        src->links[slot] = src->links[--src->num_links];

        new = krealloc(src->links, src->num_links * sizeof(*src->links),
                       GFP_KERNEL);
        if (new)
                src->links = new;

out:
        mutex_unlock(&icc_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
        mutex_lock(&icc_lock);

        node->provider = provider;
        list_add_tail(&node->node_list, &provider->nodes);

        mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
        mutex_lock(&icc_lock);

        list_del(&node->node_list);

        mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_nodes_remove() - remove all previously added nodes from provider
 * @provider: the interconnect provider we are removing nodes from
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_nodes_remove(struct icc_provider *provider)
{
        struct icc_node *n, *tmp;

        if (WARN_ON(IS_ERR_OR_NULL(provider)))
                return -EINVAL;

        list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
                icc_node_del(n);
                icc_node_destroy(n->id);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);

/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider that will be added into topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
        if (WARN_ON(!provider->set))
                return -EINVAL;
        if (WARN_ON(!provider->xlate))
                return -EINVAL;

        mutex_lock(&icc_lock);

        INIT_LIST_HEAD(&provider->nodes);
        list_add_tail(&provider->provider_list, &icc_providers);

        mutex_unlock(&icc_lock);

        dev_dbg(provider->dev, "interconnect provider added to topology\n");

        return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);

/**
 * icc_provider_del() - delete previously added interconnect provider
 * @provider: the interconnect provider that will be removed from topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_del(struct icc_provider *provider)
{
        mutex_lock(&icc_lock);
        if (provider->users) {
                pr_warn("interconnect provider still has %d users\n",
                        provider->users);
                mutex_unlock(&icc_lock);
                return -EBUSY;
        }

        if (!list_empty(&provider->nodes)) {
                pr_warn("interconnect provider still has nodes\n");
                mutex_unlock(&icc_lock);
                return -EBUSY;
        }

        list_del(&provider->provider_list);
        mutex_unlock(&icc_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);
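
/*
 * Example (illustrative sketch): the usual provider remove path mirrors
 * probe in reverse order, dropping the nodes first so that the provider's
 * node list is empty by the time the provider itself is deleted:
 *
 *      icc_nodes_remove(provider);
 *      icc_provider_del(provider);
 */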

static int __init icc_init(void)
{
        icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
        debugfs_create_file("interconnect_summary", 0444,
                            icc_debugfs_dir, NULL, &icc_summary_fops);
        return 0;
}

static void __exit icc_exit(void)
{
        debugfs_remove_recursive(icc_debugfs_dir);
}
module_init(icc_init);
module_exit(icc_exit);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");