Merge branches 'pm-devfreq', 'pm-qos', 'pm-tools' and 'pm-docs'
[linux-2.6-block.git] / drivers / firmware / arm_scmi / perf.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Performance Protocol
4  *
5  * Copyright (C) 2018-2022 ARM Ltd.
6  */
7
8 #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
9
10 #include <linux/bits.h>
11 #include <linux/of.h>
12 #include <linux/io.h>
13 #include <linux/io-64-nonatomic-hi-lo.h>
14 #include <linux/module.h>
15 #include <linux/platform_device.h>
16 #include <linux/pm_opp.h>
17 #include <linux/scmi_protocol.h>
18 #include <linux/sort.h>
19
20 #include "protocols.h"
21 #include "notify.h"
22
23 #define MAX_OPPS                16
24
/* SCMI Performance protocol message IDs used by this driver. */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};
37
/* One operating performance point, in CPU-native byte order. */
struct scmi_opp {
	u32 perf;		/* abstract performance level */
	u32 power;		/* power cost (scale per scmi_power_scale) */
	u32 trans_latency_us;	/* worst-case transition latency, microseconds */
};
43
/* Wire response for PROTOCOL_ATTRIBUTES (little-endian on the wire). */
struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
/* Power scale reported by the platform; MICROWATT bit is v3.0+ only. */
#define POWER_SCALE_IN_MILLIWATT(x)     ((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x)     ((x) & BIT(1))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};
53
/* Wire response for PERF_DOMAIN_ATTRIBUTES. */
struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
/* Capability bits extracted from @flags (after le32_to_cpu). */
#define SUPPORTS_SET_LIMITS(x)          ((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)        ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)   ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)   ((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)   ((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x)      ((x) & BIT(26))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
67
/* Request payload for PERF_DESCRIBE_LEVELS (paginated by @level_index). */
struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;	/* number of OPPs already returned */
};

/* Request payload for PERF_LIMITS_SET. */
struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

/* Response payload for PERF_LIMITS_GET. */
struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

/* Request payload for PERF_LEVEL_SET. */
struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

/* Request payload for PERF_NOTIFY_LIMITS / PERF_NOTIFY_LEVEL. */
struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;	/* BIT(0) set to enable notifications */
};

/* Notification payload: performance limits changed. */
struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

/* Notification payload: performance level changed. */
struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};
106
/*
 * Wire response for PERF_DESCRIBE_LEVELS: a counted, possibly partial
 * batch of OPP descriptors (flexible array, num_returned entries).
 */
struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};
117
/* Request payload for PERF_DESCRIBE_FASTCHANNEL. */
struct scmi_perf_get_fc_info {
	__le32 domain;
	__le32 message_id;	/* which perf command the fastchannel serves */
};

/* Wire response for PERF_DESCRIBE_FASTCHANNEL. */
struct scmi_msg_resp_perf_desc_fc {
	__le32 attr;
#define SUPPORTS_DOORBELL(x)            ((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)           FIELD_GET(GENMASK(2, 1), (x))
	__le32 rate_limit;
	__le32 chan_addr_low;
	__le32 chan_addr_high;
	__le32 chan_size;
	__le32 db_addr_low;
	__le32 db_addr_high;
	__le32 db_set_lmask;
	__le32 db_set_hmask;
	__le32 db_preserve_lmask;
	__le32 db_preserve_hmask;
};
138
/* Ioremapped doorbell register: write (old & mask) | set to ring it. */
struct scmi_fc_db_info {
	int width;		/* register width in bytes: 1, 2, 4 or 8 */
	u64 set;		/* bits to OR in when ringing */
	u64 mask;		/* bits of the current value to preserve */
	void __iomem *addr;
};

/* Per-domain fastchannel mappings; NULL entries mean "use messaging". */
struct scmi_fc_info {
	void __iomem *level_set_addr;
	void __iomem *limit_set_addr;
	void __iomem *level_get_addr;
	void __iomem *limit_get_addr;
	struct scmi_fc_db_info *level_set_db;
	struct scmi_fc_db_info *limit_set_db;
};
154
/* Cached attributes and OPP table of one performance domain. */
struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	/* Hz per unit of perf level; see scmi_perf_domain_attributes_get() */
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];	/* sorted ascending by perf */
	struct scmi_fc_info *fc_info;	/* NULL if no fastchannels */
};

/* Protocol-instance private data, stored via ph->set_priv(). */
struct scmi_perf_info {
	u32 version;
	int num_domains;
	enum scmi_power_scale power_scale;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;	/* array of num_domains entries */
};
178
/* Maps notification event index (evt_id) to its enable command. */
static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};
183
/*
 * Query PROTOCOL_ATTRIBUTES and fill @pi with the number of domains,
 * the power scale and the shared-statistics region address/size.
 * Returns 0 on success or a negative error from the transport.
 */
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);

		if (POWER_SCALE_IN_MILLIWATT(flags))
			pi->power_scale = SCMI_POWER_MILLIWATTS;
		/* The microwatt scale bit only exists from protocol v3.0 */
		if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
			if (POWER_SCALE_IN_MICROWATT(flags))
				pi->power_scale = SCMI_POWER_MICROWATTS;

		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}
218
/*
 * Query PERF_DOMAIN_ATTRIBUTES for @domain and populate @dom_info:
 * capability flags, sustained frequency/level, the kHz multiplier and
 * the domain name (extended name fetched separately when supported).
 * Returns 0 on success or a negative error from the transport.
 */
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				u32 domain, struct perf_dom_info *dom_info,
				u32 version)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				     sizeof(domain), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		else
			/* Hz per perf-level unit */
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 * Note: flags is only valid here when ret == 0 above.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
	    SUPPORTS_EXTENDED_NAMES(flags))
		ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain,
					    dom_info->name, SCMI_MAX_STR_SIZE);

	return ret;
}
274
275 static int opp_cmp_func(const void *opp1, const void *opp2)
276 {
277         const struct scmi_opp *t1 = opp1, *t2 = opp2;
278
279         return t1->perf - t2->perf;
280 }
281
/* Private context threaded through the DESCRIBE_LEVELS iterator. */
struct scmi_perf_ipriv {
	u32 domain;
	struct perf_dom_info *perf_dom;	/* destination for parsed OPPs */
};
286
287 static void iter_perf_levels_prepare_message(void *message,
288                                              unsigned int desc_index,
289                                              const void *priv)
290 {
291         struct scmi_msg_perf_describe_levels *msg = message;
292         const struct scmi_perf_ipriv *p = priv;
293
294         msg->domain = cpu_to_le32(p->domain);
295         /* Set the number of OPPs to be skipped/already read */
296         msg->level_index = cpu_to_le32(desc_index);
297 }
298
299 static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
300                                          const void *response, void *priv)
301 {
302         const struct scmi_msg_resp_perf_describe_levels *r = response;
303
304         st->num_returned = le16_to_cpu(r->num_returned);
305         st->num_remaining = le16_to_cpu(r->num_remaining);
306
307         return 0;
308 }
309
/*
 * Decode one OPP descriptor from the current batch into the domain's
 * OPP table and bump opp_count. The iterator framework bounds the total
 * number of entries by MAX_OPPS (passed to iter_response_init below).
 */
static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
				  const void *response,
				  struct scmi_iterator_state *st, void *priv)
{
	struct scmi_opp *opp;
	const struct scmi_msg_resp_perf_describe_levels *r = response;
	struct scmi_perf_ipriv *p = priv;

	/* desc_index counts prior batches, loop_idx indexes this batch */
	opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
	opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
	p->perf_dom->opp_count++;

	dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
		opp->perf, opp->power, opp->trans_latency_us);

	return 0;
}
331
/*
 * Enumerate all OPPs of @domain via the multi-part PERF_DESCRIBE_LEVELS
 * command and leave perf_dom->opp sorted by ascending perf level.
 * Returns 0 on success or a negative error.
 */
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_perf_levels_prepare_message,
		.update_state = iter_perf_levels_update_state,
		.process_response = iter_perf_levels_process_response,
	};
	struct scmi_perf_ipriv ppriv = {
		.domain = domain,
		.perf_dom = perf_dom,
	};

	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
					    PERF_DESCRIBE_LEVELS,
					    sizeof(struct scmi_msg_perf_describe_levels),
					    &ppriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	/* Platforms may report OPPs in any order; keep the table sorted */
	if (perf_dom->opp_count)
		sort(perf_dom->opp, perf_dom->opp_count,
		     sizeof(struct scmi_opp), opp_cmp_func, NULL);

	return ret;
}
365
/*
 * Ring a fastchannel doorbell of width @w bits: read-modify-write that
 * preserves the bits selected by db->mask and ORs in db->set.
 * Expects a local 'struct scmi_fc_db_info *db' in the expansion scope.
 */
#define SCMI_PERF_FC_RING_DB(w)                         \
do {                                                    \
	u##w val = 0;                                   \
							\
	if (db->mask)                                   \
		val = ioread##w(db->addr) & db->mask;   \
	iowrite##w((u##w)db->set | val, db->addr);      \
} while (0)
374
/*
 * Ring the doorbell described by @db, dispatching on register width.
 * Silently does nothing when no doorbell was set up (best effort).
 */
static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
{
	if (!db || !db->addr)
		return;

	if (db->width == 1)
		SCMI_PERF_FC_RING_DB(8);
	else if (db->width == 2)
		SCMI_PERF_FC_RING_DB(16);
	else if (db->width == 4)
		SCMI_PERF_FC_RING_DB(32);
	else /* db->width == 8 */
#ifdef CONFIG_64BIT
		SCMI_PERF_FC_RING_DB(64);
#else
	/* 32-bit kernels: emulate the 64-bit access as two 32-bit halves */
	{
		u64 val = 0;

		if (db->mask)
			val = ioread64_hi_lo(db->addr) & db->mask;
		iowrite64_hi_lo(db->set | val, db->addr);
	}
#endif
}
399
400 static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
401                                    u32 domain, u32 max_perf, u32 min_perf)
402 {
403         int ret;
404         struct scmi_xfer *t;
405         struct scmi_perf_set_limits *limits;
406
407         ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
408                                       sizeof(*limits), 0, &t);
409         if (ret)
410                 return ret;
411
412         limits = t->tx.buf;
413         limits->domain = cpu_to_le32(domain);
414         limits->max_level = cpu_to_le32(max_perf);
415         limits->min_level = cpu_to_le32(min_perf);
416
417         ret = ph->xops->do_xfer(ph, t);
418
419         ph->xops->xfer_put(ph, t);
420         return ret;
421 }
422
423 static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
424                                 u32 domain, u32 max_perf, u32 min_perf)
425 {
426         struct scmi_perf_info *pi = ph->get_priv(ph);
427         struct perf_dom_info *dom = pi->dom_info + domain;
428
429         if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
430                 return -EINVAL;
431
432         if (dom->fc_info && dom->fc_info->limit_set_addr) {
433                 iowrite32(max_perf, dom->fc_info->limit_set_addr);
434                 iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
435                 scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
436                 return 0;
437         }
438
439         return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf);
440 }
441
442 static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
443                                    u32 domain, u32 *max_perf, u32 *min_perf)
444 {
445         int ret;
446         struct scmi_xfer *t;
447         struct scmi_perf_get_limits *limits;
448
449         ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
450                                       sizeof(__le32), 0, &t);
451         if (ret)
452                 return ret;
453
454         put_unaligned_le32(domain, t->tx.buf);
455
456         ret = ph->xops->do_xfer(ph, t);
457         if (!ret) {
458                 limits = t->rx.buf;
459
460                 *max_perf = le32_to_cpu(limits->max_level);
461                 *min_perf = le32_to_cpu(limits->min_level);
462         }
463
464         ph->xops->xfer_put(ph, t);
465         return ret;
466 }
467
468 static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
469                                 u32 domain, u32 *max_perf, u32 *min_perf)
470 {
471         struct scmi_perf_info *pi = ph->get_priv(ph);
472         struct perf_dom_info *dom = pi->dom_info + domain;
473
474         if (dom->fc_info && dom->fc_info->limit_get_addr) {
475                 *max_perf = ioread32(dom->fc_info->limit_get_addr);
476                 *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
477                 return 0;
478         }
479
480         return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf);
481 }
482
483 static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
484                                   u32 domain, u32 level, bool poll)
485 {
486         int ret;
487         struct scmi_xfer *t;
488         struct scmi_perf_set_level *lvl;
489
490         ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
491         if (ret)
492                 return ret;
493
494         t->hdr.poll_completion = poll;
495         lvl = t->tx.buf;
496         lvl->domain = cpu_to_le32(domain);
497         lvl->level = cpu_to_le32(level);
498
499         ret = ph->xops->do_xfer(ph, t);
500
501         ph->xops->xfer_put(ph, t);
502         return ret;
503 }
504
505 static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
506                                u32 domain, u32 level, bool poll)
507 {
508         struct scmi_perf_info *pi = ph->get_priv(ph);
509         struct perf_dom_info *dom = pi->dom_info + domain;
510
511         if (dom->fc_info && dom->fc_info->level_set_addr) {
512                 iowrite32(level, dom->fc_info->level_set_addr);
513                 scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
514                 return 0;
515         }
516
517         return scmi_perf_mb_level_set(ph, domain, level, poll);
518 }
519
520 static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
521                                   u32 domain, u32 *level, bool poll)
522 {
523         int ret;
524         struct scmi_xfer *t;
525
526         ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
527                                      sizeof(u32), sizeof(u32), &t);
528         if (ret)
529                 return ret;
530
531         t->hdr.poll_completion = poll;
532         put_unaligned_le32(domain, t->tx.buf);
533
534         ret = ph->xops->do_xfer(ph, t);
535         if (!ret)
536                 *level = get_unaligned_le32(t->rx.buf);
537
538         ph->xops->xfer_put(ph, t);
539         return ret;
540 }
541
542 static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
543                                u32 domain, u32 *level, bool poll)
544 {
545         struct scmi_perf_info *pi = ph->get_priv(ph);
546         struct perf_dom_info *dom = pi->dom_info + domain;
547
548         if (dom->fc_info && dom->fc_info->level_get_addr) {
549                 *level = ioread32(dom->fc_info->level_get_addr);
550                 return 0;
551         }
552
553         return scmi_perf_mb_level_get(ph, domain, level, poll);
554 }
555
556 static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
557                                          u32 domain, int message_id,
558                                          bool enable)
559 {
560         int ret;
561         struct scmi_xfer *t;
562         struct scmi_perf_notify_level_or_limits *notify;
563
564         ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
565         if (ret)
566                 return ret;
567
568         notify = t->tx.buf;
569         notify->domain = cpu_to_le32(domain);
570         notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
571
572         ret = ph->xops->do_xfer(ph, t);
573
574         ph->xops->xfer_put(ph, t);
575         return ret;
576 }
577
578 static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
579 {
580         if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
581                 return true;
582         if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
583                 return true;
584         return false;
585 }
586
587 static void
588 scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
589                          u32 message_id, void __iomem **p_addr,
590                          struct scmi_fc_db_info **p_db)
591 {
592         int ret;
593         u32 flags;
594         u64 phys_addr;
595         u8 size;
596         void __iomem *addr;
597         struct scmi_xfer *t;
598         struct scmi_fc_db_info *db;
599         struct scmi_perf_get_fc_info *info;
600         struct scmi_msg_resp_perf_desc_fc *resp;
601
602         if (!p_addr)
603                 return;
604
605         ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
606                                       sizeof(*info), sizeof(*resp), &t);
607         if (ret)
608                 return;
609
610         info = t->tx.buf;
611         info->domain = cpu_to_le32(domain);
612         info->message_id = cpu_to_le32(message_id);
613
614         ret = ph->xops->do_xfer(ph, t);
615         if (ret)
616                 goto err_xfer;
617
618         resp = t->rx.buf;
619         flags = le32_to_cpu(resp->attr);
620         size = le32_to_cpu(resp->chan_size);
621         if (!scmi_perf_fc_size_is_valid(message_id, size))
622                 goto err_xfer;
623
624         phys_addr = le32_to_cpu(resp->chan_addr_low);
625         phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
626         addr = devm_ioremap(ph->dev, phys_addr, size);
627         if (!addr)
628                 goto err_xfer;
629         *p_addr = addr;
630
631         if (p_db && SUPPORTS_DOORBELL(flags)) {
632                 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
633                 if (!db)
634                         goto err_xfer;
635
636                 size = 1 << DOORBELL_REG_WIDTH(flags);
637                 phys_addr = le32_to_cpu(resp->db_addr_low);
638                 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
639                 addr = devm_ioremap(ph->dev, phys_addr, size);
640                 if (!addr)
641                         goto err_xfer;
642
643                 db->addr = addr;
644                 db->width = size;
645                 db->set = le32_to_cpu(resp->db_set_lmask);
646                 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
647                 db->mask = le32_to_cpu(resp->db_preserve_lmask);
648                 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
649                 *p_db = db;
650         }
651 err_xfer:
652         ph->xops->xfer_put(ph, t);
653 }
654
/*
 * Probe all four perf fastchannels for @domain. Individual discovery
 * failures just leave the corresponding pointer NULL; users check each
 * address before relying on the fastchannel path.
 */
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
				     u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

	fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
				 &fc->level_set_addr, &fc->level_set_db);
	scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
				 &fc->level_get_addr, NULL);
	scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
				 &fc->limit_set_addr, &fc->limit_set_db);
	scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
				 &fc->limit_get_addr, NULL);
	*p_fc = fc;
}
674
/* Device specific ops */

/*
 * Resolve the perf domain id of @dev from the first cell of its
 * "clocks" DT phandle (the SCMI cpufreq binding encodes it there).
 * Returns the domain id or -EINVAL if the phandle cannot be parsed.
 */
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}
686
/*
 * Register every discovered OPP of @dev's perf domain with the OPP
 * framework, converting perf levels to Hz via mult_factor. On a
 * mid-way failure all previously added OPPs are rolled back.
 * Returns 0 on success or a negative error.
 */
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = ph->get_priv(ph);

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			/* Unwind the OPPs registered so far */
			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}
718
719 static int
720 scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
721                                  struct device *dev)
722 {
723         struct perf_dom_info *dom;
724         struct scmi_perf_info *pi = ph->get_priv(ph);
725         int domain = scmi_dev_domain_id(dev);
726
727         if (domain < 0)
728                 return domain;
729
730         dom = pi->dom_info + domain;
731         /* uS to nS */
732         return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
733 }
734
/*
 * Set the frequency of @domain by converting @freq (Hz) to a perf
 * level via the domain's mult_factor and delegating to level_set.
 */
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long freq, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
}
743
/*
 * Read the current frequency of @domain in Hz: fetch the perf level
 * and scale it by the domain's mult_factor.
 * Returns 0 on success or a negative error; *freq untouched on error.
 */
static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	ret = scmi_perf_level_get(ph, domain, &level, poll);
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}
758
759 static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
760                                    u32 domain, unsigned long *freq,
761                                    unsigned long *power)
762 {
763         struct scmi_perf_info *pi = ph->get_priv(ph);
764         struct perf_dom_info *dom;
765         unsigned long opp_freq;
766         int idx, ret = -EINVAL;
767         struct scmi_opp *opp;
768
769         dom = pi->dom_info + domain;
770         if (!dom)
771                 return -EIO;
772
773         for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
774                 opp_freq = opp->perf * dom->mult_factor;
775                 if (opp_freq < *freq)
776                         continue;
777
778                 *freq = opp_freq;
779                 *power = opp->power;
780                 ret = 0;
781                 break;
782         }
783
784         return ret;
785 }
786
787 static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
788                                       struct device *dev)
789 {
790         struct perf_dom_info *dom;
791         struct scmi_perf_info *pi = ph->get_priv(ph);
792
793         dom = pi->dom_info + scmi_dev_domain_id(dev);
794
795         return dom->fc_info && dom->fc_info->level_set_addr;
796 }
797
/* Report the power unit (bogo/milli/micro-watts) advertised by the platform. */
static enum scmi_power_scale
scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	return pi->power_scale;
}
805
/* Operations exported to SCMI perf users (e.g. the cpufreq driver). */
static const struct scmi_perf_proto_ops perf_proto_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.power_scale_get = scmi_power_scale_get,
};
820
821 static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
822                                         u8 evt_id, u32 src_id, bool enable)
823 {
824         int ret, cmd_id;
825
826         if (evt_id >= ARRAY_SIZE(evt_2_cmd))
827                 return -EINVAL;
828
829         cmd_id = evt_2_cmd[evt_id];
830         ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
831         if (ret)
832                 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
833                          evt_id, src_id, ret);
834
835         return ret;
836 }
837
/*
 * Decode a raw notification payload into the typed report consumed by
 * listeners, setting *src_id to the originating domain. Returns the
 * filled report, or NULL for an unknown event or a size mismatch
 * (which guards against truncated/oversized payloads).
 */
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	void *rep = NULL;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	default:
		break;
	}

	return rep;
}
885
886 static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
887 {
888         struct scmi_perf_info *pi = ph->get_priv(ph);
889
890         if (!pi)
891                 return -EINVAL;
892
893         return pi->num_domains;
894 }
895
/* Events this protocol can deliver, with payload/report sizing. */
static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

/* Callbacks the SCMI notification core uses for this protocol. */
static const struct scmi_event_ops perf_event_ops = {
	.get_num_sources = scmi_perf_get_num_sources,
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

/* Notification descriptor registered with the core at init time. */
static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};
921
/*
 * Protocol instance init: read protocol/domain attributes, enumerate
 * each domain's OPPs and probe fastchannels, then publish the state
 * via ph->set_priv(). Per-domain query failures are tolerated (the
 * return values are deliberately ignored) so one bad domain does not
 * abort the whole protocol. All allocations are devm-managed.
 */
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
	int domain, ret;
	u32 version;
	struct scmi_perf_info *pinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	ret = scmi_perf_attributes_get(ph, pinfo);
	if (ret)
		return ret;

	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		scmi_perf_domain_attributes_get(ph, domain, dom, version);
		scmi_perf_describe_levels_get(ph, domain, dom);

		if (dom->perf_fastchannels)
			scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
	}

	pinfo->version = version;

	return ph->set_priv(ph, pinfo);
}
962
/* Protocol descriptor tying together init, user ops and events. */
static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
	.owner = THIS_MODULE,
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
	.events = &perf_protocol_events,
};

/* Generates the module register/unregister entry points for this protocol */
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)