firmware: arm_scmi: Add notification dispatch and delivery
[linux-block.git] / drivers / firmware / arm_scmi / driver.c
CommitLineData
aa4f886f
SH
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * System Control and Management Interface (SCMI) Message Protocol driver
4 *
5 * SCMI Message Protocol is used between the System Control Processor(SCP)
6 * and the Application Processors(AP). The Message Handling Unit(MHU)
7 * provides a mechanism for inter-processor communication between SCP's
8 * Cortex M3 and AP.
9 *
10 * SCP offers control and management of the core/cluster power states,
11 * various power domain DVFS including the core/cluster, certain system
12 * clocks configuration, thermal sensors and many others.
13 *
14 * Copyright (C) 2018 ARM Ltd.
15 */
16
17#include <linux/bitmap.h>
18#include <linux/export.h>
19#include <linux/io.h>
20#include <linux/kernel.h>
d4c3751a 21#include <linux/ktime.h>
aa4f886f
SH
22#include <linux/module.h>
23#include <linux/of_address.h>
24#include <linux/of_device.h>
d4c3751a 25#include <linux/processor.h>
aa4f886f
SH
26#include <linux/slab.h>
27
28#include "common.h"
29
729d3530
LL
30#define CREATE_TRACE_POINTS
31#include <trace/events/scmi.h>
32
aa4f886f
SH
/*
 * SCMI protocol status codes as returned by the platform firmware.
 *
 * WARNING: SCMI_ERR_MAX carries no explicit value, so enum auto-increment
 * makes it evaluate to -9 (one past SCMI_ERR_PROTOCOL == -10), NOT -11.
 * Any range check against it must account for that.
 */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};
47
1baf47c2 48/* List of all SCMI devices active in system */
aa4f886f
SH
49static LIST_HEAD(scmi_list);
50/* Protection for the entire list */
51static DEFINE_MUTEX(scmi_list_mutex);
729d3530
LL
52/* Track the unique id for the transfers for debug & profiling purpose */
53static atomic_t transfer_last_id;
aa4f886f
SH
54
55/**
56 * struct scmi_xfers_info - Structure to manage transfer information
57 *
58 * @xfer_block: Preallocated Message array
59 * @xfer_alloc_table: Bitmap table for allocated messages.
60 * Index of this bitmap table is also used for message
61 * sequence identifier.
62 * @xfer_lock: Protection for message allocation
63 */
64struct scmi_xfers_info {
65 struct scmi_xfer *xfer_block;
66 unsigned long *xfer_alloc_table;
aa4f886f
SH
67 spinlock_t xfer_lock;
68};
69
aa4f886f 70/**
1baf47c2 71 * struct scmi_info - Structure representing a SCMI instance
aa4f886f
SH
72 *
73 * @dev: Device pointer
74 * @desc: SoC description for this instance
b6f20ff8
SH
75 * @version: SCMI revision information containing protocol version,
76 * implementation version and (sub-)vendor identification.
71af05a7 77 * @handle: Instance of SCMI handle to send to clients
38c927fb 78 * @tx_minfo: Universal Transmit Message management info
4ebd8f6d 79 * @rx_minfo: Universal Receive Message management info
3748daf7 80 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
46cc7c28 81 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
1baf47c2 82 * @protocols_imp: List of protocols implemented, currently maximum of
b6f20ff8 83 * MAX_PROTOCOLS_IMP elements allocated by the base protocol
1baf47c2 84 * @node: List head
aa4f886f
SH
85 * @users: Number of users of this instance
86 */
87struct scmi_info {
88 struct device *dev;
89 const struct scmi_desc *desc;
b6f20ff8 90 struct scmi_revision_info version;
aa4f886f 91 struct scmi_handle handle;
38c927fb 92 struct scmi_xfers_info tx_minfo;
4ebd8f6d 93 struct scmi_xfers_info rx_minfo;
907b6d14 94 struct idr tx_idr;
46cc7c28 95 struct idr rx_idr;
b6f20ff8 96 u8 *protocols_imp;
aa4f886f
SH
97 struct list_head node;
98 int users;
99};
100
aa4f886f
SH
101#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
102
aa4f886f
SH
103static const int scmi_linux_errmap[] = {
104 /* better than switch case as long as return value is continuous */
105 0, /* SCMI_SUCCESS */
106 -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
107 -EINVAL, /* SCMI_ERR_PARAM */
108 -EACCES, /* SCMI_ERR_ACCESS */
109 -ENOENT, /* SCMI_ERR_ENTRY */
110 -ERANGE, /* SCMI_ERR_RANGE */
111 -EBUSY, /* SCMI_ERR_BUSY */
112 -ECOMM, /* SCMI_ERR_COMMS */
113 -EIO, /* SCMI_ERR_GENERIC */
114 -EREMOTEIO, /* SCMI_ERR_HARDWARE */
115 -EPROTO, /* SCMI_ERR_PROTOCOL */
116};
117
118static inline int scmi_to_linux_errno(int errno)
119{
120 if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
121 return scmi_linux_errmap[-errno];
122 return -EIO;
123}
124
125/**
126 * scmi_dump_header_dbg() - Helper to dump a message header.
127 *
128 * @dev: Device pointer corresponding to the SCMI entity
129 * @hdr: pointer to header.
130 */
131static inline void scmi_dump_header_dbg(struct device *dev,
132 struct scmi_msg_hdr *hdr)
133{
5b65af8f 134 dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
aa4f886f
SH
135 hdr->id, hdr->seq, hdr->protocol_id);
136}
137
aa4f886f 138/**
1baf47c2 139 * scmi_xfer_get() - Allocate one message
aa4f886f 140 *
1baf47c2 141 * @handle: Pointer to SCMI entity handle
38c927fb 142 * @minfo: Pointer to Tx/Rx Message management info based on channel type
aa4f886f 143 *
5b65af8f 144 * Helper function which is used by various message functions that are
aa4f886f
SH
145 * exposed to clients of this driver for allocating a message traffic event.
146 *
147 * This function can sleep depending on pending requests already in the system
148 * for the SCMI entity. Further, this also holds a spinlock to maintain
149 * integrity of internal data structures.
150 *
151 * Return: 0 if all went fine, else corresponding error.
152 */
38c927fb
SH
153static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
154 struct scmi_xfers_info *minfo)
aa4f886f
SH
155{
156 u16 xfer_id;
157 struct scmi_xfer *xfer;
158 unsigned long flags, bit_pos;
159 struct scmi_info *info = handle_to_scmi_info(handle);
aa4f886f
SH
160
161 /* Keep the locked section as small as possible */
162 spin_lock_irqsave(&minfo->xfer_lock, flags);
163 bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
164 info->desc->max_msg);
165 if (bit_pos == info->desc->max_msg) {
166 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
167 return ERR_PTR(-ENOMEM);
168 }
169 set_bit(bit_pos, minfo->xfer_alloc_table);
170 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
171
172 xfer_id = bit_pos;
173
174 xfer = &minfo->xfer_block[xfer_id];
175 xfer->hdr.seq = xfer_id;
176 reinit_completion(&xfer->done);
729d3530 177 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
aa4f886f
SH
178
179 return xfer;
180}
181
182/**
38c927fb 183 * __scmi_xfer_put() - Release a message
aa4f886f 184 *
38c927fb 185 * @minfo: Pointer to Tx/Rx Message management info based on channel type
1baf47c2 186 * @xfer: message that was reserved by scmi_xfer_get
aa4f886f
SH
187 *
188 * This holds a spinlock to maintain integrity of internal data structures.
189 */
38c927fb
SH
190static void
191__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
aa4f886f
SH
192{
193 unsigned long flags;
aa4f886f
SH
194
195 /*
196 * Keep the locked section as small as possible
197 * NOTE: we might escape with smp_mb and no lock here..
198 * but just be conservative and symmetric.
199 */
200 spin_lock_irqsave(&minfo->xfer_lock, flags);
201 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
202 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
203}
204
4d09852b 205static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
2747a967 206{
5c8a47a5 207 struct scmi_xfer *xfer;
4d09852b
SH
208 struct device *dev = cinfo->dev;
209 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
210 struct scmi_xfers_info *minfo = &info->rx_minfo;
211
212 xfer = scmi_xfer_get(cinfo->handle, minfo);
213 if (IS_ERR(xfer)) {
214 dev_err(dev, "failed to get free message slot (%ld)\n",
215 PTR_ERR(xfer));
87dff4e6 216 info->desc->ops->clear_channel(cinfo);
4d09852b
SH
217 return;
218 }
219
220 unpack_scmi_header(msg_hdr, &xfer->hdr);
221 scmi_dump_header_dbg(dev, &xfer->hdr);
222 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
223 xfer);
224
225 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
226 xfer->hdr.protocol_id, xfer->hdr.seq,
227 MSG_TYPE_NOTIFICATION);
58ecdf03 228
4d09852b
SH
229 __scmi_xfer_put(minfo, xfer);
230
87dff4e6 231 info->desc->ops->clear_channel(cinfo);
4d09852b
SH
232}
233
234static void scmi_handle_response(struct scmi_chan_info *cinfo,
235 u16 xfer_id, u8 msg_type)
236{
237 struct scmi_xfer *xfer;
238 struct device *dev = cinfo->dev;
239 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
240 struct scmi_xfers_info *minfo = &info->tx_minfo;
2747a967
SH
241
242 /* Are we even expecting this? */
243 if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
244 dev_err(dev, "message for %d is not expected!\n", xfer_id);
b37f5cc8 245 info->desc->ops->clear_channel(cinfo);
2747a967
SH
246 return;
247 }
248
249 xfer = &minfo->xfer_block[xfer_id];
c5bceb98
CM
250 /*
251 * Even if a response was indeed expected on this slot at this point,
252 * a buggy platform could wrongly reply feeding us an unexpected
253 * delayed response we're not prepared to handle: bail-out safely
254 * blaming firmware.
255 */
256 if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
257 dev_err(dev,
258 "Delayed Response for %d not expected! Buggy F/W ?\n",
259 xfer_id);
260 info->desc->ops->clear_channel(cinfo);
261 /* It was unexpected, so nobody will clear the xfer if not us */
262 __scmi_xfer_put(minfo, xfer);
263 return;
264 }
2747a967
SH
265
266 scmi_dump_header_dbg(dev, &xfer->hdr);
267
5c8a47a5 268 info->desc->ops->fetch_response(cinfo, xfer);
58ecdf03 269
729d3530
LL
270 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
271 xfer->hdr.protocol_id, xfer->hdr.seq,
272 msg_type);
273
d04fb2b2
CM
274 if (msg_type == MSG_TYPE_DELAYED_RESP) {
275 info->desc->ops->clear_channel(cinfo);
58ecdf03 276 complete(xfer->async_done);
d04fb2b2 277 } else {
58ecdf03 278 complete(&xfer->done);
d04fb2b2 279 }
2747a967
SH
280}
281
4d09852b
SH
282/**
283 * scmi_rx_callback() - callback for receiving messages
284 *
285 * @cinfo: SCMI channel info
286 * @msg_hdr: Message header
287 *
288 * Processes one received message to appropriate transfer information and
289 * signals completion of the transfer.
290 *
291 * NOTE: This function will be invoked in IRQ context, hence should be
292 * as optimal as possible.
293 */
294void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
295{
296 u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
297 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
298
299 switch (msg_type) {
300 case MSG_TYPE_NOTIFICATION:
301 scmi_handle_notification(cinfo, msg_hdr);
302 break;
303 case MSG_TYPE_COMMAND:
304 case MSG_TYPE_DELAYED_RESP:
305 scmi_handle_response(cinfo, xfer_id, msg_type);
306 break;
307 default:
308 WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
309 break;
310 }
311}
312
38c927fb
SH
313/**
314 * scmi_xfer_put() - Release a transmit message
315 *
316 * @handle: Pointer to SCMI entity handle
317 * @xfer: message that was reserved by scmi_xfer_get
318 */
319void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
320{
321 struct scmi_info *info = handle_to_scmi_info(handle);
322
323 __scmi_xfer_put(&info->tx_minfo, xfer);
324}
325
d4c3751a
SH
326#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
327
5c8a47a5 328static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
d4c3751a
SH
329 struct scmi_xfer *xfer, ktime_t stop)
330{
5c8a47a5 331 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
d4c3751a 332
5c8a47a5
VK
333 return info->desc->ops->poll_done(cinfo, xfer) ||
334 ktime_after(ktime_get(), stop);
d4c3751a
SH
335}
336
aa4f886f
SH
337/**
338 * scmi_do_xfer() - Do one transfer
339 *
1baf47c2 340 * @handle: Pointer to SCMI entity handle
aa4f886f
SH
341 * @xfer: Transfer to initiate and wait for response
342 *
343 * Return: -ETIMEDOUT in case of no response, if transmit error,
1baf47c2
SH
344 * return corresponding error, else if all goes well,
345 * return 0.
aa4f886f
SH
346 */
347int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
348{
349 int ret;
350 int timeout;
351 struct scmi_info *info = handle_to_scmi_info(handle);
352 struct device *dev = info->dev;
907b6d14
SH
353 struct scmi_chan_info *cinfo;
354
355 cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
356 if (unlikely(!cinfo))
357 return -EINVAL;
aa4f886f 358
729d3530
LL
359 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
360 xfer->hdr.protocol_id, xfer->hdr.seq,
361 xfer->hdr.poll_completion);
362
5c8a47a5 363 ret = info->desc->ops->send_message(cinfo, xfer);
aa4f886f 364 if (ret < 0) {
5c8a47a5 365 dev_dbg(dev, "Failed to send message %d\n", ret);
aa4f886f
SH
366 return ret;
367 }
368
d4c3751a
SH
369 if (xfer->hdr.poll_completion) {
370 ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
371
fbc4d81a 372 spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
d4c3751a
SH
373
374 if (ktime_before(ktime_get(), stop))
5c8a47a5 375 info->desc->ops->fetch_response(cinfo, xfer);
d4c3751a
SH
376 else
377 ret = -ETIMEDOUT;
378 } else {
379 /* And we wait for the response. */
380 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
381 if (!wait_for_completion_timeout(&xfer->done, timeout)) {
5c8a47a5 382 dev_err(dev, "timed out in resp(caller: %pS)\n",
d4c3751a
SH
383 (void *)_RET_IP_);
384 ret = -ETIMEDOUT;
385 }
aa4f886f 386 }
d4c3751a
SH
387
388 if (!ret && xfer->hdr.status)
389 ret = scmi_to_linux_errno(xfer->hdr.status);
390
5c8a47a5
VK
391 if (info->desc->ops->mark_txdone)
392 info->desc->ops->mark_txdone(cinfo, ret);
aa4f886f 393
729d3530 394 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
bad0d73b 395 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
729d3530 396
aa4f886f
SH
397 return ret;
398}
399
58ecdf03
SH
400#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
401
402/**
403 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
404 * response is received
405 *
406 * @handle: Pointer to SCMI entity handle
407 * @xfer: Transfer to initiate and wait for response
408 *
409 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
410 * return corresponding error, else if all goes well, return 0.
411 */
412int scmi_do_xfer_with_response(const struct scmi_handle *handle,
413 struct scmi_xfer *xfer)
414{
415 int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
416 DECLARE_COMPLETION_ONSTACK(async_response);
417
418 xfer->async_done = &async_response;
419
420 ret = scmi_do_xfer(handle, xfer);
421 if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
422 ret = -ETIMEDOUT;
423
424 xfer->async_done = NULL;
425 return ret;
426}
427
aa4f886f 428/**
38c927fb 429 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
aa4f886f 430 *
1baf47c2 431 * @handle: Pointer to SCMI entity handle
aa4f886f 432 * @msg_id: Message identifier
1baf47c2 433 * @prot_id: Protocol identifier for the message
aa4f886f
SH
434 * @tx_size: transmit message size
435 * @rx_size: receive message size
436 * @p: pointer to the allocated and initialised message
437 *
14e297b3 438 * This function allocates the message using @scmi_xfer_get and
aa4f886f
SH
439 * initialise the header.
440 *
441 * Return: 0 if all went fine with @p pointing to message, else
442 * corresponding error.
443 */
14e297b3 444int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
aa4f886f
SH
445 size_t tx_size, size_t rx_size, struct scmi_xfer **p)
446{
447 int ret;
448 struct scmi_xfer *xfer;
449 struct scmi_info *info = handle_to_scmi_info(handle);
38c927fb 450 struct scmi_xfers_info *minfo = &info->tx_minfo;
aa4f886f
SH
451 struct device *dev = info->dev;
452
453 /* Ensure we have sane transfer sizes */
454 if (rx_size > info->desc->max_msg_size ||
455 tx_size > info->desc->max_msg_size)
456 return -ERANGE;
457
38c927fb 458 xfer = scmi_xfer_get(handle, minfo);
aa4f886f
SH
459 if (IS_ERR(xfer)) {
460 ret = PTR_ERR(xfer);
461 dev_err(dev, "failed to get free message slot(%d)\n", ret);
462 return ret;
463 }
464
465 xfer->tx.len = tx_size;
466 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
467 xfer->hdr.id = msg_id;
468 xfer->hdr.protocol_id = prot_id;
469 xfer->hdr.poll_completion = false;
470
471 *p = xfer;
1baf47c2 472
aa4f886f
SH
473 return 0;
474}
475
b6f20ff8
SH
476/**
477 * scmi_version_get() - command to get the revision of the SCMI entity
478 *
1baf47c2
SH
479 * @handle: Pointer to SCMI entity handle
480 * @protocol: Protocol identifier for the message
481 * @version: Holds returned version of protocol.
b6f20ff8
SH
482 *
483 * Updates the SCMI information in the internal data structure.
484 *
485 * Return: 0 if all went fine, else return appropriate error.
486 */
487int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
488 u32 *version)
489{
490 int ret;
491 __le32 *rev_info;
492 struct scmi_xfer *t;
493
14e297b3 494 ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
b6f20ff8
SH
495 sizeof(*version), &t);
496 if (ret)
497 return ret;
498
499 ret = scmi_do_xfer(handle, t);
500 if (!ret) {
501 rev_info = t->rx.buf;
502 *version = le32_to_cpu(*rev_info);
503 }
504
14e297b3 505 scmi_xfer_put(handle, t);
b6f20ff8
SH
506 return ret;
507}
508
509void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
510 u8 *prot_imp)
511{
512 struct scmi_info *info = handle_to_scmi_info(handle);
513
514 info->protocols_imp = prot_imp;
515}
516
bc40081d
SH
517static bool
518scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
519{
520 int i;
521 struct scmi_info *info = handle_to_scmi_info(handle);
522
523 if (!info->protocols_imp)
524 return false;
525
526 for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
527 if (info->protocols_imp[i] == prot_id)
528 return true;
529 return false;
530}
531
aa4f886f 532/**
14e297b3 533 * scmi_handle_get() - Get the SCMI handle for a device
aa4f886f
SH
534 *
535 * @dev: pointer to device for which we want SCMI handle
536 *
537 * NOTE: The function does not track individual clients of the framework
1baf47c2 538 * and is expected to be maintained by caller of SCMI protocol library.
aa4f886f
SH
539 * scmi_handle_put must be balanced with successful scmi_handle_get
540 *
541 * Return: pointer to handle if successful, NULL on error
542 */
543struct scmi_handle *scmi_handle_get(struct device *dev)
544{
545 struct list_head *p;
546 struct scmi_info *info;
547 struct scmi_handle *handle = NULL;
548
549 mutex_lock(&scmi_list_mutex);
550 list_for_each(p, &scmi_list) {
551 info = list_entry(p, struct scmi_info, node);
552 if (dev->parent == info->dev) {
553 handle = &info->handle;
554 info->users++;
555 break;
556 }
557 }
558 mutex_unlock(&scmi_list_mutex);
559
560 return handle;
561}
562
563/**
564 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
565 *
566 * @handle: handle acquired by scmi_handle_get
567 *
568 * NOTE: The function does not track individual clients of the framework
1baf47c2 569 * and is expected to be maintained by caller of SCMI protocol library.
aa4f886f
SH
570 * scmi_handle_put must be balanced with successful scmi_handle_get
571 *
572 * Return: 0 is successfully released
573 * if null was passed, it returns -EINVAL;
574 */
575int scmi_handle_put(const struct scmi_handle *handle)
576{
577 struct scmi_info *info;
578
579 if (!handle)
580 return -EINVAL;
581
582 info = handle_to_scmi_info(handle);
583 mutex_lock(&scmi_list_mutex);
584 if (!WARN_ON(!info->users))
585 info->users--;
586 mutex_unlock(&scmi_list_mutex);
587
588 return 0;
589}
590
4ebd8f6d
SH
591static int __scmi_xfer_info_init(struct scmi_info *sinfo,
592 struct scmi_xfers_info *info)
aa4f886f
SH
593{
594 int i;
595 struct scmi_xfer *xfer;
596 struct device *dev = sinfo->dev;
597 const struct scmi_desc *desc = sinfo->desc;
aa4f886f
SH
598
599 /* Pre-allocated messages, no more than what hdr.seq can support */
354b2e36
SH
600 if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
601 dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
602 desc->max_msg, MSG_TOKEN_MAX);
aa4f886f
SH
603 return -EINVAL;
604 }
605
606 info->xfer_block = devm_kcalloc(dev, desc->max_msg,
607 sizeof(*info->xfer_block), GFP_KERNEL);
608 if (!info->xfer_block)
609 return -ENOMEM;
610
611 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
612 sizeof(long), GFP_KERNEL);
613 if (!info->xfer_alloc_table)
614 return -ENOMEM;
615
aa4f886f
SH
616 /* Pre-initialize the buffer pointer to pre-allocated buffers */
617 for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
618 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
619 GFP_KERNEL);
620 if (!xfer->rx.buf)
621 return -ENOMEM;
622
623 xfer->tx.buf = xfer->rx.buf;
624 init_completion(&xfer->done);
625 }
626
627 spin_lock_init(&info->xfer_lock);
628
629 return 0;
630}
631
4ebd8f6d
SH
632static int scmi_xfer_info_init(struct scmi_info *sinfo)
633{
634 int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
635
636 if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
637 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
638
639 return ret;
640}
641
5c8a47a5
VK
642static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
643 int prot_id, bool tx)
aa4f886f 644{
3748daf7 645 int ret, idx;
fbc4d81a 646 struct scmi_chan_info *cinfo;
46cc7c28 647 struct idr *idr;
3748daf7
SH
648
649 /* Transmit channel is first entry i.e. index 0 */
650 idx = tx ? 0 : 1;
46cc7c28 651 idr = tx ? &info->tx_idr : &info->rx_idr;
aa4f886f 652
11040889
SH
653 /* check if already allocated, used for multiple device per protocol */
654 cinfo = idr_find(idr, prot_id);
655 if (cinfo)
656 return 0;
657
5c8a47a5 658 if (!info->desc->ops->chan_available(dev, idx)) {
46cc7c28
SH
659 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
660 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
661 return -EINVAL;
907b6d14
SH
662 goto idr_alloc;
663 }
664
fbc4d81a
SH
665 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
666 if (!cinfo)
667 return -ENOMEM;
668
fbc4d81a
SH
669 cinfo->dev = dev;
670
5c8a47a5
VK
671 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
672 if (ret)
aa4f886f 673 return ret;
aa4f886f 674
907b6d14 675idr_alloc:
46cc7c28 676 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
907b6d14
SH
677 if (ret != prot_id) {
678 dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
679 return ret;
680 }
681
682 cinfo->handle = &info->handle;
aa4f886f
SH
683 return 0;
684}
685
46cc7c28 686static inline int
5c8a47a5 687scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
46cc7c28 688{
5c8a47a5 689 int ret = scmi_chan_setup(info, dev, prot_id, true);
46cc7c28
SH
690
691 if (!ret) /* Rx is optional, hence no error check */
5c8a47a5 692 scmi_chan_setup(info, dev, prot_id, false);
46cc7c28
SH
693
694 return ret;
695}
696
bc40081d
SH
697static inline void
698scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
ee7a9c9f 699 int prot_id, const char *name)
bc40081d
SH
700{
701 struct scmi_device *sdev;
702
ee7a9c9f 703 sdev = scmi_device_create(np, info->dev, prot_id, name);
bc40081d
SH
704 if (!sdev) {
705 dev_err(info->dev, "failed to create %d protocol device\n",
706 prot_id);
707 return;
708 }
709
5c8a47a5 710 if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
907b6d14
SH
711 dev_err(&sdev->dev, "failed to setup transport\n");
712 scmi_device_destroy(sdev);
31c60855 713 return;
907b6d14
SH
714 }
715
bc40081d
SH
716 /* setup handle now as the transport is ready */
717 scmi_set_handle(sdev);
718}
719
9c5c463f
SH
720#define MAX_SCMI_DEV_PER_PROTOCOL 2
721struct scmi_prot_devnames {
722 int protocol_id;
723 char *names[MAX_SCMI_DEV_PER_PROTOCOL];
724};
725
726static struct scmi_prot_devnames devnames[] = {
727 { SCMI_PROTOCOL_POWER, { "genpd" },},
728 { SCMI_PROTOCOL_PERF, { "cpufreq" },},
729 { SCMI_PROTOCOL_CLOCK, { "clocks" },},
730 { SCMI_PROTOCOL_SENSOR, { "hwmon" },},
731 { SCMI_PROTOCOL_RESET, { "reset" },},
732};
733
734static inline void
735scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
736 int prot_id)
737{
738 int loop, cnt;
739
740 for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
741 if (devnames[loop].protocol_id != prot_id)
742 continue;
743
744 for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
745 const char *name = devnames[loop].names[cnt];
746
747 if (name)
748 scmi_create_protocol_device(np, info, prot_id,
749 name);
750 }
751 }
752}
753
aa4f886f
SH
754static int scmi_probe(struct platform_device *pdev)
755{
756 int ret;
757 struct scmi_handle *handle;
758 const struct scmi_desc *desc;
759 struct scmi_info *info;
760 struct device *dev = &pdev->dev;
bc40081d 761 struct device_node *child, *np = dev->of_node;
aa4f886f 762
d9350f21
AP
763 desc = of_device_get_match_data(dev);
764 if (!desc)
765 return -EINVAL;
aa4f886f
SH
766
767 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
768 if (!info)
769 return -ENOMEM;
770
771 info->dev = dev;
772 info->desc = desc;
773 INIT_LIST_HEAD(&info->node);
774
aa4f886f 775 platform_set_drvdata(pdev, info);
907b6d14 776 idr_init(&info->tx_idr);
46cc7c28 777 idr_init(&info->rx_idr);
aa4f886f
SH
778
779 handle = &info->handle;
780 handle->dev = info->dev;
b6f20ff8 781 handle->version = &info->version;
aa4f886f 782
5c8a47a5 783 ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
aa4f886f
SH
784 if (ret)
785 return ret;
786
4ebd8f6d
SH
787 ret = scmi_xfer_info_init(info);
788 if (ret)
789 return ret;
790
b6f20ff8
SH
791 ret = scmi_base_protocol_init(handle);
792 if (ret) {
793 dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
b6f20ff8
SH
794 return ret;
795 }
796
aa4f886f
SH
797 mutex_lock(&scmi_list_mutex);
798 list_add_tail(&info->node, &scmi_list);
799 mutex_unlock(&scmi_list_mutex);
800
bc40081d
SH
801 for_each_available_child_of_node(np, child) {
802 u32 prot_id;
803
804 if (of_property_read_u32(child, "reg", &prot_id))
805 continue;
806
354b2e36
SH
807 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
808 dev_err(dev, "Out of range protocol %d\n", prot_id);
bc40081d
SH
809
810 if (!scmi_is_protocol_implemented(handle, prot_id)) {
811 dev_err(dev, "SCMI protocol %d not implemented\n",
812 prot_id);
813 continue;
814 }
815
9c5c463f 816 scmi_create_protocol_devices(child, info, prot_id);
bc40081d
SH
817 }
818
aa4f886f
SH
819 return 0;
820}
821
/* Drop @id's channel mapping; cinfo itself is device-managed memory */
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}
826
827static int scmi_remove(struct platform_device *pdev)
828{
829 int ret = 0;
830 struct scmi_info *info = platform_get_drvdata(pdev);
831 struct idr *idr = &info->tx_idr;
832
833 mutex_lock(&scmi_list_mutex);
834 if (info->users)
835 ret = -EBUSY;
836 else
837 list_del(&info->node);
838 mutex_unlock(&scmi_list_mutex);
839
840 if (ret)
841 return ret;
842
843 /* Safe to free channels since no more users */
5c8a47a5 844 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
2747a967
SH
845 idr_destroy(&info->tx_idr);
846
46cc7c28 847 idr = &info->rx_idr;
5c8a47a5 848 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
46cc7c28
SH
849 idr_destroy(&info->rx_idr);
850
2747a967
SH
851 return ret;
852}
853
4605e224
SH
854static ssize_t protocol_version_show(struct device *dev,
855 struct device_attribute *attr, char *buf)
856{
857 struct scmi_info *info = dev_get_drvdata(dev);
858
859 return sprintf(buf, "%u.%u\n", info->version.major_ver,
860 info->version.minor_ver);
861}
862static DEVICE_ATTR_RO(protocol_version);
863
864static ssize_t firmware_version_show(struct device *dev,
865 struct device_attribute *attr, char *buf)
866{
867 struct scmi_info *info = dev_get_drvdata(dev);
868
869 return sprintf(buf, "0x%x\n", info->version.impl_ver);
870}
871static DEVICE_ATTR_RO(firmware_version);
872
873static ssize_t vendor_id_show(struct device *dev,
874 struct device_attribute *attr, char *buf)
875{
876 struct scmi_info *info = dev_get_drvdata(dev);
877
878 return sprintf(buf, "%s\n", info->version.vendor_id);
879}
880static DEVICE_ATTR_RO(vendor_id);
881
882static ssize_t sub_vendor_id_show(struct device *dev,
883 struct device_attribute *attr, char *buf)
884{
885 struct scmi_info *info = dev_get_drvdata(dev);
886
887 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
888}
889static DEVICE_ATTR_RO(sub_vendor_id);
890
891static struct attribute *versions_attrs[] = {
892 &dev_attr_firmware_version.attr,
893 &dev_attr_protocol_version.attr,
894 &dev_attr_vendor_id.attr,
895 &dev_attr_sub_vendor_id.attr,
896 NULL,
897};
898ATTRIBUTE_GROUPS(versions);
899
2747a967
SH
900/* Each compatible listed below must have descriptor associated with it */
901static const struct of_device_id scmi_of_match[] = {
5c8a47a5 902 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
d7642823 903#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
1dc65580
PF
904 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
905#endif
2747a967
SH
906 { /* Sentinel */ },
907};
908
909MODULE_DEVICE_TABLE(of, scmi_of_match);
910
aa4f886f
SH
911static struct platform_driver scmi_driver = {
912 .driver = {
913 .name = "arm-scmi",
914 .of_match_table = scmi_of_match,
4605e224 915 .dev_groups = versions_groups,
aa4f886f
SH
916 },
917 .probe = scmi_probe,
918 .remove = scmi_remove,
919};
920
921module_platform_driver(scmi_driver);
922
923MODULE_ALIAS("platform: arm-scmi");
924MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
925MODULE_DESCRIPTION("ARM SCMI protocol driver");
926MODULE_LICENSE("GPL v2");