include: trace: Add new scmi_xfer_response_wait event
[linux-block.git] / drivers / firmware / arm_scmi / driver.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of available-to-use xfers. It is initialized with
 *	a number of xfers equal to the maximum allowed in-flight
 *	messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *	currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which it is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle *handle;
	const struct scmi_protocol *proto;
	void *gid;
	refcount_t users;
	void *priv;
	struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	this SCMI instance: populated on protocol's first attempted
 *	usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *	in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data are visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since platform is NOT required to answer our request in-order we should
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at start, so try a second pass using
 *    find_next_zero_bit() and starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								^
 *								|- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	/* Set in-flight */
	set_bit(xfer_id, minfo->xfer_alloc_table);
	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @set_pending: If true a monotonic token is picked and the xfer is added to
 *	the pending hash table.
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and, if
 * required, sets a monotonically increasing token and stores the inflight
 * xfer into the @pending_xfers hashtable for later retrieval.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
 *	    @free_xfers.
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo,
				       bool set_pending)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that can be used also as base for
	 * monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	if (set_pending) {
		/* Pick and set monotonic token */
		ret = scmi_xfer_token_set(minfo, xfer);
		if (!ret) {
			hash_add(minfo->pending_xfers, &xfer->node,
				 xfer->hdr.seq);
			xfer->pending = true;
		} else {
			dev_err(handle->dev,
				"Failed to get monotonic token %d\n", ret);
			hlist_add_head(&xfer->node, &minfo->free_xfers);
			xfer = ERR_PTR(ret);
		}
	}

	if (!IS_ERR(xfer)) {
		refcount_set(&xfer->users, 1);
		atomic_set(&xfer->busy, SCMI_XFER_FREE);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully
 * validated by @scmi_msg_response_validate(), so here we just update the
 * state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}

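/*
 * Try to flip xfer->busy from SCMI_XFER_FREE to SCMI_XFER_BUSY: returns
 * true only if this context won the race for exclusive ownership.
 */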
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);
		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

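/*
 * Drop the exclusive busy flag taken in scmi_xfer_command_acquire() and
 * release the associated reference.
 */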
static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

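/* Invoke the transport's channel-clearing hook, if one is provided. */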
static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo, false);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		xfer->priv = priv;
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		xfer->priv = priv;
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;
	int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
		if (ktime_before(ktime_get(), stop)) {
			unsigned long flags;

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				info->desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);
		} else {
			dev_err(dev,
				"timed out in resp(caller: %pS) - polling\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo))
		return -EINVAL;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

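/* Restore the Rx buffer length possibly shrunk by a previous response. */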
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout))
			ret = -ETIMEDOUT;
		else if (xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);
	}

	xfer->async_done = NULL;
	return ret;
}

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo, true);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

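/* Transfer operations exposed to protocols through the protocol handle. */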
static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};

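/*
 * Illustrative sketch (not part of this file's API surface): a protocol
 * implementation typically drives these ops through its protocol handle,
 * e.g. to read its PROTOCOL_ATTRIBUTES; the variable names here are
 * hypothetical, following common usage in the per-protocol drivers:
 *
 *	struct scmi_xfer *t;
 *	u32 attributes;
 *	int ret;
 *
 *	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
 *				      0, sizeof(attributes), &t);
 *	if (ret)
 *		return ret;
 *
 *	ret = ph->xops->do_xfer(ph, t);
 *	if (!ret)
 *		attributes = le32_to_cpu(*(__le32 *)t->rx.buf);
 *	ph->xops->xfer_put(ph, t);
 */
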
/**
 * scmi_revision_area_get - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 * instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure. On failure the @proto reference is at first
 *	   put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto->id);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->init is assured NON NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto->id);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}

1165
1166/**
1167 * scmi_protocol_acquire - Protocol acquire
1168 * @handle: A reference to the SCMI platform instance.
1169 * @protocol_id: The protocol being requested.
1170 *
1171 * Register a new user for the requested protocol on the specified SCMI
1172 * platform instance, possibly triggering its initialization on first user.
1173 *
1174 * Return: 0 if protocol was acquired successfully.
1175 */
f0e73cee 1176int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
48dc16e2
CM
1177{
1178 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1179}
1180
/**
 * scmi_protocol_release - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(protocol_id);

		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}

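/* Check @prot_id against the implemented-protocols list reported by Base. */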
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, eventually triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;
	struct scmi_handle *handle = sdev->handle;

	if (!ph)
		return ERR_PTR(-EINVAL);

	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;
}

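/* devres match callback: select the entry tracking the given protocol id. */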
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	WARN_ON(ret);
}

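/* Bump the instance user count; the caller is expected to hold scmi_list_mutex. */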
static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
	info->users++;
	return &info->handle;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = scmi_handle_get_from_info_unlocked(info);
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	if NULL was passed, it returns -EINVAL;
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			info->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	hash_init(info->pending_xfers);

	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/*
	 * Preallocate a number of xfers equal to max inflight messages,
	 * pre-initialize the buffer pointer to pre-allocated buffers and
	 * attach all of them to the free list
	 */
	INIT_HLIST_HEAD(&info->free_xfers);
	for (i = 0; i < info->max_msg; i++) {
		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
		if (!xfer)
			return -ENOMEM;

		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
		spin_lock_init(&xfer->lock);

		/* Add initialized xfer to the free list */
		hlist_add_head(&xfer->node, &info->free_xfers);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
{
	const struct scmi_desc *desc = sinfo->desc;

	if (!desc->ops->get_max_msg) {
		sinfo->tx_minfo.max_msg = desc->max_msg;
		sinfo->rx_minfo.max_msg = desc->max_msg;
	} else {
		struct scmi_chan_info *base_cinfo;

		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
		if (!base_cinfo)
			return -EINVAL;
		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);

		/* RX channel is optional so can be skipped */
		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
		if (base_cinfo)
			sinfo->rx_minfo.max_msg =
				desc->ops->get_max_msg(base_cinfo);
	}

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret;

	ret = scmi_channels_max_msg_configure(sinfo);
	if (ret)
		return ret;

	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

/**
 * scmi_get_protocol_device - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocols for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @prot_id and @name: if the device was not already existent it is created as
 * a child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A properly initialized scmi device, NULL otherwise.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_child_dev_find(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @&scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

1636/**
1637 * scmi_protocol_device_request - Helper to request a device
1638 *
1639 * @id_table: A protocol/name pair descriptor for the device to be created.
1640 *
1641 * This helper lets an SCMI driver request specific devices identified by the
1642 * @id_table to be created for each active SCMI instance.
1643 *
1644 * The requested device name MUST NOT already exist for any protocol;
1645 * first the freshly requested @id_table is annotated in the IDR table
1646 * @scmi_requested_devices, then a matching device is created for each
1647 * already active SCMI instance (if any).
1648 *
1649 * This way the requested device is created straight away for all the already
1650 * initialized (probed) SCMI instances (handles), and it also remains annotated
1651 * as pending creation if the requesting SCMI driver was loaded before some
1652 * SCMI instance and related transports were available: when such a late
1653 * instance is probed, its probe will scan the list of pending requested
1654 * devices and create them on its own (see scmi_create_protocol_devices() and
1655 * its enclosing loop).
1656 *
1657 * Return: 0 on Success
1658 */
1659int scmi_protocol_device_request(const struct scmi_device_id *id_table)
9c5c463f 1660{
d4f9dddd
CM
1661 int ret = 0;
1662 unsigned int id = 0;
1663 struct list_head *head, *phead = NULL;
1664 struct scmi_requested_dev *rdev;
1665 struct scmi_info *info;
9c5c463f 1666
d4f9dddd
CM
1667 pr_debug("Requesting SCMI device (%s) for protocol %x\n",
1668 id_table->name, id_table->protocol_id);
1669
1670 /*
1671 * Search for the matching protocol rdev list and then search it for
1672 * any existing equally named device; fail if a duplicate is found.
1673 */
1674 mutex_lock(&scmi_requested_devices_mtx);
1675 idr_for_each_entry(&scmi_requested_devices, head, id) {
1676 if (!phead) {
1677 /* A list found registered in the IDR is never empty */
1678 rdev = list_first_entry(head, struct scmi_requested_dev,
1679 node);
1680 if (rdev->id_table->protocol_id ==
1681 id_table->protocol_id)
1682 phead = head;
1683 }
1684 list_for_each_entry(rdev, head, node) {
1685 if (!strcmp(rdev->id_table->name, id_table->name)) {
1686 pr_err("Ignoring duplicate request [%d] %s\n",
1687 rdev->id_table->protocol_id,
1688 rdev->id_table->name);
1689 ret = -EINVAL;
1690 goto out;
1691 }
1692 }
1693 }
1694
1695 /*
1696 * No duplicate found for requested id_table, so let's create a new
1697 * requested device entry for this new valid request.
1698 */
1699 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1700 if (!rdev) {
1701 ret = -ENOMEM;
1702 goto out;
1703 }
1704 rdev->id_table = id_table;
1705
1706 /*
1707 * Append the new requested device table descriptor to the head of the
1708 * related protocol list, creating the list head first if not already
1709 * there.
1710 */
1711 if (!phead) {
1712 phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1713 if (!phead) {
1714 kfree(rdev);
1715 ret = -ENOMEM;
1716 goto out;
1717 }
1718 INIT_LIST_HEAD(phead);
1719
1720 ret = idr_alloc(&scmi_requested_devices, (void *)phead,
1721 id_table->protocol_id,
1722 id_table->protocol_id + 1, GFP_KERNEL);
1723 if (ret != id_table->protocol_id) {
1724 pr_err("Failed to save SCMI device - ret:%d\n", ret);
1725 kfree(rdev);
1726 kfree(phead);
1727 ret = -EINVAL;
1728 goto out;
1729 }
1730 ret = 0;
1731 }
1732 list_add(&rdev->node, phead);
1733
1734 /*
1735 * Now effectively create and initialize the requested device for every
1736 * already initialized SCMI instance which has registered the requested
1737 * protocol as a valid active one: i.e. defined in DT and supported by
1738 * current platform FW.
1739 */
1740 mutex_lock(&scmi_list_mutex);
1741 list_for_each_entry(info, &scmi_list, node) {
1742 struct device_node *child;
1743
1744 child = idr_find(&info->active_protocols,
1745 id_table->protocol_id);
1746 if (child) {
1747 struct scmi_device *sdev;
1748
1749 sdev = scmi_get_protocol_device(child, info,
1750 id_table->protocol_id,
1751 id_table->name);
1752 /* Set handle if not already set: device existed */
1753 if (sdev && !sdev->handle)
1754 sdev->handle =
1755 scmi_handle_get_from_info_unlocked(info);
1756 } else {
1757 dev_err(info->dev,
1758 "Failed. SCMI protocol %d not active.\n",
1759 id_table->protocol_id);
1760 }
1761 }
1762 mutex_unlock(&scmi_list_mutex);
1763
1764out:
1765 mutex_unlock(&scmi_requested_devices_mtx);
9c5c463f 1766
d4f9dddd
CM
1767 return ret;
1768}
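
For context, a minimal usage sketch (not part of driver.c): in the SCMI bus code, scmi_driver_register() is what typically forwards a client driver's id_table to this helper, so clients never call it directly. The table contents and function names below are purely illustrative placeholders.

static const struct scmi_device_id hypothetical_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },	/* illustrative protocol/name pair */
	{ },
};

static int hypothetical_request_clocks_dev(void)
{
	/*
	 * A "clocks" device is created under every already probed SCMI
	 * instance exposing the Clock protocol; the request also stays
	 * annotated as pending for instances probed later.
	 */
	return scmi_protocol_device_request(hypothetical_id_table);
}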
9c5c463f 1769
d4f9dddd
CM
1770/**
1771 * scmi_protocol_device_unrequest - Helper to unrequest a device
1772 *
1773 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1774 *
1775 * A helper to let an SCMI driver release its request for devices; note that
1776 * devices are created and initialized once the first SCMI driver requests
1777 * them, but they are destroyed only on SCMI core unloading/unbinding.
1778 *
1779 * The current SCMI transport layer uses such devices as internal references,
1780 * and as such they could be shared by multiple drivers using the same
1781 * transport, so they cannot be safely destroyed until the whole SCMI stack
1782 * is removed (unless taking on the further burden of refcounting).
1783 */
1784void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
1785{
1786 struct list_head *phead;
1787
1788 pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1789 id_table->name, id_table->protocol_id);
1790
1791 mutex_lock(&scmi_requested_devices_mtx);
1792 phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1793 if (phead) {
1794 struct scmi_requested_dev *victim, *tmp;
1795
1796 list_for_each_entry_safe(victim, tmp, phead, node) {
1797 if (!strcmp(victim->id_table->name, id_table->name)) {
1798 list_del(&victim->node);
1799 kfree(victim);
1800 break;
1801 }
1802 }
1803
1804 if (list_empty(phead)) {
1805 idr_remove(&scmi_requested_devices,
1806 id_table->protocol_id);
1807 kfree(phead);
9c5c463f
SH
1808 }
1809 }
d4f9dddd 1810 mutex_unlock(&scmi_requested_devices_mtx);
9c5c463f
SH
1811}
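
The symmetric release, sketched under the same assumptions as the hypothetical request example above; only the pending annotation is dropped here, the scmi_device itself survives until the core is removed.

static void hypothetical_release_clocks_dev(void)
{
	/* Drops only the annotation; the scmi_device is not destroyed */
	scmi_protocol_device_unrequest(hypothetical_id_table);
}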
1812
1e7cbfaa
RB
1813static int scmi_cleanup_txrx_channels(struct scmi_info *info)
1814{
1815 int ret;
1816 struct idr *idr = &info->tx_idr;
1817
1818 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1819 idr_destroy(&info->tx_idr);
1820
1821 idr = &info->rx_idr;
1822 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1823 idr_destroy(&info->rx_idr);
1824
1825 return ret;
1826}
1827
aa4f886f
SH
1828static int scmi_probe(struct platform_device *pdev)
1829{
1830 int ret;
1831 struct scmi_handle *handle;
1832 const struct scmi_desc *desc;
1833 struct scmi_info *info;
1834 struct device *dev = &pdev->dev;
bc40081d 1835 struct device_node *child, *np = dev->of_node;
aa4f886f 1836
d9350f21
AP
1837 desc = of_device_get_match_data(dev);
1838 if (!desc)
1839 return -EINVAL;
aa4f886f
SH
1840
1841 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1842 if (!info)
1843 return -ENOMEM;
1844
1845 info->dev = dev;
1846 info->desc = desc;
1847 INIT_LIST_HEAD(&info->node);
48dc16e2
CM
1848 idr_init(&info->protocols);
1849 mutex_init(&info->protocols_mtx);
d4f9dddd 1850 idr_init(&info->active_protocols);
aa4f886f 1851
aa4f886f 1852 platform_set_drvdata(pdev, info);
907b6d14 1853 idr_init(&info->tx_idr);
46cc7c28 1854 idr_init(&info->rx_idr);
aa4f886f
SH
1855
1856 handle = &info->handle;
1857 handle->dev = info->dev;
b6f20ff8 1858 handle->version = &info->version;
23934efe
CM
1859 handle->devm_protocol_get = scmi_devm_protocol_get;
1860 handle->devm_protocol_put = scmi_devm_protocol_put;
aa4f886f 1861
78852812
PH
1862 if (desc->ops->link_supplier) {
1863 ret = desc->ops->link_supplier(dev);
1864 if (ret)
1865 return ret;
1866 }
1867
5c8a47a5 1868 ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
aa4f886f
SH
1869 if (ret)
1870 return ret;
1871
4ebd8f6d
SH
1872 ret = scmi_xfer_info_init(info);
1873 if (ret)
1e7cbfaa 1874 goto clear_txrx_setup;
4ebd8f6d 1875
6b8a6913
CM
1876 if (scmi_notification_init(handle))
1877 dev_err(dev, "SCMI Notifications NOT available.\n");
1878
8d3581c2
CM
1879 /*
1880 * Trigger SCMI Base protocol initialization.
1881 * It's mandatory and won't ever be released/deinitialized until the
1882 * SCMI stack is shut down/unloaded as a whole.
1883 */
1884 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
b6f20ff8 1885 if (ret) {
8d3581c2 1886 dev_err(dev, "unable to communicate with SCMI\n");
1e7cbfaa 1887 goto notification_exit;
b6f20ff8
SH
1888 }
1889
aa4f886f
SH
1890 mutex_lock(&scmi_list_mutex);
1891 list_add_tail(&info->node, &scmi_list);
1892 mutex_unlock(&scmi_list_mutex);
1893
bc40081d
SH
1894 for_each_available_child_of_node(np, child) {
1895 u32 prot_id;
1896
1897 if (of_property_read_u32(child, "reg", &prot_id))
1898 continue;
1899
354b2e36
SH
1900 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
1901 dev_err(dev, "Out of range protocol %d\n", prot_id);
bc40081d
SH
1902
1903 if (!scmi_is_protocol_implemented(handle, prot_id)) {
1904 dev_err(dev, "SCMI protocol %d not implemented\n",
1905 prot_id);
1906 continue;
1907 }
1908
d4f9dddd
CM
1909 /*
1910 * Save this valid DT protocol descriptor amongst
1911 * @active_protocols for this SCMI instance.
1912 */
1913 ret = idr_alloc(&info->active_protocols, child,
1914 prot_id, prot_id + 1, GFP_KERNEL);
1915 if (ret != prot_id) {
1916 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
1917 prot_id);
1918 continue;
1919 }
1920
1921 of_node_get(child);
9c5c463f 1922 scmi_create_protocol_devices(child, info, prot_id);
bc40081d
SH
1923 }
1924
aa4f886f 1925 return 0;
1e7cbfaa
RB
1926
1927notification_exit:
1928 scmi_notification_exit(&info->handle);
1929clear_txrx_setup:
1930 scmi_cleanup_txrx_channels(info);
1931 return ret;
aa4f886f
SH
1932}
1933
5c8a47a5 1934void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
2747a967 1935{
2747a967 1936 idr_remove(idr, id);
2747a967
SH
1937}
1938
1939static int scmi_remove(struct platform_device *pdev)
1940{
d4f9dddd 1941 int ret = 0, id;
2747a967 1942 struct scmi_info *info = platform_get_drvdata(pdev);
d4f9dddd 1943 struct device_node *child;
2747a967
SH
1944
1945 mutex_lock(&scmi_list_mutex);
1946 if (info->users)
1947 ret = -EBUSY;
1948 else
1949 list_del(&info->node);
1950 mutex_unlock(&scmi_list_mutex);
1951
1952 if (ret)
1953 return ret;
1954
a90b6543
CM
1955 scmi_notification_exit(&info->handle);
1956
48dc16e2
CM
1957 mutex_lock(&info->protocols_mtx);
1958 idr_destroy(&info->protocols);
1959 mutex_unlock(&info->protocols_mtx);
1960
d4f9dddd
CM
1961 idr_for_each_entry(&info->active_protocols, child, id)
1962 of_node_put(child);
1963 idr_destroy(&info->active_protocols);
1964
2747a967 1965 /* Safe to free channels since no more users */
1e7cbfaa 1966 return scmi_cleanup_txrx_channels(info);
2747a967
SH
1967}
1968
4605e224
SH
1969static ssize_t protocol_version_show(struct device *dev,
1970 struct device_attribute *attr, char *buf)
1971{
1972 struct scmi_info *info = dev_get_drvdata(dev);
1973
1974 return sprintf(buf, "%u.%u\n", info->version.major_ver,
1975 info->version.minor_ver);
1976}
1977static DEVICE_ATTR_RO(protocol_version);
1978
1979static ssize_t firmware_version_show(struct device *dev,
1980 struct device_attribute *attr, char *buf)
1981{
1982 struct scmi_info *info = dev_get_drvdata(dev);
1983
1984 return sprintf(buf, "0x%x\n", info->version.impl_ver);
1985}
1986static DEVICE_ATTR_RO(firmware_version);
1987
1988static ssize_t vendor_id_show(struct device *dev,
1989 struct device_attribute *attr, char *buf)
1990{
1991 struct scmi_info *info = dev_get_drvdata(dev);
1992
1993 return sprintf(buf, "%s\n", info->version.vendor_id);
1994}
1995static DEVICE_ATTR_RO(vendor_id);
1996
1997static ssize_t sub_vendor_id_show(struct device *dev,
1998 struct device_attribute *attr, char *buf)
1999{
2000 struct scmi_info *info = dev_get_drvdata(dev);
2001
2002 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2003}
2004static DEVICE_ATTR_RO(sub_vendor_id);
2005
2006static struct attribute *versions_attrs[] = {
2007 &dev_attr_firmware_version.attr,
2008 &dev_attr_protocol_version.attr,
2009 &dev_attr_vendor_id.attr,
2010 &dev_attr_sub_vendor_id.attr,
2011 NULL,
2012};
2013ATTRIBUTE_GROUPS(versions);
2014
2747a967
SH
2015 /* Each compatible listed below must have a descriptor associated with it */
2016static const struct of_device_id scmi_of_match[] = {
e8419c24 2017#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
5c8a47a5 2018 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
ab7766b7 2019#endif
5f90f189
EC
2020#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2021 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2022#endif
e8419c24 2023#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
1dc65580 2024 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
46abe13b
IS
2025#endif
2026#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2027 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
1dc65580 2028#endif
2747a967
SH
2029 { /* Sentinel */ },
2030};
2031
2032MODULE_DEVICE_TABLE(of, scmi_of_match);
2033
aa4f886f
SH
2034static struct platform_driver scmi_driver = {
2035 .driver = {
2036 .name = "arm-scmi",
2037 .of_match_table = scmi_of_match,
4605e224 2038 .dev_groups = versions_groups,
aa4f886f
SH
2039 },
2040 .probe = scmi_probe,
2041 .remove = scmi_remove,
2042};
2043
ceac257d
CM
2044/**
2045 * __scmi_transports_setup - Common helper to call transport-specific
2046 * .init/.exit code if provided.
2047 *
2048 * @init: A flag to distinguish between init and exit.
2049 *
2050 * Note that, if provided, we invoke .init/.exit functions for all the
2051 * transports currently compiled in.
2052 *
2053 * Return: 0 on Success.
2054 */
2055static inline int __scmi_transports_setup(bool init)
2056{
2057 int ret = 0;
2058 const struct of_device_id *trans;
2059
2060 for (trans = scmi_of_match; trans->data; trans++) {
2061 const struct scmi_desc *tdesc = trans->data;
2062
2063 if ((init && !tdesc->transport_init) ||
2064 (!init && !tdesc->transport_exit))
2065 continue;
2066
2067 if (init)
2068 ret = tdesc->transport_init();
2069 else
2070 tdesc->transport_exit();
2071
2072 if (ret) {
2073 pr_err("SCMI transport %s FAILED initialization!\n",
2074 trans->compatible);
2075 break;
2076 }
2077 }
2078
2079 return ret;
2080}
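
As a hedged illustration of what this helper iterates over, a transport listed in scmi_of_match[] could provide the optional init/exit hooks in its scmi_desc roughly as below; the callback bodies, scmi_hypothetical_ops and the numeric limits are placeholders, not an actual in-tree transport.

static const struct scmi_transport_ops scmi_hypothetical_ops; /* placeholder ops */

static int scmi_hypothetical_transport_init(void)
{
	/* e.g. set up a shared pool or register with a lower layer */
	return 0;
}

static void scmi_hypothetical_transport_exit(void)
{
	/* Undo whatever the init hook set up */
}

static const struct scmi_desc scmi_hypothetical_desc = {
	.transport_init = scmi_hypothetical_transport_init,
	.transport_exit = scmi_hypothetical_transport_exit,
	.ops = &scmi_hypothetical_ops,
	.max_rx_timeout_ms = 30,	/* illustrative limits */
	.max_msg = 20,
	.max_msg_size = 128,
};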
2081
2082static int __init scmi_transports_init(void)
2083{
2084 return __scmi_transports_setup(true);
2085}
2086
2087static void __exit scmi_transports_exit(void)
2088{
2089 __scmi_transports_setup(false);
2090}
2091
5a2f0a0b
SH
2092static int __init scmi_driver_init(void)
2093{
ceac257d
CM
2094 int ret;
2095
c0397c85
CM
2096 /* Bail out if no SCMI transport was configured */
2097 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
2098 return -EINVAL;
5a2f0a0b 2099
c0397c85 2100 scmi_bus_init();
e8419c24 2101
ceac257d
CM
2102 /* Initialize any compiled-in transport which provided an init/exit */
2103 ret = scmi_transports_init();
2104 if (ret)
2105 return ret;
2106
48dc16e2
CM
2107 scmi_base_register();
2108
1eaf18e3
SH
2109 scmi_clock_register();
2110 scmi_perf_register();
2111 scmi_power_register();
2112 scmi_reset_register();
2113 scmi_sensors_register();
2add5cac 2114 scmi_voltage_register();
1eaf18e3
SH
2115 scmi_system_register();
2116
5a2f0a0b
SH
2117 return platform_driver_register(&scmi_driver);
2118}
1eaf18e3 2119subsys_initcall(scmi_driver_init);
5a2f0a0b
SH
2120
2121static void __exit scmi_driver_exit(void)
2122{
48dc16e2 2123 scmi_base_unregister();
5a2f0a0b 2124
1eaf18e3
SH
2125 scmi_clock_unregister();
2126 scmi_perf_unregister();
2127 scmi_power_unregister();
2128 scmi_reset_unregister();
2129 scmi_sensors_unregister();
2add5cac 2130 scmi_voltage_unregister();
1eaf18e3
SH
2131 scmi_system_unregister();
2132
48dc16e2
CM
2133 scmi_bus_exit();
2134
ceac257d
CM
2135 scmi_transports_exit();
2136
5a2f0a0b
SH
2137 platform_driver_unregister(&scmi_driver);
2138}
2139module_exit(scmi_driver_exit);
aa4f886f
SH
2140
2141 MODULE_ALIAS("platform:arm-scmi");
2142MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2143MODULE_DESCRIPTION("ARM SCMI protocol driver");
2144MODULE_LICENSE("GPL v2");