Commit | Line | Data |
---|---|---|
aa4f886f SH |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * System Control and Management Interface (SCMI) Message Protocol driver | |
4 | * | |
5 | * SCMI Message Protocol is used between the System Control Processor(SCP) | |
6 | * and the Application Processors(AP). The Message Handling Unit(MHU) | |
7 | * provides a mechanism for inter-processor communication between SCP's | |
8 | * Cortex M3 and AP. | |
9 | * | |
10 | * SCP offers control and management of the core/cluster power states, | |
11 | * various power domain DVFS including the core/cluster, certain system | |
12 | * clocks configuration, thermal sensors and many others. | |
13 | * | |
14 | * Copyright (C) 2018 ARM Ltd. | |
15 | */ | |
16 | ||
17 | #include <linux/bitmap.h> | |
18 | #include <linux/export.h> | |
19 | #include <linux/io.h> | |
20 | #include <linux/kernel.h> | |
d4c3751a | 21 | #include <linux/ktime.h> |
aa4f886f SH |
22 | #include <linux/mailbox_client.h> |
23 | #include <linux/module.h> | |
24 | #include <linux/of_address.h> | |
25 | #include <linux/of_device.h> | |
d4c3751a | 26 | #include <linux/processor.h> |
aa4f886f SH |
27 | #include <linux/semaphore.h> |
28 | #include <linux/slab.h> | |
29 | ||
30 | #include "common.h" | |
31 | ||
/*
 * SCMI message header layout (32-bit):
 *   [27:18] token/sequence id  [17:10] protocol id  [9:8] type  [7:0] msg id
 */
#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
/* Extract the 10-bit sequence token from a packed 32-bit header */
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
/* Number of distinct token values, i.e. the max in-flight messages */
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
aa4f886f SH |
38 | |
/*
 * SCMI status codes carried in the message status word.
 *
 * NOTE: SCMI_ERR_MAX has no explicit value, so it continues the
 * enumeration after -10 and evaluates to -9; it is a sentinel and
 * must not be treated as "one past the last valid error".
 */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};
53 | ||
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/*
 * Protection for the entire list; also serializes the per-instance
 * 'users' refcount updates done in scmi_handle_get()/scmi_handle_put()
 */
static DEFINE_MUTEX(scmi_list_mutex);
58 | ||
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier (hdr.seq).
 * @xfer_lock: Protection for message allocation (guards the bitmap only;
 *	individual xfers are owned by whoever set their bit)
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};
73 | ||
/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system (bounded by MSG_TOKEN_MAX)
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};
87 | ||
/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area (shared memory)
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};
105 | ||
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @minfo: Message info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info minfo;
	struct idr tx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};
132 | ||
/* Map a mailbox client back to its enclosing channel descriptor */
#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
/* Map an embedded handle back to its enclosing SCMI instance */
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
135 | ||
136 | /* | |
137 | * SCMI specification requires all parameters, message headers, return | |
138 | * arguments or any protocol data to be expressed in little endian | |
139 | * format only. | |
140 | */ | |
141 | struct scmi_shared_mem { | |
142 | __le32 reserved; | |
143 | __le32 channel_status; | |
144 | #define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) | |
145 | #define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) | |
146 | __le32 reserved1[2]; | |
147 | __le32 flags; | |
148 | #define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) | |
149 | __le32 length; | |
150 | __le32 msg_header; | |
151 | u8 msg_payload[0]; | |
152 | }; | |
153 | ||
/* Indexed by -SCMI status code; must stay in sync with enum scmi_error_codes */
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};
168 | ||
169 | static inline int scmi_to_linux_errno(int errno) | |
170 | { | |
171 | if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX) | |
172 | return scmi_linux_errmap[-errno]; | |
173 | return -EIO; | |
174 | } | |
175 | ||
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 *
 * Emitted via dev_dbg() only, so it is compiled out / gated by
 * dynamic debug in production builds.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}
188 | ||
/*
 * scmi_fetch_response() - Copy the platform's reply out of the shared
 * memory area into @xfer. The payload starts with a 32-bit status word,
 * followed by message-specific return data.
 */
static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}
199 | ||
/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Bits [9:8] (message type) are left at zero.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
	       FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
	       FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
214 | ||
/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message (a struct scmi_xfer)
 *
 * This function prepares the shared memory which contains the header and the
 * payload. The write order matters: the channel is marked busy before the
 * header/payload are written so the platform never sees a half-built message.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/*
	 * Ideally channel must be free by now unless OS timeout last
	 * request and platform continued to process the same, wait
	 * until it releases the shared memory, otherwise we may endup
	 * overwriting its response with new message payload or vice-versa
	 */
	spin_until_cond(ioread32(&mem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	/* Completion interrupt only wanted when not polling */
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}
247 | ||
/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Holds the xfer_lock spinlock only while reserving a slot in the
 * allocation bitmap; it does not block waiting for a free slot.
 *
 * Return: pointer to the reserved xfer on success,
 *	   ERR_PTR(-ENOMEM) when all message slots are in use.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Bitmap index doubles as the message sequence identifier */
	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}
289 | ||
/**
 * scmi_xfer_put() - Release a message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
313 | ||
2747a967 SH |
/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message (unused; the xfer is located via the shmem token)
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/* The token in the echoed header identifies the originating xfer */
	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);

	scmi_fetch_response(xfer, mem);
	complete(&xfer->done);
}
351 | ||
/*
 * scmi_xfer_poll_done() - Polling-mode completion test: true once the
 * platform has released (or errored) the channel for *this* xfer's token.
 */
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/* The shmem may still hold another message's state */
	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
365 | ||
/* Upper bound for busy-polling a response: 100us */
#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

/*
 * scmi_xfer_done_no_timeout() - spin_until_cond() predicate: done when the
 * xfer completes OR the @stop deadline passes (caller re-checks which).
 */
static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}
375 | ||
aa4f886f SH |
/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Sends @xfer on the Tx channel registered for its protocol id, then
 * either busy-polls (xfer->hdr.poll_completion) or sleeps on the xfer's
 * completion until the response arrives or the timeout elapses.
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Per-protocol channel; may fall back to the base channel at setup */
	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	/* Platform-reported status takes effect only if transport succeeded */
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}
439 | ||
/**
 * scmi_xfer_get_init() - Allocate and initialise one message
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size; 0 selects the maximum supported size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialise the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
486 | ||
b6f20ff8 SH |
/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Issues a PROTOCOL_VERSION request and converts the little-endian reply.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}
519 | ||
520 | void scmi_setup_protocol_implemented(const struct scmi_handle *handle, | |
521 | u8 *prot_imp) | |
522 | { | |
523 | struct scmi_info *info = handle_to_scmi_info(handle); | |
524 | ||
525 | info->protocols_imp = prot_imp; | |
526 | } | |
527 | ||
bc40081d SH |
528 | static bool |
529 | scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id) | |
530 | { | |
531 | int i; | |
532 | struct scmi_info *info = handle_to_scmi_info(handle); | |
533 | ||
534 | if (!info->protocols_imp) | |
535 | return false; | |
536 | ||
537 | for (i = 0; i < MAX_PROTOCOLS_IMP; i++) | |
538 | if (info->protocols_imp[i] == prot_id) | |
539 | return true; | |
540 | return false; | |
541 | } | |
542 | ||
aa4f886f | 543 | /** |
14e297b3 | 544 | * scmi_handle_get() - Get the SCMI handle for a device |
aa4f886f SH |
545 | * |
546 | * @dev: pointer to device for which we want SCMI handle | |
547 | * | |
548 | * NOTE: The function does not track individual clients of the framework | |
1baf47c2 | 549 | * and is expected to be maintained by caller of SCMI protocol library. |
aa4f886f SH |
550 | * scmi_handle_put must be balanced with successful scmi_handle_get |
551 | * | |
552 | * Return: pointer to handle if successful, NULL on error | |
553 | */ | |
554 | struct scmi_handle *scmi_handle_get(struct device *dev) | |
555 | { | |
556 | struct list_head *p; | |
557 | struct scmi_info *info; | |
558 | struct scmi_handle *handle = NULL; | |
559 | ||
560 | mutex_lock(&scmi_list_mutex); | |
561 | list_for_each(p, &scmi_list) { | |
562 | info = list_entry(p, struct scmi_info, node); | |
563 | if (dev->parent == info->dev) { | |
564 | handle = &info->handle; | |
565 | info->users++; | |
566 | break; | |
567 | } | |
568 | } | |
569 | mutex_unlock(&scmi_list_mutex); | |
570 | ||
571 | return handle; | |
572 | } | |
573 | ||
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 is successfully released
 *	if null was passed, it returns -EINVAL;
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	/* WARN on refcount underflow rather than wrapping negative */
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
601 | ||
aa4f886f SH |
602 | static int scmi_xfer_info_init(struct scmi_info *sinfo) |
603 | { | |
604 | int i; | |
605 | struct scmi_xfer *xfer; | |
606 | struct device *dev = sinfo->dev; | |
607 | const struct scmi_desc *desc = sinfo->desc; | |
608 | struct scmi_xfers_info *info = &sinfo->minfo; | |
609 | ||
610 | /* Pre-allocated messages, no more than what hdr.seq can support */ | |
354b2e36 SH |
611 | if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { |
612 | dev_err(dev, "Maximum message of %d exceeds supported %ld\n", | |
613 | desc->max_msg, MSG_TOKEN_MAX); | |
aa4f886f SH |
614 | return -EINVAL; |
615 | } | |
616 | ||
617 | info->xfer_block = devm_kcalloc(dev, desc->max_msg, | |
618 | sizeof(*info->xfer_block), GFP_KERNEL); | |
619 | if (!info->xfer_block) | |
620 | return -ENOMEM; | |
621 | ||
622 | info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg), | |
623 | sizeof(long), GFP_KERNEL); | |
624 | if (!info->xfer_alloc_table) | |
625 | return -ENOMEM; | |
626 | ||
aa4f886f SH |
627 | /* Pre-initialize the buffer pointer to pre-allocated buffers */ |
628 | for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) { | |
629 | xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, | |
630 | GFP_KERNEL); | |
631 | if (!xfer->rx.buf) | |
632 | return -ENOMEM; | |
633 | ||
634 | xfer->tx.buf = xfer->rx.buf; | |
635 | init_completion(&xfer->done); | |
636 | } | |
637 | ||
638 | spin_lock_init(&info->xfer_lock); | |
639 | ||
640 | return 0; | |
641 | } | |
642 | ||
/*
 * scmi_mailbox_check() - Probe for the @idx-th "mboxes" phandle in @np.
 * Returns 0 when the entry resolves; the parsed args are discarded
 * (NULL out-arg) since only presence matters to the callers.
 */
static int scmi_mailbox_check(struct device_node *np, int idx)
{
	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
					  idx, NULL);
}
648 | ||
/*
 * scmi_mbox_chan_setup() - Set up the mailbox channel and shared memory
 * for @prot_id and register it in the instance's tx_idr.
 *
 * If @dev's node carries no dedicated "mboxes" entry for the requested
 * direction, the channel already created for the base protocol is reused
 * (shared under a second idr slot). Resources are devm-managed against
 * the top-level SCMI device (info->dev), not the protocol device.
 */
static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
				int prot_id, bool tx)
{
	int ret, idx;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;
	const char *desc = tx ? "Tx" : "Rx";

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;

	/* No dedicated mailbox: share the base protocol's channel */
	if (scmi_mailbox_check(np, idx)) {
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
	cl->tx_block = false;
	/* Tx completion is signalled by us via mbox_client_txdone() */
	cl->knows_txdone = tx;

	shmem = of_parse_phandle(np, "shmem", idx);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
		return -EADDRNOTAVAIL;
	}

	cinfo->chan = mbox_request_channel(cl, idx);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		/* Deferral is routine during boot; stay quiet about it */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI %s mailbox\n",
				desc);
		return ret;
	}

idr_alloc:
	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}
715 | ||
/*
 * scmi_create_protocol_device() - Create the child SCMI device for
 * @prot_id, wire up its transport channel, and attach its handle.
 * Errors are logged and the device torn down; the caller continues
 * with the remaining protocols (hence the void return).
 */
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id, true)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}
738 | ||
aa4f886f SH |
/*
 * scmi_probe() - Platform driver probe: allocate the instance, set up the
 * base-protocol Tx channel, query the platform via the base protocol, then
 * create one child device per implemented protocol listed in DT.
 */
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np, 0)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	/* Base protocol channel must exist before any communication */
	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE, true);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	/* Each DT child with a "reg" names one protocol to instantiate */
	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_device(child, info, prot_id);
	}

	return 0;
}
811 | ||
2747a967 SH |
812 | static int scmi_mbox_free_channel(int id, void *p, void *data) |
813 | { | |
814 | struct scmi_chan_info *cinfo = p; | |
815 | struct idr *idr = data; | |
816 | ||
817 | if (!IS_ERR_OR_NULL(cinfo->chan)) { | |
818 | mbox_free_channel(cinfo->chan); | |
819 | cinfo->chan = NULL; | |
820 | } | |
821 | ||
822 | idr_remove(idr, id); | |
823 | ||
824 | return 0; | |
825 | } | |
826 | ||
/*
 * scmi_remove() - Platform driver remove: refuse while protocol users
 * remain, otherwise unlink the instance and free all mailbox channels.
 */
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	return ret;
}
849 | ||
/* Default SoC integration parameters for the generic "arm,scmi" binding */
static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
863 | ||
aa4f886f SH |
864 | static struct platform_driver scmi_driver = { |
865 | .driver = { | |
866 | .name = "arm-scmi", | |
867 | .of_match_table = scmi_of_match, | |
868 | }, | |
869 | .probe = scmi_probe, | |
870 | .remove = scmi_remove, | |
871 | }; | |
872 | ||
873 | module_platform_driver(scmi_driver); | |
874 | ||
875 | MODULE_ALIAS("platform: arm-scmi"); | |
876 | MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); | |
877 | MODULE_DESCRIPTION("ARM SCMI protocol driver"); | |
878 | MODULE_LICENSE("GPL v2"); |