firmware: qcom: scm: Remove log reporting memory allocation failure
[linux-2.6-block.git] / drivers / firmware / qcom / qcom_scm.c
CommitLineData
97fb5e8d 1// SPDX-License-Identifier: GPL-2.0-only
5443cc5f 2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
2ce76a6a 3 * Copyright (C) 2015 Linaro Ltd.
2a1eb58a 4 */
bc7fbb5e
BG
5
6#include <linux/arm-smccc.h>
7#include <linux/clk.h>
6bf32599 8#include <linux/completion.h>
b6a1dfbc 9#include <linux/cpumask.h>
f01e90fe 10#include <linux/dma-mapping.h>
bc7fbb5e
BG
11#include <linux/export.h>
12#include <linux/firmware/qcom/qcom_scm.h>
13#include <linux/init.h>
65b7ebda 14#include <linux/interconnect.h>
bc7fbb5e 15#include <linux/interrupt.h>
8c1b7dc9 16#include <linux/module.h>
d0f6fa7b 17#include <linux/of.h>
8c1b7dc9 18#include <linux/of_address.h>
6bf32599 19#include <linux/of_irq.h>
d0f6fa7b 20#include <linux/of_platform.h>
bc7fbb5e 21#include <linux/platform_device.h>
dd4fe5b2 22#include <linux/reset-controller.h>
bc7fbb5e 23#include <linux/types.h>
2a1eb58a 24
b6a1dfbc 25#include "qcom_scm.h"
a353e4a0 26
8c1b7dc9
BA
27static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
28module_param(download_mode, bool, 0);
29
d0f6fa7b
AG
30struct qcom_scm {
31 struct device *dev;
32 struct clk *core_clk;
33 struct clk *iface_clk;
34 struct clk *bus_clk;
65b7ebda 35 struct icc_path *path;
6bf32599 36 struct completion waitq_comp;
dd4fe5b2 37 struct reset_controller_dev reset;
8c1b7dc9 38
65b7ebda
SS
39 /* control access to the interconnect path */
40 struct mutex scm_bw_lock;
41 int scm_vote_count;
42
8c1b7dc9 43 u64 dload_mode_addr;
d0f6fa7b
AG
44};
45
d82bd359
AKD
46struct qcom_scm_current_perm_info {
47 __le32 vmid;
48 __le32 perm;
49 __le64 ctx;
50 __le32 ctx_size;
51 __le32 unused;
52};
53
54struct qcom_scm_mem_map_info {
55 __le64 mem_addr;
56 __le64 mem_size;
57};
58
00b12486
ML
59/**
60 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
61 * @result: Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
62 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
63 * @data: Response data. The type of this data is given in @resp_type.
64 */
65struct qcom_scm_qseecom_resp {
66 u64 result;
67 u64 resp_type;
68 u64 data;
69};
70
71enum qcom_scm_qseecom_result {
72 QSEECOM_RESULT_SUCCESS = 0,
73 QSEECOM_RESULT_INCOMPLETE = 1,
74 QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
75 QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
76};
77
78enum qcom_scm_qseecom_resp_type {
79 QSEECOM_SCM_RES_APP_ID = 0xEE01,
80 QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
81};
82
83enum qcom_scm_qseecom_tz_owner {
84 QSEECOM_TZ_OWNER_SIP = 2,
85 QSEECOM_TZ_OWNER_TZ_APPS = 48,
86 QSEECOM_TZ_OWNER_QSEE_OS = 50
87};
88
89enum qcom_scm_qseecom_tz_svc {
90 QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
91 QSEECOM_TZ_SVC_APP_MGR = 1,
92 QSEECOM_TZ_SVC_INFO = 6,
93};
94
95enum qcom_scm_qseecom_tz_cmd_app {
96 QSEECOM_TZ_CMD_APP_SEND = 1,
97 QSEECOM_TZ_CMD_APP_LOOKUP = 3,
98};
99
100enum qcom_scm_qseecom_tz_cmd_info {
101 QSEECOM_TZ_CMD_INFO_VERSION = 3,
102};
103
104#define QSEECOM_MAX_APP_NAME_SIZE 64
105
7734c4b5
SG
106/* Each bit configures cold/warm boot address for one of the 4 CPUs */
107static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
108 0, BIT(0), BIT(3), BIT(5)
57d3b816 109};
7734c4b5
SG
110static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
111 BIT(2), BIT(1), BIT(4), BIT(6)
57d3b816
EB
112};
113
6bf32599
GDS
114#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
115#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
116
6bc45428 117static const char * const qcom_scm_convention_names[] = {
9a434cee
EB
118 [SMC_CONVENTION_UNKNOWN] = "unknown",
119 [SMC_CONVENTION_ARM_32] = "smc arm 32",
120 [SMC_CONVENTION_ARM_64] = "smc arm 64",
121 [SMC_CONVENTION_LEGACY] = "smc legacy",
122};
123
d0f6fa7b
AG
/* Singleton set at probe time; NULL until the SCM device has bound. */
static struct qcom_scm *__scm;
125
126static int qcom_scm_clk_enable(void)
127{
128 int ret;
129
130 ret = clk_prepare_enable(__scm->core_clk);
131 if (ret)
132 goto bail;
133
134 ret = clk_prepare_enable(__scm->iface_clk);
135 if (ret)
136 goto disable_core;
137
138 ret = clk_prepare_enable(__scm->bus_clk);
139 if (ret)
140 goto disable_iface;
141
142 return 0;
143
144disable_iface:
145 clk_disable_unprepare(__scm->iface_clk);
146disable_core:
147 clk_disable_unprepare(__scm->core_clk);
148bail:
149 return ret;
150}
151
152static void qcom_scm_clk_disable(void)
153{
154 clk_disable_unprepare(__scm->core_clk);
155 clk_disable_unprepare(__scm->iface_clk);
156 clk_disable_unprepare(__scm->bus_clk);
157}
158
65b7ebda
SS
159static int qcom_scm_bw_enable(void)
160{
161 int ret = 0;
162
163 if (!__scm->path)
164 return 0;
165
65b7ebda
SS
166 mutex_lock(&__scm->scm_bw_lock);
167 if (!__scm->scm_vote_count) {
168 ret = icc_set_bw(__scm->path, 0, UINT_MAX);
169 if (ret < 0) {
170 dev_err(__scm->dev, "failed to set bandwidth request\n");
171 goto err_bw;
172 }
173 }
174 __scm->scm_vote_count++;
175err_bw:
176 mutex_unlock(&__scm->scm_bw_lock);
177
178 return ret;
179}
180
181static void qcom_scm_bw_disable(void)
182{
e6f3dac9 183 if (!__scm->path)
65b7ebda
SS
184 return;
185
186 mutex_lock(&__scm->scm_bw_lock);
187 if (__scm->scm_vote_count-- == 1)
188 icc_set_bw(__scm->path, 0, 0);
189 mutex_unlock(&__scm->scm_bw_lock);
190}
191
f6ea568f
SB
192enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
193static DEFINE_SPINLOCK(scm_query_lock);
9a434cee 194
f6ea568f 195static enum qcom_scm_convention __get_convention(void)
9a434cee
EB
196{
197 unsigned long flags;
198 struct qcom_scm_desc desc = {
199 .svc = QCOM_SCM_SVC_INFO,
200 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
201 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
202 QCOM_SCM_INFO_IS_CALL_AVAIL) |
203 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
204 .arginfo = QCOM_SCM_ARGS(1),
205 .owner = ARM_SMCCC_OWNER_SIP,
206 };
207 struct qcom_scm_res res;
f6ea568f 208 enum qcom_scm_convention probed_convention;
9a434cee 209 int ret;
257f2935 210 bool forced = false;
9a434cee 211
f6ea568f
SB
212 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
213 return qcom_scm_convention;
9a434cee 214
3337a6fe
KT
215 /*
216 * Per the "SMC calling convention specification", the 64-bit calling
217 * convention can only be used when the client is 64-bit, otherwise
218 * system will encounter the undefined behaviour.
219 */
220#if IS_ENABLED(CONFIG_ARM64)
f6ea568f
SB
221 /*
222 * Device isn't required as there is only one argument - no device
223 * needed to dma_map_single to secure world
224 */
225 probed_convention = SMC_CONVENTION_ARM_64;
226 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
9a434cee 227 if (!ret && res.result[0] == 1)
f6ea568f 228 goto found;
9a434cee 229
257f2935
SB
230 /*
231 * Some SC7180 firmwares didn't implement the
232 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
233 * calling conventions on these firmwares. Luckily we don't make any
234 * early calls into the firmware on these SoCs so the device pointer
235 * will be valid here to check if the compatible matches.
236 */
237 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
238 forced = true;
239 goto found;
240 }
3337a6fe 241#endif
257f2935 242
f6ea568f
SB
243 probed_convention = SMC_CONVENTION_ARM_32;
244 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
9a434cee 245 if (!ret && res.result[0] == 1)
f6ea568f
SB
246 goto found;
247
248 probed_convention = SMC_CONVENTION_LEGACY;
249found:
250 spin_lock_irqsave(&scm_query_lock, flags);
251 if (probed_convention != qcom_scm_convention) {
252 qcom_scm_convention = probed_convention;
257f2935
SB
253 pr_info("qcom_scm: convention: %s%s\n",
254 qcom_scm_convention_names[qcom_scm_convention],
255 forced ? " (forced)" : "");
f6ea568f
SB
256 }
257 spin_unlock_irqrestore(&scm_query_lock, flags);
9a434cee 258
9a434cee
EB
259 return qcom_scm_convention;
260}
261
262/**
263 * qcom_scm_call() - Invoke a syscall in the secure world
264 * @dev: device
9a434cee 265 * @desc: Descriptor structure containing arguments and return values
a5d32f6d 266 * @res: Structure containing results from SMC/HVC call
9a434cee
EB
267 *
268 * Sends a command to the SCM and waits for the command to finish processing.
269 * This should *only* be called in pre-emptible context.
270 */
271static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
272 struct qcom_scm_res *res)
273{
274 might_sleep();
275 switch (__get_convention()) {
276 case SMC_CONVENTION_ARM_32:
277 case SMC_CONVENTION_ARM_64:
278 return scm_smc_call(dev, desc, res, false);
279 case SMC_CONVENTION_LEGACY:
280 return scm_legacy_call(dev, desc, res);
281 default:
282 pr_err("Unknown current SCM calling convention.\n");
283 return -EINVAL;
284 }
285}
286
287/**
288 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
289 * @dev: device
9a434cee
EB
290 * @desc: Descriptor structure containing arguments and return values
291 * @res: Structure containing results from SMC/HVC call
292 *
293 * Sends a command to the SCM and waits for the command to finish processing.
294 * This can be called in atomic context.
295 */
296static int qcom_scm_call_atomic(struct device *dev,
297 const struct qcom_scm_desc *desc,
298 struct qcom_scm_res *res)
299{
300 switch (__get_convention()) {
301 case SMC_CONVENTION_ARM_32:
302 case SMC_CONVENTION_ARM_64:
303 return scm_smc_call(dev, desc, res, true);
304 case SMC_CONVENTION_LEGACY:
305 return scm_legacy_call_atomic(dev, desc, res);
306 default:
307 pr_err("Unknown current SCM calling convention.\n");
308 return -EINVAL;
309 }
310}
311
9d11af8b
SB
312static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
313 u32 cmd_id)
9a434cee
EB
314{
315 int ret;
316 struct qcom_scm_desc desc = {
317 .svc = QCOM_SCM_SVC_INFO,
318 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
319 .owner = ARM_SMCCC_OWNER_SIP,
320 };
321 struct qcom_scm_res res;
322
323 desc.arginfo = QCOM_SCM_ARGS(1);
324 switch (__get_convention()) {
325 case SMC_CONVENTION_ARM_32:
326 case SMC_CONVENTION_ARM_64:
327 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
328 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
329 break;
330 case SMC_CONVENTION_LEGACY:
331 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
332 break;
333 default:
334 pr_err("Unknown SMC convention being used\n");
38212b2a 335 return false;
9a434cee
EB
336 }
337
338 ret = qcom_scm_call(dev, &desc, &res);
339
9d11af8b 340 return ret ? false : !!res.result[0];
9a434cee
EB
341}
342
52beb1fc 343static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
a353e4a0 344{
57d3b816 345 int cpu;
7734c4b5 346 unsigned int flags = 0;
57d3b816
EB
347 struct qcom_scm_desc desc = {
348 .svc = QCOM_SCM_SVC_BOOT,
349 .cmd = QCOM_SCM_BOOT_SET_ADDR,
350 .arginfo = QCOM_SCM_ARGS(2),
7734c4b5 351 .owner = ARM_SMCCC_OWNER_SIP,
57d3b816
EB
352 };
353
52beb1fc 354 for_each_present_cpu(cpu) {
7734c4b5
SG
355 if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
356 return -EINVAL;
357 flags |= cpu_bits[cpu];
57d3b816
EB
358 }
359
57d3b816
EB
360 desc.args[0] = flags;
361 desc.args[1] = virt_to_phys(entry);
362
7734c4b5
SG
363 return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
364}
57d3b816 365
f60a317b
SG
366static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
367{
368 struct qcom_scm_desc desc = {
369 .svc = QCOM_SCM_SVC_BOOT,
370 .cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
371 .owner = ARM_SMCCC_OWNER_SIP,
372 .arginfo = QCOM_SCM_ARGS(6),
373 .args = {
374 virt_to_phys(entry),
375 /* Apply to all CPUs in all affinity levels */
376 ~0ULL, ~0ULL, ~0ULL, ~0ULL,
377 flags,
378 },
379 };
380
381 /* Need a device for DMA of the additional arguments */
382 if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
383 return -EOPNOTSUPP;
384
385 return qcom_scm_call(__scm->dev, &desc, NULL);
386}
387
7734c4b5 388/**
52beb1fc 389 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
7734c4b5 390 * @entry: Entry point function for the cpus
7734c4b5
SG
391 *
392 * Set the Linux entry point for the SCM to transfer control to when coming
393 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
394 */
52beb1fc 395int qcom_scm_set_warm_boot_addr(void *entry)
7734c4b5 396{
f60a317b
SG
397 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
398 /* Fallback to old SCM call */
399 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
400 return 0;
a353e4a0 401}
2784e3b0 402EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
2ce76a6a
LI
403
404/**
52beb1fc 405 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
2ce76a6a 406 * @entry: Entry point function for the cpus
2ce76a6a 407 */
52beb1fc 408int qcom_scm_set_cold_boot_addr(void *entry)
2ce76a6a 409{
f60a317b
SG
410 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
411 /* Fallback to old SCM call */
412 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
413 return 0;
2ce76a6a 414}
2784e3b0 415EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
767b0235 416
767b0235
LI
417/**
418 * qcom_scm_cpu_power_down() - Power down the cpu
a5d32f6d 419 * @flags: Flags to flush cache
767b0235
LI
420 *
421 * This is an end point to power down cpu. If there was a pending interrupt,
422 * the control would return from this function, otherwise, the cpu jumps to the
423 * warm boot entry point set for this cpu upon reset.
424 */
425void qcom_scm_cpu_power_down(u32 flags)
426{
57d3b816
EB
427 struct qcom_scm_desc desc = {
428 .svc = QCOM_SCM_SVC_BOOT,
429 .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
430 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
431 .arginfo = QCOM_SCM_ARGS(1),
432 .owner = ARM_SMCCC_OWNER_SIP,
433 };
434
435 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
767b0235 436}
2784e3b0 437EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
9626b699 438
65f0c90b 439int qcom_scm_set_remote_state(u32 state, u32 id)
f01e90fe 440{
57d3b816
EB
441 struct qcom_scm_desc desc = {
442 .svc = QCOM_SCM_SVC_BOOT,
443 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
444 .arginfo = QCOM_SCM_ARGS(2),
445 .args[0] = state,
446 .args[1] = id,
447 .owner = ARM_SMCCC_OWNER_SIP,
448 };
449 struct qcom_scm_res res;
450 int ret;
451
452 ret = qcom_scm_call(__scm->dev, &desc, &res);
453
454 return ret ? : res.result[0];
f01e90fe 455}
2784e3b0 456EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
f01e90fe 457
ff4aa3bc
RM
458static int qcom_scm_disable_sdi(void)
459{
460 int ret;
461 struct qcom_scm_desc desc = {
462 .svc = QCOM_SCM_SVC_BOOT,
463 .cmd = QCOM_SCM_BOOT_SDI_CONFIG,
464 .args[0] = 1, /* Disable watchdog debug */
465 .args[1] = 0, /* Disable SDI */
466 .arginfo = QCOM_SCM_ARGS(2),
467 .owner = ARM_SMCCC_OWNER_SIP,
468 };
469 struct qcom_scm_res res;
470
471 ret = qcom_scm_clk_enable();
472 if (ret)
473 return ret;
474 ret = qcom_scm_call(__scm->dev, &desc, &res);
475
476 qcom_scm_clk_disable();
477
478 return ret ? : res.result[0];
479}
480
57d3b816
EB
481static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
482{
483 struct qcom_scm_desc desc = {
484 .svc = QCOM_SCM_SVC_BOOT,
485 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
486 .arginfo = QCOM_SCM_ARGS(2),
487 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
488 .owner = ARM_SMCCC_OWNER_SIP,
489 };
490
491 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
492
b88c2828 493 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
57d3b816
EB
494}
495
65f0c90b 496static void qcom_scm_set_download_mode(bool enable)
b0a1614f 497{
65f0c90b
EB
498 bool avail;
499 int ret = 0;
b0a1614f 500
65f0c90b
EB
501 avail = __qcom_scm_is_call_available(__scm->dev,
502 QCOM_SCM_SVC_BOOT,
503 QCOM_SCM_BOOT_SET_DLOAD_MODE);
504 if (avail) {
505 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
506 } else if (__scm->dload_mode_addr) {
57d3b816
EB
507 ret = qcom_scm_io_writel(__scm->dload_mode_addr,
508 enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
65f0c90b
EB
509 } else {
510 dev_err(__scm->dev,
511 "No available mechanism for setting download mode\n");
512 }
b0a1614f 513
65f0c90b
EB
514 if (ret)
515 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
b0a1614f 516}
b0a1614f 517
f01e90fe
BA
518/**
519 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
520 * state machine for a given peripheral, using the
521 * metadata
522 * @peripheral: peripheral id
523 * @metadata: pointer to memory containing ELF header, program header table
524 * and optional blob of data used for authenticating the metadata
525 * and the rest of the firmware
526 * @size: size of the metadata
3a99f121 527 * @ctx: optional metadata context
f01e90fe 528 *
3a99f121
BA
529 * Return: 0 on success.
530 *
531 * Upon successful return, the PAS metadata context (@ctx) will be used to
532 * track the metadata allocation, this needs to be released by invoking
533 * qcom_scm_pas_metadata_release() by the caller.
f01e90fe 534 */
3a99f121
BA
535int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
536 struct qcom_scm_pas_metadata *ctx)
f01e90fe
BA
537{
538 dma_addr_t mdata_phys;
539 void *mdata_buf;
540 int ret;
57d3b816
EB
541 struct qcom_scm_desc desc = {
542 .svc = QCOM_SCM_SVC_PIL,
543 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
544 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
545 .args[0] = peripheral,
546 .owner = ARM_SMCCC_OWNER_SIP,
547 };
548 struct qcom_scm_res res;
f01e90fe
BA
549
550 /*
551 * During the scm call memory protection will be enabled for the meta
552 * data blob, so make sure it's physically contiguous, 4K aligned and
553 * non-cachable to avoid XPU violations.
554 */
555 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
556 GFP_KERNEL);
3de990f7 557 if (!mdata_buf)
f01e90fe 558 return -ENOMEM;
3de990f7 559
f01e90fe
BA
560 memcpy(mdata_buf, metadata, size);
561
562 ret = qcom_scm_clk_enable();
563 if (ret)
3a99f121 564 goto out;
f01e90fe 565
65b7ebda
SS
566 ret = qcom_scm_bw_enable();
567 if (ret)
0c50b7fc 568 goto disable_clk;
65b7ebda 569
57d3b816
EB
570 desc.args[1] = mdata_phys;
571
572 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 573 qcom_scm_bw_disable();
0c50b7fc
GJ
574
575disable_clk:
f01e90fe
BA
576 qcom_scm_clk_disable();
577
3a99f121
BA
578out:
579 if (ret < 0 || !ctx) {
580 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
581 } else if (ctx) {
582 ctx->ptr = mdata_buf;
583 ctx->phys = mdata_phys;
584 ctx->size = size;
585 }
f01e90fe 586
57d3b816 587 return ret ? : res.result[0];
f01e90fe 588}
2784e3b0 589EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
f01e90fe 590
3a99f121
BA
591/**
592 * qcom_scm_pas_metadata_release() - release metadata context
593 * @ctx: metadata context
594 */
595void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
596{
597 if (!ctx->ptr)
598 return;
599
600 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
601
602 ctx->ptr = NULL;
603 ctx->phys = 0;
604 ctx->size = 0;
605}
2784e3b0 606EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
3a99f121 607
f01e90fe
BA
608/**
609 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
610 * for firmware loading
611 * @peripheral: peripheral id
612 * @addr: start address of memory area to prepare
613 * @size: size of the memory area to prepare
614 *
615 * Returns 0 on success.
616 */
617int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
618{
619 int ret;
57d3b816
EB
620 struct qcom_scm_desc desc = {
621 .svc = QCOM_SCM_SVC_PIL,
622 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
623 .arginfo = QCOM_SCM_ARGS(3),
624 .args[0] = peripheral,
625 .args[1] = addr,
626 .args[2] = size,
627 .owner = ARM_SMCCC_OWNER_SIP,
628 };
629 struct qcom_scm_res res;
f01e90fe
BA
630
631 ret = qcom_scm_clk_enable();
632 if (ret)
633 return ret;
634
65b7ebda
SS
635 ret = qcom_scm_bw_enable();
636 if (ret)
0c50b7fc 637 goto disable_clk;
65b7ebda 638
57d3b816 639 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 640 qcom_scm_bw_disable();
0c50b7fc
GJ
641
642disable_clk:
f01e90fe
BA
643 qcom_scm_clk_disable();
644
57d3b816 645 return ret ? : res.result[0];
f01e90fe 646}
2784e3b0 647EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
f01e90fe
BA
648
649/**
650 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
651 * and reset the remote processor
652 * @peripheral: peripheral id
653 *
654 * Return 0 on success.
655 */
656int qcom_scm_pas_auth_and_reset(u32 peripheral)
657{
658 int ret;
57d3b816
EB
659 struct qcom_scm_desc desc = {
660 .svc = QCOM_SCM_SVC_PIL,
661 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
662 .arginfo = QCOM_SCM_ARGS(1),
663 .args[0] = peripheral,
664 .owner = ARM_SMCCC_OWNER_SIP,
665 };
666 struct qcom_scm_res res;
f01e90fe
BA
667
668 ret = qcom_scm_clk_enable();
669 if (ret)
670 return ret;
671
65b7ebda
SS
672 ret = qcom_scm_bw_enable();
673 if (ret)
0c50b7fc 674 goto disable_clk;
65b7ebda 675
57d3b816 676 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 677 qcom_scm_bw_disable();
0c50b7fc
GJ
678
679disable_clk:
f01e90fe
BA
680 qcom_scm_clk_disable();
681
57d3b816 682 return ret ? : res.result[0];
f01e90fe 683}
2784e3b0 684EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
f01e90fe
BA
685
686/**
687 * qcom_scm_pas_shutdown() - Shut down the remote processor
688 * @peripheral: peripheral id
689 *
690 * Returns 0 on success.
691 */
692int qcom_scm_pas_shutdown(u32 peripheral)
693{
694 int ret;
57d3b816
EB
695 struct qcom_scm_desc desc = {
696 .svc = QCOM_SCM_SVC_PIL,
697 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
698 .arginfo = QCOM_SCM_ARGS(1),
699 .args[0] = peripheral,
700 .owner = ARM_SMCCC_OWNER_SIP,
701 };
702 struct qcom_scm_res res;
f01e90fe
BA
703
704 ret = qcom_scm_clk_enable();
705 if (ret)
706 return ret;
707
65b7ebda
SS
708 ret = qcom_scm_bw_enable();
709 if (ret)
0c50b7fc 710 goto disable_clk;
65b7ebda 711
57d3b816 712 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 713 qcom_scm_bw_disable();
0c50b7fc
GJ
714
715disable_clk:
f01e90fe
BA
716 qcom_scm_clk_disable();
717
57d3b816 718 return ret ? : res.result[0];
f01e90fe 719}
2784e3b0 720EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
f01e90fe 721
65f0c90b
EB
722/**
723 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
724 * available for the given peripherial
725 * @peripheral: peripheral id
726 *
727 * Returns true if PAS is supported for this peripheral, otherwise false.
728 */
729bool qcom_scm_pas_supported(u32 peripheral)
730{
731 int ret;
57d3b816
EB
732 struct qcom_scm_desc desc = {
733 .svc = QCOM_SCM_SVC_PIL,
734 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
735 .arginfo = QCOM_SCM_ARGS(1),
736 .args[0] = peripheral,
737 .owner = ARM_SMCCC_OWNER_SIP,
738 };
739 struct qcom_scm_res res;
65f0c90b 740
9d11af8b
SB
741 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
742 QCOM_SCM_PIL_PAS_IS_SUPPORTED))
65f0c90b
EB
743 return false;
744
57d3b816
EB
745 ret = qcom_scm_call(__scm->dev, &desc, &res);
746
747 return ret ? false : !!res.result[0];
65f0c90b 748}
2784e3b0 749EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
65f0c90b 750
57d3b816
EB
751static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
752{
753 struct qcom_scm_desc desc = {
754 .svc = QCOM_SCM_SVC_PIL,
755 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
756 .arginfo = QCOM_SCM_ARGS(2),
757 .args[0] = reset,
758 .args[1] = 0,
759 .owner = ARM_SMCCC_OWNER_SIP,
760 };
761 struct qcom_scm_res res;
762 int ret;
763
764 ret = qcom_scm_call(__scm->dev, &desc, &res);
765
766 return ret ? : res.result[0];
767}
768
dd4fe5b2
BA
769static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
770 unsigned long idx)
771{
772 if (idx != 0)
773 return -EINVAL;
774
775 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
776}
777
778static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
779 unsigned long idx)
780{
781 if (idx != 0)
782 return -EINVAL;
783
784 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
785}
786
787static const struct reset_control_ops qcom_scm_pas_reset_ops = {
788 .assert = qcom_scm_pas_reset_assert,
789 .deassert = qcom_scm_pas_reset_deassert,
790};
791
65f0c90b
EB
792int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
793{
57d3b816
EB
794 struct qcom_scm_desc desc = {
795 .svc = QCOM_SCM_SVC_IO,
796 .cmd = QCOM_SCM_IO_READ,
797 .arginfo = QCOM_SCM_ARGS(1),
798 .args[0] = addr,
799 .owner = ARM_SMCCC_OWNER_SIP,
800 };
801 struct qcom_scm_res res;
802 int ret;
803
804
b88c2828 805 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
57d3b816
EB
806 if (ret >= 0)
807 *val = res.result[0];
808
809 return ret < 0 ? ret : 0;
65f0c90b 810}
2784e3b0 811EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
65f0c90b
EB
812
813int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
814{
57d3b816
EB
815 struct qcom_scm_desc desc = {
816 .svc = QCOM_SCM_SVC_IO,
817 .cmd = QCOM_SCM_IO_WRITE,
818 .arginfo = QCOM_SCM_ARGS(2),
819 .args[0] = addr,
820 .args[1] = val,
821 .owner = ARM_SMCCC_OWNER_SIP,
822 };
823
b88c2828 824 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
65f0c90b 825}
2784e3b0 826EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
65f0c90b 827
0434a406
RC
828/**
829 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
830 * supports restore security config interface.
831 *
832 * Return true if restore-cfg interface is supported, false if not.
833 */
834bool qcom_scm_restore_sec_cfg_available(void)
835{
836 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
5443cc5f 837 QCOM_SCM_MP_RESTORE_SEC_CFG);
0434a406 838}
2784e3b0 839EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
0434a406 840
a2c680c6
RC
841int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
842{
57d3b816
EB
843 struct qcom_scm_desc desc = {
844 .svc = QCOM_SCM_SVC_MP,
845 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
846 .arginfo = QCOM_SCM_ARGS(2),
847 .args[0] = device_id,
848 .args[1] = spare,
849 .owner = ARM_SMCCC_OWNER_SIP,
850 };
851 struct qcom_scm_res res;
852 int ret;
853
854 ret = qcom_scm_call(__scm->dev, &desc, &res);
855
856 return ret ? : res.result[0];
a2c680c6 857}
2784e3b0 858EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
a2c680c6 859
b182cc4d
SV
860int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
861{
57d3b816
EB
862 struct qcom_scm_desc desc = {
863 .svc = QCOM_SCM_SVC_MP,
864 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
865 .arginfo = QCOM_SCM_ARGS(1),
866 .args[0] = spare,
867 .owner = ARM_SMCCC_OWNER_SIP,
868 };
869 struct qcom_scm_res res;
870 int ret;
871
872 ret = qcom_scm_call(__scm->dev, &desc, &res);
873
874 if (size)
875 *size = res.result[0];
876
877 return ret ? : res.result[1];
b182cc4d 878}
2784e3b0 879EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
b182cc4d
SV
880
881int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
882{
57d3b816
EB
883 struct qcom_scm_desc desc = {
884 .svc = QCOM_SCM_SVC_MP,
885 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
886 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
887 QCOM_SCM_VAL),
888 .args[0] = addr,
889 .args[1] = size,
890 .args[2] = spare,
891 .owner = ARM_SMCCC_OWNER_SIP,
892 };
893 int ret;
894
57d3b816
EB
895 ret = qcom_scm_call(__scm->dev, &desc, NULL);
896
897 /* the pg table has been initialized already, ignore the error */
898 if (ret == -EPERM)
899 ret = 0;
900
901 return ret;
b182cc4d 902}
2784e3b0 903EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
b182cc4d 904
94351509
ADR
905int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
906{
907 struct qcom_scm_desc desc = {
908 .svc = QCOM_SCM_SVC_MP,
909 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
910 .arginfo = QCOM_SCM_ARGS(2),
911 .args[0] = size,
912 .args[1] = spare,
913 .owner = ARM_SMCCC_OWNER_SIP,
914 };
915
916 return qcom_scm_call(__scm->dev, &desc, NULL);
917}
2784e3b0 918EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
94351509 919
6d885330
SV
920int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
921 u32 cp_nonpixel_start,
922 u32 cp_nonpixel_size)
923{
924 int ret;
925 struct qcom_scm_desc desc = {
926 .svc = QCOM_SCM_SVC_MP,
927 .cmd = QCOM_SCM_MP_VIDEO_VAR,
928 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
929 QCOM_SCM_VAL, QCOM_SCM_VAL),
930 .args[0] = cp_start,
931 .args[1] = cp_size,
932 .args[2] = cp_nonpixel_start,
933 .args[3] = cp_nonpixel_size,
934 .owner = ARM_SMCCC_OWNER_SIP,
935 };
936 struct qcom_scm_res res;
937
938 ret = qcom_scm_call(__scm->dev, &desc, &res);
939
940 return ret ? : res.result[0];
941}
2784e3b0 942EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
6d885330 943
57d3b816
EB
944static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
945 size_t mem_sz, phys_addr_t src, size_t src_sz,
946 phys_addr_t dest, size_t dest_sz)
947{
948 int ret;
949 struct qcom_scm_desc desc = {
950 .svc = QCOM_SCM_SVC_MP,
951 .cmd = QCOM_SCM_MP_ASSIGN,
952 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
953 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
954 QCOM_SCM_VAL, QCOM_SCM_VAL),
955 .args[0] = mem_region,
956 .args[1] = mem_sz,
957 .args[2] = src,
958 .args[3] = src_sz,
959 .args[4] = dest,
960 .args[5] = dest_sz,
961 .args[6] = 0,
962 .owner = ARM_SMCCC_OWNER_SIP,
963 };
964 struct qcom_scm_res res;
965
966 ret = qcom_scm_call(dev, &desc, &res);
967
968 return ret ? : res.result[0];
969}
970
d82bd359
AKD
971/**
972 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
973 * @mem_addr: mem region whose ownership need to be reassigned
974 * @mem_sz: size of the region.
975 * @srcvm: vmid for current set of owners, each set bit in
976 * flag indicate a unique owner
c8b08fc0 977 * @newvm: array having new owners and corresponding permission
d82bd359
AKD
978 * flags
979 * @dest_cnt: number of owners in next set.
980 *
c8b08fc0 981 * Return negative errno on failure or 0 on success with @srcvm updated.
d82bd359
AKD
982 */
983int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
968a26a0 984 u64 *srcvm,
af311ff9
SB
985 const struct qcom_scm_vmperm *newvm,
986 unsigned int dest_cnt)
d82bd359
AKD
987{
988 struct qcom_scm_current_perm_info *destvm;
989 struct qcom_scm_mem_map_info *mem_to_map;
990 phys_addr_t mem_to_map_phys;
991 phys_addr_t dest_phys;
459b1f86 992 dma_addr_t ptr_phys;
d82bd359
AKD
993 size_t mem_to_map_sz;
994 size_t dest_sz;
995 size_t src_sz;
996 size_t ptr_sz;
997 int next_vm;
998 __le32 *src;
999 void *ptr;
af311ff9 1000 int ret, i, b;
968a26a0 1001 u64 srcvm_bits = *srcvm;
d82bd359 1002
968a26a0 1003 src_sz = hweight64(srcvm_bits) * sizeof(*src);
d82bd359
AKD
1004 mem_to_map_sz = sizeof(*mem_to_map);
1005 dest_sz = dest_cnt * sizeof(*destvm);
1006 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1007 ALIGN(dest_sz, SZ_64);
1008
459b1f86 1009 ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
d82bd359
AKD
1010 if (!ptr)
1011 return -ENOMEM;
1012
1013 /* Fill source vmid detail */
1014 src = ptr;
af311ff9 1015 i = 0;
968a26a0
EB
1016 for (b = 0; b < BITS_PER_TYPE(u64); b++) {
1017 if (srcvm_bits & BIT(b))
1018 src[i++] = cpu_to_le32(b);
1019 }
d82bd359
AKD
1020
1021 /* Fill details of mem buff to map */
1022 mem_to_map = ptr + ALIGN(src_sz, SZ_64);
1023 mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
af311ff9
SB
1024 mem_to_map->mem_addr = cpu_to_le64(mem_addr);
1025 mem_to_map->mem_size = cpu_to_le64(mem_sz);
d82bd359
AKD
1026
1027 next_vm = 0;
1028 /* Fill details of next vmid detail */
1029 destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1030 dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
af311ff9
SB
1031 for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
1032 destvm->vmid = cpu_to_le32(newvm->vmid);
1033 destvm->perm = cpu_to_le32(newvm->perm);
1034 destvm->ctx = 0;
1035 destvm->ctx_size = 0;
1036 next_vm |= BIT(newvm->vmid);
d82bd359
AKD
1037 }
1038
1039 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1040 ptr_phys, src_sz, dest_phys, dest_sz);
459b1f86 1041 dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
d82bd359
AKD
1042 if (ret) {
1043 dev_err(__scm->dev,
c8b08fc0 1044 "Assign memory protection call failed %d\n", ret);
d82bd359
AKD
1045 return -EINVAL;
1046 }
1047
1048 *srcvm = next_vm;
1049 return 0;
1050}
2784e3b0 1051EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
d82bd359 1052
65f0c90b
EB
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 *
 * Return: true if the secure world implements the OCMEM lock command.
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
65f0c90b
EB
1062
/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
65f0c90b
EB
1088
/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
65f0c90b 1111
0f206514
EB
1112/**
1113 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1114 *
1115 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1116 * qcom_scm_ice_set_key() are available.
1117 */
1118bool qcom_scm_ice_available(void)
1119{
1120 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1121 QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1122 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1123 QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1124}
2784e3b0 1125EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
0f206514
EB
1126
1127/**
1128 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1129 * @index: the keyslot to invalidate
1130 *
433611ea
EB
1131 * The UFSHCI and eMMC standards define a standard way to do this, but it
1132 * doesn't work on these SoCs; only this SCM call does.
1133 *
1134 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1135 * call doesn't specify which ICE instance the keyslot belongs to.
0f206514
EB
1136 *
1137 * Return: 0 on success; -errno on failure.
1138 */
1139int qcom_scm_ice_invalidate_key(u32 index)
1140{
1141 struct qcom_scm_desc desc = {
1142 .svc = QCOM_SCM_SVC_ES,
1143 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1144 .arginfo = QCOM_SCM_ARGS(1),
1145 .args[0] = index,
1146 .owner = ARM_SMCCC_OWNER_SIP,
1147 };
1148
1149 return qcom_scm_call(__scm->dev, &desc, NULL);
1150}
2784e3b0 1151EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
0f206514
EB
1152
/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way to
	 * do this is by using the DMA API. But as is best practice for crypto
	 * keys, we also must wipe the key after use. This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	/* args[1] is the QCOM_SCM_RW argument: physical address of the copy. */
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* Wipe the key copy before freeing; memzero_explicit() survives DCE. */
	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
0f206514 1218
65f0c90b
EB
1219/**
1220 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1221 *
1222 * Return true if HDCP is supported, false if not.
1223 */
1224bool qcom_scm_hdcp_available(void)
1225{
9d11af8b 1226 bool avail;
65f0c90b
EB
1227 int ret = qcom_scm_clk_enable();
1228
1229 if (ret)
1230 return ret;
1231
9d11af8b 1232 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
65f0c90b
EB
1233 QCOM_SCM_HDCP_INVOKE);
1234
1235 qcom_scm_clk_disable();
1236
9d11af8b 1237 return avail;
65f0c90b 1238}
2784e3b0 1239EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
65f0c90b
EB
1240
/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 *
 * Return: 0 on success; -errno on failure.
 *
 * NOTE(review): desc.args unconditionally reads req[0]..req[4]; req_cnt is
 * only bounded from above. Callers appear expected to pass an array of
 * QCOM_SCM_HDCP_MAX_REQ_CNT entries — confirm at call sites.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
65f0c90b 1287
071a1333
ADR
/**
 * qcom_scm_iommu_set_pt_format() - Select the SMMU page table format.
 * @sec_id:  secure SMMU identifier
 * @ctx_num: context bank number
 * @pt_fmt:  page table format (0: LPAE AArch32 - 1: AArch64)
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
071a1333 1303
65f0c90b
EB
1304int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1305{
57d3b816
EB
1306 struct qcom_scm_desc desc = {
1307 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1308 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1309 .arginfo = QCOM_SCM_ARGS(2),
1310 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1311 .args[1] = en,
1312 .owner = ARM_SMCCC_OWNER_SIP,
1313 };
1314
1315
1316 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
65f0c90b 1317}
2784e3b0 1318EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
65f0c90b 1319
de3438c4
TG
1320bool qcom_scm_lmh_dcvsh_available(void)
1321{
1322 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1323}
2784e3b0 1324EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
de3438c4
TG
1325
/**
 * qcom_scm_lmh_profile_change() - Switch the active LMh (Limits Management
 *				   hardware) profile.
 * @profile_id: identifier of the profile to activate
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
de3438c4
TG
1339
/**
 * qcom_scm_lmh_dcvsh() - Write an LMh DCVSH payload to the firmware.
 * @payload_fn:  function/algorithm identifier placed in the payload
 * @payload_reg: register/parameter identifier placed in the payload
 * @payload_val: value to write
 * @limit_node:  LMh limit node address
 * @node_id:     LMh node identifier
 * @version:     LMh interface version
 *
 * The payload is a 5-word buffer handed to the firmware by physical address
 * (QCOM_SCM_RO argument). The meaning of the constant words at indices 1 and
 * 3 is firmware-defined — presumably counts/flags of the single (reg, val)
 * pair; confirm against the LMh firmware interface.
 *
 * Return: 0 on success; -ENOMEM if the payload buffer cannot be allocated;
 * other -errno on SCM failure.
 */
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	/* args[0] is the QCOM_SCM_RO argument: payload physical address. */
	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
de3438c4 1377
65f0c90b
EB
/*
 * qcom_scm_find_dload_address() - Resolve the TCSR "download mode" register
 * address from the "qcom,dload-mode" DT property (phandle + offset).
 *
 * Returns 0 and leaves *addr untouched when the property is absent (download
 * mode simply unsupported); returns -errno on malformed properties.
 */
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	/* First cell: phandle to the TCSR syscon node. Absent -> not fatal. */
	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	/* Second cell: register offset within the TCSR block. */
	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
1403
00b12486
ML
1404#ifdef CONFIG_QCOM_QSEECOM
1405
1406/* Lock for QSEECOM SCM call executions */
1407static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1408
1409static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1410 struct qcom_scm_qseecom_resp *res)
1411{
1412 struct qcom_scm_res scm_res = {};
1413 int status;
1414
1415 /*
1416 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1417 * require the respective call lock to be held.
1418 */
1419 lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1420
1421 status = qcom_scm_call(__scm->dev, desc, &scm_res);
1422
1423 res->result = scm_res.result[0];
1424 res->resp_type = scm_res.result[1];
1425 res->data = scm_res.result[2];
1426
1427 if (status)
1428 return status;
1429
1430 return 0;
1431}
1432
/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res: SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed same time,
	 * so lock things here. This needs to be extended to callback/listener
	 * handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}
1480
1481/**
1482 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1483 * @version: Pointer where the QSEECOM version will be stored.
1484 *
1485 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
1486 * the TrustZone.
1487 *
1488 * Return: Zero on success, nonzero on failure.
1489 */
1490static int qcom_scm_qseecom_get_version(u32 *version)
1491{
1492 struct qcom_scm_desc desc = {};
1493 struct qcom_scm_qseecom_resp res = {};
1494 u32 feature = 10;
1495 int ret;
1496
1497 desc.owner = QSEECOM_TZ_OWNER_SIP;
1498 desc.svc = QSEECOM_TZ_SVC_INFO;
1499 desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1500 desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1501 desc.args[0] = feature;
1502
1503 ret = qcom_scm_qseecom_call(&desc, &res);
1504 if (ret)
1505 return ret;
1506
1507 *version = res.result;
1508 return 0;
1509}
1510
/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id: The returned app ID.
 *
 * Query and return the application ID of the SEE app identified by the given
 * name. This returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	dma_addr_t name_buf_phys;
	char *name_buf;
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	/* The firmware expects a fixed-size, zero-padded name buffer. */
	name_buf = kzalloc(name_buf_size, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
	status = dma_mapping_error(__scm->dev, name_buf_phys);
	if (status) {
		kfree(name_buf);
		dev_err(__scm->dev, "qseecom: failed to map dma address\n");
		return status;
	}

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = name_buf_phys;
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);
	dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
	kfree(name_buf);

	if (status)
		return status;

	/* QSEECOM_RESULT_FAILURE on lookup means the app is not loaded. */
	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
1577
/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id: The ID of the target app.
 * @req: Request buffer sent to the app (must be DMA-mappable).
 * @req_size: Size of the request buffer.
 * @rsp: Response buffer, written to by the app (must be DMA-mappable).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and read back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
			      size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	dma_addr_t req_phys;
	dma_addr_t rsp_phys;
	int status;

	/* Map request buffer */
	req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
	status = dma_mapping_error(__scm->dev, req_phys);
	if (status) {
		dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
		return status;
	}

	/* Map response buffer */
	rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
	status = dma_mapping_error(__scm->dev, rsp_phys);
	if (status) {
		/* Undo the request mapping before bailing out. */
		dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
		dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
		return status;
	}

	/* Set up SCM call data */
	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	/* Perform call */
	status = qcom_scm_qseecom_call(&desc, &res);

	/* Unmap buffers */
	dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
	dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
1649
/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
e478c5fb 1654static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
00b12486
ML
1655 { .compatible = "lenovo,thinkpad-x13s", },
1656 { }
1657};
1658
1659static bool qcom_scm_qseecom_machine_is_allowed(void)
1660{
1661 struct device_node *np;
1662 bool match;
1663
1664 np = of_find_node_by_path("/");
1665 if (!np)
1666 return false;
1667
1668 match = of_match_node(qcom_scm_qseecom_allowlist, np);
1669 of_node_put(np);
1670
1671 return match;
1672}
1673
/* devm action: tear down the qseecom interface device on SCM driver removal. */
static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}
1681
/*
 * qcom_scm_qseecom_init() - Detect QSEECOM support and register the interface
 * device. Returns 0 both on success and when QSEECOM is simply unavailable or
 * the machine is not allowlisted; -errno only for real setup failures.
 */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	/* Ensure the device is torn down automatically with the SCM device. */
	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}
1728
#else /* CONFIG_QCOM_QSEECOM */

/* QSEECOM support compiled out: report success so SCM probe continues. */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */
1737
65f0c90b
EB
/**
 * qcom_scm_is_available() - Checks if SCM is available
 *
 * Return: true once the SCM platform device has probed and set __scm.
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
65f0c90b 1746
6bf32599
GDS
1747static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
1748{
1749 /* FW currently only supports a single wq_ctx (zero).
1750 * TODO: Update this logic to include dynamic allocation and lookup of
1751 * completion structs when FW supports more wq_ctx values.
1752 */
1753 if (wq_ctx != 0) {
1754 dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
1755 return -EINVAL;
1756 }
1757
1758 return 0;
1759}
1760
1761int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
1762{
1763 int ret;
1764
1765 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1766 if (ret)
1767 return ret;
1768
1769 wait_for_completion(&__scm->waitq_comp);
1770
1771 return 0;
1772}
1773
1774static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
1775{
1776 int ret;
1777
1778 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1779 if (ret)
1780 return ret;
1781
1782 complete(&__scm->waitq_comp);
1783
1784 return 0;
1785}
1786
/*
 * Threaded IRQ handler: drain all pending wait-queue wakeups reported by the
 * firmware via GET_WQ_CTX, waking the corresponding completion for each.
 * Always returns IRQ_HANDLED; errors are logged and the drain loop aborted.
 */
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
1814
d0f6fa7b
AG
/*
 * qcom_scm_probe() - Set up the SCM device: resolve the TCSR download-mode
 * address, acquire interconnect path, optional clocks, and the PAS reset
 * controller; publish the global __scm handle; then request the optional
 * wait-queue IRQ, detect the SMC calling convention, apply download-mode/SDI
 * policy and initialize the QSEECOM interface.
 */
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	/* All three clocks are optional; legacy platforms may lack them. */
	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* From here on qcom_scm_is_available() reports true. */
	__scm = scm;
	__scm->dev = &pdev->dev;

	init_completion(&__scm->waitq_comp);

	/* The wait-queue IRQ is optional; -ENXIO means "not wired up". */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);


	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}
1907
8c1b7dc9
BA
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}
1913
d0f6fa7b 1914static const struct of_device_id qcom_scm_dt_match[] = {
626237db
KD
1915 { .compatible = "qcom,scm" },
1916
1917 /* Legacy entries kept for backwards compatibility */
5130464a
KD
1918 { .compatible = "qcom,scm-apq8064" },
1919 { .compatible = "qcom,scm-apq8084" },
60cd420c 1920 { .compatible = "qcom,scm-ipq4019" },
5130464a
KD
1921 { .compatible = "qcom,scm-msm8953" },
1922 { .compatible = "qcom,scm-msm8974" },
60cd420c 1923 { .compatible = "qcom,scm-msm8996" },
d0f6fa7b
AG
1924 {}
1925};
b42000e4 1926MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
d0f6fa7b 1927
d0f6fa7b
AG
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		/* Other drivers depend on SCM; never allow manual unbind. */
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};
1937
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
/* Registered early (subsys level): many drivers depend on SCM services. */
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");