firmware: qcom: scm: Rework dload mode availability check
[linux-2.6-block.git] / drivers / firmware / qcom / qcom_scm.c
CommitLineData
97fb5e8d 1// SPDX-License-Identifier: GPL-2.0-only
5443cc5f 2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
2ce76a6a 3 * Copyright (C) 2015 Linaro Ltd.
2a1eb58a 4 */
bc7fbb5e
BG
5
6#include <linux/arm-smccc.h>
7#include <linux/clk.h>
6bf32599 8#include <linux/completion.h>
b6a1dfbc 9#include <linux/cpumask.h>
f01e90fe 10#include <linux/dma-mapping.h>
bc7fbb5e
BG
11#include <linux/export.h>
12#include <linux/firmware/qcom/qcom_scm.h>
13#include <linux/init.h>
65b7ebda 14#include <linux/interconnect.h>
bc7fbb5e 15#include <linux/interrupt.h>
8c1b7dc9 16#include <linux/module.h>
d0f6fa7b 17#include <linux/of.h>
8c1b7dc9 18#include <linux/of_address.h>
6bf32599 19#include <linux/of_irq.h>
d0f6fa7b 20#include <linux/of_platform.h>
bc7fbb5e 21#include <linux/platform_device.h>
dd4fe5b2 22#include <linux/reset-controller.h>
bc7fbb5e 23#include <linux/types.h>
2a1eb58a 24
b6a1dfbc 25#include "qcom_scm.h"
a353e4a0 26
8c1b7dc9
BA
/*
 * Whether to leave "download mode" (post-crash memory dump support) enabled;
 * defaults to the Kconfig choice and may be overridden on the kernel
 * command line via qcom_scm.download_mode.
 */
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
29
d0f6fa7b
AG
/*
 * struct qcom_scm - driver state for the SCM (Secure Channel Manager) device
 * @dev:             backing platform device
 * @core_clk:        optional core clock, toggled around SCM calls
 * @iface_clk:       optional interface clock, toggled around SCM calls
 * @bus_clk:         optional bus clock, toggled around SCM calls
 * @path:            optional interconnect path used to vote bandwidth
 * @waitq_comp:      completion for the SMC wait-queue mechanism
 *                   (usage not visible in this chunk — see the irq handler)
 * @reset:           reset controller exposing the MSS PAS reset
 * @scm_bw_lock:     control access to the interconnect path
 * @scm_vote_count:  reference count of outstanding bandwidth votes
 * @dload_mode_addr: address written to toggle download mode, when non-zero
 */
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;
};
45
d82bd359
AKD
/*
 * Firmware-facing descriptor of one destination VM and its permissions for
 * qcom_scm_assign_mem(); all fields are little-endian as consumed by TZ.
 */
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

/* Firmware-facing descriptor of the memory region being reassigned. */
struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};
58
00b12486
ML
/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

/* Status values returned in qcom_scm_qseecom_resp.result. */
enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS = 0,
	QSEECOM_RESULT_INCOMPLETE = 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
	QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
};

/* Discriminator for qcom_scm_qseecom_resp.data. */
enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID = 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
};

/* SMCCC owner field values used for QSEECOM calls. */
enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP = 2,
	QSEECOM_TZ_OWNER_TZ_APPS = 48,
	QSEECOM_TZ_OWNER_QSEE_OS = 50
};

/* QSEECOM service identifiers. */
enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
	QSEECOM_TZ_SVC_APP_MGR = 1,
	QSEECOM_TZ_SVC_INFO = 6,
};

/* Commands of the APP_MGR service. */
enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND = 1,
	QSEECOM_TZ_CMD_APP_LOOKUP = 3,
};

/* Commands of the INFO service. */
enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION = 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE		64
105
7734c4b5
SG
/*
 * Each bit configures cold/warm boot address for one of the 4 CPUs.
 * NOTE(review): the particular bit positions are a firmware contract
 * (QCOM_SCM_BOOT_SET_ADDR flags) — not derivable from this file alone.
 */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};
113
6bf32599
GDS
/* Wake flags for the SMC wait-queue resume call. */
#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL	BIT(1)

/* Human-readable names for the probed calling convention (log output). */
static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};
123
d0f6fa7b
AG
/* Singleton driver state; presumably assigned at probe (not in this chunk). */
static struct qcom_scm *__scm;
125
126static int qcom_scm_clk_enable(void)
127{
128 int ret;
129
130 ret = clk_prepare_enable(__scm->core_clk);
131 if (ret)
132 goto bail;
133
134 ret = clk_prepare_enable(__scm->iface_clk);
135 if (ret)
136 goto disable_core;
137
138 ret = clk_prepare_enable(__scm->bus_clk);
139 if (ret)
140 goto disable_iface;
141
142 return 0;
143
144disable_iface:
145 clk_disable_unprepare(__scm->iface_clk);
146disable_core:
147 clk_disable_unprepare(__scm->core_clk);
148bail:
149 return ret;
150}
151
/* Release the clocks taken by qcom_scm_clk_enable() after an SCM call. */
static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}
158
65b7ebda
SS
159static int qcom_scm_bw_enable(void)
160{
161 int ret = 0;
162
163 if (!__scm->path)
164 return 0;
165
65b7ebda
SS
166 mutex_lock(&__scm->scm_bw_lock);
167 if (!__scm->scm_vote_count) {
168 ret = icc_set_bw(__scm->path, 0, UINT_MAX);
169 if (ret < 0) {
170 dev_err(__scm->dev, "failed to set bandwidth request\n");
171 goto err_bw;
172 }
173 }
174 __scm->scm_vote_count++;
175err_bw:
176 mutex_unlock(&__scm->scm_bw_lock);
177
178 return ret;
179}
180
/*
 * Drop one interconnect bandwidth vote; the last voter removes the request.
 * NOTE(review): an unbalanced call with scm_vote_count already 0 would
 * underflow the counter — assumed callers always pair enable/disable; verify.
 */
static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}
191
f6ea568f
SB
/* Probed calling convention; rewritten only under scm_query_lock. */
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

/*
 * Determine (once) which SCM calling convention the firmware speaks by
 * issuing the IS_CALL_AVAIL query in each candidate convention, most
 * capable first. The result is cached in qcom_scm_convention; subsequent
 * calls return the cached value without re-probing.
 */
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	/* Fast path: convention already probed and cached. */
	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * system will encounter the undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/* Neither SMC convention answered the probe: assume legacy. */
	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}
261
/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();	/* the non-atomic backends may block */
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
286
/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
311
9d11af8b
SB
/*
 * Ask the firmware whether a particular (service, command) pair is
 * implemented. The function-id encoding differs between the SMC and
 * legacy conventions, hence the switch.
 *
 * Returns true only when the query itself succeeds and reports the call
 * as available; any error is folded into "not available".
 */
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
342
/*
 * Legacy per-cpu boot-address call: build the per-cpu flag mask from
 * @cpu_bits (cold or warm table) and hand the physical entry point to TZ.
 * Fails with -EINVAL if a present CPU index exceeds the 4-entry table.
 */
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	/* May run before probe, hence the __scm NULL check. */
	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
57d3b816 365
f60a317b
SG
/*
 * Multi-cluster variant of the boot-address call: applies @entry to all
 * CPUs at every affinity level. Not usable before probe or with the
 * legacy convention (the extra arguments need DMA via a device).
 */
static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
387
7734c4b5 388/**
52beb1fc 389 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
7734c4b5 390 * @entry: Entry point function for the cpus
7734c4b5
SG
391 *
392 * Set the Linux entry point for the SCM to transfer control to when coming
393 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
394 */
52beb1fc 395int qcom_scm_set_warm_boot_addr(void *entry)
7734c4b5 396{
f60a317b
SG
397 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
398 /* Fallback to old SCM call */
399 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
400 return 0;
a353e4a0 401}
2784e3b0 402EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
2ce76a6a
LI
403
404/**
52beb1fc 405 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
2ce76a6a 406 * @entry: Entry point function for the cpus
2ce76a6a 407 */
52beb1fc 408int qcom_scm_set_cold_boot_addr(void *entry)
2ce76a6a 409{
f60a317b
SG
410 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
411 /* Fallback to old SCM call */
412 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
413 return 0;
2ce76a6a 414}
2784e3b0 415EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
767b0235 416
767b0235
LI
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		/* Only the cache-flush bits of @flags are forwarded. */
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	/* May run before probe (early cpuidle), hence the __scm NULL check. */
	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
9626b699 438
65f0c90b 439int qcom_scm_set_remote_state(u32 state, u32 id)
f01e90fe 440{
57d3b816
EB
441 struct qcom_scm_desc desc = {
442 .svc = QCOM_SCM_SVC_BOOT,
443 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
444 .arginfo = QCOM_SCM_ARGS(2),
445 .args[0] = state,
446 .args[1] = id,
447 .owner = ARM_SMCCC_OWNER_SIP,
448 };
449 struct qcom_scm_res res;
450 int ret;
451
452 ret = qcom_scm_call(__scm->dev, &desc, &res);
453
454 return ret ? : res.result[0];
f01e90fe 455}
2784e3b0 456EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
f01e90fe 457
ff4aa3bc
RM
458static int qcom_scm_disable_sdi(void)
459{
460 int ret;
461 struct qcom_scm_desc desc = {
462 .svc = QCOM_SCM_SVC_BOOT,
463 .cmd = QCOM_SCM_BOOT_SDI_CONFIG,
464 .args[0] = 1, /* Disable watchdog debug */
465 .args[1] = 0, /* Disable SDI */
466 .arginfo = QCOM_SCM_ARGS(2),
467 .owner = ARM_SMCCC_OWNER_SIP,
468 };
469 struct qcom_scm_res res;
470
471 ret = qcom_scm_clk_enable();
472 if (ret)
473 return ret;
474 ret = qcom_scm_call(__scm->dev, &desc, &res);
475
476 qcom_scm_clk_disable();
477
478 return ret ? : res.result[0];
479}
480
57d3b816
EB
481static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
482{
483 struct qcom_scm_desc desc = {
484 .svc = QCOM_SCM_SVC_BOOT,
485 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
486 .arginfo = QCOM_SCM_ARGS(2),
487 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
488 .owner = ARM_SMCCC_OWNER_SIP,
489 };
490
491 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
492
b88c2828 493 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
57d3b816
EB
494}
495
/*
 * Enable or disable download mode using whichever mechanism the platform
 * provides: a direct register write when dload_mode_addr is known, else
 * the SET_DLOAD_MODE SCM call if the firmware implements it. Errors are
 * logged but not propagated (best-effort).
 */
static void qcom_scm_set_download_mode(bool enable)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		/* Platform exposes the download-mode cookie address directly. */
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
b0a1614f 514
f01e90fe
BA
515/**
516 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
517 * state machine for a given peripheral, using the
518 * metadata
519 * @peripheral: peripheral id
520 * @metadata: pointer to memory containing ELF header, program header table
521 * and optional blob of data used for authenticating the metadata
522 * and the rest of the firmware
523 * @size: size of the metadata
3a99f121 524 * @ctx: optional metadata context
f01e90fe 525 *
3a99f121
BA
526 * Return: 0 on success.
527 *
528 * Upon successful return, the PAS metadata context (@ctx) will be used to
529 * track the metadata allocation, this needs to be released by invoking
530 * qcom_scm_pas_metadata_release() by the caller.
f01e90fe 531 */
3a99f121
BA
532int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
533 struct qcom_scm_pas_metadata *ctx)
f01e90fe
BA
534{
535 dma_addr_t mdata_phys;
536 void *mdata_buf;
537 int ret;
57d3b816
EB
538 struct qcom_scm_desc desc = {
539 .svc = QCOM_SCM_SVC_PIL,
540 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
541 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
542 .args[0] = peripheral,
543 .owner = ARM_SMCCC_OWNER_SIP,
544 };
545 struct qcom_scm_res res;
f01e90fe
BA
546
547 /*
548 * During the scm call memory protection will be enabled for the meta
549 * data blob, so make sure it's physically contiguous, 4K aligned and
550 * non-cachable to avoid XPU violations.
551 */
552 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
553 GFP_KERNEL);
3de990f7 554 if (!mdata_buf)
f01e90fe 555 return -ENOMEM;
3de990f7 556
f01e90fe
BA
557 memcpy(mdata_buf, metadata, size);
558
559 ret = qcom_scm_clk_enable();
560 if (ret)
3a99f121 561 goto out;
f01e90fe 562
65b7ebda
SS
563 ret = qcom_scm_bw_enable();
564 if (ret)
0c50b7fc 565 goto disable_clk;
65b7ebda 566
57d3b816
EB
567 desc.args[1] = mdata_phys;
568
569 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 570 qcom_scm_bw_disable();
0c50b7fc
GJ
571
572disable_clk:
f01e90fe
BA
573 qcom_scm_clk_disable();
574
3a99f121
BA
575out:
576 if (ret < 0 || !ctx) {
577 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
578 } else if (ctx) {
579 ctx->ptr = mdata_buf;
580 ctx->phys = mdata_phys;
581 ctx->size = size;
582 }
f01e90fe 583
57d3b816 584 return ret ? : res.result[0];
f01e90fe 585}
2784e3b0 586EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
f01e90fe 587
3a99f121
BA
588/**
589 * qcom_scm_pas_metadata_release() - release metadata context
590 * @ctx: metadata context
591 */
592void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
593{
594 if (!ctx->ptr)
595 return;
596
597 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
598
599 ctx->ptr = NULL;
600 ctx->phys = 0;
601 ctx->size = 0;
602}
2784e3b0 603EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
3a99f121 604
f01e90fe
BA
605/**
606 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
607 * for firmware loading
608 * @peripheral: peripheral id
609 * @addr: start address of memory area to prepare
610 * @size: size of the memory area to prepare
611 *
612 * Returns 0 on success.
613 */
614int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
615{
616 int ret;
57d3b816
EB
617 struct qcom_scm_desc desc = {
618 .svc = QCOM_SCM_SVC_PIL,
619 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
620 .arginfo = QCOM_SCM_ARGS(3),
621 .args[0] = peripheral,
622 .args[1] = addr,
623 .args[2] = size,
624 .owner = ARM_SMCCC_OWNER_SIP,
625 };
626 struct qcom_scm_res res;
f01e90fe
BA
627
628 ret = qcom_scm_clk_enable();
629 if (ret)
630 return ret;
631
65b7ebda
SS
632 ret = qcom_scm_bw_enable();
633 if (ret)
0c50b7fc 634 goto disable_clk;
65b7ebda 635
57d3b816 636 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 637 qcom_scm_bw_disable();
0c50b7fc
GJ
638
639disable_clk:
f01e90fe
BA
640 qcom_scm_clk_disable();
641
57d3b816 642 return ret ? : res.result[0];
f01e90fe 643}
2784e3b0 644EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
f01e90fe
BA
645
646/**
647 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
648 * and reset the remote processor
649 * @peripheral: peripheral id
650 *
651 * Return 0 on success.
652 */
653int qcom_scm_pas_auth_and_reset(u32 peripheral)
654{
655 int ret;
57d3b816
EB
656 struct qcom_scm_desc desc = {
657 .svc = QCOM_SCM_SVC_PIL,
658 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
659 .arginfo = QCOM_SCM_ARGS(1),
660 .args[0] = peripheral,
661 .owner = ARM_SMCCC_OWNER_SIP,
662 };
663 struct qcom_scm_res res;
f01e90fe
BA
664
665 ret = qcom_scm_clk_enable();
666 if (ret)
667 return ret;
668
65b7ebda
SS
669 ret = qcom_scm_bw_enable();
670 if (ret)
0c50b7fc 671 goto disable_clk;
65b7ebda 672
57d3b816 673 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 674 qcom_scm_bw_disable();
0c50b7fc
GJ
675
676disable_clk:
f01e90fe
BA
677 qcom_scm_clk_disable();
678
57d3b816 679 return ret ? : res.result[0];
f01e90fe 680}
2784e3b0 681EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
f01e90fe
BA
682
683/**
684 * qcom_scm_pas_shutdown() - Shut down the remote processor
685 * @peripheral: peripheral id
686 *
687 * Returns 0 on success.
688 */
689int qcom_scm_pas_shutdown(u32 peripheral)
690{
691 int ret;
57d3b816
EB
692 struct qcom_scm_desc desc = {
693 .svc = QCOM_SCM_SVC_PIL,
694 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
695 .arginfo = QCOM_SCM_ARGS(1),
696 .args[0] = peripheral,
697 .owner = ARM_SMCCC_OWNER_SIP,
698 };
699 struct qcom_scm_res res;
f01e90fe
BA
700
701 ret = qcom_scm_clk_enable();
702 if (ret)
703 return ret;
704
65b7ebda
SS
705 ret = qcom_scm_bw_enable();
706 if (ret)
0c50b7fc 707 goto disable_clk;
65b7ebda 708
57d3b816 709 ret = qcom_scm_call(__scm->dev, &desc, &res);
65b7ebda 710 qcom_scm_bw_disable();
0c50b7fc
GJ
711
712disable_clk:
f01e90fe
BA
713 qcom_scm_clk_disable();
714
57d3b816 715 return ret ? : res.result[0];
f01e90fe 716}
2784e3b0 717EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
f01e90fe 718
65f0c90b
EB
719/**
720 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
721 * available for the given peripherial
722 * @peripheral: peripheral id
723 *
724 * Returns true if PAS is supported for this peripheral, otherwise false.
725 */
726bool qcom_scm_pas_supported(u32 peripheral)
727{
728 int ret;
57d3b816
EB
729 struct qcom_scm_desc desc = {
730 .svc = QCOM_SCM_SVC_PIL,
731 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
732 .arginfo = QCOM_SCM_ARGS(1),
733 .args[0] = peripheral,
734 .owner = ARM_SMCCC_OWNER_SIP,
735 };
736 struct qcom_scm_res res;
65f0c90b 737
9d11af8b
SB
738 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
739 QCOM_SCM_PIL_PAS_IS_SUPPORTED))
65f0c90b
EB
740 return false;
741
57d3b816
EB
742 ret = qcom_scm_call(__scm->dev, &desc, &res);
743
744 return ret ? false : !!res.result[0];
65f0c90b 745}
2784e3b0 746EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
65f0c90b 747
57d3b816
EB
748static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
749{
750 struct qcom_scm_desc desc = {
751 .svc = QCOM_SCM_SVC_PIL,
752 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
753 .arginfo = QCOM_SCM_ARGS(2),
754 .args[0] = reset,
755 .args[1] = 0,
756 .owner = ARM_SMCCC_OWNER_SIP,
757 };
758 struct qcom_scm_res res;
759 int ret;
760
761 ret = qcom_scm_call(__scm->dev, &desc, &res);
762
763 return ret ? : res.result[0];
764}
765
dd4fe5b2
BA
766static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
767 unsigned long idx)
768{
769 if (idx != 0)
770 return -EINVAL;
771
772 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
773}
774
775static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
776 unsigned long idx)
777{
778 if (idx != 0)
779 return -EINVAL;
780
781 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
782}
783
/* Reset controller ops backing the MSS PAS reset line. */
static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
788
65f0c90b
EB
789int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
790{
57d3b816
EB
791 struct qcom_scm_desc desc = {
792 .svc = QCOM_SCM_SVC_IO,
793 .cmd = QCOM_SCM_IO_READ,
794 .arginfo = QCOM_SCM_ARGS(1),
795 .args[0] = addr,
796 .owner = ARM_SMCCC_OWNER_SIP,
797 };
798 struct qcom_scm_res res;
799 int ret;
800
801
b88c2828 802 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
57d3b816
EB
803 if (ret >= 0)
804 *val = res.result[0];
805
806 return ret < 0 ? ret : 0;
65f0c90b 807}
2784e3b0 808EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
65f0c90b
EB
809
810int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
811{
57d3b816
EB
812 struct qcom_scm_desc desc = {
813 .svc = QCOM_SCM_SVC_IO,
814 .cmd = QCOM_SCM_IO_WRITE,
815 .arginfo = QCOM_SCM_ARGS(2),
816 .args[0] = addr,
817 .args[1] = val,
818 .owner = ARM_SMCCC_OWNER_SIP,
819 };
820
b88c2828 821 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
65f0c90b 822}
2784e3b0 823EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
65f0c90b 824
0434a406
RC
/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
0434a406 837
a2c680c6
RC
838int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
839{
57d3b816
EB
840 struct qcom_scm_desc desc = {
841 .svc = QCOM_SCM_SVC_MP,
842 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
843 .arginfo = QCOM_SCM_ARGS(2),
844 .args[0] = device_id,
845 .args[1] = spare,
846 .owner = ARM_SMCCC_OWNER_SIP,
847 };
848 struct qcom_scm_res res;
849 int ret;
850
851 ret = qcom_scm_call(__scm->dev, &desc, &res);
852
853 return ret ? : res.result[0];
a2c680c6 854}
2784e3b0 855EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
a2c680c6 856
b182cc4d
SV
857int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
858{
57d3b816
EB
859 struct qcom_scm_desc desc = {
860 .svc = QCOM_SCM_SVC_MP,
861 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
862 .arginfo = QCOM_SCM_ARGS(1),
863 .args[0] = spare,
864 .owner = ARM_SMCCC_OWNER_SIP,
865 };
866 struct qcom_scm_res res;
867 int ret;
868
869 ret = qcom_scm_call(__scm->dev, &desc, &res);
870
871 if (size)
872 *size = res.result[0];
873
874 return ret ? : res.result[1];
b182cc4d 875}
2784e3b0 876EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
b182cc4d
SV
877
878int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
879{
57d3b816
EB
880 struct qcom_scm_desc desc = {
881 .svc = QCOM_SCM_SVC_MP,
882 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
883 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
884 QCOM_SCM_VAL),
885 .args[0] = addr,
886 .args[1] = size,
887 .args[2] = spare,
888 .owner = ARM_SMCCC_OWNER_SIP,
889 };
890 int ret;
891
57d3b816
EB
892 ret = qcom_scm_call(__scm->dev, &desc, NULL);
893
894 /* the pg table has been initialized already, ignore the error */
895 if (ret == -EPERM)
896 ret = 0;
897
898 return ret;
b182cc4d 899}
2784e3b0 900EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
b182cc4d 901
94351509
ADR
902int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
903{
904 struct qcom_scm_desc desc = {
905 .svc = QCOM_SCM_SVC_MP,
906 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
907 .arginfo = QCOM_SCM_ARGS(2),
908 .args[0] = size,
909 .args[1] = spare,
910 .owner = ARM_SMCCC_OWNER_SIP,
911 };
912
913 return qcom_scm_call(__scm->dev, &desc, NULL);
914}
2784e3b0 915EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
94351509 916
6d885330
SV
917int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
918 u32 cp_nonpixel_start,
919 u32 cp_nonpixel_size)
920{
921 int ret;
922 struct qcom_scm_desc desc = {
923 .svc = QCOM_SCM_SVC_MP,
924 .cmd = QCOM_SCM_MP_VIDEO_VAR,
925 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
926 QCOM_SCM_VAL, QCOM_SCM_VAL),
927 .args[0] = cp_start,
928 .args[1] = cp_size,
929 .args[2] = cp_nonpixel_start,
930 .args[3] = cp_nonpixel_size,
931 .owner = ARM_SMCCC_OWNER_SIP,
932 };
933 struct qcom_scm_res res;
934
935 ret = qcom_scm_call(__scm->dev, &desc, &res);
936
937 return ret ? : res.result[0];
938}
2784e3b0 939EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
6d885330 940
57d3b816
EB
/*
 * Low-level ASSIGN call: @mem_region/@mem_sz describe the firmware-visible
 * qcom_scm_mem_map_info, @src/@src_sz the current-owner vmid list and
 * @dest/@dest_sz the qcom_scm_current_perm_info array, all physically
 * addressed (buffers are prepared by qcom_scm_assign_mem()).
 */
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}
967
d82bd359
AKD
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership need to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for current set of owners, each set bit in
 *            flag indicate a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	/*
	 * One DMA-coherent scratch buffer carries all three firmware-visible
	 * arrays: source vmids, the mem-map descriptor and the destination
	 * permission records, each aligned to 64 bytes.
	 */
	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		/*
		 * NOTE(review): next_vm is an int but *srcvm is u64 —
		 * BIT(vmid) overflows for vmid >= 31; assumed vmids stay
		 * small in practice — verify against qcom_scm_vmperm users.
		 */
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	/* Report the new owner set back to the caller. */
	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
d82bd359 1049
65f0c90b
EB
1050/**
1051 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
1052 */
1053bool qcom_scm_ocmem_lock_available(void)
1054{
1055 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
1056 QCOM_SCM_OCMEM_LOCK_CMD);
1057}
2784e3b0 1058EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
65f0c90b
EB
1059
1060/**
1061 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
1062 * region to the specified initiator
1063 *
1064 * @id: tz initiator id
1065 * @offset: OCMEM offset
1066 * @size: OCMEM size
1067 * @mode: access mode (WIDE/NARROW)
1068 */
1069int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
1070 u32 mode)
1071{
57d3b816
EB
1072 struct qcom_scm_desc desc = {
1073 .svc = QCOM_SCM_SVC_OCMEM,
1074 .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
1075 .args[0] = id,
1076 .args[1] = offset,
1077 .args[2] = size,
1078 .args[3] = mode,
1079 .arginfo = QCOM_SCM_ARGS(4),
1080 };
1081
1082 return qcom_scm_call(__scm->dev, &desc, NULL);
65f0c90b 1083}
2784e3b0 1084EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
65f0c90b
EB
1085
1086/**
1087 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1088 * region from the specified initiator
1089 *
1090 * @id: tz initiator id
1091 * @offset: OCMEM offset
1092 * @size: OCMEM size
1093 */
1094int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1095{
57d3b816
EB
1096 struct qcom_scm_desc desc = {
1097 .svc = QCOM_SCM_SVC_OCMEM,
1098 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1099 .args[0] = id,
1100 .args[1] = offset,
1101 .args[2] = size,
1102 .arginfo = QCOM_SCM_ARGS(3),
1103 };
1104
1105 return qcom_scm_call(__scm->dev, &desc, NULL);
65f0c90b 1106}
2784e3b0 1107EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
65f0c90b 1108
0f206514
EB
1109/**
1110 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1111 *
1112 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1113 * qcom_scm_ice_set_key() are available.
1114 */
1115bool qcom_scm_ice_available(void)
1116{
1117 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1118 QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1119 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1120 QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1121}
2784e3b0 1122EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
0f206514
EB
1123
1124/**
1125 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1126 * @index: the keyslot to invalidate
1127 *
433611ea
EB
1128 * The UFSHCI and eMMC standards define a standard way to do this, but it
1129 * doesn't work on these SoCs; only this SCM call does.
1130 *
1131 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1132 * call doesn't specify which ICE instance the keyslot belongs to.
0f206514
EB
1133 *
1134 * Return: 0 on success; -errno on failure.
1135 */
1136int qcom_scm_ice_invalidate_key(u32 index)
1137{
1138 struct qcom_scm_desc desc = {
1139 .svc = QCOM_SCM_SVC_ES,
1140 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1141 .arginfo = QCOM_SCM_ARGS(1),
1142 .args[0] = index,
1143 .owner = ARM_SMCCC_OWNER_SIP,
1144 };
1145
1146 return qcom_scm_call(__scm->dev, &desc, NULL);
1147}
2784e3b0 1148EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
0f206514
EB
1149
1150/**
1151 * qcom_scm_ice_set_key() - Set an inline encryption key
1152 * @index: the keyslot into which to set the key
1153 * @key: the key to program
1154 * @key_size: the size of the key in bytes
1155 * @cipher: the encryption algorithm the key is for
1156 * @data_unit_size: the encryption data unit size, i.e. the size of each
1157 * individual plaintext and ciphertext. Given in 512-byte
1158 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1159 *
1160 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
433611ea
EB
1161 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1162 *
1163 * The UFSHCI and eMMC standards define a standard way to do this, but it
1164 * doesn't work on these SoCs; only this SCM call does.
0f206514 1165 *
433611ea
EB
1166 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1167 * call doesn't specify which ICE instance the keyslot belongs to.
0f206514
EB
1168 *
1169 * Return: 0 on success; -errno on failure.
1170 */
1171int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1172 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1173{
1174 struct qcom_scm_desc desc = {
1175 .svc = QCOM_SCM_SVC_ES,
1176 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1177 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1178 QCOM_SCM_VAL, QCOM_SCM_VAL,
1179 QCOM_SCM_VAL),
1180 .args[0] = index,
1181 .args[2] = key_size,
1182 .args[3] = cipher,
1183 .args[4] = data_unit_size,
1184 .owner = ARM_SMCCC_OWNER_SIP,
1185 };
1186 void *keybuf;
1187 dma_addr_t key_phys;
1188 int ret;
1189
1190 /*
1191 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1192 * physical address that's been properly flushed. The sanctioned way to
1193 * do this is by using the DMA API. But as is best practice for crypto
1194 * keys, we also must wipe the key after use. This makes kmemdup() +
1195 * dma_map_single() not clearly correct, since the DMA API can use
1196 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
1197 * keys is normally rare and thus not performance-critical.
1198 */
1199
1200 keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1201 GFP_KERNEL);
1202 if (!keybuf)
1203 return -ENOMEM;
1204 memcpy(keybuf, key, key_size);
1205 desc.args[1] = key_phys;
1206
1207 ret = qcom_scm_call(__scm->dev, &desc, NULL);
1208
1209 memzero_explicit(keybuf, key_size);
1210
1211 dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1212 return ret;
1213}
2784e3b0 1214EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
0f206514 1215
65f0c90b
EB
1216/**
1217 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1218 *
1219 * Return true if HDCP is supported, false if not.
1220 */
1221bool qcom_scm_hdcp_available(void)
1222{
9d11af8b 1223 bool avail;
65f0c90b
EB
1224 int ret = qcom_scm_clk_enable();
1225
1226 if (ret)
1227 return ret;
1228
9d11af8b 1229 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
65f0c90b
EB
1230 QCOM_SCM_HDCP_INVOKE);
1231
1232 qcom_scm_clk_disable();
1233
9d11af8b 1234 return avail;
65f0c90b 1235}
2784e3b0 1236EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
65f0c90b
EB
1237
1238/**
1239 * qcom_scm_hdcp_req() - Send HDCP request.
1240 * @req: HDCP request array
1241 * @req_cnt: HDCP request array count
1242 * @resp: response buffer passed to SCM
1243 *
1244 * Write HDCP register(s) through SCM.
1245 */
1246int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1247{
57d3b816
EB
1248 int ret;
1249 struct qcom_scm_desc desc = {
1250 .svc = QCOM_SCM_SVC_HDCP,
1251 .cmd = QCOM_SCM_HDCP_INVOKE,
1252 .arginfo = QCOM_SCM_ARGS(10),
1253 .args = {
1254 req[0].addr,
1255 req[0].val,
1256 req[1].addr,
1257 req[1].val,
1258 req[2].addr,
1259 req[2].val,
1260 req[3].addr,
1261 req[3].val,
1262 req[4].addr,
1263 req[4].val
1264 },
1265 .owner = ARM_SMCCC_OWNER_SIP,
1266 };
1267 struct qcom_scm_res res;
1268
1269 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1270 return -ERANGE;
65f0c90b 1271
57d3b816 1272 ret = qcom_scm_clk_enable();
65f0c90b
EB
1273 if (ret)
1274 return ret;
1275
57d3b816
EB
1276 ret = qcom_scm_call(__scm->dev, &desc, &res);
1277 *resp = res.result[0];
1278
65f0c90b 1279 qcom_scm_clk_disable();
57d3b816 1280
65f0c90b
EB
1281 return ret;
1282}
2784e3b0 1283EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
65f0c90b 1284
071a1333
ADR
1285int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1286{
1287 struct qcom_scm_desc desc = {
1288 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1289 .cmd = QCOM_SCM_SMMU_PT_FORMAT,
1290 .arginfo = QCOM_SCM_ARGS(3),
1291 .args[0] = sec_id,
1292 .args[1] = ctx_num,
1293 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1294 .owner = ARM_SMCCC_OWNER_SIP,
1295 };
1296
1297 return qcom_scm_call(__scm->dev, &desc, NULL);
1298}
2784e3b0 1299EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
071a1333 1300
65f0c90b
EB
1301int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1302{
57d3b816
EB
1303 struct qcom_scm_desc desc = {
1304 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1305 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1306 .arginfo = QCOM_SCM_ARGS(2),
1307 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1308 .args[1] = en,
1309 .owner = ARM_SMCCC_OWNER_SIP,
1310 };
1311
1312
1313 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
65f0c90b 1314}
2784e3b0 1315EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
65f0c90b 1316
de3438c4
TG
/*
 * qcom_scm_lmh_dcvsh_available() - is the LMH DCVSH interface implemented
 * by the secure world?
 */
bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
de3438c4
TG
1322
1323int qcom_scm_lmh_profile_change(u32 profile_id)
1324{
1325 struct qcom_scm_desc desc = {
1326 .svc = QCOM_SCM_SVC_LMH,
1327 .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1328 .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1329 .args[0] = profile_id,
1330 .owner = ARM_SMCCC_OWNER_SIP,
1331 };
1332
1333 return qcom_scm_call(__scm->dev, &desc, NULL);
1334}
2784e3b0 1335EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
de3438c4
TG
1336
/*
 * qcom_scm_lmh_dcvsh() - issue an LMH DCVSH payload to the secure world.
 *
 * The payload is a fixed 5-word buffer handed to firmware by physical
 * address (QCOM_SCM_RO arg 0); the remaining args describe the payload
 * size and the target limit node.
 */
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	/* Coherent buffer so firmware sees the payload without explicit flushes. */
	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	/* Payload layout: fn, 0, reg, 1, val — fixed words 1 and 3 per protocol. */
	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
de3438c4 1374
65f0c90b
EB
/*
 * qcom_scm_find_dload_address() - resolve the TCSR "download mode" register
 * address from the optional "qcom,dload-mode" DT property (a phandle to the
 * TCSR node plus a register offset).
 *
 * Returns 0 and leaves *addr untouched when the property is absent; returns
 * 0 with *addr filled in on success; returns a negative errno on a malformed
 * property.
 */
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	/* Second cell of the property is the offset into the TCSR block. */
	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
1400
00b12486
ML
1401#ifdef CONFIG_QCOM_QSEECOM
1402
1403/* Lock for QSEECOM SCM call executions */
1404static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1405
1406static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1407 struct qcom_scm_qseecom_resp *res)
1408{
1409 struct qcom_scm_res scm_res = {};
1410 int status;
1411
1412 /*
1413 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1414 * require the respective call lock to be held.
1415 */
1416 lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1417
1418 status = qcom_scm_call(__scm->dev, desc, &scm_res);
1419
1420 res->result = scm_res.result[0];
1421 res->resp_type = scm_res.result[1];
1422 res->data = scm_res.result[2];
1423
1424 if (status)
1425 return status;
1426
1427 return 0;
1428}
1429
1430/**
1431 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
1432 * @desc: SCM call descriptor.
1433 * @res: SCM call response (output).
1434 *
1435 * Performs the QSEECOM SCM call described by @desc, returning the response in
1436 * @rsp.
1437 *
1438 * Return: Zero on success, nonzero on failure.
1439 */
1440static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1441 struct qcom_scm_qseecom_resp *res)
1442{
1443 int status;
1444
1445 /*
1446 * Note: Multiple QSEECOM SCM calls should not be executed same time,
1447 * so lock things here. This needs to be extended to callback/listener
1448 * handling when support for that is implemented.
1449 */
1450
1451 mutex_lock(&qcom_scm_qseecom_call_lock);
1452 status = __qcom_scm_qseecom_call(desc, res);
1453 mutex_unlock(&qcom_scm_qseecom_call_lock);
1454
1455 dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1456 __func__, desc->owner, desc->svc, desc->cmd, res->result,
1457 res->resp_type, res->data);
1458
1459 if (status) {
1460 dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1461 return status;
1462 }
1463
1464 /*
1465 * TODO: Handle incomplete and blocked calls:
1466 *
1467 * Incomplete and blocked calls are not supported yet. Some devices
1468 * and/or commands require those, some don't. Let's warn about them
1469 * prominently in case someone attempts to try these commands with a
1470 * device/command combination that isn't supported yet.
1471 */
1472 WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1473 WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1474
1475 return 0;
1476}
1477
1478/**
1479 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1480 * @version: Pointer where the QSEECOM version will be stored.
1481 *
1482 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
1483 * the TrustZone.
1484 *
1485 * Return: Zero on success, nonzero on failure.
1486 */
1487static int qcom_scm_qseecom_get_version(u32 *version)
1488{
1489 struct qcom_scm_desc desc = {};
1490 struct qcom_scm_qseecom_resp res = {};
1491 u32 feature = 10;
1492 int ret;
1493
1494 desc.owner = QSEECOM_TZ_OWNER_SIP;
1495 desc.svc = QSEECOM_TZ_SVC_INFO;
1496 desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1497 desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1498 desc.args[0] = feature;
1499
1500 ret = qcom_scm_qseecom_call(&desc, &res);
1501 if (ret)
1502 return ret;
1503
1504 *version = res.result;
1505 return 0;
1506}
1507
1508/**
1509 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
1510 * @app_name: The name of the app.
1511 * @app_id: The returned app ID.
1512 *
1513 * Query and return the application ID of the SEE app identified by the given
1514 * name. This returned ID is the unique identifier of the app required for
1515 * subsequent communication.
1516 *
1517 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
1518 * loaded or could not be found.
1519 */
1520int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1521{
1522 unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1523 unsigned long app_name_len = strlen(app_name);
1524 struct qcom_scm_desc desc = {};
1525 struct qcom_scm_qseecom_resp res = {};
1526 dma_addr_t name_buf_phys;
1527 char *name_buf;
1528 int status;
1529
1530 if (app_name_len >= name_buf_size)
1531 return -EINVAL;
1532
1533 name_buf = kzalloc(name_buf_size, GFP_KERNEL);
1534 if (!name_buf)
1535 return -ENOMEM;
1536
1537 memcpy(name_buf, app_name, app_name_len);
1538
1539 name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
1540 status = dma_mapping_error(__scm->dev, name_buf_phys);
1541 if (status) {
1542 kfree(name_buf);
1543 dev_err(__scm->dev, "qseecom: failed to map dma address\n");
1544 return status;
1545 }
1546
1547 desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1548 desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1549 desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1550 desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1551 desc.args[0] = name_buf_phys;
1552 desc.args[1] = app_name_len;
1553
1554 status = qcom_scm_qseecom_call(&desc, &res);
1555 dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
1556 kfree(name_buf);
1557
1558 if (status)
1559 return status;
1560
1561 if (res.result == QSEECOM_RESULT_FAILURE)
1562 return -ENOENT;
1563
1564 if (res.result != QSEECOM_RESULT_SUCCESS)
1565 return -EINVAL;
1566
1567 if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1568 return -EINVAL;
1569
1570 *app_id = res.data;
1571 return 0;
1572}
1573EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
1574
1575/**
1576 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
1577 * @app_id: The ID of the target app.
1578 * @req: Request buffer sent to the app (must be DMA-mappable).
1579 * @req_size: Size of the request buffer.
1580 * @rsp: Response buffer, written to by the app (must be DMA-mappable).
1581 * @rsp_size: Size of the response buffer.
1582 *
1583 * Sends a request to the QSEE app associated with the given ID and read back
1584 * its response. The caller must provide two DMA memory regions, one for the
1585 * request and one for the response, and fill out the @req region with the
1586 * respective (app-specific) request data. The QSEE app reads this and returns
1587 * its response in the @rsp region.
1588 *
1589 * Return: Zero on success, nonzero on failure.
1590 */
1591int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
1592 size_t rsp_size)
1593{
1594 struct qcom_scm_qseecom_resp res = {};
1595 struct qcom_scm_desc desc = {};
1596 dma_addr_t req_phys;
1597 dma_addr_t rsp_phys;
1598 int status;
1599
1600 /* Map request buffer */
1601 req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
1602 status = dma_mapping_error(__scm->dev, req_phys);
1603 if (status) {
1604 dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
1605 return status;
1606 }
1607
1608 /* Map response buffer */
1609 rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
1610 status = dma_mapping_error(__scm->dev, rsp_phys);
1611 if (status) {
1612 dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
1613 dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
1614 return status;
1615 }
1616
1617 /* Set up SCM call data */
1618 desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1619 desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1620 desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1621 desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1622 QCOM_SCM_RW, QCOM_SCM_VAL,
1623 QCOM_SCM_RW, QCOM_SCM_VAL);
1624 desc.args[0] = app_id;
1625 desc.args[1] = req_phys;
1626 desc.args[2] = req_size;
1627 desc.args[3] = rsp_phys;
1628 desc.args[4] = rsp_size;
1629
1630 /* Perform call */
1631 status = qcom_scm_qseecom_call(&desc, &res);
1632
1633 /* Unmap buffers */
1634 dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
1635 dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
1636
1637 if (status)
1638 return status;
1639
1640 if (res.result != QSEECOM_RESULT_SUCCESS)
1641 return -EIO;
1642
1643 return 0;
1644}
1645EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
1646
1647/*
1648 * We do not yet support re-entrant calls via the qseecom interface. To prevent
1649 + any potential issues with this, only allow validated machines for now.
1650 */
e478c5fb 1651static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
00b12486
ML
1652 { .compatible = "lenovo,thinkpad-x13s", },
1653 { }
1654};
1655
1656static bool qcom_scm_qseecom_machine_is_allowed(void)
1657{
1658 struct device_node *np;
1659 bool match;
1660
1661 np = of_find_node_by_path("/");
1662 if (!np)
1663 return false;
1664
1665 match = of_match_node(qcom_scm_qseecom_allowlist, np);
1666 of_node_put(np);
1667
1668 return match;
1669}
1670
/* devm action: tear down the qseecom interface platform device. */
static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}
1678
/*
 * qcom_scm_qseecom_init() - detect QSEECOM support and register the
 * "qcom_qseecom" interface platform device.
 *
 * Returns 0 when QSEECOM is absent or the machine is not allowlisted
 * (both non-fatal); a negative errno only on device registration failure.
 */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}
1725
1726#else /* CONFIG_QCOM_QSEECOM */
1727
/* No-op stub when QSEECOM support is compiled out. */
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}
1732
1733#endif /* CONFIG_QCOM_QSEECOM */
1734
65f0c90b
EB
1735/**
1736 * qcom_scm_is_available() - Checks if SCM is available
1737 */
1738bool qcom_scm_is_available(void)
1739{
1740 return !!__scm;
1741}
2784e3b0 1742EXPORT_SYMBOL_GPL(qcom_scm_is_available);
65f0c90b 1743
6bf32599
GDS
1744static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
1745{
1746 /* FW currently only supports a single wq_ctx (zero).
1747 * TODO: Update this logic to include dynamic allocation and lookup of
1748 * completion structs when FW supports more wq_ctx values.
1749 */
1750 if (wq_ctx != 0) {
1751 dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
1752 return -EINVAL;
1753 }
1754
1755 return 0;
1756}
1757
/*
 * qcom_scm_wait_for_wq_completion() - block until firmware signals the
 * wait-queue context via qcom_scm_waitq_wakeup().  Only wq_ctx zero is
 * currently supported.
 */
int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}
1770
000636d9 1771static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
6bf32599
GDS
1772{
1773 int ret;
1774
1775 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1776 if (ret)
1777 return ret;
1778
1779 complete(&__scm->waitq_comp);
1780
1781 return 0;
1782}
1783
/*
 * Threaded IRQ handler: drain pending firmware wait-queue wake events,
 * waking the corresponding waiter for each.  Always returns IRQ_HANDLED,
 * even on protocol errors (which are only logged).
 */
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		/* Only WAKE_ONE/WAKE_ALL are defined by the protocol. */
		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
1811
d0f6fa7b
AG
/*
 * qcom_scm_probe() - set up the SCM device: resolve the TCSR download-mode
 * address, acquire interconnect/clock/reset resources, publish the global
 * __scm handle, wire up the optional wait-queue IRQ, and initialize
 * download-mode/SDI state and the QSEECOM interface.
 */
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	/* All clocks are optional; older platforms may provide none. */
	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* From here on qcom_scm_is_available() reports true. */
	__scm = scm;
	__scm->dev = &pdev->dev;

	init_completion(&__scm->waitq_comp);

	/* The wait-queue IRQ is optional; -ENXIO means "not provided". */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);


	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}
1904
8c1b7dc9
BA
/* Driver shutdown hook: counterpart to the probe-time download_mode enable. */
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}
1910
d0f6fa7b 1911static const struct of_device_id qcom_scm_dt_match[] = {
626237db
KD
1912 { .compatible = "qcom,scm" },
1913
1914 /* Legacy entries kept for backwards compatibility */
5130464a
KD
1915 { .compatible = "qcom,scm-apq8064" },
1916 { .compatible = "qcom,scm-apq8084" },
60cd420c 1917 { .compatible = "qcom,scm-ipq4019" },
5130464a
KD
1918 { .compatible = "qcom,scm-msm8953" },
1919 { .compatible = "qcom,scm-msm8974" },
60cd420c 1920 { .compatible = "qcom,scm-msm8996" },
d0f6fa7b
AG
1921 {}
1922};
b42000e4 1923MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
d0f6fa7b 1924
d0f6fa7b
AG
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		/* Unbinding would leave the global __scm dangling. */
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};
1934
/* Registered at subsys_initcall time so dependent drivers can probe later. */
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
b42000e4
JS
1940
1941MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
1942MODULE_LICENSE("GPL v2");