1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2015 Linaro Ltd.
5 #include <linux/platform_device.h>
6 #include <linux/init.h>
7 #include <linux/interrupt.h>
8 #include <linux/completion.h>
9 #include <linux/cpumask.h>
10 #include <linux/export.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/interconnect.h>
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/firmware/qcom/qcom_scm.h>
17 #include <linux/of_address.h>
18 #include <linux/of_irq.h>
19 #include <linux/of_platform.h>
20 #include <linux/clk.h>
21 #include <linux/reset-controller.h>
22 #include <linux/arm-smccc.h>
26 static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
27 module_param(download_mode, bool, 0);
29 #define SCM_HAS_CORE_CLK BIT(0)
30 #define SCM_HAS_IFACE_CLK BIT(1)
31 #define SCM_HAS_BUS_CLK BIT(2)
36 struct clk *iface_clk;
38 struct icc_path *path;
39 struct completion waitq_comp;
40 struct reset_controller_dev reset;
42 /* control access to the interconnect path */
43 struct mutex scm_bw_lock;
49 struct qcom_scm_current_perm_info {
57 struct qcom_scm_mem_map_info {
62 /* Each bit configures cold/warm boot address for one of the 4 CPUs */
63 static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
64 0, BIT(0), BIT(3), BIT(5)
66 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
67 BIT(2), BIT(1), BIT(4), BIT(6)
70 #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
71 #define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
73 static const char * const qcom_scm_convention_names[] = {
74 [SMC_CONVENTION_UNKNOWN] = "unknown",
75 [SMC_CONVENTION_ARM_32] = "smc arm 32",
76 [SMC_CONVENTION_ARM_64] = "smc arm 64",
77 [SMC_CONVENTION_LEGACY] = "smc legacy",
80 static struct qcom_scm *__scm;
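/*
* Enable the optional core/iface/bus clocks required around certain SCM
* calls.  Clocks that were not provided are NULL and are skipped by the clk
* API; on failure the clocks enabled so far are unwound again.
*/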
82 static int qcom_scm_clk_enable(void)
86 ret = clk_prepare_enable(__scm->core_clk);
90 ret = clk_prepare_enable(__scm->iface_clk);
94 ret = clk_prepare_enable(__scm->bus_clk);
101 clk_disable_unprepare(__scm->iface_clk);
103 clk_disable_unprepare(__scm->core_clk);
108 static void qcom_scm_clk_disable(void)
110 clk_disable_unprepare(__scm->core_clk);
111 clk_disable_unprepare(__scm->iface_clk);
112 clk_disable_unprepare(__scm->bus_clk);
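/*
* Take a refcounted bandwidth vote on the SCM interconnect path.  The first
* user requests maximum peak bandwidth; qcom_scm_bw_disable() drops the vote
* again once the last user is done.
*/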
115 static int qcom_scm_bw_enable(void)
122 if (IS_ERR(__scm->path))
125 mutex_lock(&__scm->scm_bw_lock);
126 if (!__scm->scm_vote_count) {
127 ret = icc_set_bw(__scm->path, 0, UINT_MAX);
129 dev_err(__scm->dev, "failed to set bandwidth request\n");
133 __scm->scm_vote_count++;
135 mutex_unlock(&__scm->scm_bw_lock);
140 static void qcom_scm_bw_disable(void)
142 if (IS_ERR_OR_NULL(__scm->path))
145 mutex_lock(&__scm->scm_bw_lock);
146 if (__scm->scm_vote_count-- == 1)
147 icc_set_bw(__scm->path, 0, 0);
148 mutex_unlock(&__scm->scm_bw_lock);
151 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
152 static DEFINE_SPINLOCK(scm_query_lock);
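/*
* Probe which SMC calling convention the firmware implements by issuing the
* IS_CALL_AVAIL query: try ARM_64 first, then ARM_32, and finally fall back
* to the legacy convention.  The result is cached in qcom_scm_convention so
* the probe only runs once.
*/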
154 static enum qcom_scm_convention __get_convention(void)
157 struct qcom_scm_desc desc = {
158 .svc = QCOM_SCM_SVC_INFO,
159 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
160 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
161 QCOM_SCM_INFO_IS_CALL_AVAIL) |
162 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
163 .arginfo = QCOM_SCM_ARGS(1),
164 .owner = ARM_SMCCC_OWNER_SIP,
166 struct qcom_scm_res res;
167 enum qcom_scm_convention probed_convention;
171 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
172 return qcom_scm_convention;
175 * Device isn't required here: the call has a single value argument, so
176 * nothing needs to be dma_map_single()'d into the secure world.
178 probed_convention = SMC_CONVENTION_ARM_64;
179 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
180 if (!ret && res.result[0] == 1)
184 * Some SC7180 firmwares didn't implement the
185 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the ARM_64
186 * calling convention on these firmwares. Luckily we don't make any
187 * early calls into the firmware on these SoCs, so the device pointer
188 * will be valid here to check if the compatible matches.
190 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
195 probed_convention = SMC_CONVENTION_ARM_32;
196 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
197 if (!ret && res.result[0] == 1)
200 probed_convention = SMC_CONVENTION_LEGACY;
202 spin_lock_irqsave(&scm_query_lock, flags);
203 if (probed_convention != qcom_scm_convention) {
204 qcom_scm_convention = probed_convention;
205 pr_info("qcom_scm: convention: %s%s\n",
206 qcom_scm_convention_names[qcom_scm_convention],
207 forced ? " (forced)" : "");
209 spin_unlock_irqrestore(&scm_query_lock, flags);
211 return qcom_scm_convention;
215 * qcom_scm_call() - Invoke a syscall in the secure world
217 * @desc: Descriptor structure containing arguments and return values
218 * @res: Structure containing results from SMC/HVC call
220 * Sends a command to the SCM and waits for the command to finish processing.
221 * This should *only* be called in pre-emptible context.
223 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
224 struct qcom_scm_res *res)
227 switch (__get_convention()) {
228 case SMC_CONVENTION_ARM_32:
229 case SMC_CONVENTION_ARM_64:
230 return scm_smc_call(dev, desc, res, false);
231 case SMC_CONVENTION_LEGACY:
232 return scm_legacy_call(dev, desc, res);
234 pr_err("Unknown current SCM calling convention.\n");
240 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
242 * @desc: Descriptor structure containing arguments and return values
243 * @res: Structure containing results from SMC/HVC call
245 * Sends a command to the SCM and waits for the command to finish processing.
246 * This can be called in atomic context.
248 static int qcom_scm_call_atomic(struct device *dev,
249 const struct qcom_scm_desc *desc,
250 struct qcom_scm_res *res)
252 switch (__get_convention()) {
253 case SMC_CONVENTION_ARM_32:
254 case SMC_CONVENTION_ARM_64:
255 return scm_smc_call(dev, desc, res, true);
256 case SMC_CONVENTION_LEGACY:
257 return scm_legacy_call_atomic(dev, desc, res);
259 pr_err("Unknown current SCM calling convention.\n");
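/* Ask the firmware whether a given service/command pair is implemented. */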
264 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
268 struct qcom_scm_desc desc = {
269 .svc = QCOM_SCM_SVC_INFO,
270 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
271 .owner = ARM_SMCCC_OWNER_SIP,
273 struct qcom_scm_res res;
275 desc.arginfo = QCOM_SCM_ARGS(1);
276 switch (__get_convention()) {
277 case SMC_CONVENTION_ARM_32:
278 case SMC_CONVENTION_ARM_64:
279 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
280 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
282 case SMC_CONVENTION_LEGACY:
283 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
286 pr_err("Unknown SMC convention being used\n");
290 ret = qcom_scm_call(dev, &desc, &res);
292 return ret ? false : !!res.result[0];
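/*
* Legacy SET_ADDR call: build the cold/warm boot flag mask from the per-CPU
* bit tables above and pass the entry point to the firmware.
*/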
295 static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
298 unsigned int flags = 0;
299 struct qcom_scm_desc desc = {
300 .svc = QCOM_SCM_SVC_BOOT,
301 .cmd = QCOM_SCM_BOOT_SET_ADDR,
302 .arginfo = QCOM_SCM_ARGS(2),
303 .owner = ARM_SMCCC_OWNER_SIP,
306 for_each_present_cpu(cpu) {
307 if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
309 flags |= cpu_bits[cpu];
312 desc.args[0] = flags;
313 desc.args[1] = virt_to_phys(entry);
315 return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
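/*
* Multi-cluster SET_ADDR_MC call: the entry point applies to all CPUs at all
* affinity levels, with @flags selecting cold and/or warm boot.
*/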
318 static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
320 struct qcom_scm_desc desc = {
321 .svc = QCOM_SCM_SVC_BOOT,
322 .cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
323 .owner = ARM_SMCCC_OWNER_SIP,
324 .arginfo = QCOM_SCM_ARGS(6),
327 /* Apply to all CPUs in all affinity levels */
328 ~0ULL, ~0ULL, ~0ULL, ~0ULL,
333 /* Need a device for DMA of the additional arguments */
334 if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
337 return qcom_scm_call(__scm->dev, &desc, NULL);
341 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
342 * @entry: Entry point function for the cpus
344 * Set the Linux entry point for the SCM to transfer control to when coming
345 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
347 int qcom_scm_set_warm_boot_addr(void *entry)
349 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
350 /* Fall back to the old SCM call */
351 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
354 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
357 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
358 * @entry: Entry point function for the cpus
360 int qcom_scm_set_cold_boot_addr(void *entry)
362 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
363 /* Fall back to the old SCM call */
364 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
367 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
370 * qcom_scm_cpu_power_down() - Power down the cpu
371 * @flags: Flags to flush cache
373 * This is an end point to power down the CPU. If there was a pending interrupt,
374 * control returns from this function; otherwise, the CPU jumps to the
375 * warm boot entry point set for this CPU upon reset.
377 void qcom_scm_cpu_power_down(u32 flags)
379 struct qcom_scm_desc desc = {
380 .svc = QCOM_SCM_SVC_BOOT,
381 .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
382 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
383 .arginfo = QCOM_SCM_ARGS(1),
384 .owner = ARM_SMCCC_OWNER_SIP,
387 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
389 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
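/*
* Notify the secure world of a state change for the remote processor
* identified by @id.
*/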
391 int qcom_scm_set_remote_state(u32 state, u32 id)
393 struct qcom_scm_desc desc = {
394 .svc = QCOM_SCM_SVC_BOOT,
395 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
396 .arginfo = QCOM_SCM_ARGS(2),
399 .owner = ARM_SMCCC_OWNER_SIP,
401 struct qcom_scm_res res;
404 ret = qcom_scm_call(__scm->dev, &desc, &res);
406 return ret ? : res.result[0];
408 EXPORT_SYMBOL(qcom_scm_set_remote_state);
410 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
412 struct qcom_scm_desc desc = {
413 .svc = QCOM_SCM_SVC_BOOT,
414 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
415 .arginfo = QCOM_SCM_ARGS(2),
416 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
417 .owner = ARM_SMCCC_OWNER_SIP,
420 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
422 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
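/*
* Prefer the dedicated SET_DLOAD_MODE SCM call when the firmware implements
* it; otherwise fall back to writing the TCSR download-mode register located
* via the "qcom,dload-mode" DT property.
*/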
425 static void qcom_scm_set_download_mode(bool enable)
430 avail = __qcom_scm_is_call_available(__scm->dev,
432 QCOM_SCM_BOOT_SET_DLOAD_MODE);
434 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
435 } else if (__scm->dload_mode_addr) {
436 ret = qcom_scm_io_writel(__scm->dload_mode_addr,
437 enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
440 "No available mechanism for setting download mode\n");
444 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
448 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
449 * state machine for a given peripheral, using the
451 * @peripheral: peripheral id
452 * @metadata: pointer to memory containing ELF header, program header table
453 * and optional blob of data used for authenticating the metadata
454 * and the rest of the firmware
455 * @size: size of the metadata
456 * @ctx: optional metadata context
458 * Return: 0 on success.
460 * Upon successful return, the PAS metadata context (@ctx) will be used to
461 * track the metadata allocation; the caller must release it by invoking
462 * qcom_scm_pas_metadata_release().
464 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
465 struct qcom_scm_pas_metadata *ctx)
467 dma_addr_t mdata_phys;
470 struct qcom_scm_desc desc = {
471 .svc = QCOM_SCM_SVC_PIL,
472 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
473 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
474 .args[0] = peripheral,
475 .owner = ARM_SMCCC_OWNER_SIP,
477 struct qcom_scm_res res;
480 * During the SCM call, memory protection will be enabled for the
481 * metadata blob, so make sure it's physically contiguous, 4K aligned and
482 * non-cacheable to avoid XPU violations.
484 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
487 dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
490 memcpy(mdata_buf, metadata, size);
492 ret = qcom_scm_clk_enable();
496 ret = qcom_scm_bw_enable();
500 desc.args[1] = mdata_phys;
502 ret = qcom_scm_call(__scm->dev, &desc, &res);
504 qcom_scm_bw_disable();
505 qcom_scm_clk_disable();
508 if (ret < 0 || !ctx) {
509 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
511 ctx->ptr = mdata_buf;
512 ctx->phys = mdata_phys;
516 return ret ? : res.result[0];
518 EXPORT_SYMBOL(qcom_scm_pas_init_image);
521 * qcom_scm_pas_metadata_release() - release metadata context
522 * @ctx: metadata context
524 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
529 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
535 EXPORT_SYMBOL(qcom_scm_pas_metadata_release);
538 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
539 * for firmware loading
540 * @peripheral: peripheral id
541 * @addr: start address of memory area to prepare
542 * @size: size of the memory area to prepare
544 * Returns 0 on success.
546 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
549 struct qcom_scm_desc desc = {
550 .svc = QCOM_SCM_SVC_PIL,
551 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
552 .arginfo = QCOM_SCM_ARGS(3),
553 .args[0] = peripheral,
556 .owner = ARM_SMCCC_OWNER_SIP,
558 struct qcom_scm_res res;
560 ret = qcom_scm_clk_enable();
564 ret = qcom_scm_bw_enable();
568 ret = qcom_scm_call(__scm->dev, &desc, &res);
569 qcom_scm_bw_disable();
570 qcom_scm_clk_disable();
572 return ret ? : res.result[0];
574 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
577 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
578 * and reset the remote processor
579 * @peripheral: peripheral id
581 * Return 0 on success.
583 int qcom_scm_pas_auth_and_reset(u32 peripheral)
586 struct qcom_scm_desc desc = {
587 .svc = QCOM_SCM_SVC_PIL,
588 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
589 .arginfo = QCOM_SCM_ARGS(1),
590 .args[0] = peripheral,
591 .owner = ARM_SMCCC_OWNER_SIP,
593 struct qcom_scm_res res;
595 ret = qcom_scm_clk_enable();
599 ret = qcom_scm_bw_enable();
603 ret = qcom_scm_call(__scm->dev, &desc, &res);
604 qcom_scm_bw_disable();
605 qcom_scm_clk_disable();
607 return ret ? : res.result[0];
609 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
612 * qcom_scm_pas_shutdown() - Shut down the remote processor
613 * @peripheral: peripheral id
615 * Returns 0 on success.
617 int qcom_scm_pas_shutdown(u32 peripheral)
620 struct qcom_scm_desc desc = {
621 .svc = QCOM_SCM_SVC_PIL,
622 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
623 .arginfo = QCOM_SCM_ARGS(1),
624 .args[0] = peripheral,
625 .owner = ARM_SMCCC_OWNER_SIP,
627 struct qcom_scm_res res;
629 ret = qcom_scm_clk_enable();
633 ret = qcom_scm_bw_enable();
637 ret = qcom_scm_call(__scm->dev, &desc, &res);
639 qcom_scm_bw_disable();
640 qcom_scm_clk_disable();
642 return ret ? : res.result[0];
644 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
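/*
* Illustrative sketch of how a remoteproc-style loader typically chains the
* PAS calls above (not part of this driver; 'pas_id', 'fw', 'mem_phys' and
* 'mem_size' are hypothetical caller-side values):
*
*	struct qcom_scm_pas_metadata ctx = {};
*	int ret;
*
*	ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size, &ctx);
*	if (!ret)
*		ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
*	if (!ret)
*		ret = qcom_scm_pas_auth_and_reset(pas_id);
*	qcom_scm_pas_metadata_release(&ctx);
*/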
647 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
648 * available for the given peripheral
649 * @peripheral: peripheral id
651 * Returns true if PAS is supported for this peripheral, otherwise false.
653 bool qcom_scm_pas_supported(u32 peripheral)
656 struct qcom_scm_desc desc = {
657 .svc = QCOM_SCM_SVC_PIL,
658 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
659 .arginfo = QCOM_SCM_ARGS(1),
660 .args[0] = peripheral,
661 .owner = ARM_SMCCC_OWNER_SIP,
663 struct qcom_scm_res res;
665 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
666 QCOM_SCM_PIL_PAS_IS_SUPPORTED))
669 ret = qcom_scm_call(__scm->dev, &desc, &res);
671 return ret ? false : !!res.result[0];
673 EXPORT_SYMBOL(qcom_scm_pas_supported);
675 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
677 struct qcom_scm_desc desc = {
678 .svc = QCOM_SCM_SVC_PIL,
679 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
680 .arginfo = QCOM_SCM_ARGS(2),
683 .owner = ARM_SMCCC_OWNER_SIP,
685 struct qcom_scm_res res;
688 ret = qcom_scm_call(__scm->dev, &desc, &res);
690 return ret ? : res.result[0];
693 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
699 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
702 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
708 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
711 static const struct reset_control_ops qcom_scm_pas_reset_ops = {
712 .assert = qcom_scm_pas_reset_assert,
713 .deassert = qcom_scm_pas_reset_deassert,
716 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
718 struct qcom_scm_desc desc = {
719 .svc = QCOM_SCM_SVC_IO,
720 .cmd = QCOM_SCM_IO_READ,
721 .arginfo = QCOM_SCM_ARGS(1),
723 .owner = ARM_SMCCC_OWNER_SIP,
725 struct qcom_scm_res res;
729 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
731 *val = res.result[0];
733 return ret < 0 ? ret : 0;
735 EXPORT_SYMBOL(qcom_scm_io_readl);
737 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
739 struct qcom_scm_desc desc = {
740 .svc = QCOM_SCM_SVC_IO,
741 .cmd = QCOM_SCM_IO_WRITE,
742 .arginfo = QCOM_SCM_ARGS(2),
745 .owner = ARM_SMCCC_OWNER_SIP,
748 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
750 EXPORT_SYMBOL(qcom_scm_io_writel);
753 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
754 * supports restore security config interface.
756 * Return true if restore-cfg interface is supported, false if not.
758 bool qcom_scm_restore_sec_cfg_available(void)
760 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
761 QCOM_SCM_MP_RESTORE_SEC_CFG);
763 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
765 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
767 struct qcom_scm_desc desc = {
768 .svc = QCOM_SCM_SVC_MP,
769 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
770 .arginfo = QCOM_SCM_ARGS(2),
771 .args[0] = device_id,
773 .owner = ARM_SMCCC_OWNER_SIP,
775 struct qcom_scm_res res;
778 ret = qcom_scm_call(__scm->dev, &desc, &res);
780 return ret ? : res.result[0];
782 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
784 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
786 struct qcom_scm_desc desc = {
787 .svc = QCOM_SCM_SVC_MP,
788 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
789 .arginfo = QCOM_SCM_ARGS(1),
791 .owner = ARM_SMCCC_OWNER_SIP,
793 struct qcom_scm_res res;
796 ret = qcom_scm_call(__scm->dev, &desc, &res);
799 *size = res.result[0];
801 return ret ? : res.result[1];
803 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
805 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
807 struct qcom_scm_desc desc = {
808 .svc = QCOM_SCM_SVC_MP,
809 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
810 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
815 .owner = ARM_SMCCC_OWNER_SIP,
819 ret = qcom_scm_call(__scm->dev, &desc, NULL);
821 /* the page table has already been initialized, ignore the error */
827 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
829 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
831 struct qcom_scm_desc desc = {
832 .svc = QCOM_SCM_SVC_MP,
833 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
834 .arginfo = QCOM_SCM_ARGS(2),
837 .owner = ARM_SMCCC_OWNER_SIP,
840 return qcom_scm_call(__scm->dev, &desc, NULL);
842 EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
844 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
845 u32 cp_nonpixel_start,
846 u32 cp_nonpixel_size)
849 struct qcom_scm_desc desc = {
850 .svc = QCOM_SCM_SVC_MP,
851 .cmd = QCOM_SCM_MP_VIDEO_VAR,
852 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
853 QCOM_SCM_VAL, QCOM_SCM_VAL),
856 .args[2] = cp_nonpixel_start,
857 .args[3] = cp_nonpixel_size,
858 .owner = ARM_SMCCC_OWNER_SIP,
860 struct qcom_scm_res res;
862 ret = qcom_scm_call(__scm->dev, &desc, &res);
864 return ret ? : res.result[0];
866 EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
868 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
869 size_t mem_sz, phys_addr_t src, size_t src_sz,
870 phys_addr_t dest, size_t dest_sz)
873 struct qcom_scm_desc desc = {
874 .svc = QCOM_SCM_SVC_MP,
875 .cmd = QCOM_SCM_MP_ASSIGN,
876 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
877 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
878 QCOM_SCM_VAL, QCOM_SCM_VAL),
879 .args[0] = mem_region,
886 .owner = ARM_SMCCC_OWNER_SIP,
888 struct qcom_scm_res res;
890 ret = qcom_scm_call(dev, &desc, &res);
892 return ret ? : res.result[0];
896 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
897 * @mem_addr: mem region whose ownership need to be reassigned
898 * @mem_sz: size of the region.
899 * @srcvm: vmid for current set of owners, each set bit in
900 * the flag indicates a unique owner
901 * @newvm: array having new owners and corresponding permission
903 * @dest_cnt: number of owners in next set.
905 * Return negative errno on failure or 0 on success with @srcvm updated.
907 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
909 const struct qcom_scm_vmperm *newvm,
910 unsigned int dest_cnt)
912 struct qcom_scm_current_perm_info *destvm;
913 struct qcom_scm_mem_map_info *mem_to_map;
914 phys_addr_t mem_to_map_phys;
915 phys_addr_t dest_phys;
917 size_t mem_to_map_sz;
925 unsigned long srcvm_bits = *srcvm;
927 src_sz = hweight_long(srcvm_bits) * sizeof(*src);
928 mem_to_map_sz = sizeof(*mem_to_map);
929 dest_sz = dest_cnt * sizeof(*destvm);
930 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
931 ALIGN(dest_sz, SZ_64);
933 ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
937 /* Fill source vmid detail */
940 for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
941 src[i++] = cpu_to_le32(b);
943 /* Fill details of mem buff to map */
944 mem_to_map = ptr + ALIGN(src_sz, SZ_64);
945 mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
946 mem_to_map->mem_addr = cpu_to_le64(mem_addr);
947 mem_to_map->mem_size = cpu_to_le64(mem_sz);
950 /* Fill details of the destination vmids */
951 destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
952 dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
953 for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
954 destvm->vmid = cpu_to_le32(newvm->vmid);
955 destvm->perm = cpu_to_le32(newvm->perm);
957 destvm->ctx_size = 0;
958 next_vm |= BIT(newvm->vmid);
961 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
962 ptr_phys, src_sz, dest_phys, dest_sz);
963 dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
966 "Assign memory protection call failed %d\n", ret);
973 EXPORT_SYMBOL(qcom_scm_assign_mem);
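/*
* Illustrative sketch (not part of this driver): handing a buffer that is
* currently owned by HLOS over to another VM with read/write permission.
* 'addr', 'size' and the destination VMID are hypothetical example values.
*
*	u64 src = BIT(QCOM_SCM_VMID_HLOS);
*	struct qcom_scm_vmperm newvm = {
*		.vmid = QCOM_SCM_VMID_MSS_MSA,
*		.perm = QCOM_SCM_PERM_RW,
*	};
*	int ret;
*
*	ret = qcom_scm_assign_mem(addr, size, &src, &newvm, 1);
*/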
976 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
978 bool qcom_scm_ocmem_lock_available(void)
980 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
981 QCOM_SCM_OCMEM_LOCK_CMD);
983 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
986 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
987 * region to the specified initiator
989 * @id: tz initiator id
990 * @offset: OCMEM offset
992 * @mode: access mode (WIDE/NARROW)
994 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
997 struct qcom_scm_desc desc = {
998 .svc = QCOM_SCM_SVC_OCMEM,
999 .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
1004 .arginfo = QCOM_SCM_ARGS(4),
1007 return qcom_scm_call(__scm->dev, &desc, NULL);
1009 EXPORT_SYMBOL(qcom_scm_ocmem_lock);
1012 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1013 * region from the specified initiator
1015 * @id: tz initiator id
1016 * @offset: OCMEM offset
1019 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1021 struct qcom_scm_desc desc = {
1022 .svc = QCOM_SCM_SVC_OCMEM,
1023 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1027 .arginfo = QCOM_SCM_ARGS(3),
1030 return qcom_scm_call(__scm->dev, &desc, NULL);
1032 EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
1035 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1037 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1038 * qcom_scm_ice_set_key() are available.
1040 bool qcom_scm_ice_available(void)
1042 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1043 QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1044 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1045 QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1047 EXPORT_SYMBOL(qcom_scm_ice_available);
1050 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1051 * @index: the keyslot to invalidate
1053 * The UFSHCI and eMMC standards define a standard way to do this, but it
1054 * doesn't work on these SoCs; only this SCM call does.
1056 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1057 * call doesn't specify which ICE instance the keyslot belongs to.
1059 * Return: 0 on success; -errno on failure.
1061 int qcom_scm_ice_invalidate_key(u32 index)
1063 struct qcom_scm_desc desc = {
1064 .svc = QCOM_SCM_SVC_ES,
1065 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1066 .arginfo = QCOM_SCM_ARGS(1),
1068 .owner = ARM_SMCCC_OWNER_SIP,
1071 return qcom_scm_call(__scm->dev, &desc, NULL);
1073 EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
1076 * qcom_scm_ice_set_key() - Set an inline encryption key
1077 * @index: the keyslot into which to set the key
1078 * @key: the key to program
1079 * @key_size: the size of the key in bytes
1080 * @cipher: the encryption algorithm the key is for
1081 * @data_unit_size: the encryption data unit size, i.e. the size of each
1082 * individual plaintext and ciphertext. Given in 512-byte
1083 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1085 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1086 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1088 * The UFSHCI and eMMC standards define a standard way to do this, but it
1089 * doesn't work on these SoCs; only this SCM call does.
1091 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1092 * call doesn't specify which ICE instance the keyslot belongs to.
1094 * Return: 0 on success; -errno on failure.
1096 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1097 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1099 struct qcom_scm_desc desc = {
1100 .svc = QCOM_SCM_SVC_ES,
1101 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1102 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1103 QCOM_SCM_VAL, QCOM_SCM_VAL,
1106 .args[2] = key_size,
1108 .args[4] = data_unit_size,
1109 .owner = ARM_SMCCC_OWNER_SIP,
1112 dma_addr_t key_phys;
1116 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1117 * physical address that's been properly flushed. The sanctioned way to
1118 * do this is by using the DMA API. But as is best practice for crypto
1119 * keys, we also must wipe the key after use. This makes kmemdup() +
1120 * dma_map_single() not clearly correct, since the DMA API can use
1121 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
1122 * keys is normally rare and thus not performance-critical.
1125 keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1129 memcpy(keybuf, key, key_size);
1130 desc.args[1] = key_phys;
1132 ret = qcom_scm_call(__scm->dev, &desc, NULL);
1134 memzero_explicit(keybuf, key_size);
1136 dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1139 EXPORT_SYMBOL(qcom_scm_ice_set_key);
1142 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1144 * Return true if HDCP is supported, false if not.
1146 bool qcom_scm_hdcp_available(void)
1149 int ret = qcom_scm_clk_enable();
1154 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1155 QCOM_SCM_HDCP_INVOKE);
1157 qcom_scm_clk_disable();
1161 EXPORT_SYMBOL(qcom_scm_hdcp_available);
1164 * qcom_scm_hdcp_req() - Send HDCP request.
1165 * @req: HDCP request array
1166 * @req_cnt: HDCP request array count
1167 * @resp: response buffer passed to SCM
1169 * Write HDCP register(s) through SCM.
1171 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1174 struct qcom_scm_desc desc = {
1175 .svc = QCOM_SCM_SVC_HDCP,
1176 .cmd = QCOM_SCM_HDCP_INVOKE,
1177 .arginfo = QCOM_SCM_ARGS(10),
1190 .owner = ARM_SMCCC_OWNER_SIP,
1192 struct qcom_scm_res res;
1194 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1197 ret = qcom_scm_clk_enable();
1201 ret = qcom_scm_call(__scm->dev, &desc, &res);
1202 *resp = res.result[0];
1204 qcom_scm_clk_disable();
1208 EXPORT_SYMBOL(qcom_scm_hdcp_req);
1210 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1212 struct qcom_scm_desc desc = {
1213 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1214 .cmd = QCOM_SCM_SMMU_PT_FORMAT,
1215 .arginfo = QCOM_SCM_ARGS(3),
1218 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1219 .owner = ARM_SMCCC_OWNER_SIP,
1222 return qcom_scm_call(__scm->dev, &desc, NULL);
1224 EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);
1226 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1228 struct qcom_scm_desc desc = {
1229 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1230 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1231 .arginfo = QCOM_SCM_ARGS(2),
1232 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1234 .owner = ARM_SMCCC_OWNER_SIP,
1238 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1240 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
1242 bool qcom_scm_lmh_dcvsh_available(void)
1244 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1246 EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
1248 int qcom_scm_lmh_profile_change(u32 profile_id)
1250 struct qcom_scm_desc desc = {
1251 .svc = QCOM_SCM_SVC_LMH,
1252 .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1253 .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1254 .args[0] = profile_id,
1255 .owner = ARM_SMCCC_OWNER_SIP,
1258 return qcom_scm_call(__scm->dev, &desc, NULL);
1260 EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
1262 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1263 u64 limit_node, u32 node_id, u64 version)
1265 dma_addr_t payload_phys;
1267 int ret, payload_size = 5 * sizeof(u32);
1269 struct qcom_scm_desc desc = {
1270 .svc = QCOM_SCM_SVC_LMH,
1271 .cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1272 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1273 QCOM_SCM_VAL, QCOM_SCM_VAL),
1274 .args[1] = payload_size,
1275 .args[2] = limit_node,
1278 .owner = ARM_SMCCC_OWNER_SIP,
1281 payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
1285 payload_buf[0] = payload_fn;
1287 payload_buf[2] = payload_reg;
1289 payload_buf[4] = payload_val;
1291 desc.args[0] = payload_phys;
1293 ret = qcom_scm_call(__scm->dev, &desc, NULL);
1295 dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
1298 EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
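/*
* Resolve the physical address of the TCSR download-mode register from the
* "qcom,dload-mode" phandle + offset property, if the DT provides one.
*/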
1300 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1302 struct device_node *tcsr;
1303 struct device_node *np = dev->of_node;
1304 struct resource res;
1308 tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1312 ret = of_address_to_resource(tcsr, 0, &res);
1317 ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1321 *addr = res.start + offset;
1327 * qcom_scm_is_available() - Checks if SCM is available
1329 bool qcom_scm_is_available(void)
1333 EXPORT_SYMBOL(qcom_scm_is_available);
1335 static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
1337 /* FW currently only supports a single wq_ctx (zero).
1338 * TODO: Update this logic to include dynamic allocation and lookup of
1339 * completion structs when FW supports more wq_ctx values.
1342 dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
1349 int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
1353 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1357 wait_for_completion(&__scm->waitq_comp);
1362 static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
1366 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1370 complete(&__scm->waitq_comp);
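/*
* Interrupt raised by the firmware when a wait-queue context needs servicing:
* drain every pending wq context and wake up the corresponding waiter.
*/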
1375 static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
1378 struct qcom_scm *scm = data;
1379 u32 wq_ctx, flags, more_pending = 0;
1382 ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
1384 dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
1388 if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
1389 flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
1390 dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
1394 ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
1397 } while (more_pending);
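/*
* Probe: look up the optional TCSR download-mode address, acquire the
* interconnect path and whichever clocks the match data requires, register
* the PAS reset controller, hook up the optional wait-queue interrupt and,
* if requested on the command line, enable download mode.
*/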
1403 static int qcom_scm_probe(struct platform_device *pdev)
1405 struct qcom_scm *scm;
1409 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1413 ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1417 mutex_init(&scm->scm_bw_lock);
1419 clks = (unsigned long)of_device_get_match_data(&pdev->dev);
1421 scm->path = devm_of_icc_get(&pdev->dev, NULL);
1422 if (IS_ERR(scm->path))
1423 return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
1424 "failed to acquire interconnect path\n");
1426 scm->core_clk = devm_clk_get(&pdev->dev, "core");
1427 if (IS_ERR(scm->core_clk)) {
1428 if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
1429 return PTR_ERR(scm->core_clk);
1431 if (clks & SCM_HAS_CORE_CLK) {
1432 dev_err(&pdev->dev, "failed to acquire core clk\n");
1433 return PTR_ERR(scm->core_clk);
1436 scm->core_clk = NULL;
1439 scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
1440 if (IS_ERR(scm->iface_clk)) {
1441 if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
1442 return PTR_ERR(scm->iface_clk);
1444 if (clks & SCM_HAS_IFACE_CLK) {
1445 dev_err(&pdev->dev, "failed to acquire iface clk\n");
1446 return PTR_ERR(scm->iface_clk);
1449 scm->iface_clk = NULL;
1452 scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
1453 if (IS_ERR(scm->bus_clk)) {
1454 if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
1455 return PTR_ERR(scm->bus_clk);
1457 if (clks & SCM_HAS_BUS_CLK) {
1458 dev_err(&pdev->dev, "failed to acquire bus clk\n");
1459 return PTR_ERR(scm->bus_clk);
1462 scm->bus_clk = NULL;
1465 scm->reset.ops = &qcom_scm_pas_reset_ops;
1466 scm->reset.nr_resets = 1;
1467 scm->reset.of_node = pdev->dev.of_node;
1468 ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1472 /* vote for max clk rate for highest performance */
1473 ret = clk_set_rate(scm->core_clk, INT_MAX);
1478 __scm->dev = &pdev->dev;
1480 init_completion(&__scm->waitq_comp);
1482 irq = platform_get_irq_optional(pdev, 0);
1487 ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
1488 IRQF_ONESHOT, "qcom-scm", __scm);
1490 return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
1496 * If requested, enable "download mode"; from this point on, a warm boot
1497 * will cause the boot stages to enter download mode, unless it is
1498 * disabled below by a clean shutdown/reboot.
1501 qcom_scm_set_download_mode(true);
1506 static void qcom_scm_shutdown(struct platform_device *pdev)
1508 /* Clean shutdown, disable download mode to allow normal restart */
1510 qcom_scm_set_download_mode(false);
1513 static const struct of_device_id qcom_scm_dt_match[] = {
1514 { .compatible = "qcom,scm-apq8064",
1515 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1517 { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
1521 { .compatible = "qcom,scm-ipq4019" },
1522 { .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
1525 { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
1526 { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
1527 { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
1531 { .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
1535 { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
1539 { .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK |
1543 { .compatible = "qcom,scm-msm8994" },
1544 { .compatible = "qcom,scm-msm8996" },
1545 { .compatible = "qcom,scm" },
1548 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
1550 static struct platform_driver qcom_scm_driver = {
1553 .of_match_table = qcom_scm_dt_match,
1554 .suppress_bind_attrs = true,
1556 .probe = qcom_scm_probe,
1557 .shutdown = qcom_scm_shutdown,
1560 static int __init qcom_scm_init(void)
1562 return platform_driver_register(&qcom_scm_driver);
1564 subsys_initcall(qcom_scm_init);
1566 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
1567 MODULE_LICENSE("GPL v2");