/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "ctrl.h"

bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif

/*
 * i.MX targets tend to have clock control subsystems that can
 * enable/disable clocking to our device.
 */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
static inline struct clk *caam_drv_identify_clk(struct device *dev,
						char *clk_name)
{
	return devm_clk_get(dev, clk_name);
}
#else
static inline struct clk *caam_drv_identify_clk(struct device *dev,
						char *clk_name)
{
	return NULL;
}
#endif
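
/*
 * Note: on non-i.MX platforms the helper above returns NULL; the common
 * clock API treats a NULL clock as a dummy, so the clk_prepare_enable()
 * and clk_disable_unprepare() calls in the probe/remove paths below are
 * harmless no-ops in that case.
 */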

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

	/* INIT RNG in non-test mode */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, Secure Keys must be generated as well
		 */

		/* wait for done */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle.
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* Initialize State Handle */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
				 OP_ALG_AAI_RNG4_SK);
	}

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
	init_job_desc(desc, 0);

	/* Uninstantiate State Handle 0 */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - descriptor to be executed
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
				       u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, flags;
	int i;

	if (ctrlpriv->virt_en == 1) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
	       --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is longer than 4 words, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
		    DESC_DBG_DECO_STAT_HOST_ERR)
			break;
		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
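
/*
 * Usage sketch (illustrative only; example_instantiate_sh0() is not part of
 * the driver and is never called): how the descriptor builders and
 * run_descriptor_deco0() combine. instantiate_rng() below follows exactly
 * this pattern for each uninstantiated state handle.
 */
#if 0
static int example_instantiate_sh0(struct device *ctrldev)
{
	u32 *desc, status = 0;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	build_instantiation_desc(desc, 0, 1);	/* SH0, generate secure keys */
	ret = run_descriptor_deco0(ctrldev, desc, &status);
	if (ret || (status && status != JRSTA_SSRC_JUMP_HALT_CC))
		dev_err(ctrldev, "RNG4 SH0 instantiation failed\n");

	kfree(desc);
	return ret;
}
#endif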

/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *		     which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entity, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *	     Caution: this can be done only once; if the keys need to be
 *	     regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor,
 *	     e.g. there was an RNG hardware error due to not "good enough"
 *	     entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
			   int gen_sk)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;
	int ret = 0, sh_idx;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if ((1 << sh_idx) & state_handle_mask)
			continue;

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason the State Handle didn't get
		 * instantiated although the descriptor has finished
		 * without any error (HW optimizations for later
		 * CAAM eras), then try again.
		 */
		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    !(rdsta_val & (1 << sh_idx)))
			ret = -EAGAIN;
		if (ret)
			break;
		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
		/* Clear the contents before recreating the descriptor */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);
	}

	kfree(desc);

	return ret;
}

/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *		       which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
	u32 *desc, status;
	int sh_idx, ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, then it means the state
		 * handle was initialized by us, and thus it needs to be
		 * deinitialized as well
		 */
		if ((1 << sh_idx) & state_handle_mask) {
			/*
			 * Create the descriptor for deinstantiating this state
			 * handle
			 */
			build_deinstantiation_desc(desc, sh_idx);

			/* Try to run it through DECO0 */
			ret = run_descriptor_deco0(ctrldev, desc, &status);

			if (ret ||
			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
				dev_err(ctrldev,
					"Failed to deinstantiate RNG4 SH%d\n",
					sh_idx);
				break;
			}
			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
		}
	}

	kfree(desc);

	return ret;
}

static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_ctrl __iomem *ctrl;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

	/* Remove platform devices under the crypto node */
	of_platform_depopulate(ctrldev);

#ifdef CONFIG_CAAM_QI
	if (ctrlpriv->qidev)
		caam_qi_shutdown(ctrlpriv->qidev);
#endif

	/*
	 * De-initialize RNG state handles initialized by this driver.
	 * In case of DPAA 2.x, RNG is managed by MC firmware.
	 */
	if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(ctrl);

	/* shut clocks off before finalizing shutdown */
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	clk_disable_unprepare(ctrlpriv->caam_mem);
	clk_disable_unprepare(ctrlpriv->caam_aclk);
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
	return 0;
}

/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/* put RNG4 into program mode */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)
		goto start_rng;

	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	/* read the control register */
	val = rd_reg32(&r4tst->rtmctl);
start_rng:
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
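
/*
 * Worked example (illustrative): a call with ent_delay == 3200 system
 * clocks programs an entropy delay of 3200 into RTSDCTL, a minimum
 * frequency count of 3200 >> 2 == 800 into RTFRQMIN, and disables the
 * maximum frequency count, before switching RNG4 from program mode back
 * to run mode with raw sampling selected.
 */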

/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on the "sec-era" property in the DTS. This property is updated by u-boot.
 **/
int caam_get_era(void)
{
	struct device_node *caam_node;
	int ret;
	u32 prop;

	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
	of_node_put(caam_node);

	return ret ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
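
/*
 * Caller sketch (illustrative; caam_has_era6_feature() is a hypothetical
 * helper, not part of the driver): other CAAM code can branch on the
 * reported era, treating -ENOTSUPP (no "fsl,sec-era" property in the DT)
 * as "feature not available".
 */
#if 0
static bool caam_has_era6_feature(void)
{
	int era = caam_get_era();

	return era >= 6;	/* negative error codes count as "no" */
}
#endif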
416
ec360607
HG
417static const struct of_device_id caam_match[] = {
418 {
419 .compatible = "fsl,sec-v4.0",
420 },
421 {
422 .compatible = "fsl,sec4.0",
423 },
424 {},
425};
426MODULE_DEVICE_TABLE(of, caam_match);
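
/*
 * Illustrative device tree fragment matched by the table above (sketch only;
 * addresses are made up, and the authoritative format is the fsl-sec4
 * devicetree binding documentation):
 *
 *	crypto@300000 {
 *		compatible = "fsl,sec-v4.0";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		reg = <0x300000 0x10000>;
 *		ranges = <0 0x300000 0x10000>;
 *	};
 */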

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	u64 caam_id;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_drv_private *ctrlpriv;
	struct clk *clk;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
#endif
	u32 scfgr, comp_params;
	u32 cha_vid_ls;
	int pg_size;
	int BLOCK_OFFSET = 0;

	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	nprop = pdev->dev.of_node;

	/* Enable clocking */
	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM ipg clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_ipg = clk;

	clk = caam_drv_identify_clk(&pdev->dev, "mem");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM mem clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_mem = clk;

	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM aclk clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_aclk = clk;

	if (!of_machine_is_compatible("fsl,imx6ul")) {
		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev,
				"can't identify CAAM emi_slow clk: %d\n", ret);
			return ret;
		}
		ctrlpriv->caam_emi_slow = clk;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_mem);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
			ret);
		goto disable_caam_ipg;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
		goto disable_caam_mem;
	}

	if (ctrlpriv->caam_emi_slow) {
		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
				ret);
			goto disable_caam_aclk;
		}
	}

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (ctrl == NULL) {
		dev_err(dev, "caam: of_iomap() failed\n");
		ret = -ENOMEM;
		goto disable_caam_emi_slow;
	}

	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
				  (CSTA_PLEND | CSTA_ALT_PLEND));

	/* Find the page size using the CTPR_MS register */
	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;

	/* Choose BLOCK_OFFSET based on the page size supported by the
	 * platform
	 */
	if (pg_size == 0)
		BLOCK_OFFSET = PG_SIZE_4K;
	else
		BLOCK_OFFSET = PG_SIZE_64K;

	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
			   ((__force uint8_t *)ctrl +
			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
			   );
	ctrlpriv->deco = (struct caam_deco __iomem __force *)
			 ((__force uint8_t *)ctrl +
			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
			 );
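
	/*
	 * Illustrative layout note: each register block computed above (and
	 * the job ring blocks mapped further below) sits at a fixed stride
	 * from the controller base: ctrl + block_number * 4 KiB, or a
	 * 64 KiB stride when the part reports the larger page size.
	 */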

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register.
	 * In case of DPAA 2.x, Management Complex firmware performs
	 * the configuration.
	 */
	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
	if (!caam_dpaa2)
		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
			      (sizeof(dma_addr_t) == sizeof(u64) ?
			       MCFGR_LONG_PTR : 0));

	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if Virtualization is enabled for this platform
	 */
	scfgr = rd_reg32(&ctrl->scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
	} else {
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
	}
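
	/*
	 * Summary of the decision above (illustrative):
	 *
	 *   VIRT_EN_INCL  VIRT_EN_POR  SCFGR_VIRT_EN  ->  virt_en
	 *        1             1             x             1
	 *        1             0             1             1
	 *        0             1             x             1
	 *   any other combination                          0
	 */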

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |
			      JRSTART_JR3_START);

	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (caam_dpaa2)
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
		else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		else
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
	} else {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
		goto iounmap_ctrl;
	}

	ret = of_platform_populate(nprop, caam_match, NULL, dev);
	if (ret) {
		dev_err(dev, "JR platform devices creation error\n");
		goto iounmap_ctrl;
	}

#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif

	ring = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
					     ((__force uint8_t *)ctrl +
					      (ring + JR_BLOCK_NUMBER) *
					      BLOCK_OFFSET
					     );
			ctrlpriv->total_jobrs++;
			ring++;
		}

	/* Check to see if (DPAA 1.x) QI present. If so, enable */
	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
			       ((__force uint8_t *)ctrl +
				BLOCK_OFFSET * QI_BLOCK_NUMBER
			       );
		/* This is all that's required to physically enable QI */
		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

		/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
		ret = caam_qi_init(pdev);
		if (ret)
			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		ret = -ENOMEM;
		goto caam_remove;
	}

	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);

	/*
	 * If SEC has RNG version >= 4 and the RNG state handle has not
	 * already been instantiated, do RNG instantiation.
	 * In case of DPAA 2.x, RNG is managed by MC firmware.
	 */
	if (!caam_dpaa2 &&
	    (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
		ctrlpriv->rng4_sh_init =
			rd_reg32(&ctrl->r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK) were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
		do {
			int inst_handles =
				rd_reg32(&ctrl->r4tst[0].rdsta) &
				RDSTA_IFMASK;
			/*
			 * If either SH was instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
				dev_info(dev,
					 "Entropy delay = %u\n",
					 ent_delay);
				kick_trng(pdev, ent_delay);
				ent_delay += 400;
			}
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
					      gen_sk);
			if (ret == -EAGAIN)
				/*
				 * if here, the loop will rerun,
				 * so don't hog the CPU
				 */
				cpu_relax();
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
		if (ret) {
			dev_err(dev, "failed to instantiate RNG");
			goto caam_remove;
		}
		/*
		 * Set handles init'ed by this module as the complement of the
		 * already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

		/* Enable RDB bit so that RNG works faster */
		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
	}
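
	/*
	 * Illustrative example of the retry behaviour above: a first attempt
	 * at the minimum entropy delay (RTSDCTL_ENT_DLY_MIN) that comes back
	 * with -EAGAIN is followed by attempts with the delay increased by
	 * 400 system clocks each time, until instantiation succeeds or the
	 * RTSDCTL_ENT_DLY_MAX ceiling is reached.
	 */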

	/* NOTE: RTIC detection ought to go here, around Si time */

	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 caam_get_era());
	dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present,
		 caam_dpaa2 ? "yes" : "no");

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->req_dequeued,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_req,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_req,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,
			    &caam_fops_u64_ro);

	/* Controller level - global status values */
	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultaddr,
			    &caam_fops_u32_ro);
	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultdetail,
			    &caam_fops_u32_ro);
	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->status,
			    &caam_fops_u32_ro);

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
						S_IRUSR |
						S_IRGRP | S_IROTH,
						ctrlpriv->ctl,
						&ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tdsk_wrap);
#endif
	return 0;

caam_remove:
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
	caam_remove(pdev);
	return ret;

iounmap_ctrl:
	iounmap(ctrl);
disable_caam_emi_slow:
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
	clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
	clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	return ret;
}

static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.of_match_table = caam_match,
	},
	.probe = caam_probe,
	.remove = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");