Commit | Line | Data |
---|---|---|
a583ed31 HG |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */ | |
3 | ||
4 | #include <linux/kernel.h> | |
5 | #include <linux/module.h> | |
6 | #include <linux/clk.h> | |
7 | #include <linux/hw_random.h> | |
8 | #include <linux/io.h> | |
9 | #include <linux/platform_device.h> | |
10 | #include <linux/pm_runtime.h> | |
11 | #include <linux/interrupt.h> | |
12 | #include <linux/irqreturn.h> | |
13 | #include <linux/workqueue.h> | |
14 | #include <linux/circ_buf.h> | |
15 | #include <linux/completion.h> | |
16 | #include <linux/of.h> | |
17 | #include <linux/bitfield.h> | |
3357b611 | 18 | #include <linux/fips.h> |
a583ed31 HG |
19 | |
20 | #include "cctrng.h" | |
21 | ||
/* Accessors for named bit-fields inside CC registers; the *_BIT_SHIFT
 * and *_BIT_SIZE constants are provided by cctrng.h.
 */
#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

/* Extract field <fld_name> of register <reg_name> from raw value <reg_val> */
#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for CC reset completion */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay (ms) */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 * - of a power-of-2 size (limitation of circ_buf.h macros)
 * - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 * - SAMPLE_CNT is input value from the characterisation process
 * - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
50 | ||
/* Per-device driver state; one instance per CryptoCell TRNG. */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CC register space */
	struct clk *clk;		/* optional device clock */
	struct hwrng rng;		/* registration with the hwrng core */
	u32 active_rosc;		/* index of ring oscillator in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	u32 data_buf[CCTRNG_DATA_BUF_WORDS];	/* collected entropy words */
	struct circ_buf circ;		/* head/tail view over data_buf */
	struct work_struct compwork;	/* deferred completion handling */
	struct work_struct startwork;	/* deferred HW (re)start */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
74 | ||
75 | ||
/* functions for write/read CC registers; reg is a byte offset into the
 * mapped CC register space
 */
static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, (drvdata->cc_base + reg));
}
static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}
85 | ||
86 | ||
/*
 * Take a runtime-PM reference on @dev, resuming it if suspended.
 * Returns 0 on success or a negative errno.
 */
static int cc_trng_pm_get(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		/* pm_runtime_get_sync() raises the usage count even on
		 * failure; drop it so the counter stays balanced
		 */
		pm_runtime_put_noidle(dev);
		return rc;
	}

	/* pm_runtime_get_sync() can return 1 as a valid return code */
	return 0;
}
96 | ||
/* Drop our runtime-PM reference and let @dev autosuspend. */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int err;

	pm_runtime_mark_last_busy(dev);
	err = pm_runtime_put_autosuspend(dev);
	if (err)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", err);
}
106 | ||
107 | static int cc_trng_pm_init(struct cctrng_drvdata *drvdata) | |
108 | { | |
109 | struct device *dev = &(drvdata->pdev->dev); | |
110 | ||
111 | /* must be before the enabling to avoid redundant suspending */ | |
112 | pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT); | |
113 | pm_runtime_use_autosuspend(dev); | |
114 | /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */ | |
115 | return pm_runtime_set_active(dev); | |
116 | } | |
117 | ||
118 | static void cc_trng_pm_go(struct cctrng_drvdata *drvdata) | |
119 | { | |
120 | struct device *dev = &(drvdata->pdev->dev); | |
121 | ||
122 | /* enable the PM module*/ | |
123 | pm_runtime_enable(dev); | |
124 | } | |
125 | ||
126 | static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata) | |
127 | { | |
128 | struct device *dev = &(drvdata->pdev->dev); | |
129 | ||
130 | pm_runtime_disable(dev); | |
131 | } | |
132 | ||
133 | ||
134 | static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata) | |
135 | { | |
136 | struct device *dev = &(drvdata->pdev->dev); | |
137 | struct device_node *np = drvdata->pdev->dev.of_node; | |
138 | int rc; | |
139 | int i; | |
140 | /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */ | |
141 | int ret = -EINVAL; | |
142 | ||
143 | rc = of_property_read_u32_array(np, "arm,rosc-ratio", | |
144 | drvdata->smpl_ratio, | |
145 | CC_TRNG_NUM_OF_ROSCS); | |
146 | if (rc) { | |
147 | /* arm,rosc-ratio was not found in device tree */ | |
148 | return rc; | |
149 | } | |
150 | ||
151 | /* verify that at least one rosc has (sampling ratio > 0) */ | |
152 | for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) { | |
153 | dev_dbg(dev, "rosc %d sampling ratio %u", | |
154 | i, drvdata->smpl_ratio[i]); | |
155 | ||
156 | if (drvdata->smpl_ratio[i] > 0) | |
157 | ret = 0; | |
158 | } | |
159 | ||
160 | return ret; | |
161 | } | |
162 | ||
163 | static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata) | |
164 | { | |
165 | struct device *dev = &(drvdata->pdev->dev); | |
166 | ||
167 | dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc); | |
168 | drvdata->active_rosc += 1; | |
169 | ||
170 | while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) { | |
171 | if (drvdata->smpl_ratio[drvdata->active_rosc] > 0) | |
172 | return 0; | |
173 | ||
174 | drvdata->active_rosc += 1; | |
175 | } | |
176 | return -EINVAL; | |
177 | } | |
178 | ||
179 | ||
180 | static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata) | |
181 | { | |
182 | u32 max_cycles; | |
183 | ||
184 | /* Set watchdog threshold to maximal allowed time (in CPU cycles) */ | |
185 | max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]); | |
186 | cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles); | |
187 | ||
188 | /* enable the RND source */ | |
189 | cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1); | |
190 | ||
191 | /* unmask RNG interrupts */ | |
192 | cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK); | |
193 | } | |
194 | ||
195 | ||
196 | /* increase circular data buffer index (head/tail) */ | |
197 | static inline void circ_idx_inc(int *idx, int bytes) | |
198 | { | |
199 | *idx += (bytes + 3) >> 2; | |
200 | *idx &= (CCTRNG_DATA_BUF_WORDS - 1); | |
201 | } | |
202 | ||
203 | static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata) | |
204 | { | |
205 | return CIRC_SPACE(drvdata->circ.head, | |
206 | drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); | |
207 | ||
208 | } | |
209 | ||
210 | static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait) | |
211 | { | |
212 | /* current implementation ignores "wait" */ | |
213 | ||
214 | struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv; | |
215 | struct device *dev = &(drvdata->pdev->dev); | |
216 | u32 *buf = (u32 *)drvdata->circ.buf; | |
217 | size_t copied = 0; | |
218 | size_t cnt_w; | |
219 | size_t size; | |
220 | size_t left; | |
221 | ||
222 | if (!spin_trylock(&drvdata->read_lock)) { | |
223 | /* concurrent consumers from data_buf cannot be served */ | |
224 | dev_dbg_ratelimited(dev, "unable to hold lock\n"); | |
225 | return 0; | |
226 | } | |
227 | ||
228 | /* copy till end of data buffer (without wrap back) */ | |
229 | cnt_w = CIRC_CNT_TO_END(drvdata->circ.head, | |
230 | drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); | |
231 | size = min((cnt_w<<2), max); | |
232 | memcpy(data, &(buf[drvdata->circ.tail]), size); | |
233 | copied = size; | |
234 | circ_idx_inc(&drvdata->circ.tail, size); | |
235 | /* copy rest of data in data buffer */ | |
236 | left = max - copied; | |
237 | if (left > 0) { | |
238 | cnt_w = CIRC_CNT(drvdata->circ.head, | |
239 | drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); | |
240 | size = min((cnt_w<<2), left); | |
241 | memcpy(data, &(buf[drvdata->circ.tail]), size); | |
242 | copied += size; | |
243 | circ_idx_inc(&drvdata->circ.tail, size); | |
244 | } | |
245 | ||
246 | spin_unlock(&drvdata->read_lock); | |
247 | ||
248 | if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { | |
249 | if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) { | |
250 | /* re-check space in buffer to avoid potential race */ | |
251 | if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { | |
252 | /* increment device's usage counter */ | |
253 | int rc = cc_trng_pm_get(dev); | |
254 | ||
255 | if (rc) { | |
256 | dev_err(dev, | |
257 | "cc_trng_pm_get returned %x\n", | |
258 | rc); | |
259 | return rc; | |
260 | } | |
261 | ||
262 | /* schedule execution of deferred work handler | |
263 | * for filling of data buffer | |
264 | */ | |
265 | schedule_work(&drvdata->startwork); | |
266 | } else { | |
267 | atomic_set(&drvdata->pending_hw, 0); | |
268 | } | |
269 | } | |
270 | } | |
271 | ||
272 | return copied; | |
273 | } | |
274 | ||
/*
 * Program the TRNG block for the currently active rosc and start
 * entropy collection.  A SW reset is issued first; per the comment
 * below, reset completion is detected by the sampling-count register
 * reading back the value we wrote.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 */
	do {
		/* enable the HW RND clock */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	/* arm the watchdog, enable the source and unmask interrupts */
	cc_trng_enable_rnd_source(drvdata);
}
315 | ||
/*
 * Deferred (workqueue) handler for a TRNG completion interrupt.
 * Reads the RNG status, copies collected EHR words into the circular
 * buffer, and then either re-arms the RND source, retries with the
 * next ring oscillator, or lets the device autosuspend.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
		container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);


	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
		    CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
			CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		/* one full word was stored; advance head by 4 bytes */
		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
	    (cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		/* no rosc left (or buffer full): drop the HW claim and
		 * release our runtime-PM reference
		 */
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
401 | ||
/*
 * Interrupt handler (registered with IRQF_SHARED).  Acknowledges host
 * interrupts and defers RNG-completion processing to compwork;
 * unrecognized cause bits are only logged.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
448 | ||
43f3c2b4 | 449 | static void cc_trng_startwork_handler(struct work_struct *w) |
a583ed31 HG |
450 | { |
451 | struct cctrng_drvdata *drvdata = | |
452 | container_of(w, struct cctrng_drvdata, startwork); | |
453 | ||
454 | drvdata->active_rosc = 0; | |
455 | cc_trng_hw_trigger(drvdata); | |
456 | } | |
457 | ||
458 | ||
459 | static int cc_trng_clk_init(struct cctrng_drvdata *drvdata) | |
460 | { | |
461 | struct clk *clk; | |
462 | struct device *dev = &(drvdata->pdev->dev); | |
463 | int rc = 0; | |
464 | ||
465 | clk = devm_clk_get_optional(dev, NULL); | |
4d6aef2f KK |
466 | if (IS_ERR(clk)) |
467 | return dev_err_probe(dev, PTR_ERR(clk), | |
468 | "Error getting clock\n"); | |
469 | ||
a583ed31 HG |
470 | drvdata->clk = clk; |
471 | ||
472 | rc = clk_prepare_enable(drvdata->clk); | |
473 | if (rc) { | |
474 | dev_err(dev, "Failed to enable clock\n"); | |
475 | return rc; | |
476 | } | |
477 | ||
478 | return 0; | |
479 | } | |
480 | ||
/* Disable and unprepare the device clock. */
static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
{
	clk_disable_unprepare(drvdata->clk);
}
485 | ||
486 | ||
/*
 * Platform probe: map the CC register space, parse per-rosc sampling
 * ratios from DT, set up clock/IRQ/runtime-PM, register with the
 * hwrng core and kick off the first entropy collection.
 * Returns 0 on success or a negative errno.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	/* circ buffer head/tail operate over the static data_buf */
	drvdata->circ.buf = (char *)drvdata->data_buf;

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Map registers space (devm_ioremap_resource also validates the
	 * resource pointer)
	 */
	drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(drvdata->cc_base);
	}

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return irq;
	}

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc) {
		dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
		return rc;
	}

	rc = cc_trng_clk_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_clk_init failed\n");
		return rc;
	}

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_clk_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_pm_init failed\n");
		goto post_clk_err;
	}

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
		goto post_pm_err;
	}

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = hwrng_register(&drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	cc_trng_pm_fini(drvdata);

post_clk_err:
	cc_trng_clk_fini(drvdata);

	return rc;
}
613 | ||
614 | static int cctrng_remove(struct platform_device *pdev) | |
615 | { | |
616 | struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev); | |
617 | struct device *dev = &pdev->dev; | |
618 | ||
619 | dev_dbg(dev, "Releasing cctrng resources...\n"); | |
620 | ||
621 | hwrng_unregister(&drvdata->rng); | |
622 | ||
623 | cc_trng_pm_fini(drvdata); | |
624 | ||
625 | cc_trng_clk_fini(drvdata); | |
626 | ||
627 | dev_info(dev, "ARM cctrng device terminated\n"); | |
628 | ||
629 | return 0; | |
630 | } | |
631 | ||
/* PM suspend callback: signal power-down to the CC, then gate the
 * device clock (the register write happens while the clock is still on).
 */
static int __maybe_unused cctrng_suspend(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
			POWER_DOWN_ENABLE);

	clk_disable_unprepare(drvdata->clk);

	return 0;
}
644 | ||
645 | static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata) | |
646 | { | |
647 | unsigned int val; | |
648 | unsigned int i; | |
649 | ||
650 | for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { | |
651 | /* in cc7x3 NVM_IS_IDLE indicates that CC reset is | |
652 | * completed and device is fully functional | |
653 | */ | |
654 | val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET); | |
655 | if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) { | |
656 | /* hw indicate reset completed */ | |
657 | return true; | |
658 | } | |
659 | /* allow scheduling other process on the processor */ | |
660 | schedule(); | |
661 | } | |
662 | /* reset not completed */ | |
663 | return false; | |
664 | } | |
665 | ||
666 | static int __maybe_unused cctrng_resume(struct device *dev) | |
667 | { | |
668 | struct cctrng_drvdata *drvdata = dev_get_drvdata(dev); | |
669 | int rc; | |
670 | ||
671 | dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); | |
672 | /* Enables the device source clk */ | |
673 | rc = clk_prepare_enable(drvdata->clk); | |
674 | if (rc) { | |
675 | dev_err(dev, "failed getting clock back on. We're toast.\n"); | |
676 | return rc; | |
677 | } | |
678 | ||
679 | /* wait for Cryptocell reset completion */ | |
680 | if (!cctrng_wait_for_reset_completion(drvdata)) { | |
681 | dev_err(dev, "Cryptocell reset not completed"); | |
682 | return -EBUSY; | |
683 | } | |
684 | ||
685 | /* unmask HOST RNG interrupt */ | |
686 | cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET, | |
687 | cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) & | |
688 | ~CC_HOST_RNG_IRQ_MASK); | |
689 | ||
690 | cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET, | |
691 | POWER_DOWN_DISABLE); | |
692 | ||
693 | return 0; | |
694 | } | |
695 | ||
/* Same callbacks serve both system sleep and runtime PM (no idle hook) */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* DT compatibles: CryptoCell 713 and 703 TRNG blocks */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};
714 | ||
/* Module entry: enforce the data_buf size invariants required by the
 * circ_buf macros and the EHR word count, then register the driver.
 */
static int __init cctrng_mod_init(void)
{
	/* Compile time assertion checks */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);

/* Module exit: unregister the platform driver. */
static void __exit cctrng_mod_exit(void)
{
	platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);
730 | ||
731 | /* Module description */ | |
732 | MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver"); | |
733 | MODULE_AUTHOR("ARM"); | |
734 | MODULE_LICENSE("GPL v2"); |