/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with the hardware behaviour on i.MX6SL and i.MX6ULL.
 * Like all raw DCP digest output, these are byte-flipped: each is the
 * reversed SHA-1/SHA-256 digest of the empty message.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};

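/*
 * Channel assignment: the DCP provides DCP_MAX_CHANS hardware channels.
 * This driver statically dedicates channel 0 to hashing and channel 2 to
 * AES, each serviced by its own kthread (dcp_chan_thread_sha/aes below);
 * channels 1 and 3 are left unused.
 */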
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};

/*
 * There can be only one instance of the MXS DCP, due to the design of
 * the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

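/*
 * Orientation (an illustrative sketch, mirroring what mxs_dcp_run_aes()
 * below composes for the first block of an AES-128-CBC encryption):
 *
 *	control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *		   MXS_DCP_CONTROL0_INTERRUPT |
 *		   MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *		   MXS_DCP_CONTROL0_PAYLOAD_KEY |	<- key passed in payload
 *		   MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *		   MXS_DCP_CONTROL0_CIPHER_INIT;	<- load the IV
 *	control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *		   MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 */
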
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	int err = 0;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		err = -ETIMEDOUT;
		goto out_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		err = -EINVAL;
	}

out_unmap:
	/* Unmap the descriptor on all exit paths, not just on success. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return err;
}
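
/*
 * Note: the driver submits exactly one descriptor per channel kick
 * (next_cmd_addr is always 0), so each completion above corresponds to a
 * single processed descriptor.
 */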

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

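	/*
	 * The DCP cannot DMA from the caller's scatterlists directly: input
	 * is staged through the coherent aes_in_buf bounce buffer in chunks
	 * of at most DCP_BUF_SZ, ciphered, and then scattered back from
	 * aes_out_buf into req->dst.
	 */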
	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;

		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* For CBC, copy the last block back into req->info as the IV for chaining. */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info,
			       out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->info,
			       in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

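/*
 * The hardware only implements AES-128; requests keyed with any other
 * length never reach the DCP. mxs_dcp_aes_enqueue() routes them through
 * the software fallback transform allocated in mxs_dcp_aes_fallback_init().
 */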
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return -EINPROGRESS;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES-128 is supported by the hardware: store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align the driver with the hardware behaviour when hashing
	 * zero-length input: return the precomputed null hash directly.
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware stores the digest byte-reversed; flip it back. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

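/*
 * Session flow: the first update of a hashing session sets rctx->init
 * (actx->hot tracks that the session is open), which makes
 * mxs_dcp_run_sha() issue HASH_INIT; an update with fini set makes it
 * issue HASH_TERM and write out the digest.
 */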
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return -EINPROGRESS;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

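/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * algorithms above are registered, an in-kernel consumer reaches the DCP
 * through the generic symmetric-cipher API, e.g. for one AES-128-CBC
 * encryption of a buffer "buf" of "len" bytes (a multiple of
 * AES_BLOCK_SIZE) with key "key" and IV "iv":
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *r;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	r = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, len);
 *	skcipher_request_set_callback(r, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(r, &sg, &sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(r), &wait);
 *
 * Error handling is elided; "cbc(aes)" typically resolves to
 * "cbc-aes-dcp" here when its priority (400) wins.
 */
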
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

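/*
 * Usage sketch (illustrative only): these ahash registrations are consumed
 * through the async hash API, e.g. hashing "data" of "len" bytes into a
 * u8 digest[SHA256_DIGEST_SIZE]:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *r = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(r, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(r, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(r), &wait);
 */
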
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
		return dcp_vmi_irq;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
		return dcp_irq;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		return ret;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");