drivers/crypto/mxs-dcp.c
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE

#define DCP_ALIGNMENT	64

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t next_cmd_addr;
	uint32_t control0;
	uint32_t control1;
	uint32_t source;
	uint32_t destination;
	uint32_t size;
	uint32_t payload;
	uint32_t status;
};
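
/*
 * The engine fetches one of these descriptors from coherent memory: the
 * driver writes the descriptor's physical address into the channel's
 * CMDPTR register and bumps the channel semaphore (see
 * mxs_dcp_start_dma() below). next_cmd_addr supports chaining, but this
 * driver only ever submits a single descriptor at a time.
 */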

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t aes_in_buf[DCP_BUF_SZ];
	uint8_t aes_out_buf[DCP_BUF_SZ];
	uint8_t sha_in_buf[DCP_BUF_SZ];

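	/*
	 * The 16-byte AES key followed immediately by the 16-byte CBC IV,
	 * hence 2 * AES_KEYSIZE_128: the engine reads both through the
	 * single 'payload' descriptor field.
	 */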
	uint8_t aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device *dev;
	void __iomem *base;

	uint32_t caps;

	struct dcp_coherent_block *coh;

	struct completion completion[DCP_MAX_CHANS];
	struct mutex mutex[DCP_MAX_CHANS];
	struct task_struct *thread[DCP_MAX_CHANS];
	struct crypto_queue queue[DCP_MAX_CHANS];
};

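/*
 * Of the four hardware channels, the driver statically uses one for
 * hashing and one for AES; each is drained by its own kthread below.
 */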
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex	mutex;
	uint32_t	alg;
	unsigned int	hot:1;

	/* Crypto-specific context */
	struct crypto_ablkcipher *fallback;
	unsigned int	key_len;
	uint8_t		key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

/*
 * There can be only one instance of the MXS DCP, due to the design of
 * the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

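/* Per-channel registers; each channel's register bank is 0x40 bytes apart. */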
#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long timeout;
	int ret = 0;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	timeout = wait_for_completion_timeout(&sdcp->completion[chan],
					      msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		ret = -EINVAL;
	}

err_unmap:
	/* Unmap the descriptor even on failure, so the mapping is not leaked. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return ret;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

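/*
 * Feed one request through the engine using the coherent bounce buffers:
 * source scatterlist chunks are copied into aes_in_buf until the buffer
 * fills up (or the last chunk is reached), the buffer is run through the
 * DCP, and the result is scattered back from aes_out_buf into the
 * destination scatterlist, tracking partially-consumed destination
 * entries via 'split'.
 */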
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0;
	int init = 0;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src)) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);
	}

	return ret;
}

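/*
 * Per-channel worker: sleeps until woken by an enqueue, then drains the
 * channel's crypto_queue, completing each request in turn.
 */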
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return -EINPROGRESS;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES 128 is supported by the hardware: store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by the kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, req->result,
					     halg->digestsize, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
				 DMA_FROM_DEVICE);

	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

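/*
 * Copy request data into the SHA bounce buffer, flushing the buffer
 * through the engine whenever it fills up; on the final call, also run
 * the trailing partial block and fix up the digest byte order.
 */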
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The engine returns the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize / 2; i++) {
			swap(req->result[i],
			     req->result[halg->digestsize - i - 1]);
		}
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
			if (!fini)
				continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

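/*
 * Common path for update/final/finup: mark the request context, flag the
 * first submission of a session with 'init' so the engine initializes its
 * hash state, and hand the request to the SHA channel thread.
 */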
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return -EINPROGRESS;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-dcp",
		.cra_priority = 400,
		.cra_alignmask = 15,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_init = mxs_dcp_aes_fallback_init,
		.cra_exit = mxs_dcp_aes_fallback_exit,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct dcp_async_ctx),
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.setkey = mxs_dcp_aes_setkey,
				.encrypt = mxs_dcp_aes_ecb_encrypt,
				.decrypt = mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-dcp",
		.cra_priority = 400,
		.cra_alignmask = 15,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_init = mxs_dcp_aes_fallback_init,
		.cra_exit = mxs_dcp_aes_fallback_exit,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct dcp_async_ctx),
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.setkey = mxs_dcp_aes_setkey,
				.encrypt = mxs_dcp_aes_cbc_encrypt,
				.decrypt = mxs_dcp_aes_cbc_decrypt,
				.ivsize = AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init = dcp_sha_init,
	.update = dcp_sha_update,
	.final = dcp_sha_final,
	.finup = dcp_sha_finup,
	.digest = dcp_sha_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "sha1-dcp",
			.cra_priority = 400,
			.cra_alignmask = 63,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct dcp_async_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = dcp_sha_cra_init,
			.cra_exit = dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init = dcp_sha_init,
	.update = dcp_sha_update,
	.final = dcp_sha_final,
	.finup = dcp_sha_finup,
	.digest = dcp_sha_digest,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "sha256-dcp",
			.cra_priority = 400,
			.cra_alignmask = 63,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct dcp_async_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = dcp_sha_cra_init,
			.cra_exit = dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

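/*
 * Probe: map the block, hook up both IRQ lines, carve out the aligned
 * coherent scratch block, reset and configure the engine, then register
 * only the algorithms the hardware advertises in CAPABILITY1.
 */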
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		return ret;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		mutex_init(&sdcp->mutex[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");