drivers/crypto/atmel-sha.c
1/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL SHA1/SHA256 HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from the omap-sham.c driver.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
30#include <linux/irq.h>
31#include <linux/scatterlist.h>
32#include <linux/dma-mapping.h>
33#include <linux/of_device.h>
34#include <linux/delay.h>
35#include <linux/crypto.h>
36#include <linux/cryptohash.h>
37#include <crypto/scatterwalk.h>
38#include <crypto/algapi.h>
39#include <crypto/sha.h>
40#include <crypto/hash.h>
41#include <crypto/internal/hash.h>
42#include <linux/platform_data/crypto-atmel.h>
43#include "atmel-sha-regs.h"
44
45/* SHA flags */
46#define SHA_FLAGS_BUSY BIT(0)
47#define SHA_FLAGS_FINAL BIT(1)
48#define SHA_FLAGS_DMA_ACTIVE BIT(2)
49#define SHA_FLAGS_OUTPUT_READY BIT(3)
50#define SHA_FLAGS_INIT BIT(4)
51#define SHA_FLAGS_CPU BIT(5)
52#define SHA_FLAGS_DMA_READY BIT(6)
53
54#define SHA_FLAGS_FINUP BIT(16)
55#define SHA_FLAGS_SG BIT(17)
56#define SHA_FLAGS_ALGO_MASK GENMASK(22, 18)
57#define SHA_FLAGS_SHA1 BIT(18)
58#define SHA_FLAGS_SHA224 BIT(19)
59#define SHA_FLAGS_SHA256 BIT(20)
60#define SHA_FLAGS_SHA384 BIT(21)
61#define SHA_FLAGS_SHA512 BIT(22)
62#define SHA_FLAGS_ERROR BIT(23)
63#define SHA_FLAGS_PAD BIT(24)
64#define SHA_FLAGS_RESTORE BIT(25)
65
66#define SHA_OP_UPDATE 1
67#define SHA_OP_FINAL 2
68
69#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
70
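/*
 * Requests shorter than this many bytes are fed to the hardware by the
 * CPU writing the data register directly; larger ones go through DMA/PDC.
 */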
71#define ATMEL_SHA_DMA_THRESHOLD 56
72
73struct atmel_sha_caps {
74 bool has_dma;
75 bool has_dualbuff;
76 bool has_sha224;
77 bool has_sha_384_512;
78 bool has_uihv;
79};
80
81struct atmel_sha_dev;
82
83/*
84 * .statesize = sizeof(struct atmel_sha_state) must be <= PAGE_SIZE / 8 as
85 * tested by the ahash_prepare_alg() function.
86 */
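/*
 * On a typical 64-bit build with 4 KiB pages this works out to
 * 64 + 256 + 16 + 8 = 344 bytes, well under PAGE_SIZE / 8 = 512.
 */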
87struct atmel_sha_state {
88 u8 digest[SHA512_DIGEST_SIZE];
89 u8 buffer[SHA_BUFFER_LEN];
90 u64 digcnt[2];
91 size_t bufcnt;
92};
93
94struct atmel_sha_reqctx {
95 struct atmel_sha_dev *dd;
96 unsigned long flags;
97 unsigned long op;
98
99 u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
100 u64 digcnt[2];
101 size_t bufcnt;
102 size_t buflen;
103 dma_addr_t dma_addr;
104
105 /* walk state */
106 struct scatterlist *sg;
107 unsigned int offset; /* offset in current sg */
108 unsigned int total; /* total request */
109
110 size_t block_size;
111
112 u8 buffer[0] __aligned(sizeof(u32));
113};
114
115struct atmel_sha_ctx {
116 struct atmel_sha_dev *dd;
117
118 unsigned long flags;
119};
120
121#define ATMEL_SHA_QUEUE_LENGTH 50
122
123struct atmel_sha_dma {
124 struct dma_chan *chan;
125 struct dma_slave_config dma_conf;
126};
127
128struct atmel_sha_dev {
129 struct list_head list;
130 unsigned long phys_base;
131 struct device *dev;
132 struct clk *iclk;
133 int irq;
134 void __iomem *io_base;
135
136 spinlock_t lock;
137 int err;
138 struct tasklet_struct done_task;
139 struct tasklet_struct queue_task;
140
141 unsigned long flags;
142 struct crypto_queue queue;
143 struct ahash_request *req;
144
145 struct atmel_sha_dma dma_lch_in;
146
147 struct atmel_sha_caps caps;
148
149 u32 hw_version;
150};
151
152struct atmel_sha_drv {
153 struct list_head dev_list;
154 spinlock_t lock;
155};
156
157static struct atmel_sha_drv atmel_sha = {
158 .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
159 .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
160};
161
162static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
163{
164 return readl_relaxed(dd->io_base + offset);
165}
166
167static inline void atmel_sha_write(struct atmel_sha_dev *dd,
168 u32 offset, u32 value)
169{
170 writel_relaxed(value, dd->io_base + offset);
171}
172
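/*
 * Copy as much pending request data as fits from the scatterlist walk
 * state (ctx->sg/offset/total) into the internal ctx->buffer.
 */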
173static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
174{
175 size_t count;
176
177 while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
178 count = min(ctx->sg->length - ctx->offset, ctx->total);
179 count = min(count, ctx->buflen - ctx->bufcnt);
180
181 if (count <= 0) {
182 /*
183 * Check if count <= 0 because the buffer is full or
184 * because the sg length is 0. In the latter case,
185 * check if there is another sg in the list, a 0 length
186 * sg doesn't necessarily mean the end of the sg list.
187 */
188 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
189 ctx->sg = sg_next(ctx->sg);
190 continue;
191 } else {
192 break;
193 }
194 }
195
196 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
197 ctx->offset, count, 0);
198
199 ctx->bufcnt += count;
200 ctx->offset += count;
201 ctx->total -= count;
202
203 if (ctx->offset == ctx->sg->length) {
204 ctx->sg = sg_next(ctx->sg);
205 if (ctx->sg)
206 ctx->offset = 0;
207 else
208 ctx->total = 0;
209 }
210 }
211
212 return 0;
213}
214
215/*
216 * The purpose of this padding is to ensure that the padded message is a
217 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
218 * The bit "1" is appended at the end of the message, followed by
219 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
220 * 128-bit block (SHA384/SHA512) equal to the message length in bits
221 * is appended.
222 *
223 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
224 * - if message length < 56 bytes then padlen = 56 - message length
225 * - else padlen = 64 + 56 - message length
226 *
227 * For SHA384/SHA512, padlen is calculated as follows:
228 * - if message length < 112 bytes then padlen = 112 - message length
229 * - else padlen = 128 + 112 - message length
230 */
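/*
 * Illustrative example for SHA1/SHA224/SHA256: a 20-byte message gives
 * index = 20 and padlen = 56 - 20 = 36, i.e. 20 + 36 + 8 = 64 bytes in
 * total; a 60-byte message gives padlen = (64 + 56) - 60 = 60, i.e.
 * 60 + 60 + 8 = 128 bytes. Either way the padded message is a whole
 * number of 64-byte blocks.
 */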
231static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
232{
233 unsigned int index, padlen;
234 u64 bits[2];
235 u64 size[2];
236
237 size[0] = ctx->digcnt[0];
238 size[1] = ctx->digcnt[1];
239
240 size[0] += ctx->bufcnt;
241 if (size[0] < ctx->bufcnt)
242 size[1]++;
243
244 size[0] += length;
245 if (size[0] < length)
246 size[1]++;
247
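	/*
	 * Encode the total message length in bits as a big-endian value:
	 * 128 bits for SHA384/SHA512, 64 bits for the other algorithms.
	 */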
248 bits[1] = cpu_to_be64(size[0] << 3);
249 bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
250
251 if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
252 index = ctx->bufcnt & 0x7f;
253 padlen = (index < 112) ? (112 - index) : ((128+112) - index);
254 *(ctx->buffer + ctx->bufcnt) = 0x80;
255 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
256 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
257 ctx->bufcnt += padlen + 16;
258 ctx->flags |= SHA_FLAGS_PAD;
259 } else {
260 index = ctx->bufcnt & 0x3f;
261 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
262 *(ctx->buffer + ctx->bufcnt) = 0x80;
263 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
264 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
265 ctx->bufcnt += padlen + 8;
266 ctx->flags |= SHA_FLAGS_PAD;
267 }
268}
269
270static int atmel_sha_init(struct ahash_request *req)
271{
272 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
273 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
274 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
275 struct atmel_sha_dev *dd = NULL;
276 struct atmel_sha_dev *tmp;
277
278 spin_lock_bh(&atmel_sha.lock);
279 if (!tctx->dd) {
280 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
281 dd = tmp;
282 break;
283 }
284 tctx->dd = dd;
285 } else {
286 dd = tctx->dd;
287 }
288
289 spin_unlock_bh(&atmel_sha.lock);
290
291 ctx->dd = dd;
292
293 ctx->flags = 0;
294
295 dev_dbg(dd->dev, "init: digest size: %d\n",
296 crypto_ahash_digestsize(tfm));
297
298 switch (crypto_ahash_digestsize(tfm)) {
299 case SHA1_DIGEST_SIZE:
300 ctx->flags |= SHA_FLAGS_SHA1;
301 ctx->block_size = SHA1_BLOCK_SIZE;
302 break;
303 case SHA224_DIGEST_SIZE:
304 ctx->flags |= SHA_FLAGS_SHA224;
305 ctx->block_size = SHA224_BLOCK_SIZE;
306 break;
307 case SHA256_DIGEST_SIZE:
308 ctx->flags |= SHA_FLAGS_SHA256;
309 ctx->block_size = SHA256_BLOCK_SIZE;
310 break;
311 case SHA384_DIGEST_SIZE:
312 ctx->flags |= SHA_FLAGS_SHA384;
313 ctx->block_size = SHA384_BLOCK_SIZE;
314 break;
315 case SHA512_DIGEST_SIZE:
316 ctx->flags |= SHA_FLAGS_SHA512;
317 ctx->block_size = SHA512_BLOCK_SIZE;
318 break;
319 default:
320 return -EINVAL;
321 break;
322 }
323
324 ctx->bufcnt = 0;
325 ctx->digcnt[0] = 0;
326 ctx->digcnt[1] = 0;
327 ctx->buflen = SHA_BUFFER_LEN;
328
329 return 0;
330}
331
332static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
333{
334 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
335 u32 valmr = SHA_MR_MODE_AUTO;
336 unsigned int i, hashsize = 0;
337
338 if (likely(dma)) {
339 if (!dd->caps.has_dma)
340 atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
341 valmr = SHA_MR_MODE_PDC;
342 if (dd->caps.has_dualbuff)
343 valmr |= SHA_MR_DUALBUFF;
344 } else {
345 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
346 }
347
348 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
349 case SHA_FLAGS_SHA1:
350 valmr |= SHA_MR_ALGO_SHA1;
351 hashsize = SHA1_DIGEST_SIZE;
352 break;
353
354 case SHA_FLAGS_SHA224:
355 valmr |= SHA_MR_ALGO_SHA224;
356 hashsize = SHA256_DIGEST_SIZE;
357 break;
358
359 case SHA_FLAGS_SHA256:
360 valmr |= SHA_MR_ALGO_SHA256;
361 hashsize = SHA256_DIGEST_SIZE;
362 break;
363
364 case SHA_FLAGS_SHA384:
365 valmr |= SHA_MR_ALGO_SHA384;
366 hashsize = SHA512_DIGEST_SIZE;
367 break;
368
369 case SHA_FLAGS_SHA512:
370 valmr |= SHA_MR_ALGO_SHA512;
371 hashsize = SHA512_DIGEST_SIZE;
372 break;
373
374 default:
375 break;
376 }
377
378 /* Setting CR_FIRST only for the first iteration */
379 if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
380 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
381 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
382 const u32 *hash = (const u32 *)ctx->digest;
383
384 /*
385 * Restore the hardware context: update the User Initialize
386 * Hash Value (UIHV) with the value saved when the latest
387 * 'update' operation completed on this very same crypto
388 * request.
389 */
390 ctx->flags &= ~SHA_FLAGS_RESTORE;
391 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
392 for (i = 0; i < hashsize / sizeof(u32); ++i)
393 atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
394 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
395 valmr |= SHA_MR_UIHV;
396 }
397 /*
398 * WARNING: If the UIHV feature is not available, the hardware CANNOT
399 * process concurrent requests: the internal registers used to store
400 * the hash/digest are still set to the partial digest output values
401 * computed during the latest round.
402 */
403
404 atmel_sha_write(dd, SHA_MR, valmr);
405}
406
407static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
408 size_t length, int final)
409{
410 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
411 int count, len32;
412 const u32 *buffer = (const u32 *)buf;
413
414 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
415 ctx->digcnt[1], ctx->digcnt[0], length, final);
416
417 atmel_sha_write_ctrl(dd, 0);
418
419 /* should be non-zero before next lines to disable clocks later */
420 ctx->digcnt[0] += length;
421 if (ctx->digcnt[0] < length)
422 ctx->digcnt[1]++;
423
424 if (final)
425 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
426
427 len32 = DIV_ROUND_UP(length, sizeof(u32));
428
429 dd->flags |= SHA_FLAGS_CPU;
430
431 for (count = 0; count < len32; count++)
432 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
433
434 return -EINPROGRESS;
435}
436
437static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
438 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
439{
440 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
441 int len32;
442
443 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
444 ctx->digcnt[1], ctx->digcnt[0], length1, final);
445
446 len32 = DIV_ROUND_UP(length1, sizeof(u32));
447 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
448 atmel_sha_write(dd, SHA_TPR, dma_addr1);
449 atmel_sha_write(dd, SHA_TCR, len32);
450
451 len32 = DIV_ROUND_UP(length2, sizeof(u32));
452 atmel_sha_write(dd, SHA_TNPR, dma_addr2);
453 atmel_sha_write(dd, SHA_TNCR, len32);
454
455 atmel_sha_write_ctrl(dd, 1);
456
457 /* should be non-zero before next lines to disable clocks later */
458 ctx->digcnt[0] += length1;
459 if (ctx->digcnt[0] < length1)
460 ctx->digcnt[1]++;
461
462 if (final)
463 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
464
465 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
466
467 /* Start DMA transfer */
468 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
469
470 return -EINPROGRESS;
471}
472
473static void atmel_sha_dma_callback(void *data)
474{
475 struct atmel_sha_dev *dd = data;
476
477 /* dma_lch_in - completed - wait DATRDY */
478 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
479}
480
481static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
482 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
483{
484 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
485 struct dma_async_tx_descriptor *in_desc;
486 struct scatterlist sg[2];
487
488 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
489 ctx->digcnt[1], ctx->digcnt[0], length1, final);
490
491 dd->dma_lch_in.dma_conf.src_maxburst = 16;
492 dd->dma_lch_in.dma_conf.dst_maxburst = 16;
493
494 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
495
496 if (length2) {
497 sg_init_table(sg, 2);
498 sg_dma_address(&sg[0]) = dma_addr1;
499 sg_dma_len(&sg[0]) = length1;
500 sg_dma_address(&sg[1]) = dma_addr2;
501 sg_dma_len(&sg[1]) = length2;
502 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
503 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
504 } else {
505 sg_init_table(sg, 1);
506 sg_dma_address(&sg[0]) = dma_addr1;
507 sg_dma_len(&sg[0]) = length1;
508 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
509 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
510 }
511 if (!in_desc)
512 return -EINVAL;
513
514 in_desc->callback = atmel_sha_dma_callback;
515 in_desc->callback_param = dd;
516
517 atmel_sha_write_ctrl(dd, 1);
518
519 /* should be non-zero before next lines to disable clocks later */
520 ctx->digcnt[0] += length1;
521 if (ctx->digcnt[0] < length1)
522 ctx->digcnt[1]++;
523
524 if (final)
525 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
526
527 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
528
529 /* Start DMA transfer */
530 dmaengine_submit(in_desc);
531 dma_async_issue_pending(dd->dma_lch_in.chan);
532
533 return -EINPROGRESS;
534}
535
536static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
537 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
538{
539 if (dd->caps.has_dma)
540 return atmel_sha_xmit_dma(dd, dma_addr1, length1,
541 dma_addr2, length2, final);
542 else
543 return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
544 dma_addr2, length2, final);
545}
546
547static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
548{
549 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
550 int bufcnt;
551
552 atmel_sha_append_sg(ctx);
553 atmel_sha_fill_padding(ctx, 0);
554 bufcnt = ctx->bufcnt;
555 ctx->bufcnt = 0;
556
557 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
558}
559
560static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
561 struct atmel_sha_reqctx *ctx,
562 size_t length, int final)
563{
564 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
565 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
566 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
567 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
568 ctx->block_size);
569 return -EINVAL;
570 }
571
572 ctx->flags &= ~SHA_FLAGS_SG;
573
574 /* next call does not fail... so no unmap in the case of error */
575 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
576}
577
578static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
579{
580 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
581 unsigned int final;
582 size_t count;
583
584 atmel_sha_append_sg(ctx);
585
586 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
587
588 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
589 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
590
591 if (final)
592 atmel_sha_fill_padding(ctx, 0);
593
594 if (final || (ctx->bufcnt == ctx->buflen)) {
595 count = ctx->bufcnt;
596 ctx->bufcnt = 0;
597 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
598 }
599
600 return 0;
601}
602
603static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
604{
605 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
606 unsigned int length, final, tail;
607 struct scatterlist *sg;
608 unsigned int count;
609
610 if (!ctx->total)
611 return 0;
612
613 if (ctx->bufcnt || ctx->offset)
614 return atmel_sha_update_dma_slow(dd);
615
616 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
617 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
618
619 sg = ctx->sg;
620
621 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
622 return atmel_sha_update_dma_slow(dd);
623
624 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
625 /* size is not ctx->block_size aligned */
626 return atmel_sha_update_dma_slow(dd);
627
628 length = min(ctx->total, sg->length);
629
630 if (sg_is_last(sg)) {
631 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
632 /* not last sg must be ctx->block_size aligned */
633 tail = length & (ctx->block_size - 1);
634 length -= tail;
635 }
636 }
637
638 ctx->total -= length;
639 ctx->offset = length; /* offset where to start slow */
640
641 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
642
643 /* Add padding */
644 if (final) {
645 tail = length & (ctx->block_size - 1);
646 length -= tail;
647 ctx->total += tail;
648 ctx->offset = length; /* offset where to start slow */
649
650 sg = ctx->sg;
651 atmel_sha_append_sg(ctx);
652
653 atmel_sha_fill_padding(ctx, length);
654
655 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
656 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
657 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
658 dev_err(dd->dev, "dma %u bytes error\n",
659 ctx->buflen + ctx->block_size);
660 return -EINVAL;
661 }
662
663 if (length == 0) {
664 ctx->flags &= ~SHA_FLAGS_SG;
665 count = ctx->bufcnt;
666 ctx->bufcnt = 0;
667 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
668 0, final);
669 } else {
670 ctx->sg = sg;
671 if (!dma_map_sg(dd->dev, ctx->sg, 1,
672 DMA_TO_DEVICE)) {
673 dev_err(dd->dev, "dma_map_sg error\n");
674 return -EINVAL;
675 }
676
677 ctx->flags |= SHA_FLAGS_SG;
678
679 count = ctx->bufcnt;
680 ctx->bufcnt = 0;
681 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
682 length, ctx->dma_addr, count, final);
683 }
684 }
685
686 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
687 dev_err(dd->dev, "dma_map_sg error\n");
688 return -EINVAL;
689 }
690
691 ctx->flags |= SHA_FLAGS_SG;
692
693 /* next call does not fail... so no unmap in the case of error */
694 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
695 0, final);
696}
697
698static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
699{
700 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
701
702 if (ctx->flags & SHA_FLAGS_SG) {
703 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
704 if (ctx->sg->length == ctx->offset) {
705 ctx->sg = sg_next(ctx->sg);
706 if (ctx->sg)
707 ctx->offset = 0;
708 }
709 if (ctx->flags & SHA_FLAGS_PAD) {
710 dma_unmap_single(dd->dev, ctx->dma_addr,
711 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
712 }
713 } else {
714 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
715 ctx->block_size, DMA_TO_DEVICE);
716 }
717
718 return 0;
719}
720
721static int atmel_sha_update_req(struct atmel_sha_dev *dd)
722{
723 struct ahash_request *req = dd->req;
724 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
725 int err;
726
727 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
728 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
729
730 if (ctx->flags & SHA_FLAGS_CPU)
731 err = atmel_sha_update_cpu(dd);
732 else
733 err = atmel_sha_update_dma_start(dd);
734
735 /* wait for DMA completion before we can take more data */
736 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
737 err, ctx->digcnt[1], ctx->digcnt[0]);
738
739 return err;
740}
741
742static int atmel_sha_final_req(struct atmel_sha_dev *dd)
743{
744 struct ahash_request *req = dd->req;
745 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
746 int err = 0;
747 int count;
748
749 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
750 atmel_sha_fill_padding(ctx, 0);
751 count = ctx->bufcnt;
752 ctx->bufcnt = 0;
753 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
754 }
755 /* faster to handle last block with cpu */
756 else {
757 atmel_sha_fill_padding(ctx, 0);
758 count = ctx->bufcnt;
759 ctx->bufcnt = 0;
760 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
761 }
762
763 dev_dbg(dd->dev, "final_req: err: %d\n", err);
764
765 return err;
766}
767
768static void atmel_sha_copy_hash(struct ahash_request *req)
769{
770 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
771 u32 *hash = (u32 *)ctx->digest;
772 unsigned int i, hashsize;
773
774 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
775 case SHA_FLAGS_SHA1:
776 hashsize = SHA1_DIGEST_SIZE;
777 break;
778
779 case SHA_FLAGS_SHA224:
780 case SHA_FLAGS_SHA256:
781 hashsize = SHA256_DIGEST_SIZE;
782 break;
783
784 case SHA_FLAGS_SHA384:
785 case SHA_FLAGS_SHA512:
786 hashsize = SHA512_DIGEST_SIZE;
787 break;
788
789 default:
790 /* Should not happen... */
791 return;
792 }
793
794 for (i = 0; i < hashsize / sizeof(u32); ++i)
795 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
796 ctx->flags |= SHA_FLAGS_RESTORE;
797}
798
799static void atmel_sha_copy_ready_hash(struct ahash_request *req)
800{
801 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
802
803 if (!req->result)
804 return;
805
806 if (ctx->flags & SHA_FLAGS_SHA1)
807 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
808 else if (ctx->flags & SHA_FLAGS_SHA224)
809 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
810 else if (ctx->flags & SHA_FLAGS_SHA256)
811 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
812 else if (ctx->flags & SHA_FLAGS_SHA384)
813 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
814 else
815 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
816}
817
818static int atmel_sha_finish(struct ahash_request *req)
819{
820 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
821 struct atmel_sha_dev *dd = ctx->dd;
822
823 if (ctx->digcnt[0] || ctx->digcnt[1])
824 atmel_sha_copy_ready_hash(req);
825
826 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
827 ctx->digcnt[0], ctx->bufcnt);
828
829 return 0;
830}
831
832static void atmel_sha_finish_req(struct ahash_request *req, int err)
833{
834 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
835 struct atmel_sha_dev *dd = ctx->dd;
836
837 if (!err) {
838 atmel_sha_copy_hash(req);
839 if (SHA_FLAGS_FINAL & dd->flags)
840 err = atmel_sha_finish(req);
841 } else {
842 ctx->flags |= SHA_FLAGS_ERROR;
843 }
844
845 /* atomic operation is not needed here */
846 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
847 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
848
849 clk_disable_unprepare(dd->iclk);
850
851 if (req->base.complete)
852 req->base.complete(&req->base, err);
853
854 /* handle new request */
855 tasklet_schedule(&dd->queue_task);
856}
857
858static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
859{
860 int err;
861
862 err = clk_prepare_enable(dd->iclk);
863 if (err)
864 return err;
865
866 if (!(SHA_FLAGS_INIT & dd->flags)) {
867 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
868 dd->flags |= SHA_FLAGS_INIT;
869 dd->err = 0;
870 }
871
872 return 0;
873}
874
875static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
876{
877 return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
878}
879
880static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
881{
882 atmel_sha_hw_init(dd);
883
884 dd->hw_version = atmel_sha_get_version(dd);
885
886 dev_info(dd->dev,
887 "version: 0x%x\n", dd->hw_version);
888
889 clk_disable_unprepare(dd->iclk);
890}
891
892static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
893 struct ahash_request *req)
894{
895 struct crypto_async_request *async_req, *backlog;
896 struct atmel_sha_reqctx *ctx;
897 unsigned long flags;
898 int err = 0, ret = 0;
899
900 spin_lock_irqsave(&dd->lock, flags);
901 if (req)
902 ret = ahash_enqueue_request(&dd->queue, req);
903
904 if (SHA_FLAGS_BUSY & dd->flags) {
905 spin_unlock_irqrestore(&dd->lock, flags);
906 return ret;
907 }
908
909 backlog = crypto_get_backlog(&dd->queue);
910 async_req = crypto_dequeue_request(&dd->queue);
911 if (async_req)
912 dd->flags |= SHA_FLAGS_BUSY;
913
914 spin_unlock_irqrestore(&dd->lock, flags);
915
916 if (!async_req)
917 return ret;
918
919 if (backlog)
920 backlog->complete(backlog, -EINPROGRESS);
921
922 req = ahash_request_cast(async_req);
923 dd->req = req;
924 ctx = ahash_request_ctx(req);
925
926 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
927 ctx->op, req->nbytes);
928
929 err = atmel_sha_hw_init(dd);
930
931 if (err)
932 goto err1;
933
934 if (ctx->op == SHA_OP_UPDATE) {
935 err = atmel_sha_update_req(dd);
936 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
937 /* no final() after finup() */
938 err = atmel_sha_final_req(dd);
939 } else if (ctx->op == SHA_OP_FINAL) {
940 err = atmel_sha_final_req(dd);
941 }
942
943err1:
944 if (err != -EINPROGRESS)
945 /* done_task will not finish it, so do it here */
946 atmel_sha_finish_req(req, err);
947
948 dev_dbg(dd->dev, "exit, err: %d\n", err);
949
950 return ret;
951}
952
953static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
954{
955 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
956 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
957 struct atmel_sha_dev *dd = tctx->dd;
958
959 ctx->op = op;
960
961 return atmel_sha_handle_queue(dd, req);
962}
963
964static int atmel_sha_update(struct ahash_request *req)
965{
966 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
967
968 if (!req->nbytes)
969 return 0;
970
971 ctx->total = req->nbytes;
972 ctx->sg = req->src;
973 ctx->offset = 0;
974
975 if (ctx->flags & SHA_FLAGS_FINUP) {
976 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
977 /* faster to use CPU for short transfers */
978 ctx->flags |= SHA_FLAGS_CPU;
979 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
980 atmel_sha_append_sg(ctx);
981 return 0;
982 }
983 return atmel_sha_enqueue(req, SHA_OP_UPDATE);
984}
985
986static int atmel_sha_final(struct ahash_request *req)
987{
988 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
989 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
990 struct atmel_sha_dev *dd = tctx->dd;
991
992 int err = 0;
993
994 ctx->flags |= SHA_FLAGS_FINUP;
995
996 if (ctx->flags & SHA_FLAGS_ERROR)
997 return 0; /* uncompleted hash is not needed */
998
999 if (ctx->bufcnt) {
1000 return atmel_sha_enqueue(req, SHA_OP_FINAL);
1001 } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
1002 err = atmel_sha_hw_init(dd);
1003 if (err)
1004 goto err1;
1005
1006 dd->req = req;
1007 dd->flags |= SHA_FLAGS_BUSY;
1008 err = atmel_sha_final_req(dd);
1009 } else {
1010 /* copy ready hash (+ finalize hmac) */
1011 return atmel_sha_finish(req);
1012 }
1013
1014err1:
1015 if (err != -EINPROGRESS)
1016 /* done_task will not finish it, so do it here */
1017 atmel_sha_finish_req(req, err);
1018
1019 return err;
1020}
1021
1022static int atmel_sha_finup(struct ahash_request *req)
1023{
1024 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1025 int err1, err2;
1026
1027 ctx->flags |= SHA_FLAGS_FINUP;
1028
1029 err1 = atmel_sha_update(req);
1030 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1031 return err1;
1032
1033 /*
1034 * final() has to be always called to cleanup resources
1035 * even if update() failed, except EINPROGRESS
1036 */
1037 err2 = atmel_sha_final(req);
1038
1039 return err1 ?: err2;
1040}
1041
1042static int atmel_sha_digest(struct ahash_request *req)
1043{
1044 return atmel_sha_init(req) ?: atmel_sha_finup(req);
1045}
1046
1047
1048static int atmel_sha_export(struct ahash_request *req, void *out)
1049{
1050 const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1051 struct atmel_sha_state state;
1052
1053 memcpy(state.digest, ctx->digest, SHA512_DIGEST_SIZE);
1054 memcpy(state.buffer, ctx->buffer, ctx->bufcnt);
1055 state.bufcnt = ctx->bufcnt;
1056 state.digcnt[0] = ctx->digcnt[0];
1057 state.digcnt[1] = ctx->digcnt[1];
1058
1059 /* out might be unaligned. */
1060 memcpy(out, &state, sizeof(state));
1061 return 0;
1062}
1063
1064static int atmel_sha_import(struct ahash_request *req, const void *in)
1065{
1066 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1067 struct atmel_sha_state state;
1068
1069 /* in might be unaligned. */
1070 memcpy(&state, in, sizeof(state));
1071
1072 memcpy(ctx->digest, state.digest, SHA512_DIGEST_SIZE);
1073 memcpy(ctx->buffer, state.buffer, state.bufcnt);
1074 ctx->bufcnt = state.bufcnt;
1075 ctx->digcnt[0] = state.digcnt[0];
1076 ctx->digcnt[1] = state.digcnt[1];
1077 return 0;
1078}
1079
1080static int atmel_sha_cra_init(struct crypto_tfm *tfm)
1081{
1082 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1083 sizeof(struct atmel_sha_reqctx) +
1084 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
1085
1086 return 0;
1087}
1088
1089static struct ahash_alg sha_1_256_algs[] = {
1090{
1091 .init = atmel_sha_init,
1092 .update = atmel_sha_update,
1093 .final = atmel_sha_final,
1094 .finup = atmel_sha_finup,
1095 .digest = atmel_sha_digest,
1096 .export = atmel_sha_export,
1097 .import = atmel_sha_import,
1098 .halg = {
1099 .digestsize = SHA1_DIGEST_SIZE,
1100 .statesize = sizeof(struct atmel_sha_state),
1101 .base = {
1102 .cra_name = "sha1",
1103 .cra_driver_name = "atmel-sha1",
1104 .cra_priority = 100,
1105 .cra_flags = CRYPTO_ALG_ASYNC,
1106 .cra_blocksize = SHA1_BLOCK_SIZE,
1107 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1108 .cra_alignmask = 0,
1109 .cra_module = THIS_MODULE,
1110 .cra_init = atmel_sha_cra_init,
1111 }
1112 }
1113},
1114{
1115 .init = atmel_sha_init,
1116 .update = atmel_sha_update,
1117 .final = atmel_sha_final,
1118 .finup = atmel_sha_finup,
1119 .digest = atmel_sha_digest,
1120 .export = atmel_sha_export,
1121 .import = atmel_sha_import,
1122 .halg = {
1123 .digestsize = SHA256_DIGEST_SIZE,
1124 .statesize = sizeof(struct atmel_sha_state),
1125 .base = {
1126 .cra_name = "sha256",
1127 .cra_driver_name = "atmel-sha256",
1128 .cra_priority = 100,
1129 .cra_flags = CRYPTO_ALG_ASYNC,
1130 .cra_blocksize = SHA256_BLOCK_SIZE,
1131 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1132 .cra_alignmask = 0,
1133 .cra_module = THIS_MODULE,
1134 .cra_init = atmel_sha_cra_init,
1135 }
1136 }
1137},
1138};
1139
1140static struct ahash_alg sha_224_alg = {
1141 .init = atmel_sha_init,
1142 .update = atmel_sha_update,
1143 .final = atmel_sha_final,
1144 .finup = atmel_sha_finup,
1145 .digest = atmel_sha_digest,
1146 .export = atmel_sha_export,
1147 .import = atmel_sha_import,
1148 .halg = {
1149 .digestsize = SHA224_DIGEST_SIZE,
1150 .statesize = sizeof(struct atmel_sha_state),
1151 .base = {
1152 .cra_name = "sha224",
1153 .cra_driver_name = "atmel-sha224",
1154 .cra_priority = 100,
1155 .cra_flags = CRYPTO_ALG_ASYNC,
1156 .cra_blocksize = SHA224_BLOCK_SIZE,
1157 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1158 .cra_alignmask = 0,
1159 .cra_module = THIS_MODULE,
1160 .cra_init = atmel_sha_cra_init,
1161 }
1162 }
1163};
1164
1165static struct ahash_alg sha_384_512_algs[] = {
1166{
1167 .init = atmel_sha_init,
1168 .update = atmel_sha_update,
1169 .final = atmel_sha_final,
1170 .finup = atmel_sha_finup,
1171 .digest = atmel_sha_digest,
1172 .export = atmel_sha_export,
1173 .import = atmel_sha_import,
1174 .halg = {
1175 .digestsize = SHA384_DIGEST_SIZE,
1176 .statesize = sizeof(struct atmel_sha_state),
1177 .base = {
1178 .cra_name = "sha384",
1179 .cra_driver_name = "atmel-sha384",
1180 .cra_priority = 100,
1181 .cra_flags = CRYPTO_ALG_ASYNC,
1182 .cra_blocksize = SHA384_BLOCK_SIZE,
1183 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1184 .cra_alignmask = 0x3,
1185 .cra_module = THIS_MODULE,
1186 .cra_init = atmel_sha_cra_init,
1187 }
1188 }
1189},
1190{
1191 .init = atmel_sha_init,
1192 .update = atmel_sha_update,
1193 .final = atmel_sha_final,
1194 .finup = atmel_sha_finup,
1195 .digest = atmel_sha_digest,
1196 .export = atmel_sha_export,
1197 .import = atmel_sha_import,
1198 .halg = {
1199 .digestsize = SHA512_DIGEST_SIZE,
1200 .statesize = sizeof(struct atmel_sha_state),
1201 .base = {
1202 .cra_name = "sha512",
1203 .cra_driver_name = "atmel-sha512",
1204 .cra_priority = 100,
1205 .cra_flags = CRYPTO_ALG_ASYNC,
1206 .cra_blocksize = SHA512_BLOCK_SIZE,
1207 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1208 .cra_alignmask = 0x3,
1209 .cra_module = THIS_MODULE,
1210 .cra_init = atmel_sha_cra_init,
1211 }
1212 }
1213},
1214};
1215
1216static void atmel_sha_queue_task(unsigned long data)
1217{
1218 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1219
1220 atmel_sha_handle_queue(dd, NULL);
1221}
1222
1223static void atmel_sha_done_task(unsigned long data)
1224{
1225 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1226 int err = 0;
1227
1228 if (SHA_FLAGS_CPU & dd->flags) {
1229 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1230 dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1231 goto finish;
1232 }
1233 } else if (SHA_FLAGS_DMA_READY & dd->flags) {
1234 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1235 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1236 atmel_sha_update_dma_stop(dd);
1237 if (dd->err) {
1238 err = dd->err;
1239 goto finish;
1240 }
1241 }
1242 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1243 /* hash or semi-hash ready */
1244 dd->flags &= ~(SHA_FLAGS_DMA_READY |
1245 SHA_FLAGS_OUTPUT_READY);
1246 err = atmel_sha_update_dma_start(dd);
1247 if (err != -EINPROGRESS)
1248 goto finish;
1249 }
1250 }
1251 return;
1252
1253finish:
1254 /* finish current request */
1255 atmel_sha_finish_req(dd->req, err);
1256}
1257
1258static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
1259{
1260 struct atmel_sha_dev *sha_dd = dev_id;
1261 u32 reg;
1262
1263 reg = atmel_sha_read(sha_dd, SHA_ISR);
1264 if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
1265 atmel_sha_write(sha_dd, SHA_IDR, reg);
1266 if (SHA_FLAGS_BUSY & sha_dd->flags) {
1267 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
1268 if (!(SHA_FLAGS_CPU & sha_dd->flags))
1269 sha_dd->flags |= SHA_FLAGS_DMA_READY;
1270 tasklet_schedule(&sha_dd->done_task);
1271 } else {
1272 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
1273 }
1274 return IRQ_HANDLED;
1275 }
1276
1277 return IRQ_NONE;
1278}
1279
1280static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
1281{
1282 int i;
1283
1284 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
1285 crypto_unregister_ahash(&sha_1_256_algs[i]);
1286
1287 if (dd->caps.has_sha224)
1288 crypto_unregister_ahash(&sha_224_alg);
1289
1290 if (dd->caps.has_sha_384_512) {
1291 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
1292 crypto_unregister_ahash(&sha_384_512_algs[i]);
1293 }
1294}
1295
1296static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
1297{
1298 int err, i, j;
1299
1300 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
1301 err = crypto_register_ahash(&sha_1_256_algs[i]);
ebc82efa 1302 if (err)
1303 goto err_sha_1_256_algs;
1304 }
1305
1306 if (dd->caps.has_sha224) {
1307 err = crypto_register_ahash(&sha_224_alg);
1308 if (err)
1309 goto err_sha_224_algs;
1310 }
1311
1312 if (dd->caps.has_sha_384_512) {
1313 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
1314 err = crypto_register_ahash(&sha_384_512_algs[i]);
1315 if (err)
1316 goto err_sha_384_512_algs;
1317 }
1318 }
1319
1320 return 0;
1321
1322err_sha_384_512_algs:
1323 for (j = 0; j < i; j++)
1324 crypto_unregister_ahash(&sha_384_512_algs[j]);
1325 crypto_unregister_ahash(&sha_224_alg);
1326err_sha_224_algs:
1327 i = ARRAY_SIZE(sha_1_256_algs);
1328err_sha_1_256_algs:
1329 for (j = 0; j < i; j++)
1330 crypto_unregister_ahash(&sha_1_256_algs[j]);
1331
1332 return err;
1333}
1334
1335static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
1336{
1337 struct at_dma_slave *sl = slave;
1338
1339 if (sl && sl->dma_dev == chan->device->dev) {
1340 chan->private = sl;
1341 return true;
1342 } else {
1343 return false;
1344 }
1345}
1346
1347static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
1348 struct crypto_platform_data *pdata)
1349{
1350 int err = -ENOMEM;
1351 dma_cap_mask_t mask_in;
1352
1353 /* Try to grab DMA channel */
1354 dma_cap_zero(mask_in);
1355 dma_cap_set(DMA_SLAVE, mask_in);
1356
1357 dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
1358 atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
1359 if (!dd->dma_lch_in.chan) {
1360 dev_warn(dd->dev, "no DMA channel available\n");
1361 return err;
1362 }
1363
1364 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
1365 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
1366 SHA_REG_DIN(0);
1367 dd->dma_lch_in.dma_conf.src_maxburst = 1;
1368 dd->dma_lch_in.dma_conf.src_addr_width =
1369 DMA_SLAVE_BUSWIDTH_4_BYTES;
1370 dd->dma_lch_in.dma_conf.dst_maxburst = 1;
1371 dd->dma_lch_in.dma_conf.dst_addr_width =
1372 DMA_SLAVE_BUSWIDTH_4_BYTES;
1373 dd->dma_lch_in.dma_conf.device_fc = false;
1374
1375 return 0;
1376}
1377
1378static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
1379{
1380 dma_release_channel(dd->dma_lch_in.chan);
1381}
1382
1383static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
1384{
1385
1386 dd->caps.has_dma = 0;
1387 dd->caps.has_dualbuff = 0;
1388 dd->caps.has_sha224 = 0;
1389 dd->caps.has_sha_384_512 = 0;
1390 dd->caps.has_uihv = 0;
1391
1392 /* keep only major version number */
1393 switch (dd->hw_version & 0xff0) {
1394 case 0x510:
1395 dd->caps.has_dma = 1;
1396 dd->caps.has_dualbuff = 1;
1397 dd->caps.has_sha224 = 1;
1398 dd->caps.has_sha_384_512 = 1;
1399 dd->caps.has_uihv = 1;
1400 break;
1401 case 0x420:
1402 dd->caps.has_dma = 1;
1403 dd->caps.has_dualbuff = 1;
1404 dd->caps.has_sha224 = 1;
1405 dd->caps.has_sha_384_512 = 1;
1406 dd->caps.has_uihv = 1;
1407 break;
1408 case 0x410:
1409 dd->caps.has_dma = 1;
1410 dd->caps.has_dualbuff = 1;
1411 dd->caps.has_sha224 = 1;
1412 dd->caps.has_sha_384_512 = 1;
1413 break;
1414 case 0x400:
1415 dd->caps.has_dma = 1;
1416 dd->caps.has_dualbuff = 1;
1417 dd->caps.has_sha224 = 1;
1418 break;
1419 case 0x320:
1420 break;
1421 default:
1422 dev_warn(dd->dev,
1423 "Unmanaged sha version, set minimum capabilities\n");
1424 break;
1425 }
1426}
1427
1428#if defined(CONFIG_OF)
1429static const struct of_device_id atmel_sha_dt_ids[] = {
1430 { .compatible = "atmel,at91sam9g46-sha" },
1431 { /* sentinel */ }
1432};
1433
1434MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
1435
1436static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
1437{
1438 struct device_node *np = pdev->dev.of_node;
1439 struct crypto_platform_data *pdata;
1440
1441 if (!np) {
1442 dev_err(&pdev->dev, "device node not found\n");
1443 return ERR_PTR(-EINVAL);
1444 }
1445
1446 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1447 if (!pdata) {
1448 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1449 return ERR_PTR(-ENOMEM);
1450 }
1451
1452 pdata->dma_slave = devm_kzalloc(&pdev->dev,
1453 sizeof(*(pdata->dma_slave)),
1454 GFP_KERNEL);
1455 if (!pdata->dma_slave) {
1456 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
1457 return ERR_PTR(-ENOMEM);
1458 }
1459
1460 return pdata;
1461}
1462#else /* CONFIG_OF */
1463static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
1464{
1465 return ERR_PTR(-EINVAL);
1466}
1467#endif
1468
1469static int atmel_sha_probe(struct platform_device *pdev)
1470{
1471 struct atmel_sha_dev *sha_dd;
1472 struct crypto_platform_data *pdata;
1473 struct device *dev = &pdev->dev;
1474 struct resource *sha_res;
1475 int err;
1476
1477 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
1478 if (sha_dd == NULL) {
1479 dev_err(dev, "unable to alloc data struct.\n");
1480 err = -ENOMEM;
1481 goto sha_dd_err;
1482 }
1483
1484 sha_dd->dev = dev;
1485
1486 platform_set_drvdata(pdev, sha_dd);
1487
1488 INIT_LIST_HEAD(&sha_dd->list);
1489 spin_lock_init(&sha_dd->lock);
1490
1491 tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
1492 (unsigned long)sha_dd);
1493 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
1494 (unsigned long)sha_dd);
1495
1496 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
1497
1498 sha_dd->irq = -1;
1499
1500 /* Get the base address */
1501 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 if (!sha_res) {
1503 dev_err(dev, "no MEM resource info\n");
1504 err = -ENODEV;
1505 goto res_err;
1506 }
1507 sha_dd->phys_base = sha_res->start;
1508
1509 /* Get the IRQ */
1510 sha_dd->irq = platform_get_irq(pdev, 0);
1511 if (sha_dd->irq < 0) {
1512 dev_err(dev, "no IRQ resource info\n");
1513 err = sha_dd->irq;
1514 goto res_err;
1515 }
1516
1517 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
1518 IRQF_SHARED, "atmel-sha", sha_dd);
1519 if (err) {
1520 dev_err(dev, "unable to request sha irq.\n");
1521 goto res_err;
1522 }
1523
1524 /* Initializing the clock */
1525 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
1526 if (IS_ERR(sha_dd->iclk)) {
1527 dev_err(dev, "clock initialization failed.\n");
1528 err = PTR_ERR(sha_dd->iclk);
1529 goto res_err;
1530 }
1531
1532 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
1533 if (IS_ERR(sha_dd->io_base)) {
1534 dev_err(dev, "can't ioremap\n");
1535 err = PTR_ERR(sha_dd->io_base);
1536 goto res_err;
1537 }
1538
1539 atmel_sha_hw_version_init(sha_dd);
1540
1541 atmel_sha_get_cap(sha_dd);
1542
1543 if (sha_dd->caps.has_dma) {
1544 pdata = pdev->dev.platform_data;
1545 if (!pdata) {
1546 pdata = atmel_sha_of_init(pdev);
1547 if (IS_ERR(pdata)) {
1548 dev_err(&pdev->dev, "platform data not available\n");
1549 err = PTR_ERR(pdata);
1550 goto res_err;
1551 }
1552 }
1553 if (!pdata->dma_slave) {
1554 err = -ENXIO;
1555 goto res_err;
1556 }
1557 err = atmel_sha_dma_init(sha_dd, pdata);
1558 if (err)
1559 goto err_sha_dma;
1560
1561 dev_info(dev, "using %s for DMA transfers\n",
1562 dma_chan_name(sha_dd->dma_lch_in.chan));
1563 }
1564
1565 spin_lock(&atmel_sha.lock);
1566 list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
1567 spin_unlock(&atmel_sha.lock);
1568
1569 err = atmel_sha_register_algs(sha_dd);
1570 if (err)
1571 goto err_algs;
1572
1573 dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
1574 sha_dd->caps.has_sha224 ? "/SHA224" : "",
1575 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
1576
1577 return 0;
1578
1579err_algs:
1580 spin_lock(&atmel_sha.lock);
1581 list_del(&sha_dd->list);
1582 spin_unlock(&atmel_sha.lock);
1583 if (sha_dd->caps.has_dma)
1584 atmel_sha_dma_cleanup(sha_dd);
1585err_sha_dma:
1586res_err:
1587 tasklet_kill(&sha_dd->queue_task);
1588 tasklet_kill(&sha_dd->done_task);
1589sha_dd_err:
1590 dev_err(dev, "initialization failed.\n");
1591
1592 return err;
1593}
1594
1595static int atmel_sha_remove(struct platform_device *pdev)
1596{
1597 static struct atmel_sha_dev *sha_dd;
1598
1599 sha_dd = platform_get_drvdata(pdev);
1600 if (!sha_dd)
1601 return -ENODEV;
1602 spin_lock(&atmel_sha.lock);
1603 list_del(&sha_dd->list);
1604 spin_unlock(&atmel_sha.lock);
1605
1606 atmel_sha_unregister_algs(sha_dd);
1607
1608 tasklet_kill(&sha_dd->queue_task);
1609 tasklet_kill(&sha_dd->done_task);
1610
1611 if (sha_dd->caps.has_dma)
1612 atmel_sha_dma_cleanup(sha_dd);
1613
1621 return 0;
1622}
1623
1624static struct platform_driver atmel_sha_driver = {
1625 .probe = atmel_sha_probe,
1626 .remove = atmel_sha_remove,
1627 .driver = {
1628 .name = "atmel_sha",
1629 .of_match_table = of_match_ptr(atmel_sha_dt_ids),
1630 },
1631};
1632
1633module_platform_driver(atmel_sha_driver);
1634
1635MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
1636MODULE_LICENSE("GPL v2");
1637MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");