crypto: atmel-{sha,tdes} - Propagate error from _hw_version_init()
drivers/crypto/atmel-sha.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from the omap-sham.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"

#define ATMEL_SHA_PRIORITY	300

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)
#define SHA_FLAGS_DUMP_REG		BIT(7)

/* bits[11:8] are reserved. */

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)
#define SHA_FLAGS_IDATAR0		BIT(26)
#define SHA_FLAGS_WAIT_DATARDY		BIT(27)

#define SHA_OP_INIT	0
#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2
#define SHA_OP_DIGEST	3

#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)

#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
	bool	has_hmac;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	size_t	block_size;
	size_t	hash_size;

	u8	buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
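
/*
 * Editor's note (illustrative, not in the original source): the constraint
 * above could also be checked at build time from a function body, assuming
 * the usual <linux/build_bug.h> helper:
 *
 *	BUILD_BUG_ON(sizeof(struct atmel_sha_reqctx) > PAGE_SIZE / 8);
 *
 * ahash_prepare_alg() already rejects an oversized .statesize when the
 * algorithm is registered, so this is merely a compile-time alternative.
 */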

typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;
	atmel_sha_fn_t		start;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
	struct scatterlist		*sg;
	int				nents;
	unsigned int			last_sg_length;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
	bool			is_async;
	bool			force_complete;
	atmel_sha_fn_t		resume;
	atmel_sha_fn_t		cpu_transfer_complete;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	struct scatterlist	tmp;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
{
	switch (offset) {
	case SHA_CR:
		return "CR";

	case SHA_MR:
		return "MR";

	case SHA_IER:
		return "IER";

	case SHA_IDR:
		return "IDR";

	case SHA_IMR:
		return "IMR";

	case SHA_ISR:
		return "ISR";

	case SHA_MSR:
		return "MSR";

	case SHA_BCR:
		return "BCR";

	case SHA_REG_DIN(0):
	case SHA_REG_DIN(1):
	case SHA_REG_DIN(2):
	case SHA_REG_DIN(3):
	case SHA_REG_DIN(4):
	case SHA_REG_DIN(5):
	case SHA_REG_DIN(6):
	case SHA_REG_DIN(7):
	case SHA_REG_DIN(8):
	case SHA_REG_DIN(9):
	case SHA_REG_DIN(10):
	case SHA_REG_DIN(11):
	case SHA_REG_DIN(12):
	case SHA_REG_DIN(13):
	case SHA_REG_DIN(14):
	case SHA_REG_DIN(15):
		snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
		break;

	case SHA_REG_DIGEST(0):
	case SHA_REG_DIGEST(1):
	case SHA_REG_DIGEST(2):
	case SHA_REG_DIGEST(3):
	case SHA_REG_DIGEST(4):
	case SHA_REG_DIGEST(5):
	case SHA_REG_DIGEST(6):
	case SHA_REG_DIGEST(7):
	case SHA_REG_DIGEST(8):
	case SHA_REG_DIGEST(9):
	case SHA_REG_DIGEST(10):
	case SHA_REG_DIGEST(11):
	case SHA_REG_DIGEST(12):
	case SHA_REG_DIGEST(13):
	case SHA_REG_DIGEST(14):
	case SHA_REG_DIGEST(15):
		if (wr)
			snprintf(tmp, sz, "IDATAR[%u]",
				 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
		else
			snprintf(tmp, sz, "ODATAR[%u]",
				 (offset - SHA_REG_DIGEST(0)) >> 2);
		break;

	case SHA_HW_VERSION:
		return "HWVER";

	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}

#endif /* VERBOSE_DEBUG */

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
	if (dd->flags & SHA_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
	}
#endif /* VERBOSE_DEBUG */

	return value;
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
				   u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
	if (dd->flags & SHA_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
	}
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, dd->io_base + offset);
}

static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
{
	struct ahash_request *req = dd->req;

	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
		       SHA_FLAGS_DUMP_REG);

	clk_disable(dd->iclk);

	if ((dd->is_async || dd->force_complete) && req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);

	return err;
}

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list: a
			 * zero-length sg doesn't necessarily mean the end of
			 * the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	__be64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
		break;

	default:
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
		break;
	}
}
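
/*
 * Worked example (editor's addition, derived from the formulas above):
 * for SHA-256 with 20 buffered bytes, index = 20 < 56, so
 * padlen = 56 - 20 = 36 and the padded block is 20 + 36 + 8 = 64 bytes,
 * exactly one block. For 60 buffered bytes, index = 60 >= 56, so
 * padlen = (64 + 56) - 60 = 60 and the result is 60 + 60 + 8 = 128 bytes,
 * i.e. two blocks.
 */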

static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	return dd;
}

static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}
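
/*
 * Editor's note on the save/restore flow implemented above: the first
 * round of a request starts from the standard initial vector via
 * SHA_CR_FIRST; when it completes, atmel_sha_copy_hash() saves the
 * partial digest into ctx->digest and sets SHA_FLAGS_RESTORE. On IPs
 * with the UIHV feature, the next round reloads that digest through
 * SHA_CR_WUIHV + SHA_REG_DIN() and resumes from it with SHA_MR_UIHV,
 * which is what lets several requests share the hardware in between.
 */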

static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
						atmel_sha_fn_t resume)
{
	u32 isr = atmel_sha_read(dd, SHA_ISR);

	if (unlikely(isr & SHA_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	return -EINPROGRESS;
}
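
/*
 * Editor's note: the helper above is the driver's generic "poll once,
 * then arm the DATARDY interrupt" idiom; when the interrupt eventually
 * fires, atmel_sha_done_task() calls the dd->resume handler registered
 * here.
 */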

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	dd->is_async = true;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return atmel_sha_complete(dd, -EINVAL);

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}

static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
				  struct atmel_sha_reqctx *ctx,
				  size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
				ctx->block_size);
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
		ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %zu bytes error\n",
				ctx->buflen + ctx->block_size);
			return atmel_sha_complete(dd, -EINVAL);
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
					DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return atmel_sha_complete(dd, -EINVAL);
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
				0, final);
}

static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
			ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
		err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	default:
	case SHA_FLAGS_SHA1:
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA224:
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA256:
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA384:
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA512:
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
		break;
	}
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	(void)atmel_sha_complete(dd, err);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	int err;

	err = atmel_sha_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
		"version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);

	return 0;
}
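
/*
 * Editor's note: the function above is what the commit in the page title
 * changes. As the subject line indicates, atmel_sha_hw_version_init() now
 * propagates the error returned by atmel_sha_hw_init() (e.g. a
 * clk_enable() failure) instead of ignoring it, so the caller can bail
 * out before reading SHA_HW_VERSION from an unclocked device.
 */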

static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(async_req->tfm);

	dd->req = ahash_request_cast(async_req);
	start_async = (dd->req != req);
	dd->is_async = start_async;
	dd->force_complete = false;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

static int atmel_sha_done(struct atmel_sha_dev *dd);

static int atmel_sha_start(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
		ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		return atmel_sha_complete(dd, err);

	/*
	 * atmel_sha_update_req() and atmel_sha_final_req() can return either:
	 * -EINPROGRESS: the hardware is busy and the SHA driver will resume
	 *               its job later in the done_task.
	 *               This is the main path.
	 *
	 * 0: the SHA driver can continue its job then release the hardware
	 *    later, if needed, with atmel_sha_finish_req().
	 *    This is the alternate path.
	 *
	 * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
	 *      been called, hence the hardware has been released.
	 *      The SHA driver must stop its job without calling
	 *      atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
	 *      called a second time.
	 *
	 * Please note that currently, atmel_sha_final_req() never returns 0.
	 */

	dd->resume = atmel_sha_done;
	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (!err && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

	if (!err)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}
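
/*
 * Editor's sketch of the main (-EINPROGRESS) path described above:
 * the DATARDY/DMA-complete interrupt fires, atmel_sha_irq() sets
 * SHA_FLAGS_OUTPUT_READY and schedules done_task, and
 * atmel_sha_done_task() then invokes dd->resume (atmel_sha_done here),
 * which either restarts the DMA for the remaining data or falls through
 * to atmel_sha_finish_req().
 */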

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
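
/*
 * Example (editor's note): a 40-byte finup()/digest() request reaches
 * this point with SHA_FLAGS_FINUP set and bufcnt + total = 40, which is
 * below ATMEL_SHA_DMA_THRESHOLD (56), so SHA_FLAGS_CPU is set and the
 * data is later fed to the IDATARx registers by atmel_sha_update_cpu()
 * instead of setting up a DMA transfer.
 */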

static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->flags & SHA_FLAGS_PAD)
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);

	return atmel_sha_enqueue(req, SHA_OP_FINAL);
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS ||
	    (err1 == -EBUSY && (ahash_request_flags(req) &
				CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except when it returned -EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
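
/*
 * Consumer-side usage sketch (editor's illustration, not part of this
 * driver): the entry points above are reached through the generic ahash
 * API; "my_complete", "my_ctx", "sgl", "digest" and "nbytes" below are
 * placeholder names.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_complete, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	err = crypto_ahash_digest(req);    // may return -EINPROGRESS
 */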


static int atmel_sha_export(struct ahash_request *req, void *out)
{
	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int atmel_sha_import(struct ahash_request *req, const void *in)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}
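
/*
 * Editor's note: since the exported state is the whole request context
 * (including the data buffer), a partial hash can be suspended and
 * resumed on another request, e.g. (sketch, placeholder names):
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req2, state);
 *	err = crypto_ahash_finup(req2);
 *
 * where "state" must provide at least .statesize bytes.
 */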

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));
	ctx->start = atmel_sha_start;

	return 0;
}

static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= ATMEL_SHA_PRIORITY,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= ATMEL_SHA_PRIORITY,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= ATMEL_SHA_PRIORITY,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= ATMEL_SHA_PRIORITY,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= ATMEL_SHA_PRIORITY,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}

static int atmel_sha_done(struct atmel_sha_dev *dd)
{
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return err;

finish:
	/* finish the current request */
	atmel_sha_finish_req(dd->req, err);

	return err;
}

static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}


/* DMA transfer functions */

static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
					struct scatterlist *sg,
					size_t len)
{
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t bs = ctx->block_size;
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		/*
		 * This is the last sg, the only one that is allowed to
		 * have an unaligned length.
		 */
		if (len <= sg->length) {
			dma->nents = nents + 1;
			dma->last_sg_length = sg->length;
			sg->length = ALIGN(len, sizeof(u32));
			return true;
		}

		/* All other sg lengths MUST be aligned to the block size. */
		if (!IS_ALIGNED(sg->length, bs))
			return false;

		len -= sg->length;
	}

	return false;
}
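
/*
 * Example (editor's illustration): with a 64-byte block size, an sg
 * chain of lengths {128, 64, 30} is accepted for len = 222: the first
 * two entries are block-aligned and the last one is padded up to
 * ALIGN(222 - 128 - 64, 4) = 32 bytes for the DMA. A chain starting
 * with a 100-byte entry is rejected because 100 is not a multiple of
 * the block size.
 */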

static void atmel_sha_dma_callback2(void *data)
{
	struct atmel_sha_dev *dd = data;
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct scatterlist *sg;
	int nents;

	dmaengine_terminate_all(dma->chan);
	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);

	sg = dma->sg;
	for (nents = 0; nents < dma->nents - 1; ++nents)
		sg = sg_next(sg);
	sg->length = dma->last_sg_length;

	dd->is_async = true;
	(void)atmel_sha_wait_for_data_ready(dd, dd->resume);
}

static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
			       struct scatterlist *src,
			       size_t len,
			       atmel_sha_fn_t resume)
{
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct dma_slave_config *config = &dma->dma_conf;
	struct dma_chan *chan = dma->chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	unsigned int sg_len;
	int err;

	dd->resume = resume;

	/*
	 * dma->nents has already been initialized by
	 * atmel_sha_dma_check_aligned().
	 */
	dma->sg = src;
	sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
	if (!sg_len) {
		err = -ENOMEM;
		goto exit;
	}

	config->src_maxburst = 16;
	config->dst_maxburst = 16;
	err = dmaengine_slave_config(chan, config);
	if (err)
		goto unmap_sg;

	desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		err = -ENOMEM;
		goto unmap_sg;
	}

	desc->callback = atmel_sha_dma_callback2;
	desc->callback_param = dd;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto unmap_sg;

	dma_async_issue_pending(chan);

	return -EINPROGRESS;

unmap_sg:
	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
exit:
	return atmel_sha_complete(dd, err);
}


/* CPU transfer functions */

static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	const u32 *words = (const u32 *)ctx->buffer;
	size_t i, num_words;
	u32 isr, din, din_inc;

	din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
	for (;;) {
		/* Write data into the Input Data Registers. */
		num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
		for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
			atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);

		ctx->offset += ctx->bufcnt;
		ctx->total -= ctx->bufcnt;

		if (!ctx->total)
			break;

		/*
		 * Prepare next block:
		 * Fill ctx->buffer now with the next data to be written into
		 * IDATARx: it gives time for the SHA hardware to process
		 * the current data so the SHA_INT_DATARDY flag might be set
		 * in SHA_ISR when polling this register at the beginning of
		 * the next loop.
		 */
		ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
		scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
					 ctx->offset, ctx->bufcnt, 0);

		/* Wait for hardware to be ready again. */
		isr = atmel_sha_read(dd, SHA_ISR);
		if (!(isr & SHA_INT_DATARDY)) {
			/* Not ready yet. */
			dd->resume = atmel_sha_cpu_transfer;
			atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
		return dd->cpu_transfer_complete(dd);

	return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
}

static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
			       struct scatterlist *sg,
			       unsigned int len,
			       bool idatar0_only,
			       bool wait_data_ready,
			       atmel_sha_fn_t resume)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!len)
		return resume(dd);

	ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);

	if (idatar0_only)
		ctx->flags |= SHA_FLAGS_IDATAR0;

	if (wait_data_ready)
		ctx->flags |= SHA_FLAGS_WAIT_DATARDY;

	ctx->sg = sg;
	ctx->total = len;
	ctx->offset = 0;

	/* Prepare the first block to be written. */
	ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
	scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
				 ctx->offset, ctx->bufcnt, 0);

	dd->cpu_transfer_complete = resume;
	return atmel_sha_cpu_transfer(dd);
}

static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
			      const void *data, unsigned int datalen,
			      bool auto_padding,
			      atmel_sha_fn_t resume)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 msglen = (auto_padding) ? datalen : 0;
	u32 mr = SHA_MR_MODE_AUTO;

	if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
		return atmel_sha_complete(dd, -EINVAL);

	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
	atmel_sha_write(dd, SHA_MR, mr);
	atmel_sha_write(dd, SHA_MSR, msglen);
	atmel_sha_write(dd, SHA_BCR, msglen);
	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

	sg_init_one(&dd->tmp, data, datalen);
	return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
}


/* hmac functions */

struct atmel_sha_hmac_key {
	bool valid;
	unsigned int keylen;
	u8 buffer[SHA512_BLOCK_SIZE];
	u8 *keydup;
};

static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
{
	memset(hkey, 0, sizeof(*hkey));
}

static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
{
	kfree(hkey->keydup);
	memset(hkey, 0, sizeof(*hkey));
}

static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
					 const u8 *key,
					 unsigned int keylen)
{
	atmel_sha_hmac_key_release(hkey);

	if (keylen > sizeof(hkey->buffer)) {
		hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!hkey->keydup)
			return -ENOMEM;

	} else {
		memcpy(hkey->buffer, key, keylen);
	}

	hkey->valid = true;
	hkey->keylen = keylen;
	return 0;
}

static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
					  const u8 **key,
					  unsigned int *keylen)
{
	if (!hkey->valid)
		return false;

	*keylen = hkey->keylen;
	*key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
	return true;
}


struct atmel_sha_hmac_ctx {
	struct atmel_sha_ctx	base;

	struct atmel_sha_hmac_key	hkey;
	u32			ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
	u32			opad[SHA512_BLOCK_SIZE / sizeof(u32)];
	atmel_sha_fn_t		resume;
};

static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
				atmel_sha_fn_t resume);
static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
				      const u8 *key, unsigned int keylen);
static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);

static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);

static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
				atmel_sha_fn_t resume)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	unsigned int keylen;
	const u8 *key;
	size_t bs;

	hmac->resume = resume;
	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		ctx->block_size = SHA1_BLOCK_SIZE;
		ctx->hash_size = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		ctx->block_size = SHA224_BLOCK_SIZE;
		ctx->hash_size = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		ctx->block_size = SHA256_BLOCK_SIZE;
		ctx->hash_size = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		ctx->block_size = SHA384_BLOCK_SIZE;
		ctx->hash_size = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		ctx->block_size = SHA512_BLOCK_SIZE;
		ctx->hash_size = SHA512_DIGEST_SIZE;
		break;

	default:
		return atmel_sha_complete(dd, -EINVAL);
	}
	bs = ctx->block_size;

	if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
		return resume(dd);

	/* Compute K' from K. */
	if (unlikely(keylen > bs))
		return atmel_sha_hmac_prehash_key(dd, key, keylen);

	/* Prepare ipad. */
	memcpy((u8 *)hmac->ipad, key, keylen);
	memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
	return atmel_sha_hmac_compute_ipad_hash(dd);
}

static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
				      const u8 *key, unsigned int keylen)
{
	return atmel_sha_cpu_hash(dd, key, keylen, true,
				  atmel_sha_hmac_prehash_key_done);
}

static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t ds = crypto_ahash_digestsize(tfm);
	size_t bs = ctx->block_size;
	size_t i, num_words = ds / sizeof(u32);

	/* Prepare ipad. */
	for (i = 0; i < num_words; ++i)
		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
	memset((u8 *)hmac->ipad + ds, 0, bs - ds);
	return atmel_sha_hmac_compute_ipad_hash(dd);
}

static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t bs = ctx->block_size;
	size_t i, num_words = bs / sizeof(u32);

	memcpy(hmac->opad, hmac->ipad, bs);
	for (i = 0; i < num_words; ++i) {
		hmac->ipad[i] ^= 0x36363636;
		hmac->opad[i] ^= 0x5c5c5c5c;
	}

	return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
				  atmel_sha_hmac_compute_opad_hash);
}
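
/*
 * For reference (standard HMAC construction, RFC 2104): the two hashes
 * chained here and in atmel_sha_hmac_compute_opad_hash() compute the
 * inner and outer key blocks of
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key zero-padded (or pre-hashed, see
 * atmel_sha_hmac_prehash_key()) to one block, ipad = 0x36..36 and
 * opad = 0x5c..5c. Only the intermediate digests are kept in
 * hmac->ipad[]/hmac->opad[], to be reloaded later through the UIHV.
 */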
1856
1857static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
1858{
1859 struct ahash_request *req = dd->req;
1860 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1861 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1862 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1863 size_t bs = ctx->block_size;
1864 size_t hs = ctx->hash_size;
1865 size_t i, num_words = hs / sizeof(u32);
1866
1867 for (i = 0; i < num_words; ++i)
1868 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1869 return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
1870 atmel_sha_hmac_setup_done);
1871}
1872
1873static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
1874{
1875 struct ahash_request *req = dd->req;
1876 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1877 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1878 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1879 size_t hs = ctx->hash_size;
1880 size_t i, num_words = hs / sizeof(u32);
1881
1882 for (i = 0; i < num_words; ++i)
1883 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1884 atmel_sha_hmac_key_release(&hmac->hkey);
1885 return hmac->resume(dd);
1886}
1887
1888static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
1889{
1890 struct ahash_request *req = dd->req;
1891 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1892 int err;
1893
1894 err = atmel_sha_hw_init(dd);
1895 if (err)
1896 return atmel_sha_complete(dd, err);
1897
1898 switch (ctx->op) {
1899 case SHA_OP_INIT:
1900 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
1901 break;
1902
1903 case SHA_OP_UPDATE:
1904 dd->resume = atmel_sha_done;
1905 err = atmel_sha_update_req(dd);
1906 break;
1907
1908 case SHA_OP_FINAL:
1909 dd->resume = atmel_sha_hmac_final;
1910 err = atmel_sha_final_req(dd);
1911 break;
1912
1913 case SHA_OP_DIGEST:
1914 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
1915 break;
1916
1917 default:
1918 return atmel_sha_complete(dd, -EINVAL);
1919 }
1920
1921 return err;
1922}
1923
1924static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1925 unsigned int keylen)
1926{
1927 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1928
1929 if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
1930 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1931 return -EINVAL;
1932 }
1933
1934 return 0;
1935}
1936
1937static int atmel_sha_hmac_init(struct ahash_request *req)
1938{
1939 int err;
1940
1941 err = atmel_sha_init(req);
1942 if (err)
1943 return err;
1944
1945 return atmel_sha_enqueue(req, SHA_OP_INIT);
1946}
1947
1948static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
1949{
1950 struct ahash_request *req = dd->req;
1951 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1952 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1953 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1954 size_t bs = ctx->block_size;
1955 size_t hs = ctx->hash_size;
1956
1957 ctx->bufcnt = 0;
1958 ctx->digcnt[0] = bs;
1959 ctx->digcnt[1] = 0;
1960 ctx->flags |= SHA_FLAGS_RESTORE;
1961 memcpy(ctx->digest, hmac->ipad, hs);
1962 return atmel_sha_complete(dd, 0);
1963}
1964
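/*
 * Finalize an HMAC: at this point the hardware holds the inner hash
 * d = SHA((K' ^ ipad) | msg). Save it, reload the precomputed hash of
 * (K' ^ opad) through the User Initial Hash Value (UIHV) registers and
 * hash d again to obtain SHA((K' ^ opad) | d). SHA_MSR is programmed
 * to bs + ds so that the automatic padding accounts for the opad block
 * already summarized by the UIHV, while SHA_BCR announces the ds bytes
 * of the inner digest that actually remain to be transferred.
 */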
1965static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
1966{
1967 struct ahash_request *req = dd->req;
1968 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1969 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1970 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1971 u32 *digest = (u32 *)ctx->digest;
1972 size_t ds = crypto_ahash_digestsize(tfm);
1973 size_t bs = ctx->block_size;
1974 size_t hs = ctx->hash_size;
1975 size_t i, num_words;
1976 u32 mr;
1977
1978	/* Save d = SHA((K' ^ ipad) | msg). */
1979 num_words = ds / sizeof(u32);
1980 for (i = 0; i < num_words; ++i)
1981 digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1982
1983	/* Restore context to finish computing SHA((K' ^ opad) | d). */
1984 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
1985 num_words = hs / sizeof(u32);
1986 for (i = 0; i < num_words; ++i)
1987 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
1988
1989 mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
1990 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1991 atmel_sha_write(dd, SHA_MR, mr);
1992 atmel_sha_write(dd, SHA_MSR, bs + ds);
1993 atmel_sha_write(dd, SHA_BCR, ds);
1994 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1995
1996 sg_init_one(&dd->tmp, digest, ds);
1997 return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
1998 atmel_sha_hmac_final_done);
1999}
2000
2001static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
2002{
2003	/*
2004	 * req->result might not be sizeof(u32) aligned, so copy the
2005	 * digest into the aligned ctx->digest[] first, then memcpy() it
2006	 * into req->result.
2007	 */
2008 atmel_sha_copy_hash(dd->req);
2009 atmel_sha_copy_ready_hash(dd->req);
2010 return atmel_sha_complete(dd, 0);
2011}
2012
2013static int atmel_sha_hmac_digest(struct ahash_request *req)
2014{
2015 int err;
2016
2017 err = atmel_sha_init(req);
2018 if (err)
2019 return err;
2020
2021 return atmel_sha_enqueue(req, SHA_OP_DIGEST);
2022}
2023
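/*
 * One-shot HMAC: when both precomputed pad hashes are loaded, SHA((K' ^
 * ipad)) through the UIHV registers and SHA((K' ^ opad)) through the
 * UIEHV registers, and SHA_MR_HMAC is set, the IP computes the whole
 * HMAC by itself. The data are fed either by DMA (IDATAR0 mode) when
 * the request is large and suitably aligned, or by the CPU in auto
 * mode.
 */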
2024static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
2025{
2026 struct ahash_request *req = dd->req;
2027 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
2028 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2029 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2030 size_t hs = ctx->hash_size;
2031 size_t i, num_words = hs / sizeof(u32);
2032 bool use_dma = false;
2033 u32 mr;
2034
2035	/* TODO: handle the special case of an empty message. */
2036	if (!req->nbytes)
2037		return atmel_sha_complete(dd, -EINVAL);
2038
2039 /* Check DMA threshold and alignment. */
2040 if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
2041 atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
2042 use_dma = true;
2043
2044	/* Write both initial hash values to compute an HMAC. */
2045 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
2046 for (i = 0; i < num_words; ++i)
2047 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
2048
2049 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
2050 for (i = 0; i < num_words; ++i)
2051 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
2052
2053	/* Write the Mode, Message Size, Bytes Count and Control registers. */
2054 mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
2055 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2056 if (use_dma)
2057 mr |= SHA_MR_MODE_IDATAR0;
2058 else
2059 mr |= SHA_MR_MODE_AUTO;
2060 atmel_sha_write(dd, SHA_MR, mr);
2061
2062 atmel_sha_write(dd, SHA_MSR, req->nbytes);
2063 atmel_sha_write(dd, SHA_BCR, req->nbytes);
2064
2065 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
2066
2067 /* Process data. */
2068 if (use_dma)
2069 return atmel_sha_dma_start(dd, req->src, req->nbytes,
2070 atmel_sha_hmac_final_done);
2071
2072 return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
2073 atmel_sha_hmac_final_done);
2074}
2075
2076static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
2077{
2078 struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
2079
2080 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2081 sizeof(struct atmel_sha_reqctx));
2082 hmac->base.start = atmel_sha_hmac_start;
2083 atmel_sha_hmac_key_init(&hmac->hkey);
2084
2085 return 0;
2086}
2087
2088static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
2089{
2090 struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
2091
2092 atmel_sha_hmac_key_release(&hmac->hkey);
2093}
2094
2095static struct ahash_alg sha_hmac_algs[] = {
2096{
2097 .init = atmel_sha_hmac_init,
2098 .update = atmel_sha_update,
2099 .final = atmel_sha_final,
2100 .digest = atmel_sha_hmac_digest,
2101 .setkey = atmel_sha_hmac_setkey,
2102 .export = atmel_sha_export,
2103 .import = atmel_sha_import,
2104 .halg = {
2105 .digestsize = SHA1_DIGEST_SIZE,
2106 .statesize = sizeof(struct atmel_sha_reqctx),
2107 .base = {
2108 .cra_name = "hmac(sha1)",
2109 .cra_driver_name = "atmel-hmac-sha1",
2110			.cra_priority		= ATMEL_SHA_PRIORITY,
2111 .cra_flags = CRYPTO_ALG_ASYNC,
2112 .cra_blocksize = SHA1_BLOCK_SIZE,
2113 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
2114 .cra_alignmask = 0,
2115 .cra_module = THIS_MODULE,
2116 .cra_init = atmel_sha_hmac_cra_init,
2117 .cra_exit = atmel_sha_hmac_cra_exit,
2118 }
2119 }
2120},
2121{
2122 .init = atmel_sha_hmac_init,
2123 .update = atmel_sha_update,
2124 .final = atmel_sha_final,
2125 .digest = atmel_sha_hmac_digest,
2126 .setkey = atmel_sha_hmac_setkey,
2127 .export = atmel_sha_export,
2128 .import = atmel_sha_import,
2129 .halg = {
2130 .digestsize = SHA224_DIGEST_SIZE,
2131 .statesize = sizeof(struct atmel_sha_reqctx),
2132 .base = {
2133 .cra_name = "hmac(sha224)",
2134 .cra_driver_name = "atmel-hmac-sha224",
2135			.cra_priority		= ATMEL_SHA_PRIORITY,
2136 .cra_flags = CRYPTO_ALG_ASYNC,
2137 .cra_blocksize = SHA224_BLOCK_SIZE,
2138 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
2139 .cra_alignmask = 0,
2140 .cra_module = THIS_MODULE,
2141 .cra_init = atmel_sha_hmac_cra_init,
2142 .cra_exit = atmel_sha_hmac_cra_exit,
2143 }
2144 }
2145},
2146{
2147 .init = atmel_sha_hmac_init,
2148 .update = atmel_sha_update,
2149 .final = atmel_sha_final,
2150 .digest = atmel_sha_hmac_digest,
2151 .setkey = atmel_sha_hmac_setkey,
2152 .export = atmel_sha_export,
2153 .import = atmel_sha_import,
2154 .halg = {
2155 .digestsize = SHA256_DIGEST_SIZE,
2156 .statesize = sizeof(struct atmel_sha_reqctx),
2157 .base = {
2158 .cra_name = "hmac(sha256)",
2159 .cra_driver_name = "atmel-hmac-sha256",
2160			.cra_priority		= ATMEL_SHA_PRIORITY,
2161 .cra_flags = CRYPTO_ALG_ASYNC,
2162 .cra_blocksize = SHA256_BLOCK_SIZE,
2163 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
2164 .cra_alignmask = 0,
2165 .cra_module = THIS_MODULE,
2166 .cra_init = atmel_sha_hmac_cra_init,
2167 .cra_exit = atmel_sha_hmac_cra_exit,
2168 }
2169 }
2170},
2171{
2172 .init = atmel_sha_hmac_init,
2173 .update = atmel_sha_update,
2174 .final = atmel_sha_final,
2175 .digest = atmel_sha_hmac_digest,
2176 .setkey = atmel_sha_hmac_setkey,
2177 .export = atmel_sha_export,
2178 .import = atmel_sha_import,
2179 .halg = {
2180 .digestsize = SHA384_DIGEST_SIZE,
2181 .statesize = sizeof(struct atmel_sha_reqctx),
2182 .base = {
2183 .cra_name = "hmac(sha384)",
2184 .cra_driver_name = "atmel-hmac-sha384",
2185			.cra_priority		= ATMEL_SHA_PRIORITY,
2186 .cra_flags = CRYPTO_ALG_ASYNC,
2187 .cra_blocksize = SHA384_BLOCK_SIZE,
2188 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
2189 .cra_alignmask = 0,
2190 .cra_module = THIS_MODULE,
2191 .cra_init = atmel_sha_hmac_cra_init,
2192 .cra_exit = atmel_sha_hmac_cra_exit,
2193 }
2194 }
2195},
2196{
2197 .init = atmel_sha_hmac_init,
2198 .update = atmel_sha_update,
2199 .final = atmel_sha_final,
2200 .digest = atmel_sha_hmac_digest,
2201 .setkey = atmel_sha_hmac_setkey,
2202 .export = atmel_sha_export,
2203 .import = atmel_sha_import,
2204 .halg = {
2205 .digestsize = SHA512_DIGEST_SIZE,
2206 .statesize = sizeof(struct atmel_sha_reqctx),
2207 .base = {
2208 .cra_name = "hmac(sha512)",
2209 .cra_driver_name = "atmel-hmac-sha512",
2210			.cra_priority		= ATMEL_SHA_PRIORITY,
2211 .cra_flags = CRYPTO_ALG_ASYNC,
2212 .cra_blocksize = SHA512_BLOCK_SIZE,
2213 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
2214 .cra_alignmask = 0,
2215 .cra_module = THIS_MODULE,
2216 .cra_init = atmel_sha_hmac_cra_init,
2217 .cra_exit = atmel_sha_hmac_cra_exit,
2218 }
2219 }
2220},
2221};
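/*
 * These transformations are registered with the generic ahash API, so
 * nothing above is called directly: the crypto core selects this driver
 * over the software "hmac(shaX)" implementations whenever its
 * cra_priority wins. A minimal sketch of a kernel user (not part of
 * this driver; my_done_cb and my_ctx are placeholders, error handling
 * trimmed):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, out_digest, nbytes);
 *	crypto_ahash_digest(req);	(may return -EINPROGRESS)
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */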
2222
2223#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2224/* authenc functions */
2225
2226static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
2227static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
2228static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);
2229
2230
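/*
 * The helpers below are not exposed through the crypto API; they are
 * exported to the atmel-aes driver (see atmel-authenc.h), which uses
 * this IP to offload the HMAC half of authenc() algorithms such as
 * authenc(hmac(sha256),cbc(aes)). The AES driver owns the flow:
 * _schedule() queues the request, _init() hashes the associated data,
 * and _final() collects the authentication tag once the text has been
 * streamed through the SHA input registers.
 */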
2231struct atmel_sha_authenc_ctx {
2232 struct crypto_ahash *tfm;
2233};
2234
2235struct atmel_sha_authenc_reqctx {
2236 struct atmel_sha_reqctx base;
2237
2238 atmel_aes_authenc_fn_t cb;
2239 struct atmel_aes_dev *aes_dev;
2240
2241 /* _init() parameters. */
2242 struct scatterlist *assoc;
2243 u32 assoclen;
2244 u32 textlen;
2245
2246 /* _final() parameters. */
2247 u32 *digest;
2248 unsigned int digestlen;
2249};
2250
2251static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
2252 int err)
2253{
2254 struct ahash_request *req = areq->data;
2255 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2256
2257 authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
2258}
2259
2260static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
2261{
2262 struct ahash_request *req = dd->req;
2263 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2264 int err;
2265
2266 /*
2267	 * Force atmel_sha_complete() to call req->base.complete(), i.e.
2268 * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
2269 */
2270 dd->force_complete = true;
2271
2272 err = atmel_sha_hw_init(dd);
2273 return authctx->cb(authctx->aes_dev, err, dd->is_async);
2274}
2275
2276bool atmel_sha_authenc_is_ready(void)
2277{
2278 struct atmel_sha_ctx dummy;
2279
2280 dummy.dd = NULL;
2281 return (atmel_sha_find_dev(&dummy) != NULL);
2282}
2283EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);
2284
2285unsigned int atmel_sha_authenc_get_reqsize(void)
2286{
2287 return sizeof(struct atmel_sha_authenc_reqctx);
2288}
2289EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);
2290
2291struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
2292{
2293 struct atmel_sha_authenc_ctx *auth;
2294 struct crypto_ahash *tfm;
2295 struct atmel_sha_ctx *tctx;
2296 const char *name;
2297 int err = -EINVAL;
2298
2299 switch (mode & SHA_FLAGS_MODE_MASK) {
2300 case SHA_FLAGS_HMAC_SHA1:
2301 name = "atmel-hmac-sha1";
2302 break;
2303
2304 case SHA_FLAGS_HMAC_SHA224:
2305 name = "atmel-hmac-sha224";
2306 break;
2307
2308 case SHA_FLAGS_HMAC_SHA256:
2309 name = "atmel-hmac-sha256";
2310 break;
2311
2312 case SHA_FLAGS_HMAC_SHA384:
2313 name = "atmel-hmac-sha384";
2314 break;
2315
2316 case SHA_FLAGS_HMAC_SHA512:
2317 name = "atmel-hmac-sha512";
2318 break;
2319
2320 default:
2321 goto error;
2322 }
2323
2324	tfm = crypto_alloc_ahash(name, 0, 0);
2325 if (IS_ERR(tfm)) {
2326 err = PTR_ERR(tfm);
2327 goto error;
2328 }
2329 tctx = crypto_ahash_ctx(tfm);
2330 tctx->start = atmel_sha_authenc_start;
2331 tctx->flags = mode;
2332
2333 auth = kzalloc(sizeof(*auth), GFP_KERNEL);
2334 if (!auth) {
2335 err = -ENOMEM;
2336 goto err_free_ahash;
2337 }
2338 auth->tfm = tfm;
2339
2340 return auth;
2341
2342err_free_ahash:
2343 crypto_free_ahash(tfm);
2344error:
2345 return ERR_PTR(err);
2346}
2347EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);
2348
2349void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
2350{
2351 if (auth)
2352 crypto_free_ahash(auth->tfm);
2353 kfree(auth);
2354}
2355EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
2356
2357int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
2358 const u8 *key, unsigned int keylen,
2359 u32 *flags)
2360{
2361 struct crypto_ahash *tfm = auth->tfm;
2362 int err;
2363
2364 crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
2365 crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
2366 err = crypto_ahash_setkey(tfm, key, keylen);
2367 *flags = crypto_ahash_get_flags(tfm);
2368
2369 return err;
2370}
2371EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
2372
2373int atmel_sha_authenc_schedule(struct ahash_request *req,
2374 struct atmel_sha_authenc_ctx *auth,
2375 atmel_aes_authenc_fn_t cb,
2376 struct atmel_aes_dev *aes_dev)
2377{
2378 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2379 struct atmel_sha_reqctx *ctx = &authctx->base;
2380 struct crypto_ahash *tfm = auth->tfm;
2381 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
2382 struct atmel_sha_dev *dd;
2383
2384 /* Reset request context (MUST be done first). */
2385 memset(authctx, 0, sizeof(*authctx));
2386
2387 /* Get SHA device. */
2388 dd = atmel_sha_find_dev(tctx);
2389 if (!dd)
2390 return cb(aes_dev, -ENODEV, false);
2391
2392 /* Init request context. */
2393 ctx->dd = dd;
2394 ctx->buflen = SHA_BUFFER_LEN;
2395 authctx->cb = cb;
2396 authctx->aes_dev = aes_dev;
2397 ahash_request_set_tfm(req, tfm);
2398 ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);
2399
2400 return atmel_sha_handle_queue(dd, req);
2401}
2402EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);
2403
2404int atmel_sha_authenc_init(struct ahash_request *req,
2405 struct scatterlist *assoc, unsigned int assoclen,
2406 unsigned int textlen,
2407 atmel_aes_authenc_fn_t cb,
2408 struct atmel_aes_dev *aes_dev)
2409{
2410 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2411 struct atmel_sha_reqctx *ctx = &authctx->base;
2412 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2413 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2414 struct atmel_sha_dev *dd = ctx->dd;
2415
2416 if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
2417 return atmel_sha_complete(dd, -EINVAL);
2418
2419 authctx->cb = cb;
2420 authctx->aes_dev = aes_dev;
2421 authctx->assoc = assoc;
2422 authctx->assoclen = assoclen;
2423 authctx->textlen = textlen;
2424
2425 ctx->flags = hmac->base.flags;
2426 return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
2427}
2428EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);
2429
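/*
 * Like atmel_sha_hmac_digest2(), preload both precomputed pad hashes
 * and switch the IP to hardware HMAC mode, but with SHA_MSR/SHA_BCR
 * covering assoclen + textlen: only the associated data is fed here
 * (by the CPU); the AES driver pushes the text part afterwards.
 */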
2430static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
2431{
2432 struct ahash_request *req = dd->req;
2433 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2434 struct atmel_sha_reqctx *ctx = &authctx->base;
2435 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2436 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2437 size_t hs = ctx->hash_size;
2438 size_t i, num_words = hs / sizeof(u32);
2439 u32 mr, msg_size;
2440
2441 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
2442 for (i = 0; i < num_words; ++i)
2443 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
2444
2445 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
2446 for (i = 0; i < num_words; ++i)
2447 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
2448
2449 mr = (SHA_MR_MODE_IDATAR0 |
2450 SHA_MR_HMAC |
2451 SHA_MR_DUALBUFF);
2452 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2453 atmel_sha_write(dd, SHA_MR, mr);
2454
2455 msg_size = authctx->assoclen + authctx->textlen;
2456 atmel_sha_write(dd, SHA_MSR, msg_size);
2457 atmel_sha_write(dd, SHA_BCR, msg_size);
2458
2459 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
2460
2461 /* Process assoc data. */
2462 return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
2463 true, false,
2464 atmel_sha_authenc_init_done);
2465}
2466
2467static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
2468{
2469 struct ahash_request *req = dd->req;
2470 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2471
2472 return authctx->cb(authctx->aes_dev, 0, dd->is_async);
2473}
2474
2475int atmel_sha_authenc_final(struct ahash_request *req,
2476 u32 *digest, unsigned int digestlen,
2477 atmel_aes_authenc_fn_t cb,
2478 struct atmel_aes_dev *aes_dev)
2479{
2480 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2481 struct atmel_sha_reqctx *ctx = &authctx->base;
2482 struct atmel_sha_dev *dd = ctx->dd;
2483
2484 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
2485 case SHA_FLAGS_SHA1:
2486 authctx->digestlen = SHA1_DIGEST_SIZE;
2487 break;
2488
2489 case SHA_FLAGS_SHA224:
2490 authctx->digestlen = SHA224_DIGEST_SIZE;
2491 break;
2492
2493 case SHA_FLAGS_SHA256:
2494 authctx->digestlen = SHA256_DIGEST_SIZE;
2495 break;
2496
2497 case SHA_FLAGS_SHA384:
2498 authctx->digestlen = SHA384_DIGEST_SIZE;
2499 break;
2500
2501 case SHA_FLAGS_SHA512:
2502 authctx->digestlen = SHA512_DIGEST_SIZE;
2503 break;
2504
2505 default:
2506 return atmel_sha_complete(dd, -EINVAL);
2507 }
2508 if (authctx->digestlen > digestlen)
2509 authctx->digestlen = digestlen;
2510
2511 authctx->cb = cb;
2512 authctx->aes_dev = aes_dev;
2513 authctx->digest = digest;
2514 return atmel_sha_wait_for_data_ready(dd,
2515 atmel_sha_authenc_final_done);
2516}
2517EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);
2518
2519static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
2520{
2521 struct ahash_request *req = dd->req;
2522 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2523 size_t i, num_words = authctx->digestlen / sizeof(u32);
2524
2525 for (i = 0; i < num_words; ++i)
2526 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
2527
2528 return atmel_sha_complete(dd, 0);
2529}
2530
2531void atmel_sha_authenc_abort(struct ahash_request *req)
2532{
2533 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2534 struct atmel_sha_reqctx *ctx = &authctx->base;
2535 struct atmel_sha_dev *dd = ctx->dd;
2536
2537 /* Prevent atmel_sha_complete() from calling req->base.complete(). */
2538 dd->is_async = false;
2539 dd->force_complete = false;
2540 (void)atmel_sha_complete(dd, 0);
2541}
2542EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
2543
2544#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
2545
2546
2547static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
2548{
2549 int i;
2550
2551 if (dd->caps.has_hmac)
2552 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
2553 crypto_unregister_ahash(&sha_hmac_algs[i]);
2554
2555 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
2556 crypto_unregister_ahash(&sha_1_256_algs[i]);
2557
2558 if (dd->caps.has_sha224)
2559 crypto_unregister_ahash(&sha_224_alg);
2560
2561 if (dd->caps.has_sha_384_512) {
2562 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
2563 crypto_unregister_ahash(&sha_384_512_algs[i]);
2564 }
2565}
2566
2567static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
2568{
2569 int err, i, j;
2570
2571 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
2572 err = crypto_register_ahash(&sha_1_256_algs[i]);
2573		if (err)
2574 goto err_sha_1_256_algs;
2575 }
2576
2577 if (dd->caps.has_sha224) {
2578 err = crypto_register_ahash(&sha_224_alg);
2579 if (err)
2580 goto err_sha_224_algs;
2581 }
2582
2583 if (dd->caps.has_sha_384_512) {
2584 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
2585 err = crypto_register_ahash(&sha_384_512_algs[i]);
2586 if (err)
2587 goto err_sha_384_512_algs;
2588 }
2589 }
2590
2591 if (dd->caps.has_hmac) {
2592 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
2593 err = crypto_register_ahash(&sha_hmac_algs[i]);
2594 if (err)
2595 goto err_sha_hmac_algs;
2596 }
2597 }
2598
2599 return 0;
2600
2601	/* i = ARRAY_SIZE(sha_hmac_algs); */
2602err_sha_hmac_algs:
2603 for (j = 0; j < i; j++)
2604 crypto_unregister_ahash(&sha_hmac_algs[j]);
2605 i = ARRAY_SIZE(sha_384_512_algs);
2606err_sha_384_512_algs:
2607 for (j = 0; j < i; j++)
2608 crypto_unregister_ahash(&sha_384_512_algs[j]);
2609 crypto_unregister_ahash(&sha_224_alg);
2610err_sha_224_algs:
2611 i = ARRAY_SIZE(sha_1_256_algs);
2612err_sha_1_256_algs:
2613	for (j = 0; j < i; j++)
2614		crypto_unregister_ahash(&sha_1_256_algs[j]);
2615
2616 return err;
2617}
2618
2619static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
2620 struct crypto_platform_data *pdata)
2621{
2622 dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
2623 if (IS_ERR(dd->dma_lch_in.chan)) {
2624 int ret = PTR_ERR(dd->dma_lch_in.chan);
2625
2626 if (ret != -EPROBE_DEFER)
2627 dev_warn(dd->dev, "no DMA channel available\n");
2628 return ret;
2629 }
2630
2631 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
2632 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
2633 SHA_REG_DIN(0);
2634 dd->dma_lch_in.dma_conf.src_maxburst = 1;
2635 dd->dma_lch_in.dma_conf.src_addr_width =
2636 DMA_SLAVE_BUSWIDTH_4_BYTES;
2637 dd->dma_lch_in.dma_conf.dst_maxburst = 1;
2638 dd->dma_lch_in.dma_conf.dst_addr_width =
2639 DMA_SLAVE_BUSWIDTH_4_BYTES;
2640 dd->dma_lch_in.dma_conf.device_fc = false;
2641
2642 return 0;
2643}
2644
2645static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
2646{
2647 dma_release_channel(dd->dma_lch_in.chan);
2648}
2649
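/*
 * Hardware capabilities are inferred from the IP version read at probe
 * time; each newer major revision is a superset of the previous one:
 * 0x400 adds DMA (with dual-buffer support) and SHA-224 over 0x320,
 * 0x410 adds SHA-384/512, 0x420 the UIHV/UIEHV registers, and 0x510
 * the hardware HMAC mode.
 */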
2650static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
2651{
2652
2653 dd->caps.has_dma = 0;
2654 dd->caps.has_dualbuff = 0;
2655 dd->caps.has_sha224 = 0;
2656 dd->caps.has_sha_384_512 = 0;
2657	dd->caps.has_uihv = 0;
2658	dd->caps.has_hmac = 0;
2659
2660 /* keep only major version number */
2661 switch (dd->hw_version & 0xff0) {
2662 case 0x510:
2663 dd->caps.has_dma = 1;
2664 dd->caps.has_dualbuff = 1;
2665 dd->caps.has_sha224 = 1;
2666 dd->caps.has_sha_384_512 = 1;
2667		dd->caps.has_uihv = 1;
2668		dd->caps.has_hmac = 1;
2669		break;
2670 case 0x420:
2671 dd->caps.has_dma = 1;
2672 dd->caps.has_dualbuff = 1;
2673 dd->caps.has_sha224 = 1;
2674 dd->caps.has_sha_384_512 = 1;
2675		dd->caps.has_uihv = 1;
2676		break;
2677 case 0x410:
2678 dd->caps.has_dma = 1;
2679 dd->caps.has_dualbuff = 1;
2680 dd->caps.has_sha224 = 1;
2681 dd->caps.has_sha_384_512 = 1;
2682 break;
2683 case 0x400:
2684 dd->caps.has_dma = 1;
2685 dd->caps.has_dualbuff = 1;
2686 dd->caps.has_sha224 = 1;
2687 break;
2688 case 0x320:
2689 break;
2690 default:
2691		dev_warn(dd->dev,
2692			 "unsupported SHA hw version, using minimum capabilities\n");
2693 break;
2694 }
2695}
2696
2697#if defined(CONFIG_OF)
2698static const struct of_device_id atmel_sha_dt_ids[] = {
2699 { .compatible = "atmel,at91sam9g46-sha" },
2700 { /* sentinel */ }
2701};
2702
2703MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
2704
2705static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
2706{
2707 struct device_node *np = pdev->dev.of_node;
2708 struct crypto_platform_data *pdata;
2709
2710 if (!np) {
2711 dev_err(&pdev->dev, "device node not found\n");
2712 return ERR_PTR(-EINVAL);
2713 }
2714
2715 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2716	if (!pdata)
2717		return ERR_PTR(-ENOMEM);
2718
2719 return pdata;
2720}
2721#else /* CONFIG_OF */
2722static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
2723{
2724 return ERR_PTR(-EINVAL);
2725}
2726#endif
2727
2728static int atmel_sha_probe(struct platform_device *pdev)
2729{
2730 struct atmel_sha_dev *sha_dd;
2731	struct crypto_platform_data *pdata;
2732 struct device *dev = &pdev->dev;
2733 struct resource *sha_res;
2734 int err;
2735
2736	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
2737	if (sha_dd == NULL) {
2738 err = -ENOMEM;
2739 goto sha_dd_err;
2740 }
2741
2742 sha_dd->dev = dev;
2743
2744 platform_set_drvdata(pdev, sha_dd);
2745
2746 INIT_LIST_HEAD(&sha_dd->list);
2747	spin_lock_init(&sha_dd->lock);
2748
2749 tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
2750 (unsigned long)sha_dd);
2751 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
2752 (unsigned long)sha_dd);
2753
2754 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
2755
2756 /* Get the base address */
2757 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2758 if (!sha_res) {
2759 dev_err(dev, "no MEM resource info\n");
2760 err = -ENODEV;
2761 goto res_err;
2762 }
2763 sha_dd->phys_base = sha_res->start;
2764
2765 /* Get the IRQ */
2766 sha_dd->irq = platform_get_irq(pdev, 0);
2767 if (sha_dd->irq < 0) {
2768 err = sha_dd->irq;
2769 goto res_err;
2770 }
2771
2772 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
2773 IRQF_SHARED, "atmel-sha", sha_dd);
2774 if (err) {
2775 dev_err(dev, "unable to request sha irq.\n");
2776 goto res_err;
2777 }
2778
2779 /* Initializing the clock */
2780	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
2781	if (IS_ERR(sha_dd->iclk)) {
2782		dev_err(dev, "clock initialization failed.\n");
2783		err = PTR_ERR(sha_dd->iclk);
2784		goto res_err;
2785 }
2786
2787	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
2788	if (IS_ERR(sha_dd->io_base)) {
2789		dev_err(dev, "can't ioremap\n");
2790		err = PTR_ERR(sha_dd->io_base);
2791		goto res_err;
2792 }
2793
2794 err = clk_prepare(sha_dd->iclk);
2795 if (err)
2796 goto res_err;
2797
2798 err = atmel_sha_hw_version_init(sha_dd);
2799 if (err)
2800 goto iclk_unprepare;
2801
2802 atmel_sha_get_cap(sha_dd);
2803
2804 if (sha_dd->caps.has_dma) {
2805 pdata = pdev->dev.platform_data;
2806 if (!pdata) {
2807 pdata = atmel_sha_of_init(pdev);
2808 if (IS_ERR(pdata)) {
2809 dev_err(&pdev->dev, "platform data not available\n");
2810 err = PTR_ERR(pdata);
2811				goto iclk_unprepare;
2812 }
2813 }
2814
2815 err = atmel_sha_dma_init(sha_dd, pdata);
2816 if (err)
2817 goto err_sha_dma;
2818
2819 dev_info(dev, "using %s for DMA transfers\n",
2820 dma_chan_name(sha_dd->dma_lch_in.chan));
2821 }
2822
2823 spin_lock(&atmel_sha.lock);
2824 list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
2825 spin_unlock(&atmel_sha.lock);
2826
2827 err = atmel_sha_register_algs(sha_dd);
2828 if (err)
2829 goto err_algs;
2830
2831 dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
2832 sha_dd->caps.has_sha224 ? "/SHA224" : "",
2833 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
2834
2835 return 0;
2836
2837err_algs:
2838 spin_lock(&atmel_sha.lock);
2839 list_del(&sha_dd->list);
2840 spin_unlock(&atmel_sha.lock);
2841 if (sha_dd->caps.has_dma)
2842 atmel_sha_dma_cleanup(sha_dd);
2843err_sha_dma:
2844iclk_unprepare:
2845 clk_unprepare(sha_dd->iclk);
2846res_err:
2847	tasklet_kill(&sha_dd->queue_task);
2848	tasklet_kill(&sha_dd->done_task);
2849sha_dd_err:
2850 dev_err(dev, "initialization failed.\n");
2851
2852 return err;
2853}
2854
2855static int atmel_sha_remove(struct platform_device *pdev)
2856{
2857	struct atmel_sha_dev *sha_dd;
2858
2859 sha_dd = platform_get_drvdata(pdev);
2860 if (!sha_dd)
2861 return -ENODEV;
2862 spin_lock(&atmel_sha.lock);
2863 list_del(&sha_dd->list);
2864 spin_unlock(&atmel_sha.lock);
2865
2866 atmel_sha_unregister_algs(sha_dd);
2867
2868	tasklet_kill(&sha_dd->queue_task);
2869 tasklet_kill(&sha_dd->done_task);
2870
2871 if (sha_dd->caps.has_dma)
2872 atmel_sha_dma_cleanup(sha_dd);
2873
2874 clk_unprepare(sha_dd->iclk);
2875
2876 return 0;
2877}
2878
2879static struct platform_driver atmel_sha_driver = {
2880 .probe = atmel_sha_probe,
2881	.remove		= atmel_sha_remove,
2882 .driver = {
2883 .name = "atmel_sha",
2884		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
2885 },
2886};
2887
2888module_platform_driver(atmel_sha_driver);
2889
2890MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
2891MODULE_LICENSE("GPL v2");
2892MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");