/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
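
/*
 * Illustrative sketch (added commentary, not a verbatim excerpt): with the
 * desc_constr.h helpers used throughout this file, a job descriptor of the
 * shape above is built roughly like this, where src_dma/dst_dma and the
 * lengths stand in for the per-request values:
 *
 *	sh_len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, src_len, options);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 */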

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
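
/*
 * Worked example (added commentary): assuming CAAM_CMD_SZ is one 4-byte
 * descriptor word, DESC_AHASH_FINAL_LEN is (4 + 5) * 4 = 36 bytes, so
 * DESC_HASH_MAX_USED_BYTES = 36 + 128 = 164 and each sh_desc_* buffer in
 * struct caam_hash_ctx below holds DESC_HASH_MAX_USED_LEN = 41 words.
 */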

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
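
/*
 * e.g. for SHA-256 the hardware context is the 32-byte running digest plus
 * the 8-byte running message length, i.e. ctx_len = 40; MAX_CTX_LEN covers
 * the largest case, SHA-512 (64 + 8 = 72 bytes).
 */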

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
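
/*
 * Note (added commentary): buf_0/buf_1 plus current_buf form a ping-pong
 * pair for sub-block-sized input: while one buffer may still be DMA-mapped
 * as part of an in-flight job, leftover bytes are staged in the other, and
 * current_buf is flipped when a job that consumes the old buffer is built
 * (see ahash_update_ctx() below).
 */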

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; either way,
 * unmap the previous buffer mapping first if one is still live.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}

	/* unmap with the original key length before overwriting *keylen */
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
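
	/*
	 * Worked example (added commentary): for HMAC-SHA1 the MDHA pad size
	 * is 20 bytes, so split_key_len = 20 * 2 = 40 and split_key_pad_len =
	 * ALIGN(40, 16) = 48; for HMAC-SHA256 it is 32 * 2 = 64, which is
	 * already 16-byte aligned.
	 */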

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link-table entry; index in entries, not bytes */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	/* initialize so ahash_unmap() never reads an uninitialized length */
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;
	/* no link table; keep ahash_unmap() from reading garbage */
	edesc->sec4_sg_bytes = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
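
/*
 * Note (added commentary): export/import simply memcpy() the per-session
 * ctx and the per-request state back to back into the caller's buffer,
 * so a partially hashed request can be suspended and resumed later.
 */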

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
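
/*
 * Usage sketch (added commentary; error handling and waiting for the async
 * completion are elided, and my_done_cb/my_ctx are placeholders): once
 * registered, these algorithms are driven through the generic crypto API,
 * e.g. for the keyed variant, where crypto_ahash_digest() returns
 * -EINPROGRESS and the callback fires when the CAAM job completes:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	crypto_ahash_digest(req);
 */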

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
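	/*
	 * Note (added commentary): SHA-224 and SHA-384 are truncated
	 * variants, so MDHA keeps the full 32-byte (SHA-256) and 64-byte
	 * (SHA-512) running state for them; hence the bare 32 and 64 above,
	 * each plus the 8-byte running message length.
	 */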
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");