// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |     |------->| (operation) |
 *       .              |     |        | (load ctx2) |
 *       .              |     |        ---------------
 * ---------------      |     |
 * | JobDesc #3  |------|     |
 * | *(packet 3) |            |
 * ---------------            |
 *       .                    |
 *       .                    |
 * ---------------            |
 * | JobDesc #4  |------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
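/*
 * In this driver the per-request job descriptor is built in
 * ahash_edesc->hw_desc and submitted via caam_jr_enqueue(); the shared
 * descriptors (update, update_first, final, digest) live in struct
 * caam_hash_ctx and are reused across requests.
 */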
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
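/*
 * The extra HASH_MSG_LEN bytes hold the MDHA running message length,
 * which the hardware keeps next to the running digest so that split
 * (init/update/final) operations can resume correctly.
 */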
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
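/* Used together with is_xcbc_aes() to special-case the AES MAC algorithms. */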
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
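/*
 * The four shared descriptors above map 1:1 onto the hash state machine:
 * AS_INIT backs the first update, AS_UPDATE the intermediate updates,
 * AS_FINALIZE final/finup, and AS_INITFINAL the one-shot digest.
 */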
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
/* Digest the key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
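/*
 * This follows the usual HMAC convention: a key longer than the block
 * size is first hashed, and its digest is then used as the key.
 */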
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}
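/*
 * Note the asymmetry with axcbc_setkey() above: XCBC keeps a DMA-mapped
 * copy of the key in ctx->key (key_dir is DMA_BIDIRECTIONAL, since the
 * hardware also writes derived subkeys back through that mapping), while
 * CMAC only references the caller's key as immediate descriptor data.
 */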
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
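/*
 * The edesc is a single allocation: the job descriptor in hw_desc is
 * immediately followed by the sec4_sg[] link table, so the table can be
 * sized per request at alloc time and both are freed together.
 */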
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	req->base.complete(&req->base, ecode);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
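/*
 * With a single mapped segment and nothing to prepend, the S/G table is
 * bypassed entirely and SEQ IN PTR points straight at the source buffer,
 * saving one allocation and one DMA mapping per request.
 */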
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}
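	/*
	 * The hold-back above exists because XCBC/CMAC process the final
	 * message block with a derived subkey, so a block-aligned tail must
	 * reach the FINALIZE descriptor rather than be consumed by an
	 * intermediate UPDATE. The same logic repeats in
	 * ahash_update_no_ctx() and ahash_update_first().
	 */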
	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret != -EINPROGRESS)
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret == -EINPROGRESS)
		return ret;

 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret == -EINPROGRESS)
		return -EINPROGRESS;

 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (ret != -EINPROGRESS) {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (ret != -EINPROGRESS) {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret != -EINPROGRESS)
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
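/*
 * Once the hardware has produced a running context (update_first ran),
 * the request's methods are switched to the *_ctx variants so subsequent
 * updates resume from state->caam_ctx instead of starting from scratch.
 */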
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (ret != -EINPROGRESS) {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret != -EINPROGRESS)
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
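/* With nothing buffered after init, finup degenerates to a one-shot digest. */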
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
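/*
 * Illustrative sketch only (not driver code; req1/req2 are hypothetical):
 * export/import let a caller checkpoint an in-flight hash and resume it
 * on another request, e.g.
 *
 *	struct caam_export_state st;
 *
 *	crypto_ahash_export(req1, &st);   // snapshot buf, caam_ctx, methods
 *	crypto_ahash_import(req2, &st);   // resume on a different request
 *	err = crypto_ahash_final(req2);   // completes as if req1 continued
 */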
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
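/*
 * Each template can register as two algorithms: an unkeyed hash
 * (.name/.driver_name) and a keyed variant (.hmac_name/.hmac_driver_name).
 * xcbc(aes) and cmac(aes) are keyed-only, so they define just the hmac
 * entries and caam_algapi_hash_init() skips their unkeyed registration.
 */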
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
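	/*
	 * The OP_ALG_ALGSEL_SUBMASK lookup below extracts the low nibble of
	 * the ALGSEL field, giving runninglen[] indices 0 (MD5) through
	 * 5 (SHA512).
	 */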
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}