Commit | Line | Data |
---|---|---|
618b5dc4 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
045e3678 YK |
2 | /* |
3 | * caam - Freescale FSL CAAM support for ahash functions of the crypto API | |
4 | * | |
5 | * Copyright 2011 Freescale Semiconductor, Inc. | |
ae1dd17d | 6 | * Copyright 2018-2019, 2023 NXP |
045e3678 YK |
7 | * |
8 | * Based on caamalg.c crypto API driver. | |
9 | * | |
10 | * relationship of digest job descriptor or first job descriptor after init to | |
11 | * shared descriptors: | |
12 | * | |
13 | * --------------- --------------- | |
14 | * | JobDesc #1 |-------------------->| ShareDesc | | |
15 | * | *(packet 1) | | (hashKey) | | |
16 | * --------------- | (operation) | | |
17 | * --------------- | |
18 | * | |
19 | * relationship of subsequent job descriptors to shared descriptors: | |
20 | * | |
21 | * --------------- --------------- | |
22 | * | JobDesc #2 |-------------------->| ShareDesc | | |
23 | * | *(packet 2) | |------------->| (hashKey) | | |
24 | * --------------- | |-------->| (operation) | | |
25 | * . | | | (load ctx2) | | |
26 | * . | | --------------- | |
27 | * --------------- | | | |
28 | * | JobDesc #3 |------| | | |
29 | * | *(packet 3) | | | |
30 | * --------------- | | |
31 | * . | | |
32 | * . | | |
33 | * --------------- | | |
34 | * | JobDesc #4 |------------ | |
35 | * | *(packet 4) | | |
36 | * --------------- | |
37 | * | |
38 | * The SharedDesc never changes for a connection unless rekeyed, but | |
39 | * each packet will likely be in a different place. So all we need | |
40 | * to know to process the packet is where the input is, where the | |
41 | * output goes, and what context we want to process with. Context is | |
42 | * in the SharedDesc, packet references in the JobDesc. | |
43 | * | |
44 | * So, a job desc looks like: | |
45 | * | |
46 | * --------------------- | |
47 | * | Header | | |
48 | * | ShareDesc Pointer | | |
49 | * | SEQ_OUT_PTR | | |
50 | * | (output buffer) | | |
51 | * | (output length) | | |
52 | * | SEQ_IN_PTR | | |
53 | * | (input buffer) | | |
54 | * | (input length) | | |
55 | * --------------------- | |
56 | */ | |
57 | ||
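A minimal sketch of what the job-descriptor layout above looks like when built in code, using the desc_constr.h helpers this file relies on later (init_job_desc_shared(), append_seq_out_ptr(), append_seq_in_ptr()). The wrapper function and its parameters are illustrative only, not part of the driver:

```c
/*
 * Illustrative only: build a job descriptor matching the layout above.
 * The shared descriptor (hashKey + operation) is assumed to be already
 * constructed and DMA-mapped; src_dma/dst_dma are DMA addresses of the
 * packet's input and output buffers.
 */
static void example_build_hash_jobdesc(u32 *desc, u32 *sh_desc,
				       dma_addr_t sh_desc_dma,
				       dma_addr_t src_dma, u32 src_len,
				       dma_addr_t dst_dma, u32 dst_len)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}
```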
58 | #include "compat.h" | |
59 | ||
60 | #include "regs.h" | |
61 | #include "intern.h" | |
62 | #include "desc_constr.h" | |
63 | #include "jr.h" | |
64 | #include "error.h" | |
65 | #include "sg_sw_sec4.h" | |
66 | #include "key_gen.h" | |
0efa7579 | 67 | #include "caamhash_desc.h" |
4ac1a2d8 | 68 | #include <crypto/internal/engine.h> |
623814c0 | 69 | #include <crypto/internal/hash.h> |
199354d7 | 70 | #include <linux/dma-mapping.h> |
623814c0 | 71 | #include <linux/err.h> |
199354d7 | 72 | #include <linux/kernel.h> |
623814c0 HX |
73 | #include <linux/slab.h> |
74 | #include <linux/string.h> | |
045e3678 YK |
75 | |
76 | #define CAAM_CRA_PRIORITY 3000 | |
77 | ||
78 | /* max hash key is max split key size */ | |
79 | #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) | |
80 | ||
81 | #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE | |
82 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE | |
83 | ||
045e3678 YK |
84 | #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ |
85 | CAAM_MAX_HASH_KEY_SIZE) | |
86 | #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) | |
87 | ||
88 | /* caam context sizes for hashes: running digest + 8 */ | |
89 | #define HASH_MSG_LEN 8 | |
90 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) | |
91 | ||
cfc6f11b RG |
92 | static struct list_head hash_list; |
93 | ||
045e3678 YK |
94 | /* ahash per-session context */ |
95 | struct caam_hash_ctx { | |
e11793f5 RK |
96 | u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
97 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | |
98 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | |
99 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | |
12b8567f | 100 | u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; |
e11793f5 | 101 | dma_addr_t sh_desc_update_dma ____cacheline_aligned; |
045e3678 YK |
102 | dma_addr_t sh_desc_update_first_dma; |
103 | dma_addr_t sh_desc_fin_dma; | |
104 | dma_addr_t sh_desc_digest_dma; | |
7e0880b9 | 105 | enum dma_data_direction dir; |
e9b4913a | 106 | enum dma_data_direction key_dir; |
e11793f5 | 107 | struct device *jrdev; |
045e3678 | 108 | int ctx_len; |
db57656b | 109 | struct alginfo adata; |
045e3678 YK |
110 | }; |
111 | ||
112 | /* ahash state */ | |
113 | struct caam_hash_state { | |
114 | dma_addr_t buf_dma; | |
115 | dma_addr_t ctx_dma; | |
65055e21 | 116 | int ctx_dma_len; |
46b49abc AB |
117 | u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; |
118 | int buflen; | |
119 | int next_buflen; | |
e7472422 | 120 | u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; |
21b014f0 | 121 | int (*update)(struct ahash_request *req) ____cacheline_aligned; |
045e3678 YK |
122 | int (*final)(struct ahash_request *req); |
123 | int (*finup)(struct ahash_request *req); | |
21b014f0 IP |
124 | struct ahash_edesc *edesc; |
125 | void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err, | |
126 | void *context); | |
045e3678 YK |
127 | }; |
128 | ||
5ec90831 RK |
129 | struct caam_export_state { |
130 | u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; | |
131 | u8 caam_ctx[MAX_CTX_LEN]; | |
132 | int buflen; | |
133 | int (*update)(struct ahash_request *req); | |
134 | int (*final)(struct ahash_request *req); | |
135 | int (*finup)(struct ahash_request *req); | |
136 | }; | |
137 | ||
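struct caam_export_state mirrors the software-visible part of struct caam_hash_state, so exporting a partial hash boils down to copying the pending buffer, the running CAAM context and the current update/final/finup handlers. A sketch of what that might look like (illustrative assumption; the driver's real export/import helpers are not shown in this excerpt):

```c
/* Illustrative sketch: copy the software hash state into the export blob. */
static int example_ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;

	memcpy(export->buf, state->buf, state->buflen);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = state->buflen;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
```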
87870cfb | 138 | static inline bool is_cmac_aes(u32 algtype) |
12b8567f IP |
139 | { |
140 | return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == | |
87870cfb | 141 | (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); |
12b8567f | 142 | } |
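The is_xcbc_aes() helper used by the update paths further down is defined in a shared CAAM header rather than in this file; it presumably performs the same selector/AAI mask test with the XCBC value. Shown here only for comparison (assumption, not the driver's code):

```c
/* Assumed shape of the companion XCBC check (defined elsewhere). */
static inline bool example_is_xcbc_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
}
```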
045e3678 YK |
143 | /* Common job descriptor seq in/out ptr routines */ |
144 | ||
145 | /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ | |
ce572085 HG |
146 | static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, |
147 | struct caam_hash_state *state, | |
148 | int ctx_len) | |
045e3678 | 149 | { |
65055e21 | 150 | state->ctx_dma_len = ctx_len; |
045e3678 YK |
151 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, |
152 | ctx_len, DMA_FROM_DEVICE); | |
ce572085 HG |
153 | if (dma_mapping_error(jrdev, state->ctx_dma)) { |
154 | dev_err(jrdev, "unable to map ctx\n"); | |
87ec02e7 | 155 | state->ctx_dma = 0; |
ce572085 HG |
156 | return -ENOMEM; |
157 | } | |
158 | ||
045e3678 | 159 | append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); |
ce572085 HG |
160 | |
161 | return 0; | |
045e3678 YK |
162 | } |
163 | ||
944c3d4d HG |
164 | /* Map current buffer in state (if length > 0) and put it in link table */ |
165 | static inline int buf_map_to_sec4_sg(struct device *jrdev, | |
166 | struct sec4_sg_entry *sec4_sg, | |
167 | struct caam_hash_state *state) | |
045e3678 | 168 | { |
46b49abc | 169 | int buflen = state->buflen; |
045e3678 | 170 | |
944c3d4d HG |
171 | if (!buflen) |
172 | return 0; | |
045e3678 | 173 | |
46b49abc | 174 | state->buf_dma = dma_map_single(jrdev, state->buf, buflen, |
944c3d4d HG |
175 | DMA_TO_DEVICE); |
176 | if (dma_mapping_error(jrdev, state->buf_dma)) { | |
177 | dev_err(jrdev, "unable to map buf\n"); | |
178 | state->buf_dma = 0; | |
179 | return -ENOMEM; | |
180 | } | |
045e3678 | 181 | |
944c3d4d HG |
182 | dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); |
183 | ||
184 | return 0; | |
045e3678 YK |
185 | } |
186 | ||
187 | /* Map state->caam_ctx, and add it to link table */ | |
dfcd8393 | 188 | static inline int ctx_map_to_sec4_sg(struct device *jrdev, |
ce572085 HG |
189 | struct caam_hash_state *state, int ctx_len, |
190 | struct sec4_sg_entry *sec4_sg, u32 flag) | |
045e3678 | 191 | { |
65055e21 | 192 | state->ctx_dma_len = ctx_len; |
045e3678 | 193 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); |
ce572085 HG |
194 | if (dma_mapping_error(jrdev, state->ctx_dma)) { |
195 | dev_err(jrdev, "unable to map ctx\n"); | |
87ec02e7 | 196 | state->ctx_dma = 0; |
ce572085 HG |
197 | return -ENOMEM; |
198 | } | |
199 | ||
045e3678 | 200 | dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); |
ce572085 HG |
201 | |
202 | return 0; | |
045e3678 YK |
203 | } |
204 | ||
045e3678 YK |
205 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) |
206 | { | |
4cb4f7c1 | 207 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
045e3678 YK |
208 | int digestsize = crypto_ahash_digestsize(ahash); |
209 | struct device *jrdev = ctx->jrdev; | |
7e0880b9 | 210 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); |
045e3678 YK |
211 | u32 *desc; |
212 | ||
7e0880b9 HG |
213 | ctx->adata.key_virt = ctx->key; |
214 | ||
045e3678 YK |
215 | /* ahash_update shared descriptor */ |
216 | desc = ctx->sh_desc_update; | |
0efa7579 HG |
217 | cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, |
218 | ctx->ctx_len, true, ctrlpriv->era); | |
bbf22344 | 219 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
7e0880b9 | 220 | desc_bytes(desc), ctx->dir); |
6e005503 SH |
221 | |
222 | print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", | |
223 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
224 | 1); | |
045e3678 YK |
225 | |
226 | /* ahash_update_first shared descriptor */ | |
227 | desc = ctx->sh_desc_update_first; | |
0efa7579 HG |
228 | cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, |
229 | ctx->ctx_len, false, ctrlpriv->era); | |
bbf22344 | 230 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
7e0880b9 | 231 | desc_bytes(desc), ctx->dir); |
6e005503 SH |
232 | print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) |
233 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
234 | desc_bytes(desc), 1); | |
045e3678 YK |
235 | |
236 | /* ahash_final shared descriptor */ | |
237 | desc = ctx->sh_desc_fin; | |
0efa7579 HG |
238 | cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, |
239 | ctx->ctx_len, true, ctrlpriv->era); | |
bbf22344 | 240 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
7e0880b9 | 241 | desc_bytes(desc), ctx->dir); |
6e005503 SH |
242 | |
243 | print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", | |
244 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
245 | desc_bytes(desc), 1); | |
045e3678 | 246 | |
045e3678 YK |
247 | /* ahash_digest shared descriptor */ |
248 | desc = ctx->sh_desc_digest; | |
0efa7579 HG |
249 | cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, |
250 | ctx->ctx_len, false, ctrlpriv->era); | |
bbf22344 | 251 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
7e0880b9 | 252 | desc_bytes(desc), ctx->dir); |
6e005503 SH |
253 | |
254 | print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", | |
255 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
256 | desc_bytes(desc), 1); | |
045e3678 YK |
257 | |
258 | return 0; | |
259 | } | |
260 | ||
12b8567f IP |
261 | static int axcbc_set_sh_desc(struct crypto_ahash *ahash) |
262 | { | |
4cb4f7c1 | 263 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
12b8567f IP |
264 | int digestsize = crypto_ahash_digestsize(ahash); |
265 | struct device *jrdev = ctx->jrdev; | |
266 | u32 *desc; | |
267 | ||
12b8567f IP |
268 | /* shared descriptor for ahash_update */ |
269 | desc = ctx->sh_desc_update; | |
87870cfb | 270 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, |
a2fb864c | 271 | ctx->ctx_len, ctx->ctx_len); |
12b8567f IP |
272 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
273 | desc_bytes(desc), ctx->dir); | |
274 | print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", | |
275 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
276 | 1); | |
277 | ||
278 | /* shared descriptor for ahash_{final,finup} */ | |
279 | desc = ctx->sh_desc_fin; | |
87870cfb | 280 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, |
a2fb864c | 281 | digestsize, ctx->ctx_len); |
12b8567f IP |
282 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
283 | desc_bytes(desc), ctx->dir); | |
284 | print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", | |
285 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
286 | 1); | |
287 | ||
288 | /* key is immediate data for INIT and INITFINAL states */ | |
289 | ctx->adata.key_virt = ctx->key; | |
290 | ||
291 | /* shared descriptor for first invocation of ahash_update */ | |
292 | desc = ctx->sh_desc_update_first; | |
87870cfb | 293 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, |
a2fb864c | 294 | ctx->ctx_len); |
12b8567f IP |
295 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
296 | desc_bytes(desc), ctx->dir); | |
6e005503 SH |
297 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) |
298 | " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
299 | desc_bytes(desc), 1); | |
12b8567f IP |
300 | |
301 | /* shared descriptor for ahash_digest */ | |
302 | desc = ctx->sh_desc_digest; | |
87870cfb | 303 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, |
a2fb864c | 304 | digestsize, ctx->ctx_len); |
12b8567f IP |
305 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
306 | desc_bytes(desc), ctx->dir); | |
307 | print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", | |
308 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
309 | 1); | |
87870cfb IP |
310 | return 0; |
311 | } | |
312 | ||
313 | static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |
314 | { | |
4cb4f7c1 | 315 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
87870cfb IP |
316 | int digestsize = crypto_ahash_digestsize(ahash); |
317 | struct device *jrdev = ctx->jrdev; | |
318 | u32 *desc; | |
319 | ||
320 | /* shared descriptor for ahash_update */ | |
321 | desc = ctx->sh_desc_update; | |
322 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, | |
a2fb864c | 323 | ctx->ctx_len, ctx->ctx_len); |
87870cfb IP |
324 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
325 | desc_bytes(desc), ctx->dir); | |
326 | print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", | |
327 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
328 | desc_bytes(desc), 1); | |
329 | ||
330 | /* shared descriptor for ahash_{final,finup} */ | |
331 | desc = ctx->sh_desc_fin; | |
332 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, | |
a2fb864c | 333 | digestsize, ctx->ctx_len); |
87870cfb IP |
334 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
335 | desc_bytes(desc), ctx->dir); | |
336 | print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", | |
337 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
338 | desc_bytes(desc), 1); | |
339 | ||
340 | /* shared descriptor for first invocation of ahash_update */ | |
341 | desc = ctx->sh_desc_update_first; | |
342 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, | |
a2fb864c | 343 | ctx->ctx_len); |
87870cfb IP |
344 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
345 | desc_bytes(desc), ctx->dir); | |
6e005503 SH |
346 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) |
347 | " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
87870cfb IP |
348 | desc_bytes(desc), 1); |
349 | ||
350 | /* shared descriptor for ahash_digest */ | |
351 | desc = ctx->sh_desc_digest; | |
352 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, | |
a2fb864c | 353 | digestsize, ctx->ctx_len); |
87870cfb IP |
354 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
355 | desc_bytes(desc), ctx->dir); | |
356 | print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", | |
357 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
358 | desc_bytes(desc), 1); | |
12b8567f IP |
359 | |
360 | return 0; | |
361 | } | |
362 | ||
045e3678 | 363 | /* Digest the hash key if it is too large */ |
30724445 HG |
364 | static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, |
365 | u32 digestsize) | |
045e3678 YK |
366 | { |
367 | struct device *jrdev = ctx->jrdev; | |
368 | u32 *desc; | |
369 | struct split_key_result result; | |
30724445 | 370 | dma_addr_t key_dma; |
9e6df0fd | 371 | int ret; |
045e3678 | 372 | |
199354d7 | 373 | desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL); |
3de0152b | 374 | if (!desc) |
2af8f4a2 | 375 | return -ENOMEM; |
045e3678 YK |
376 | |
377 | init_job_desc(desc, 0); | |
378 | ||
30724445 HG |
379 | key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL); |
380 | if (dma_mapping_error(jrdev, key_dma)) { | |
381 | dev_err(jrdev, "unable to map key memory\n"); | |
045e3678 YK |
382 | kfree(desc); |
383 | return -ENOMEM; | |
384 | } | |
385 | ||
386 | /* Job descriptor to perform unkeyed hash on key_in */ | |
db57656b | 387 | append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | |
045e3678 | 388 | OP_ALG_AS_INITFINAL); |
30724445 | 389 | append_seq_in_ptr(desc, key_dma, *keylen, 0); |
045e3678 YK |
390 | append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | |
391 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); | |
30724445 | 392 | append_seq_out_ptr(desc, key_dma, digestsize, 0); |
045e3678 YK |
393 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | |
394 | LDST_SRCDST_BYTE_CONTEXT); | |
395 | ||
6e005503 SH |
396 | print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", |
397 | DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); | |
398 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", | |
399 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
400 | 1); | |
045e3678 YK |
401 | |
402 | result.err = 0; | |
403 | init_completion(&result.completion); | |
404 | ||
405 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | |
4d370a10 | 406 | if (ret == -EINPROGRESS) { |
045e3678 | 407 | /* in progress */ |
7459e1d2 | 408 | wait_for_completion(&result.completion); |
045e3678 | 409 | ret = result.err; |
6e005503 SH |
410 | |
411 | print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", | |
412 | DUMP_PREFIX_ADDRESS, 16, 4, key, | |
413 | digestsize, 1); | |
045e3678 | 414 | } |
30724445 | 415 | dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); |
045e3678 | 416 | |
e11aa9f1 HG |
417 | *keylen = digestsize; |
418 | ||
045e3678 YK |
419 | kfree(desc); |
420 | ||
421 | return ret; | |
422 | } | |
423 | ||
424 | static int ahash_setkey(struct crypto_ahash *ahash, | |
425 | const u8 *key, unsigned int keylen) | |
426 | { | |
4cb4f7c1 | 427 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
6e005503 | 428 | struct device *jrdev = ctx->jrdev; |
045e3678 YK |
429 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
430 | int digestsize = crypto_ahash_digestsize(ahash); | |
7e0880b9 | 431 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); |
9e6df0fd | 432 | int ret; |
045e3678 YK |
433 | u8 *hashed_key = NULL; |
434 | ||
6e005503 | 435 | dev_dbg(jrdev, "keylen %d\n", keylen); |
045e3678 YK |
436 | |
437 | if (keylen > blocksize) { | |
199354d7 HX |
438 | unsigned int aligned_len = |
439 | ALIGN(keylen, dma_get_cache_alignment()); | |
440 | ||
441 | if (aligned_len < keylen) | |
442 | return -EOVERFLOW; | |
443 | ||
444 | hashed_key = kmemdup(key, keylen, GFP_KERNEL); | |
045e3678 YK |
445 | if (!hashed_key) |
446 | return -ENOMEM; | |
30724445 | 447 | ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); |
045e3678 | 448 | if (ret) |
d6e7a7d0 | 449 | goto bad_free_key; |
045e3678 YK |
450 | key = hashed_key; |
451 | } | |
452 | ||
7e0880b9 HG |
453 | /* |
454 | * If DKP is supported, use it in the shared descriptor to generate | |
455 | * the split key. | |
456 | */ | |
457 | if (ctrlpriv->era >= 6) { | |
458 | ctx->adata.key_inline = true; | |
459 | ctx->adata.keylen = keylen; | |
460 | ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & | |
461 | OP_ALG_ALGSEL_MASK); | |
045e3678 | 462 | |
7e0880b9 HG |
463 | if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) |
464 | goto bad_free_key; | |
465 | ||
466 | memcpy(ctx->key, key, keylen); | |
e9b4913a HG |
467 | |
468 | /* | |
469 | * In case |user key| > |derived key|, using DKP<imm,imm> | |
470 | * would result in invalid opcodes (last bytes of user key) in | |
471 | * the resulting descriptor. Use DKP<ptr,imm> instead => both | |
472 | * virtual and dma key addresses are needed. | |
473 | */ | |
474 | if (keylen > ctx->adata.keylen_pad) | |
475 | dma_sync_single_for_device(ctx->jrdev, | |
476 | ctx->adata.key_dma, | |
477 | ctx->adata.keylen_pad, | |
478 | DMA_TO_DEVICE); | |
7e0880b9 HG |
479 | } else { |
480 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, | |
481 | keylen, CAAM_MAX_HASH_KEY_SIZE); | |
482 | if (ret) | |
483 | goto bad_free_key; | |
484 | } | |
045e3678 | 485 | |
045e3678 | 486 | kfree(hashed_key); |
cfb725f6 | 487 | return ahash_set_sh_desc(ahash); |
d6e7a7d0 | 488 | bad_free_key: |
045e3678 | 489 | kfree(hashed_key); |
045e3678 YK |
490 | return -EINVAL; |
491 | } | |
492 | ||
12b8567f IP |
493 | static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, |
494 | unsigned int keylen) | |
495 | { | |
4cb4f7c1 | 496 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
12b8567f IP |
497 | struct device *jrdev = ctx->jrdev; |
498 | ||
674f368a | 499 | if (keylen != AES_KEYSIZE_128) |
836d8f43 | 500 | return -EINVAL; |
836d8f43 | 501 | |
12b8567f | 502 | memcpy(ctx->key, key, keylen); |
a2fb864c HG |
503 | dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, |
504 | DMA_TO_DEVICE); | |
12b8567f IP |
505 | ctx->adata.keylen = keylen; |
506 | ||
507 | print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", | |
508 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); | |
509 | ||
510 | return axcbc_set_sh_desc(ahash); | |
511 | } | |
87870cfb IP |
512 | |
513 | static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, | |
514 | unsigned int keylen) | |
515 | { | |
4cb4f7c1 | 516 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
836d8f43 IP |
517 | int err; |
518 | ||
519 | err = aes_check_keylen(keylen); | |
674f368a | 520 | if (err) |
836d8f43 | 521 | return err; |
87870cfb IP |
522 | |
523 | /* key is immediate data for all cmac shared descriptors */ | |
524 | ctx->adata.key_virt = key; | |
525 | ctx->adata.keylen = keylen; | |
526 | ||
527 | print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", | |
528 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | |
529 | ||
530 | return acmac_set_sh_desc(ahash); | |
531 | } | |
532 | ||
045e3678 YK |
533 | /* |
534 | * ahash_edesc - s/w-extended ahash descriptor | |
045e3678 YK |
535 | * @sec4_sg_dma: physical mapped address of h/w link table |
536 | * @src_nents: number of segments in input scatterlist | |
537 | * @sec4_sg_bytes: length of dma mapped sec4_sg space | |
21b014f0 | 538 | * @bklog: stored to determine if the request needs backlog |
045e3678 | 539 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
343e44b1 | 540 | * @sec4_sg: h/w link table |
045e3678 YK |
541 | */ |
542 | struct ahash_edesc { | |
045e3678 YK |
543 | dma_addr_t sec4_sg_dma; |
544 | int src_nents; | |
545 | int sec4_sg_bytes; | |
21b014f0 | 546 | bool bklog; |
1a3daadc | 547 | u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned; |
5a8a0765 | 548 | struct sec4_sg_entry sec4_sg[]; |
045e3678 YK |
549 | }; |
550 | ||
551 | static inline void ahash_unmap(struct device *dev, | |
552 | struct ahash_edesc *edesc, | |
553 | struct ahash_request *req, int dst_len) | |
554 | { | |
4cb4f7c1 | 555 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
944c3d4d | 556 | |
045e3678 | 557 | if (edesc->src_nents) |
13fb8fd7 | 558 | dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); |
045e3678 YK |
559 | |
560 | if (edesc->sec4_sg_bytes) | |
561 | dma_unmap_single(dev, edesc->sec4_sg_dma, | |
562 | edesc->sec4_sg_bytes, DMA_TO_DEVICE); | |
944c3d4d HG |
563 | |
564 | if (state->buf_dma) { | |
46b49abc | 565 | dma_unmap_single(dev, state->buf_dma, state->buflen, |
944c3d4d HG |
566 | DMA_TO_DEVICE); |
567 | state->buf_dma = 0; | |
568 | } | |
045e3678 YK |
569 | } |
570 | ||
571 | static inline void ahash_unmap_ctx(struct device *dev, | |
572 | struct ahash_edesc *edesc, | |
573 | struct ahash_request *req, int dst_len, u32 flag) | |
574 | { | |
4cb4f7c1 | 575 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
045e3678 | 576 | |
87ec02e7 | 577 | if (state->ctx_dma) { |
65055e21 | 578 | dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); |
87ec02e7 HG |
579 | state->ctx_dma = 0; |
580 | } | |
045e3678 YK |
581 | ahash_unmap(dev, edesc, req, dst_len); |
582 | } | |
583 | ||
c3f7394e IP |
584 | static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, |
585 | void *context, enum dma_data_direction dir) | |
045e3678 YK |
586 | { |
587 | struct ahash_request *req = context; | |
21b014f0 | 588 | struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); |
045e3678 YK |
589 | struct ahash_edesc *edesc; |
590 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
591 | int digestsize = crypto_ahash_digestsize(ahash); | |
4cb4f7c1 HX |
592 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
593 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); | |
1984aaee | 594 | int ecode = 0; |
63db32e6 | 595 | bool has_bklog; |
045e3678 | 596 | |
6e005503 | 597 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
045e3678 | 598 | |
21b014f0 | 599 | edesc = state->edesc; |
63db32e6 | 600 | has_bklog = edesc->bklog; |
21b014f0 | 601 | |
fa9659cd | 602 | if (err) |
1984aaee | 603 | ecode = caam_jr_strstatus(jrdev, err); |
045e3678 | 604 | |
c3f7394e | 605 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); |
c19650d6 | 606 | memcpy(req->result, state->caam_ctx, digestsize); |
045e3678 YK |
607 | kfree(edesc); |
608 | ||
6e005503 SH |
609 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
610 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | |
611 | ctx->ctx_len, 1); | |
045e3678 | 612 | |
21b014f0 IP |
613 | /* |
614 | * If no backlog flag, the completion of the request is done | |
615 | * by CAAM, not crypto engine. | |
616 | */ | |
63db32e6 | 617 | if (!has_bklog) |
4bc713a4 | 618 | ahash_request_complete(req, ecode); |
21b014f0 IP |
619 | else |
620 | crypto_finalize_hash_request(jrp->engine, req, ecode); | |
045e3678 YK |
621 | } |
622 | ||
c3f7394e IP |
623 | static void ahash_done(struct device *jrdev, u32 *desc, u32 err, |
624 | void *context) | |
045e3678 | 625 | { |
c3f7394e | 626 | ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE); |
045e3678 YK |
627 | } |
628 | ||
629 | static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |
630 | void *context) | |
631 | { | |
c3f7394e | 632 | ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL); |
045e3678 YK |
633 | } |
634 | ||
c3f7394e IP |
635 | static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, |
636 | void *context, enum dma_data_direction dir) | |
045e3678 YK |
637 | { |
638 | struct ahash_request *req = context; | |
21b014f0 | 639 | struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); |
045e3678 YK |
640 | struct ahash_edesc *edesc; |
641 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
642 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
643 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 644 | int digestsize = crypto_ahash_digestsize(ahash); |
1984aaee | 645 | int ecode = 0; |
63db32e6 | 646 | bool has_bklog; |
045e3678 | 647 | |
6e005503 | 648 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
045e3678 | 649 | |
21b014f0 | 650 | edesc = state->edesc; |
63db32e6 | 651 | has_bklog = edesc->bklog; |
fa9659cd | 652 | if (err) |
1984aaee | 653 | ecode = caam_jr_strstatus(jrdev, err); |
045e3678 | 654 | |
c3f7394e | 655 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); |
045e3678 YK |
656 | kfree(edesc); |
657 | ||
46b49abc AB |
658 | scatterwalk_map_and_copy(state->buf, req->src, |
659 | req->nbytes - state->next_buflen, | |
660 | state->next_buflen, 0); | |
661 | state->buflen = state->next_buflen; | |
662 | ||
663 | print_hex_dump_debug("buf@" __stringify(__LINE__)": ", | |
664 | DUMP_PREFIX_ADDRESS, 16, 4, state->buf, | |
665 | state->buflen, 1); | |
666 | ||
6e005503 SH |
667 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
668 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | |
669 | ctx->ctx_len, 1); | |
045e3678 | 670 | if (req->result) |
6e005503 SH |
671 | print_hex_dump_debug("result@"__stringify(__LINE__)": ", |
672 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | |
673 | digestsize, 1); | |
045e3678 | 674 | |
21b014f0 IP |
675 | /* |
676 | * If no backlog flag, the completion of the request is done | |
677 | * by CAAM, not crypto engine. | |
678 | */ | |
63db32e6 | 679 | if (!has_bklog) |
4bc713a4 | 680 | ahash_request_complete(req, ecode); |
21b014f0 IP |
681 | else |
682 | crypto_finalize_hash_request(jrp->engine, req, ecode); | |
683 | ||
045e3678 YK |
684 | } |
685 | ||
c3f7394e IP |
686 | static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, |
687 | void *context) | |
688 | { | |
689 | ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL); | |
690 | } | |
691 | ||
692 | static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |
693 | void *context) | |
694 | { | |
695 | ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE); | |
696 | } | |
697 | ||
5588d039 RK |
698 | /* |
699 | * Allocate an enhanced descriptor, which contains the hardware descriptor | |
700 | * and space for hardware scatter table containing sg_num entries. | |
701 | */ | |
2ba1e798 | 702 | static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, |
30a43b44 | 703 | int sg_num, u32 *sh_desc, |
2ba1e798 | 704 | dma_addr_t sh_desc_dma) |
5588d039 | 705 | { |
4cb4f7c1 | 706 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
2ba1e798 IP |
707 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
708 | GFP_KERNEL : GFP_ATOMIC; | |
5588d039 | 709 | struct ahash_edesc *edesc; |
5588d039 | 710 | |
5124bc96 | 711 | sg_num = pad_sg_nents(sg_num); |
6df04505 | 712 | edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); |
3de0152b | 713 | if (!edesc) |
5588d039 | 714 | return NULL; |
5588d039 | 715 | |
21b014f0 IP |
716 | state->edesc = edesc; |
717 | ||
30a43b44 RK |
718 | init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), |
719 | HDR_SHARE_DEFER | HDR_REVERSE); | |
720 | ||
5588d039 RK |
721 | return edesc; |
722 | } | |
723 | ||
65cf164a RK |
724 | static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, |
725 | struct ahash_edesc *edesc, | |
726 | struct ahash_request *req, int nents, | |
727 | unsigned int first_sg, | |
728 | unsigned int first_bytes, size_t to_hash) | |
729 | { | |
730 | dma_addr_t src_dma; | |
731 | u32 options; | |
732 | ||
733 | if (nents > 1 || first_sg) { | |
734 | struct sec4_sg_entry *sg = edesc->sec4_sg; | |
a5e5c133 HG |
735 | unsigned int sgsize = sizeof(*sg) * |
736 | pad_sg_nents(first_sg + nents); | |
65cf164a | 737 | |
059d73ee | 738 | sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); |
65cf164a RK |
739 | |
740 | src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); | |
741 | if (dma_mapping_error(ctx->jrdev, src_dma)) { | |
742 | dev_err(ctx->jrdev, "unable to map S/G table\n"); | |
743 | return -ENOMEM; | |
744 | } | |
745 | ||
746 | edesc->sec4_sg_bytes = sgsize; | |
747 | edesc->sec4_sg_dma = src_dma; | |
748 | options = LDST_SGF; | |
749 | } else { | |
750 | src_dma = sg_dma_address(req->src); | |
751 | options = 0; | |
752 | } | |
753 | ||
754 | append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, | |
755 | options); | |
756 | ||
757 | return 0; | |
758 | } | |
759 | ||
21b014f0 IP |
760 | static int ahash_do_one_req(struct crypto_engine *engine, void *areq) |
761 | { | |
762 | struct ahash_request *req = ahash_request_cast(areq); | |
4cb4f7c1 HX |
763 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req)); |
764 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
21b014f0 IP |
765 | struct device *jrdev = ctx->jrdev; |
766 | u32 *desc = state->edesc->hw_desc; | |
767 | int ret; | |
768 | ||
769 | state->edesc->bklog = true; | |
770 | ||
771 | ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req); | |
772 | ||
087e1d71 GJ |
773 | if (ret == -ENOSPC && engine->retry_support) |
774 | return ret; | |
775 | ||
21b014f0 IP |
776 | if (ret != -EINPROGRESS) { |
777 | ahash_unmap(jrdev, state->edesc, req, 0); | |
778 | kfree(state->edesc); | |
779 | } else { | |
780 | ret = 0; | |
781 | } | |
782 | ||
783 | return ret; | |
784 | } | |
785 | ||
786 | static int ahash_enqueue_req(struct device *jrdev, | |
787 | void (*cbk)(struct device *jrdev, u32 *desc, | |
788 | u32 err, void *context), | |
789 | struct ahash_request *req, | |
790 | int dst_len, enum dma_data_direction dir) | |
791 | { | |
792 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); | |
4cb4f7c1 | 793 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
21b014f0 IP |
794 | struct ahash_edesc *edesc = state->edesc; |
795 | u32 *desc = edesc->hw_desc; | |
796 | int ret; | |
797 | ||
798 | state->ahash_op_done = cbk; | |
799 | ||
800 | /* | |
801 | * Only backlog requests are sent to the crypto engine, since the others |
802 | * can be handled by CAAM, if free, especially since JR has up to 1024 | |
803 | * entries (more than the 10 entries from crypto-engine). | |
804 | */ | |
805 | if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) | |
806 | ret = crypto_transfer_hash_request_to_engine(jrpriv->engine, | |
807 | req); | |
808 | else | |
809 | ret = caam_jr_enqueue(jrdev, desc, cbk, req); | |
810 | ||
811 | if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { | |
812 | ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir); | |
813 | kfree(edesc); | |
814 | } | |
815 | ||
816 | return ret; | |
817 | } | |
818 | ||
045e3678 YK |
819 | /* submit update job descriptor */ |
820 | static int ahash_update_ctx(struct ahash_request *req) | |
821 | { | |
822 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
823 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
824 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 825 | struct device *jrdev = ctx->jrdev; |
46b49abc AB |
826 | u8 *buf = state->buf; |
827 | int *buflen = &state->buflen; | |
828 | int *next_buflen = &state->next_buflen; | |
12b8567f | 829 | int blocksize = crypto_ahash_blocksize(ahash); |
045e3678 | 830 | int in_len = *buflen + req->nbytes, to_hash; |
30a43b44 | 831 | u32 *desc; |
bc13c69e | 832 | int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; |
045e3678 YK |
833 | struct ahash_edesc *edesc; |
834 | int ret = 0; | |
045e3678 | 835 | |
12b8567f | 836 | *next_buflen = in_len & (blocksize - 1); |
045e3678 YK |
837 | to_hash = in_len - *next_buflen; |
838 | ||
12b8567f | 839 | /* |
87870cfb | 840 | * For XCBC and CMAC, if to_hash is a multiple of the block size, |
12b8567f IP |
841 | * keep last block in internal buffer |
842 | */ | |
87870cfb IP |
843 | if ((is_xcbc_aes(ctx->adata.algtype) || |
844 | is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && | |
845 | (*next_buflen == 0)) { | |
12b8567f IP |
846 | *next_buflen = blocksize; |
847 | to_hash -= blocksize; | |
848 | } | |
849 | ||
045e3678 | 850 | if (to_hash) { |
a5e5c133 | 851 | int pad_nents; |
059d73ee | 852 | int src_len = req->nbytes - *next_buflen; |
a5e5c133 | 853 | |
059d73ee | 854 | src_nents = sg_nents_for_len(req->src, src_len); |
f9970c28 LC |
855 | if (src_nents < 0) { |
856 | dev_err(jrdev, "Invalid number of src SG.\n"); | |
857 | return src_nents; | |
858 | } | |
bc13c69e RK |
859 | |
860 | if (src_nents) { | |
861 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | |
862 | DMA_TO_DEVICE); | |
863 | if (!mapped_nents) { | |
864 | dev_err(jrdev, "unable to DMA map source\n"); | |
865 | return -ENOMEM; | |
866 | } | |
867 | } else { | |
868 | mapped_nents = 0; | |
869 | } | |
870 | ||
045e3678 | 871 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); |
a5e5c133 HG |
872 | pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); |
873 | sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); | |
045e3678 YK |
874 | |
875 | /* | |
876 | * allocate space for base edesc and hw desc commands, | |
877 | * link tables | |
878 | */ | |
2ba1e798 IP |
879 | edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, |
880 | ctx->sh_desc_update_dma); | |
045e3678 | 881 | if (!edesc) { |
bc13c69e | 882 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
045e3678 YK |
883 | return -ENOMEM; |
884 | } | |
885 | ||
886 | edesc->src_nents = src_nents; | |
887 | edesc->sec4_sg_bytes = sec4_sg_bytes; | |
045e3678 | 888 | |
dfcd8393 | 889 | ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
ce572085 HG |
890 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
891 | if (ret) | |
58b0e5d0 | 892 | goto unmap_ctx; |
045e3678 | 893 | |
944c3d4d HG |
894 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); |
895 | if (ret) | |
896 | goto unmap_ctx; | |
045e3678 | 897 | |
b4e9e931 | 898 | if (mapped_nents) |
059d73ee | 899 | sg_to_sec4_sg_last(req->src, src_len, |
bc13c69e RK |
900 | edesc->sec4_sg + sec4_sg_src_index, |
901 | 0); | |
b4e9e931 | 902 | else |
297b9ceb HG |
903 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - |
904 | 1); | |
045e3678 | 905 | |
045e3678 | 906 | desc = edesc->hw_desc; |
045e3678 | 907 | |
1da2be33 RG |
908 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
909 | sec4_sg_bytes, | |
910 | DMA_TO_DEVICE); | |
ce572085 HG |
911 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
912 | dev_err(jrdev, "unable to map S/G table\n"); | |
32686d34 | 913 | ret = -ENOMEM; |
58b0e5d0 | 914 | goto unmap_ctx; |
ce572085 | 915 | } |
1da2be33 | 916 | |
045e3678 YK |
917 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + |
918 | to_hash, LDST_SGF); | |
919 | ||
920 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); | |
921 | ||
6e005503 SH |
922 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
923 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
924 | desc_bytes(desc), 1); | |
045e3678 | 925 | |
21b014f0 IP |
926 | ret = ahash_enqueue_req(jrdev, ahash_done_bi, req, |
927 | ctx->ctx_len, DMA_BIDIRECTIONAL); | |
045e3678 | 928 | } else if (*next_buflen) { |
307fd543 CS |
929 | scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
930 | req->nbytes, 0); | |
045e3678 | 931 | *buflen = *next_buflen; |
6e005503 | 932 | |
46b49abc AB |
933 | print_hex_dump_debug("buf@" __stringify(__LINE__)": ", |
934 | DUMP_PREFIX_ADDRESS, 16, 4, buf, | |
935 | *buflen, 1); | |
936 | } | |
045e3678 YK |
937 | |
938 | return ret; | |
16c8ad7b | 939 | unmap_ctx: |
32686d34 RK |
940 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); |
941 | kfree(edesc); | |
942 | return ret; | |
045e3678 YK |
943 | } |
944 | ||
945 | static int ahash_final_ctx(struct ahash_request *req) | |
946 | { | |
947 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
948 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
949 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 950 | struct device *jrdev = ctx->jrdev; |
46b49abc | 951 | int buflen = state->buflen; |
30a43b44 | 952 | u32 *desc; |
a5e5c133 | 953 | int sec4_sg_bytes; |
045e3678 YK |
954 | int digestsize = crypto_ahash_digestsize(ahash); |
955 | struct ahash_edesc *edesc; | |
9e6df0fd | 956 | int ret; |
045e3678 | 957 | |
a5e5c133 HG |
958 | sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * |
959 | sizeof(struct sec4_sg_entry); | |
045e3678 YK |
960 | |
961 | /* allocate space for base edesc and hw desc commands, link tables */ | |
2ba1e798 IP |
962 | edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, |
963 | ctx->sh_desc_fin_dma); | |
5588d039 | 964 | if (!edesc) |
045e3678 | 965 | return -ENOMEM; |
045e3678 | 966 | |
045e3678 | 967 | desc = edesc->hw_desc; |
045e3678 YK |
968 | |
969 | edesc->sec4_sg_bytes = sec4_sg_bytes; | |
045e3678 | 970 | |
dfcd8393 | 971 | ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
c19650d6 | 972 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
ce572085 | 973 | if (ret) |
58b0e5d0 | 974 | goto unmap_ctx; |
045e3678 | 975 | |
944c3d4d HG |
976 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); |
977 | if (ret) | |
978 | goto unmap_ctx; | |
979 | ||
a5e5c133 | 980 | sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); |
045e3678 | 981 | |
1da2be33 RG |
982 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
983 | sec4_sg_bytes, DMA_TO_DEVICE); | |
ce572085 HG |
984 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
985 | dev_err(jrdev, "unable to map S/G table\n"); | |
32686d34 | 986 | ret = -ENOMEM; |
58b0e5d0 | 987 | goto unmap_ctx; |
ce572085 | 988 | } |
1da2be33 | 989 | |
045e3678 YK |
990 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, |
991 | LDST_SGF); | |
c19650d6 | 992 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); |
045e3678 | 993 | |
6e005503 SH |
994 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
995 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
996 | 1); | |
045e3678 | 997 | |
21b014f0 IP |
998 | return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, |
999 | digestsize, DMA_BIDIRECTIONAL); | |
58b0e5d0 | 1000 | unmap_ctx: |
c19650d6 | 1001 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); |
32686d34 | 1002 | kfree(edesc); |
045e3678 YK |
1003 | return ret; |
1004 | } | |
1005 | ||
1006 | static int ahash_finup_ctx(struct ahash_request *req) | |
1007 | { | |
1008 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
1009 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
1010 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 1011 | struct device *jrdev = ctx->jrdev; |
46b49abc | 1012 | int buflen = state->buflen; |
30a43b44 | 1013 | u32 *desc; |
65cf164a | 1014 | int sec4_sg_src_index; |
bc13c69e | 1015 | int src_nents, mapped_nents; |
045e3678 YK |
1016 | int digestsize = crypto_ahash_digestsize(ahash); |
1017 | struct ahash_edesc *edesc; | |
9e6df0fd | 1018 | int ret; |
045e3678 | 1019 | |
13fb8fd7 | 1020 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
f9970c28 LC |
1021 | if (src_nents < 0) { |
1022 | dev_err(jrdev, "Invalid number of src SG.\n"); | |
1023 | return src_nents; | |
1024 | } | |
bc13c69e RK |
1025 | |
1026 | if (src_nents) { | |
1027 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | |
1028 | DMA_TO_DEVICE); | |
1029 | if (!mapped_nents) { | |
1030 | dev_err(jrdev, "unable to DMA map source\n"); | |
1031 | return -ENOMEM; | |
1032 | } | |
1033 | } else { | |
1034 | mapped_nents = 0; | |
1035 | } | |
1036 | ||
045e3678 | 1037 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); |
045e3678 YK |
1038 | |
1039 | /* allocate space for base edesc and hw desc commands, link tables */ | |
2ba1e798 IP |
1040 | edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, |
1041 | ctx->sh_desc_fin, ctx->sh_desc_fin_dma); | |
045e3678 | 1042 | if (!edesc) { |
bc13c69e | 1043 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
045e3678 YK |
1044 | return -ENOMEM; |
1045 | } | |
1046 | ||
045e3678 | 1047 | desc = edesc->hw_desc; |
045e3678 YK |
1048 | |
1049 | edesc->src_nents = src_nents; | |
045e3678 | 1050 | |
dfcd8393 | 1051 | ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
c19650d6 | 1052 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
ce572085 | 1053 | if (ret) |
58b0e5d0 | 1054 | goto unmap_ctx; |
045e3678 | 1055 | |
944c3d4d HG |
1056 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); |
1057 | if (ret) | |
1058 | goto unmap_ctx; | |
045e3678 | 1059 | |
65cf164a RK |
1060 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, |
1061 | sec4_sg_src_index, ctx->ctx_len + buflen, | |
1062 | req->nbytes); | |
1063 | if (ret) | |
58b0e5d0 | 1064 | goto unmap_ctx; |
045e3678 | 1065 | |
c19650d6 | 1066 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); |
045e3678 | 1067 | |
6e005503 SH |
1068 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1069 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
1070 | 1); | |
045e3678 | 1071 | |
21b014f0 IP |
1072 | return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, |
1073 | digestsize, DMA_BIDIRECTIONAL); | |
58b0e5d0 | 1074 | unmap_ctx: |
c19650d6 | 1075 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); |
32686d34 | 1076 | kfree(edesc); |
045e3678 YK |
1077 | return ret; |
1078 | } | |
1079 | ||
1080 | static int ahash_digest(struct ahash_request *req) | |
1081 | { | |
1082 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
1083 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
1084 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 1085 | struct device *jrdev = ctx->jrdev; |
30a43b44 | 1086 | u32 *desc; |
045e3678 | 1087 | int digestsize = crypto_ahash_digestsize(ahash); |
65cf164a | 1088 | int src_nents, mapped_nents; |
045e3678 | 1089 | struct ahash_edesc *edesc; |
9e6df0fd | 1090 | int ret; |
045e3678 | 1091 | |
944c3d4d HG |
1092 | state->buf_dma = 0; |
1093 | ||
3d5a2db6 | 1094 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
f9970c28 LC |
1095 | if (src_nents < 0) { |
1096 | dev_err(jrdev, "Invalid number of src SG.\n"); | |
1097 | return src_nents; | |
1098 | } | |
bc13c69e RK |
1099 | |
1100 | if (src_nents) { | |
1101 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | |
1102 | DMA_TO_DEVICE); | |
1103 | if (!mapped_nents) { | |
1104 | dev_err(jrdev, "unable to map source for DMA\n"); | |
1105 | return -ENOMEM; | |
1106 | } | |
1107 | } else { | |
1108 | mapped_nents = 0; | |
1109 | } | |
1110 | ||
045e3678 | 1111 | /* allocate space for base edesc and hw desc commands, link tables */ |
2ba1e798 IP |
1112 | edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, |
1113 | ctx->sh_desc_digest, ctx->sh_desc_digest_dma); | |
045e3678 | 1114 | if (!edesc) { |
bc13c69e | 1115 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
045e3678 YK |
1116 | return -ENOMEM; |
1117 | } | |
343e44b1 | 1118 | |
045e3678 YK |
1119 | edesc->src_nents = src_nents; |
1120 | ||
65cf164a RK |
1121 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, |
1122 | req->nbytes); | |
1123 | if (ret) { | |
1124 | ahash_unmap(jrdev, edesc, req, digestsize); | |
1125 | kfree(edesc); | |
1126 | return ret; | |
045e3678 | 1127 | } |
65cf164a RK |
1128 | |
1129 | desc = edesc->hw_desc; | |
045e3678 | 1130 | |
c19650d6 HG |
1131 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); |
1132 | if (ret) { | |
32686d34 RK |
1133 | ahash_unmap(jrdev, edesc, req, digestsize); |
1134 | kfree(edesc); | |
ce572085 HG |
1135 | return -ENOMEM; |
1136 | } | |
045e3678 | 1137 | |
6e005503 SH |
1138 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1139 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
1140 | 1); | |
045e3678 | 1141 | |
21b014f0 IP |
1142 | return ahash_enqueue_req(jrdev, ahash_done, req, digestsize, |
1143 | DMA_FROM_DEVICE); | |
045e3678 YK |
1144 | } |
1145 | ||
1146 | /* submit ahash final if it is the first job descriptor */ |
1147 | static int ahash_final_no_ctx(struct ahash_request *req) | |
1148 | { | |
1149 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
1150 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
1151 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 1152 | struct device *jrdev = ctx->jrdev; |
46b49abc AB |
1153 | u8 *buf = state->buf; |
1154 | int buflen = state->buflen; | |
30a43b44 | 1155 | u32 *desc; |
045e3678 YK |
1156 | int digestsize = crypto_ahash_digestsize(ahash); |
1157 | struct ahash_edesc *edesc; | |
9e6df0fd | 1158 | int ret; |
045e3678 YK |
1159 | |
1160 | /* allocate space for base edesc and hw desc commands, link tables */ | |
2ba1e798 IP |
1161 | edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, |
1162 | ctx->sh_desc_digest_dma); | |
5588d039 | 1163 | if (!edesc) |
045e3678 | 1164 | return -ENOMEM; |
045e3678 | 1165 | |
045e3678 | 1166 | desc = edesc->hw_desc; |
045e3678 | 1167 | |
04e6d25c AS |
1168 | if (buflen) { |
1169 | state->buf_dma = dma_map_single(jrdev, buf, buflen, | |
1170 | DMA_TO_DEVICE); | |
1171 | if (dma_mapping_error(jrdev, state->buf_dma)) { | |
1172 | dev_err(jrdev, "unable to map src\n"); | |
1173 | goto unmap; | |
1174 | } | |
045e3678 | 1175 | |
04e6d25c AS |
1176 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
1177 | } | |
045e3678 | 1178 | |
c19650d6 HG |
1179 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); |
1180 | if (ret) | |
06435f34 | 1181 | goto unmap; |
045e3678 | 1182 | |
6e005503 SH |
1183 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1184 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
1185 | 1); | |
045e3678 | 1186 | |
21b014f0 IP |
1187 | return ahash_enqueue_req(jrdev, ahash_done, req, |
1188 | digestsize, DMA_FROM_DEVICE); | |
06435f34 ME |
1189 | unmap: |
1190 | ahash_unmap(jrdev, edesc, req, digestsize); | |
1191 | kfree(edesc); | |
1192 | return -ENOMEM; | |
045e3678 YK |
1193 | } |
1194 | ||
1195 | /* submit ahash update if it is the first job descriptor after update */ |
1196 | static int ahash_update_no_ctx(struct ahash_request *req) | |
1197 | { | |
1198 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
1199 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
1200 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 1201 | struct device *jrdev = ctx->jrdev; |
46b49abc AB |
1202 | u8 *buf = state->buf; |
1203 | int *buflen = &state->buflen; | |
1204 | int *next_buflen = &state->next_buflen; | |
12b8567f | 1205 | int blocksize = crypto_ahash_blocksize(ahash); |
045e3678 | 1206 | int in_len = *buflen + req->nbytes, to_hash; |
bc13c69e | 1207 | int sec4_sg_bytes, src_nents, mapped_nents; |
045e3678 | 1208 | struct ahash_edesc *edesc; |
30a43b44 | 1209 | u32 *desc; |
045e3678 | 1210 | int ret = 0; |
045e3678 | 1211 | |
12b8567f | 1212 | *next_buflen = in_len & (blocksize - 1); |
045e3678 YK |
1213 | to_hash = in_len - *next_buflen; |
1214 | ||
12b8567f | 1215 | /* |
87870cfb | 1216 | * For XCBC and CMAC, if to_hash is a multiple of the block size, |
12b8567f IP |
1217 | * keep last block in internal buffer |
1218 | */ | |
87870cfb IP |
1219 | if ((is_xcbc_aes(ctx->adata.algtype) || |
1220 | is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && | |
1221 | (*next_buflen == 0)) { | |
12b8567f IP |
1222 | *next_buflen = blocksize; |
1223 | to_hash -= blocksize; | |
1224 | } | |
1225 | ||
045e3678 | 1226 | if (to_hash) { |
a5e5c133 | 1227 | int pad_nents; |
059d73ee | 1228 | int src_len = req->nbytes - *next_buflen; |
a5e5c133 | 1229 | |
059d73ee | 1230 | src_nents = sg_nents_for_len(req->src, src_len); |
f9970c28 LC |
1231 | if (src_nents < 0) { |
1232 | dev_err(jrdev, "Invalid number of src SG.\n"); | |
1233 | return src_nents; | |
1234 | } | |
bc13c69e RK |
1235 | |
1236 | if (src_nents) { | |
1237 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | |
1238 | DMA_TO_DEVICE); | |
1239 | if (!mapped_nents) { | |
1240 | dev_err(jrdev, "unable to DMA map source\n"); | |
1241 | return -ENOMEM; | |
1242 | } | |
1243 | } else { | |
1244 | mapped_nents = 0; | |
1245 | } | |
1246 | ||
a5e5c133 HG |
1247 | pad_nents = pad_sg_nents(1 + mapped_nents); |
1248 | sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); | |
045e3678 YK |
1249 | |
1250 | /* | |
1251 | * allocate space for base edesc and hw desc commands, | |
1252 | * link tables | |
1253 | */ | |
2ba1e798 | 1254 | edesc = ahash_edesc_alloc(req, pad_nents, |
30a43b44 | 1255 | ctx->sh_desc_update_first, |
2ba1e798 | 1256 | ctx->sh_desc_update_first_dma); |
045e3678 | 1257 | if (!edesc) { |
bc13c69e | 1258 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
045e3678 YK |
1259 | return -ENOMEM; |
1260 | } | |
1261 | ||
1262 | edesc->src_nents = src_nents; | |
1263 | edesc->sec4_sg_bytes = sec4_sg_bytes; | |
045e3678 | 1264 | |
944c3d4d HG |
1265 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); |
1266 | if (ret) | |
1267 | goto unmap_ctx; | |
1268 | ||
059d73ee | 1269 | sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); |
bc13c69e | 1270 | |
045e3678 | 1271 | desc = edesc->hw_desc; |
045e3678 | 1272 | |
1da2be33 RG |
1273 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
1274 | sec4_sg_bytes, | |
1275 | DMA_TO_DEVICE); | |
ce572085 HG |
1276 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
1277 | dev_err(jrdev, "unable to map S/G table\n"); | |
32686d34 | 1278 | ret = -ENOMEM; |
58b0e5d0 | 1279 | goto unmap_ctx; |
ce572085 | 1280 | } |
1da2be33 | 1281 | |
045e3678 YK |
1282 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); |
1283 | ||
ce572085 HG |
1284 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
1285 | if (ret) | |
58b0e5d0 | 1286 | goto unmap_ctx; |
045e3678 | 1287 | |
6e005503 SH |
1288 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1289 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
1290 | desc_bytes(desc), 1); | |
045e3678 | 1291 | |
21b014f0 IP |
1292 | ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, |
1293 | ctx->ctx_len, DMA_TO_DEVICE); | |
1294 | if ((ret != -EINPROGRESS) && (ret != -EBUSY)) | |
1295 | return ret; | |
32686d34 RK |
1296 | state->update = ahash_update_ctx; |
1297 | state->finup = ahash_finup_ctx; | |
1298 | state->final = ahash_final_ctx; | |
045e3678 | 1299 | } else if (*next_buflen) { |
307fd543 CS |
1300 | scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
1301 | req->nbytes, 0); | |
045e3678 | 1302 | *buflen = *next_buflen; |
6e005503 | 1303 | |
46b49abc AB |
1304 | print_hex_dump_debug("buf@" __stringify(__LINE__)": ", |
1305 | DUMP_PREFIX_ADDRESS, 16, 4, buf, | |
1306 | *buflen, 1); | |
1307 | } | |
045e3678 YK |
1308 | |
1309 | return ret; | |
58b0e5d0 | 1310 | unmap_ctx: |
32686d34 RK |
1311 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); |
1312 | kfree(edesc); | |
1313 | return ret; | |
045e3678 YK |
1314 | } |
1315 | ||
1316 | /* submit ahash finup if it is the first job descriptor after update */ |
1317 | static int ahash_finup_no_ctx(struct ahash_request *req) | |
1318 | { | |
1319 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
1320 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
1321 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 1322 | struct device *jrdev = ctx->jrdev; |
46b49abc | 1323 | int buflen = state->buflen; |
30a43b44 | 1324 | u32 *desc; |
bc13c69e | 1325 | int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; |
045e3678 YK |
1326 | int digestsize = crypto_ahash_digestsize(ahash); |
1327 | struct ahash_edesc *edesc; | |
9e6df0fd | 1328 | int ret; |
045e3678 | 1329 | |
13fb8fd7 | 1330 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
f9970c28 LC |
1331 | if (src_nents < 0) { |
1332 | dev_err(jrdev, "Invalid number of src SG.\n"); | |
1333 | return src_nents; | |
1334 | } | |
bc13c69e RK |
1335 | |
1336 | if (src_nents) { | |
1337 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | |
1338 | DMA_TO_DEVICE); | |
1339 | if (!mapped_nents) { | |
1340 | dev_err(jrdev, "unable to DMA map source\n"); | |
1341 | return -ENOMEM; | |
1342 | } | |
1343 | } else { | |
1344 | mapped_nents = 0; | |
1345 | } | |
1346 | ||
045e3678 | 1347 | sec4_sg_src_index = 2; |
bc13c69e | 1348 | sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * |
045e3678 YK |
1349 | sizeof(struct sec4_sg_entry); |
1350 | ||
1351 | /* allocate space for base edesc and hw desc commands, link tables */ | |
2ba1e798 IP |
1352 | edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, |
1353 | ctx->sh_desc_digest, ctx->sh_desc_digest_dma); | |
045e3678 | 1354 | if (!edesc) { |
bc13c69e | 1355 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
045e3678 YK |
1356 | return -ENOMEM; |
1357 | } | |
1358 | ||
045e3678 | 1359 | desc = edesc->hw_desc; |
045e3678 YK |
1360 | |
1361 | edesc->src_nents = src_nents; | |
1362 | edesc->sec4_sg_bytes = sec4_sg_bytes; | |
045e3678 | 1363 | |
944c3d4d HG |
1364 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); |
1365 | if (ret) | |
1366 | goto unmap; | |
045e3678 | 1367 | |
65cf164a RK |
1368 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, |
1369 | req->nbytes); | |
1370 | if (ret) { | |
ce572085 | 1371 | dev_err(jrdev, "unable to map S/G table\n"); |
06435f34 | 1372 | goto unmap; |
ce572085 | 1373 | } |
1da2be33 | 1374 | |
c19650d6 HG |
1375 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); |
1376 | if (ret) | |
06435f34 | 1377 | goto unmap; |
045e3678 | 1378 | |
6e005503 SH |
1379 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1380 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | |
1381 | 1); | |
045e3678 | 1382 | |
21b014f0 IP |
1383 | return ahash_enqueue_req(jrdev, ahash_done, req, |
1384 | digestsize, DMA_FROM_DEVICE); | |
06435f34 ME |
1385 | unmap: |
1386 | ahash_unmap(jrdev, edesc, req, digestsize); | |
1387 | kfree(edesc); | |
1388 | return -ENOMEM; | |
1389 | ||
045e3678 YK |
1390 | } |
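Editor's note: ahash_finup_no_ctx() sizes its SEC4 link table for a couple of fixed slots (sec4_sg_src_index = 2, covering the software-buffered bytes) plus one slot per DMA-mapped source segment. A small sketch of that sizing follows, using a stand-in entry layout, since the exact fields of struct sec4_sg_entry are hardware-defined and not shown here:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for struct sec4_sg_entry: the real one is a hardware-defined
 * address/length/flags record whose exact layout is not reproduced here. */
struct fake_sg_entry {
	unsigned long long addr;
	unsigned int len;
	unsigned int flags;
};

/* Link-table size: fixed slots plus one slot per mapped source segment,
 * mirroring sec4_sg_bytes in the finup path above. */
static size_t link_table_bytes(int fixed_slots, int mapped_nents)
{
	return (size_t)(fixed_slots + mapped_nents) * sizeof(struct fake_sg_entry);
}

int main(void)
{
	/* e.g. two fixed slots and three mapped source segments */
	printf("%zu bytes\n", link_table_bytes(2, 3));
	return 0;
}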
1391 | ||
1392 | /* submit first update job descriptor after init */ | |
1393 | static int ahash_update_first(struct ahash_request *req) | |
1394 | { | |
1395 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
4cb4f7c1 HX |
1396 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
1397 | struct caam_hash_state *state = ahash_request_ctx_dma(req); | |
045e3678 | 1398 | struct device *jrdev = ctx->jrdev; |
46b49abc AB |
1399 | u8 *buf = state->buf; |
1400 | int *buflen = &state->buflen; | |
1401 | int *next_buflen = &state->next_buflen; | |
045e3678 | 1402 | int to_hash; |
12b8567f | 1403 | int blocksize = crypto_ahash_blocksize(ahash); |
30a43b44 | 1404 | u32 *desc; |
65cf164a | 1405 | int src_nents, mapped_nents; |
045e3678 YK |
1406 | struct ahash_edesc *edesc; |
1407 | int ret = 0; | |
045e3678 | 1408 | |
12b8567f | 1409 | *next_buflen = req->nbytes & (blocksize - 1); |
045e3678 YK |
1410 | to_hash = req->nbytes - *next_buflen; |
1411 | ||
12b8567f | 1412 | /* |
87870cfb | 1413 | * For XCBC and CMAC, if to_hash is multiple of block size, |
12b8567f IP |
1414 | * keep last block in internal buffer |
1415 | */ | |
87870cfb IP |
1416 | if ((is_xcbc_aes(ctx->adata.algtype) || |
1417 | is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && | |
1418 | (*next_buflen == 0)) { | |
12b8567f IP |
1419 | *next_buflen = blocksize; |
1420 | to_hash -= blocksize; | |
1421 | } | |
1422 | ||
045e3678 | 1423 | if (to_hash) { |
3d5a2db6 RK |
1424 | src_nents = sg_nents_for_len(req->src, |
1425 | req->nbytes - *next_buflen); | |
f9970c28 LC |
1426 | if (src_nents < 0) { |
1427 | dev_err(jrdev, "Invalid number of src SG.\n"); | |
1428 | return src_nents; | |
1429 | } | |
bc13c69e RK |
1430 | |
1431 | if (src_nents) { | |
1432 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | |
1433 | DMA_TO_DEVICE); | |
1434 | if (!mapped_nents) { | |
1435 | dev_err(jrdev, "unable to map source for DMA\n"); | |
1436 | return -ENOMEM; | |
1437 | } | |
1438 | } else { | |
1439 | mapped_nents = 0; | |
1440 | } | |
045e3678 YK |
1441 | |
1442 | /* | |
1443 | * allocate space for base edesc and hw desc commands, | |
1444 | * link tables | |
1445 | */ | |
2ba1e798 | 1446 | edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? |
30a43b44 RK |
1447 | mapped_nents : 0, |
1448 | ctx->sh_desc_update_first, | |
2ba1e798 | 1449 | ctx->sh_desc_update_first_dma); |
045e3678 | 1450 | if (!edesc) { |
bc13c69e | 1451 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
045e3678 YK |
1452 | return -ENOMEM; |
1453 | } | |
1454 | ||
1455 | edesc->src_nents = src_nents; | |
045e3678 | 1456 | |
65cf164a RK |
1457 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, |
1458 | to_hash); | |
1459 | if (ret) | |
58b0e5d0 | 1460 | goto unmap_ctx; |
045e3678 | 1461 | |
045e3678 | 1462 | desc = edesc->hw_desc; |
045e3678 | 1463 | |
ce572085 HG |
1464 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
1465 | if (ret) | |
58b0e5d0 | 1466 | goto unmap_ctx; |
045e3678 | 1467 | |
6e005503 SH |
1468 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1469 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | |
1470 | desc_bytes(desc), 1); | |
045e3678 | 1471 | |
21b014f0 IP |
1472 | ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, |
1473 | ctx->ctx_len, DMA_TO_DEVICE); | |
1474 | if ((ret != -EINPROGRESS) && (ret != -EBUSY)) | |
1475 | return ret; | |
32686d34 RK |
1476 | state->update = ahash_update_ctx; |
1477 | state->finup = ahash_finup_ctx; | |
1478 | state->final = ahash_final_ctx; | |
045e3678 YK |
1479 | } else if (*next_buflen) { |
1480 | state->update = ahash_update_no_ctx; | |
1481 | state->finup = ahash_finup_no_ctx; | |
1482 | state->final = ahash_final_no_ctx; | |
46b49abc | 1483 | scatterwalk_map_and_copy(buf, req->src, 0, |
307fd543 | 1484 | req->nbytes, 0); |
46b49abc | 1485 | *buflen = *next_buflen; |
6e005503 | 1486 | |
46b49abc AB |
1487 | print_hex_dump_debug("buf@" __stringify(__LINE__)": ", |
1488 | DUMP_PREFIX_ADDRESS, 16, 4, buf, | |
1489 | *buflen, 1); | |
1490 | } | |
045e3678 YK |
1491 | |
1492 | return ret; | |
58b0e5d0 | 1493 | unmap_ctx: |
32686d34 RK |
1494 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); |
1495 | kfree(edesc); | |
1496 | return ret; | |
045e3678 YK |
1497 | } |
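Editor's note: ahash_update_first() splits the request into a hardware portion (to_hash) and a buffered remainder (*next_buflen): the remainder is the byte count modulo the block size, except that AES XCBC/CMAC keep one whole block back when the input is block-aligned, because the last block needs special handling at finalization. A standalone sketch of that split, assuming (as for every algorithm registered here) that the block size is a power of two:

#include <stdbool.h>
#include <stdio.h>

/* Split nbytes into (to_hash, kept) the way the update-first path does.
 * For AES XCBC/CMAC a block-aligned input keeps its last block buffered. */
static void split_update(unsigned int nbytes, unsigned int blocksize,
			 bool is_aes_mac,
			 unsigned int *to_hash, unsigned int *kept)
{
	*kept = nbytes & (blocksize - 1);	/* remainder; blocksize is 2^n */
	*to_hash = nbytes - *kept;

	if (is_aes_mac && *to_hash >= blocksize && *kept == 0) {
		*kept = blocksize;
		*to_hash -= blocksize;
	}
}

int main(void)
{
	unsigned int to_hash, kept;

	split_update(128, 16, false, &to_hash, &kept);	/* SHA-style hash */
	printf("sha:  to_hash=%u kept=%u\n", to_hash, kept);

	split_update(128, 16, true, &to_hash, &kept);	/* cmac(aes) */
	printf("cmac: to_hash=%u kept=%u\n", to_hash, kept);
	return 0;
}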
1498 | ||
1499 | static int ahash_finup_first(struct ahash_request *req) | |
1500 | { | |
1501 | return ahash_digest(req); | |
1502 | } | |
1503 | ||
1504 | static int ahash_init(struct ahash_request *req) | |
1505 | { | |
4cb4f7c1 | 1506 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
045e3678 YK |
1507 | |
1508 | state->update = ahash_update_first; | |
1509 | state->finup = ahash_finup_first; | |
1510 | state->final = ahash_final_no_ctx; | |
1511 | ||
87ec02e7 | 1512 | state->ctx_dma = 0; |
65055e21 | 1513 | state->ctx_dma_len = 0; |
de0e35ec | 1514 | state->buf_dma = 0; |
46b49abc AB |
1515 | state->buflen = 0; |
1516 | state->next_buflen = 0; | |
045e3678 YK |
1517 | |
1518 | return 0; | |
1519 | } | |
1520 | ||
1521 | static int ahash_update(struct ahash_request *req) | |
1522 | { | |
4cb4f7c1 | 1523 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
045e3678 YK |
1524 | |
1525 | return state->update(req); | |
1526 | } | |
1527 | ||
1528 | static int ahash_finup(struct ahash_request *req) | |
1529 | { | |
4cb4f7c1 | 1530 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
045e3678 YK |
1531 | |
1532 | return state->finup(req); | |
1533 | } | |
1534 | ||
1535 | static int ahash_final(struct ahash_request *req) | |
1536 | { | |
4cb4f7c1 | 1537 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
045e3678 YK |
1538 | |
1539 | return state->final(req); | |
1540 | } | |
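Editor's note: ahash_init() and the dispatchers above form a small per-request state machine: .update/.finup/.final start at the "first"/"no_ctx" handlers and are re-pointed to the context-based variants once a running digest lives in CAAM memory (see ahash_update_first() above). A compact userspace model of that function-pointer dispatch, with hypothetical handler names, is sketched below:

#include <stdio.h>

struct req_state;
typedef int (*hash_fn)(struct req_state *);

/* Per-request state: which handler services the next update/final call. */
struct req_state {
	hash_fn update;
	hash_fn final;
};

static int update_ctx(struct req_state *s)  { (void)s; puts("update with context"); return 0; }
static int final_ctx(struct req_state *s)   { (void)s; puts("final with context");  return 0; }
static int final_no_ctx(struct req_state *s){ (void)s; puts("final, no context");   return 0; }

/* First update: do the work, then switch to the context-based handlers. */
static int update_first(struct req_state *s)
{
	puts("first update, running digest now lives in hardware");
	s->update = update_ctx;
	s->final = final_ctx;
	return 0;
}

static void state_init(struct req_state *s)
{
	s->update = update_first;
	s->final = final_no_ctx;
}

int main(void)
{
	struct req_state s;

	state_init(&s);
	s.update(&s);	/* routed to update_first */
	s.update(&s);	/* now routed to update_ctx */
	s.final(&s);
	return 0;
}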
1541 | ||
1542 | static int ahash_export(struct ahash_request *req, void *out) | |
1543 | { | |
4cb4f7c1 | 1544 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
5ec90831 | 1545 | struct caam_export_state *export = out; |
46b49abc AB |
1546 | u8 *buf = state->buf; |
1547 | int len = state->buflen; | |
5ec90831 RK |
1548 | |
1549 | memcpy(export->buf, buf, len); | |
1550 | memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); | |
1551 | export->buflen = len; | |
1552 | export->update = state->update; | |
1553 | export->final = state->final; | |
1554 | export->finup = state->finup; | |
434b4212 | 1555 | |
045e3678 YK |
1556 | return 0; |
1557 | } | |
1558 | ||
1559 | static int ahash_import(struct ahash_request *req, const void *in) | |
1560 | { | |
4cb4f7c1 | 1561 | struct caam_hash_state *state = ahash_request_ctx_dma(req); |
5ec90831 | 1562 | const struct caam_export_state *export = in; |
045e3678 | 1563 | |
5ec90831 | 1564 | memset(state, 0, sizeof(*state)); |
46b49abc | 1565 | memcpy(state->buf, export->buf, export->buflen); |
5ec90831 | 1566 | memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); |
46b49abc | 1567 | state->buflen = export->buflen; |
5ec90831 RK |
1568 | state->update = export->update; |
1569 | state->final = export->final; | |
1570 | state->finup = export->finup; | |
434b4212 | 1571 | |
045e3678 YK |
1572 | return 0; |
1573 | } | |
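Editor's note: ahash_export()/ahash_import() let a partially hashed request be frozen into a caam_export_state and resumed later, copying the software buffer, the CAAM running context and the three handler pointers. A simplified round-trip model follows; only the buffer fields are mirrored here, the real structures also carry caam_ctx and the handlers:

#include <stdio.h>
#include <string.h>

/* Simplified mirrors of the live state and its exported form. */
struct hstate  { unsigned char buf[64]; int buflen; };
struct hexport { unsigned char buf[64]; int buflen; };

static void hash_export(const struct hstate *s, struct hexport *out)
{
	memcpy(out->buf, s->buf, s->buflen);
	out->buflen = s->buflen;
}

static void hash_import(struct hstate *s, const struct hexport *in)
{
	memset(s, 0, sizeof(*s));		/* resume from a clean state */
	memcpy(s->buf, in->buf, in->buflen);
	s->buflen = in->buflen;
}

int main(void)
{
	struct hstate a = { .buflen = 13 };
	struct hexport saved;
	struct hstate b;

	memcpy(a.buf, "partial block", a.buflen);
	hash_export(&a, &saved);
	hash_import(&b, &saved);
	printf("restored %d buffered bytes: %.*s\n", b.buflen, b.buflen,
	       (const char *)b.buf);
	return 0;
}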
1574 | ||
1575 | struct caam_hash_template { | |
1576 | char name[CRYPTO_MAX_ALG_NAME]; | |
1577 | char driver_name[CRYPTO_MAX_ALG_NAME]; | |
b0e09bae YK |
1578 | char hmac_name[CRYPTO_MAX_ALG_NAME]; |
1579 | char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; | |
045e3678 YK |
1580 | unsigned int blocksize; |
1581 | struct ahash_alg template_ahash; | |
1582 | u32 alg_type; | |
045e3678 YK |
1583 | }; |
1584 | ||
1585 | /* ahash algorithm templates */ |
1586 | static struct caam_hash_template driver_hash[] = { | |
1587 | { | |
b0e09bae YK |
1588 | .name = "sha1", |
1589 | .driver_name = "sha1-caam", | |
1590 | .hmac_name = "hmac(sha1)", | |
1591 | .hmac_driver_name = "hmac-sha1-caam", | |
045e3678 YK |
1592 | .blocksize = SHA1_BLOCK_SIZE, |
1593 | .template_ahash = { | |
1594 | .init = ahash_init, | |
1595 | .update = ahash_update, | |
1596 | .final = ahash_final, | |
1597 | .finup = ahash_finup, | |
1598 | .digest = ahash_digest, | |
1599 | .export = ahash_export, | |
1600 | .import = ahash_import, | |
1601 | .setkey = ahash_setkey, | |
1602 | .halg = { | |
1603 | .digestsize = SHA1_DIGEST_SIZE, | |
5ec90831 | 1604 | .statesize = sizeof(struct caam_export_state), |
045e3678 | 1605 | }, |
659f313d | 1606 | }, |
045e3678 | 1607 | .alg_type = OP_ALG_ALGSEL_SHA1, |
045e3678 | 1608 | }, { |
b0e09bae YK |
1609 | .name = "sha224", |
1610 | .driver_name = "sha224-caam", | |
1611 | .hmac_name = "hmac(sha224)", | |
1612 | .hmac_driver_name = "hmac-sha224-caam", | |
045e3678 YK |
1613 | .blocksize = SHA224_BLOCK_SIZE, |
1614 | .template_ahash = { | |
1615 | .init = ahash_init, | |
1616 | .update = ahash_update, | |
1617 | .final = ahash_final, | |
1618 | .finup = ahash_finup, | |
1619 | .digest = ahash_digest, | |
1620 | .export = ahash_export, | |
1621 | .import = ahash_import, | |
1622 | .setkey = ahash_setkey, | |
1623 | .halg = { | |
1624 | .digestsize = SHA224_DIGEST_SIZE, | |
5ec90831 | 1625 | .statesize = sizeof(struct caam_export_state), |
045e3678 | 1626 | }, |
659f313d | 1627 | }, |
045e3678 | 1628 | .alg_type = OP_ALG_ALGSEL_SHA224, |
045e3678 | 1629 | }, { |
b0e09bae YK |
1630 | .name = "sha256", |
1631 | .driver_name = "sha256-caam", | |
1632 | .hmac_name = "hmac(sha256)", | |
1633 | .hmac_driver_name = "hmac-sha256-caam", | |
045e3678 YK |
1634 | .blocksize = SHA256_BLOCK_SIZE, |
1635 | .template_ahash = { | |
1636 | .init = ahash_init, | |
1637 | .update = ahash_update, | |
1638 | .final = ahash_final, | |
1639 | .finup = ahash_finup, | |
1640 | .digest = ahash_digest, | |
1641 | .export = ahash_export, | |
1642 | .import = ahash_import, | |
1643 | .setkey = ahash_setkey, | |
1644 | .halg = { | |
1645 | .digestsize = SHA256_DIGEST_SIZE, | |
5ec90831 | 1646 | .statesize = sizeof(struct caam_export_state), |
045e3678 | 1647 | }, |
659f313d | 1648 | }, |
045e3678 | 1649 | .alg_type = OP_ALG_ALGSEL_SHA256, |
045e3678 | 1650 | }, { |
b0e09bae YK |
1651 | .name = "sha384", |
1652 | .driver_name = "sha384-caam", | |
1653 | .hmac_name = "hmac(sha384)", | |
1654 | .hmac_driver_name = "hmac-sha384-caam", | |
045e3678 YK |
1655 | .blocksize = SHA384_BLOCK_SIZE, |
1656 | .template_ahash = { | |
1657 | .init = ahash_init, | |
1658 | .update = ahash_update, | |
1659 | .final = ahash_final, | |
1660 | .finup = ahash_finup, | |
1661 | .digest = ahash_digest, | |
1662 | .export = ahash_export, | |
1663 | .import = ahash_import, | |
1664 | .setkey = ahash_setkey, | |
1665 | .halg = { | |
1666 | .digestsize = SHA384_DIGEST_SIZE, | |
5ec90831 | 1667 | .statesize = sizeof(struct caam_export_state), |
045e3678 | 1668 | }, |
659f313d | 1669 | }, |
045e3678 | 1670 | .alg_type = OP_ALG_ALGSEL_SHA384, |
045e3678 | 1671 | }, { |
b0e09bae YK |
1672 | .name = "sha512", |
1673 | .driver_name = "sha512-caam", | |
1674 | .hmac_name = "hmac(sha512)", | |
1675 | .hmac_driver_name = "hmac-sha512-caam", | |
045e3678 YK |
1676 | .blocksize = SHA512_BLOCK_SIZE, |
1677 | .template_ahash = { | |
1678 | .init = ahash_init, | |
1679 | .update = ahash_update, | |
1680 | .final = ahash_final, | |
1681 | .finup = ahash_finup, | |
1682 | .digest = ahash_digest, | |
1683 | .export = ahash_export, | |
1684 | .import = ahash_import, | |
1685 | .setkey = ahash_setkey, | |
1686 | .halg = { | |
1687 | .digestsize = SHA512_DIGEST_SIZE, | |
5ec90831 | 1688 | .statesize = sizeof(struct caam_export_state), |
045e3678 | 1689 | }, |
659f313d | 1690 | }, |
045e3678 | 1691 | .alg_type = OP_ALG_ALGSEL_SHA512, |
045e3678 | 1692 | }, { |
b0e09bae YK |
1693 | .name = "md5", |
1694 | .driver_name = "md5-caam", | |
1695 | .hmac_name = "hmac(md5)", | |
1696 | .hmac_driver_name = "hmac-md5-caam", | |
045e3678 YK |
1697 | .blocksize = MD5_BLOCK_WORDS * 4, |
1698 | .template_ahash = { | |
1699 | .init = ahash_init, | |
1700 | .update = ahash_update, | |
1701 | .final = ahash_final, | |
1702 | .finup = ahash_finup, | |
1703 | .digest = ahash_digest, | |
1704 | .export = ahash_export, | |
1705 | .import = ahash_import, | |
1706 | .setkey = ahash_setkey, | |
1707 | .halg = { | |
1708 | .digestsize = MD5_DIGEST_SIZE, | |
5ec90831 | 1709 | .statesize = sizeof(struct caam_export_state), |
045e3678 | 1710 | }, |
659f313d | 1711 | }, |
045e3678 | 1712 | .alg_type = OP_ALG_ALGSEL_MD5, |
12b8567f IP |
1713 | }, { |
1714 | .hmac_name = "xcbc(aes)", | |
1715 | .hmac_driver_name = "xcbc-aes-caam", | |
1716 | .blocksize = AES_BLOCK_SIZE, | |
1717 | .template_ahash = { | |
1718 | .init = ahash_init, | |
1719 | .update = ahash_update, | |
1720 | .final = ahash_final, | |
1721 | .finup = ahash_finup, | |
1722 | .digest = ahash_digest, | |
1723 | .export = ahash_export, | |
1724 | .import = ahash_import, | |
1725 | .setkey = axcbc_setkey, | |
1726 | .halg = { | |
1727 | .digestsize = AES_BLOCK_SIZE, | |
1728 | .statesize = sizeof(struct caam_export_state), | |
1729 | }, | |
1730 | }, | |
1731 | .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, | |
87870cfb IP |
1732 | }, { |
1733 | .hmac_name = "cmac(aes)", | |
1734 | .hmac_driver_name = "cmac-aes-caam", | |
1735 | .blocksize = AES_BLOCK_SIZE, | |
1736 | .template_ahash = { | |
1737 | .init = ahash_init, | |
1738 | .update = ahash_update, | |
1739 | .final = ahash_final, | |
1740 | .finup = ahash_finup, | |
1741 | .digest = ahash_digest, | |
1742 | .export = ahash_export, | |
1743 | .import = ahash_import, | |
1744 | .setkey = acmac_setkey, | |
1745 | .halg = { | |
1746 | .digestsize = AES_BLOCK_SIZE, | |
1747 | .statesize = sizeof(struct caam_export_state), | |
1748 | }, | |
1749 | }, | |
1750 | .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, | |
045e3678 YK |
1751 | }, |
1752 | }; | |
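Editor's note: each driver_hash[] entry can yield two registrations: a keyed variant named after .hmac_name (e.g. "hmac(sha256)", driver name "hmac-sha256-caam") and, except for the AES XCBC/CMAC entries, an unkeyed variant named after .name. From userspace these are reached through the normal crypto API names; below is a hedged AF_ALG sketch using the generic "hmac(sha256)" name (whether the CAAM implementation actually services it depends on what is registered and on driver priority):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "hmac(sha256)",	/* .hmac_name in the table above */
	};
	unsigned char key[32] = { 0 };		/* demo key: all zeroes */
	unsigned char digest[32];
	int tfm, req;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	if (setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
		return 1;
	req = accept(tfm, NULL, NULL);
	if (req < 0)
		return 1;

	/* one shot: send the message, then read back the 32-byte digest */
	if (write(req, "hello", 5) != 5)
		return 1;
	if (read(req, digest, sizeof(digest)) != sizeof(digest))
		return 1;

	for (size_t i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');
	close(req);
	close(tfm);
	return 0;
}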
1753 | ||
1754 | struct caam_hash_alg { | |
1755 | struct list_head entry; | |
045e3678 | 1756 | int alg_type; |
c5a2f74d | 1757 | bool is_hmac; |
623814c0 | 1758 | struct ahash_engine_alg ahash_alg; |
045e3678 YK |
1759 | }; |
1760 | ||
1761 | static int caam_hash_cra_init(struct crypto_tfm *tfm) | |
1762 | { | |
1763 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | |
1764 | struct crypto_alg *base = tfm->__crt_alg; | |
1765 | struct hash_alg_common *halg = | |
1766 | container_of(base, struct hash_alg_common, base); | |
1767 | struct ahash_alg *alg = | |
1768 | container_of(halg, struct ahash_alg, halg); | |
1769 | struct caam_hash_alg *caam_hash = | |
623814c0 | 1770 | container_of(alg, struct caam_hash_alg, ahash_alg.base); |
4cb4f7c1 | 1771 | struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); |
045e3678 YK |
1772 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ |
1773 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, | |
1774 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, | |
1775 | HASH_MSG_LEN + 32, | |
1776 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | |
1777 | HASH_MSG_LEN + 64, | |
1778 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | |
21b014f0 IP |
1779 | const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx, |
1780 | sh_desc_update); | |
bbf22344 | 1781 | dma_addr_t dma_addr; |
7e0880b9 | 1782 | struct caam_drv_private *priv; |
045e3678 YK |
1783 | |
1784 | /* | |
cfc6f11b | 1785 | * Get a Job ring from Job Ring driver to ensure in-order |
045e3678 YK |
1786 | * crypto request processing per tfm |
1787 | */ | |
cfc6f11b RG |
1788 | ctx->jrdev = caam_jr_alloc(); |
1789 | if (IS_ERR(ctx->jrdev)) { | |
1790 | pr_err("Job Ring Device allocation for transform failed\n"); | |
1791 | return PTR_ERR(ctx->jrdev); | |
1792 | } | |
bbf22344 | 1793 | |
7e0880b9 | 1794 | priv = dev_get_drvdata(ctx->jrdev->parent); |
12b8567f IP |
1795 | |
1796 | if (is_xcbc_aes(caam_hash->alg_type)) { | |
1797 | ctx->dir = DMA_TO_DEVICE; | |
e9b4913a | 1798 | ctx->key_dir = DMA_BIDIRECTIONAL; |
12b8567f IP |
1799 | ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; |
1800 | ctx->ctx_len = 48; | |
87870cfb IP |
1801 | } else if (is_cmac_aes(caam_hash->alg_type)) { |
1802 | ctx->dir = DMA_TO_DEVICE; | |
e9b4913a | 1803 | ctx->key_dir = DMA_NONE; |
87870cfb IP |
1804 | ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; |
1805 | ctx->ctx_len = 32; | |
12b8567f | 1806 | } else { |
e9b4913a HG |
1807 | if (priv->era >= 6) { |
1808 | ctx->dir = DMA_BIDIRECTIONAL; | |
c5a2f74d | 1809 | ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE; |
e9b4913a HG |
1810 | } else { |
1811 | ctx->dir = DMA_TO_DEVICE; | |
1812 | ctx->key_dir = DMA_NONE; | |
1813 | } | |
12b8567f IP |
1814 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; |
1815 | ctx->ctx_len = runninglen[(ctx->adata.algtype & | |
1816 | OP_ALG_ALGSEL_SUBMASK) >> | |
1817 | OP_ALG_ALGSEL_SHIFT]; | |
1818 | } | |
7e0880b9 | 1819 | |
e9b4913a HG |
1820 | if (ctx->key_dir != DMA_NONE) { |
1821 | ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, | |
1822 | ARRAY_SIZE(ctx->key), | |
1823 | ctx->key_dir, | |
1824 | DMA_ATTR_SKIP_CPU_SYNC); | |
1825 | if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { | |
1826 | dev_err(ctx->jrdev, "unable to map key\n"); | |
1827 | caam_jr_free(ctx->jrdev); | |
1828 | return -ENOMEM; | |
1829 | } | |
1830 | } | |
1831 | ||
bbf22344 | 1832 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, |
21b014f0 IP |
1833 | offsetof(struct caam_hash_ctx, key) - |
1834 | sh_desc_update_offset, | |
7e0880b9 | 1835 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
bbf22344 HG |
1836 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { |
1837 | dev_err(ctx->jrdev, "unable to map shared descriptors\n"); | |
12b8567f | 1838 | |
e9b4913a | 1839 | if (ctx->key_dir != DMA_NONE) |
a2fb864c | 1840 | dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, |
12b8567f | 1841 | ARRAY_SIZE(ctx->key), |
e9b4913a | 1842 | ctx->key_dir, |
12b8567f IP |
1843 | DMA_ATTR_SKIP_CPU_SYNC); |
1844 | ||
bbf22344 HG |
1845 | caam_jr_free(ctx->jrdev); |
1846 | return -ENOMEM; | |
1847 | } | |
1848 | ||
1849 | ctx->sh_desc_update_dma = dma_addr; | |
1850 | ctx->sh_desc_update_first_dma = dma_addr + | |
1851 | offsetof(struct caam_hash_ctx, | |
21b014f0 IP |
1852 | sh_desc_update_first) - |
1853 | sh_desc_update_offset; | |
bbf22344 | 1854 | ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, |
21b014f0 IP |
1855 | sh_desc_fin) - |
1856 | sh_desc_update_offset; | |
bbf22344 | 1857 | ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, |
21b014f0 IP |
1858 | sh_desc_digest) - |
1859 | sh_desc_update_offset; | |
1860 | ||
4cb4f7c1 | 1861 | crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); |
9a2537d0 IP |
1862 | |
1863 | /* | |
1864 | * For keyed hash algorithms shared descriptors | |
1865 | * will be created later in setkey() callback | |
1866 | */ | |
c5a2f74d | 1867 | return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash); |
045e3678 YK |
1868 | } |
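Editor's note: caam_hash_cra_init() maps the four shared-descriptor buffers with a single dma_map_single_attrs() call that starts at sh_desc_update and stops before the key, then derives each descriptor's DMA address from the mapping base with offsetof() arithmetic. The sketch below reproduces that arithmetic on a stand-in layout (fake_ctx is illustrative, not the real struct caam_hash_ctx):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in layout: four descriptor buffers mapped with one call, key excluded. */
struct fake_ctx {
	uint32_t sh_desc_update[16];
	uint32_t sh_desc_update_first[16];
	uint32_t sh_desc_fin[16];
	uint32_t sh_desc_digest[16];
	uint8_t  key[128];		/* not covered by the mapping */
};

int main(void)
{
	const size_t base = offsetof(struct fake_ctx, sh_desc_update);
	uint64_t dma_base = 0x80000000ULL;	/* pretend DMA address of the mapping */

	/* Each descriptor's DMA address is the base plus its offset from
	 * sh_desc_update, exactly as computed in caam_hash_cra_init(). */
	printf("update       @ %#llx\n", (unsigned long long)dma_base);
	printf("update_first @ %#llx\n", (unsigned long long)
	       (dma_base + offsetof(struct fake_ctx, sh_desc_update_first) - base));
	printf("fin          @ %#llx\n", (unsigned long long)
	       (dma_base + offsetof(struct fake_ctx, sh_desc_fin) - base));
	printf("digest       @ %#llx\n", (unsigned long long)
	       (dma_base + offsetof(struct fake_ctx, sh_desc_digest) - base));

	/* The mapped length stops before the key. */
	printf("mapped bytes: %zu\n", offsetof(struct fake_ctx, key) - base);
	return 0;
}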
1869 | ||
1870 | static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |
1871 | { | |
4cb4f7c1 | 1872 | struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); |
045e3678 | 1873 | |
bbf22344 | 1874 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, |
21b014f0 IP |
1875 | offsetof(struct caam_hash_ctx, key) - |
1876 | offsetof(struct caam_hash_ctx, sh_desc_update), | |
7e0880b9 | 1877 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
e9b4913a | 1878 | if (ctx->key_dir != DMA_NONE) |
a2fb864c | 1879 | dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, |
e9b4913a | 1880 | ARRAY_SIZE(ctx->key), ctx->key_dir, |
12b8567f | 1881 | DMA_ATTR_SKIP_CPU_SYNC); |
cfc6f11b | 1882 | caam_jr_free(ctx->jrdev); |
045e3678 YK |
1883 | } |
1884 | ||
1b46c90c | 1885 | void caam_algapi_hash_exit(void) |
045e3678 | 1886 | { |
045e3678 YK |
1887 | struct caam_hash_alg *t_alg, *n; |
1888 | ||
cfc6f11b | 1889 | if (!hash_list.next) |
045e3678 YK |
1890 | return; |
1891 | ||
cfc6f11b | 1892 | list_for_each_entry_safe(t_alg, n, &hash_list, entry) { |
623814c0 | 1893 | crypto_engine_unregister_ahash(&t_alg->ahash_alg); |
045e3678 YK |
1894 | list_del(&t_alg->entry); |
1895 | kfree(t_alg); | |
1896 | } | |
1897 | } | |
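Editor's note: caam_algapi_hash_exit() walks hash_list with list_for_each_entry_safe() because each element is freed as it is unregistered; the "_safe" variant caches the next pointer before the current node disappears. A plain-C analogue of that teardown pattern, using an ad-hoc singly linked list rather than the kernel's list API:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

/* Teardown analogous to list_for_each_entry_safe(): remember the next
 * node before the current one is unregistered and freed. */
static void destroy_all(struct node *head)
{
	struct node *cur = head, *n;

	while (cur) {
		n = cur->next;		/* saved first; cur is about to go away */
		printf("unregister alg %d\n", cur->id);
		free(cur);
		cur = n;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *e = malloc(sizeof(*e));
		e->id = i;
		e->next = head;
		head = e;
	}
	destroy_all(head);
	return 0;
}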
1898 | ||
1899 | static struct caam_hash_alg * | |
cfc6f11b | 1900 | caam_hash_alloc(struct caam_hash_template *template, |
b0e09bae | 1901 | bool keyed) |
045e3678 YK |
1902 | { |
1903 | struct caam_hash_alg *t_alg; | |
1904 | struct ahash_alg *halg; | |
1905 | struct crypto_alg *alg; | |
1906 | ||
9c4f9733 | 1907 | t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); |
3de0152b | 1908 | if (!t_alg) |
045e3678 | 1909 | return ERR_PTR(-ENOMEM); |
045e3678 | 1910 | |
623814c0 HX |
1911 | t_alg->ahash_alg.base = template->template_ahash; |
1912 | halg = &t_alg->ahash_alg.base; | |
045e3678 YK |
1913 | alg = &halg->halg.base; |
1914 | ||
b0e09bae YK |
1915 | if (keyed) { |
1916 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1917 | template->hmac_name); | |
1918 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1919 | template->hmac_driver_name); | |
c5a2f74d | 1920 | t_alg->is_hmac = true; |
b0e09bae YK |
1921 | } else { |
1922 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1923 | template->name); | |
1924 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1925 | template->driver_name); | |
623814c0 | 1926 | halg->setkey = NULL; |
c5a2f74d | 1927 | t_alg->is_hmac = false; |
b0e09bae | 1928 | } |
045e3678 YK |
1929 | alg->cra_module = THIS_MODULE; |
1930 | alg->cra_init = caam_hash_cra_init; | |
1931 | alg->cra_exit = caam_hash_cra_exit; | |
4cb4f7c1 | 1932 | alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); |
045e3678 YK |
1933 | alg->cra_priority = CAAM_CRA_PRIORITY; |
1934 | alg->cra_blocksize = template->blocksize; | |
1935 | alg->cra_alignmask = 0; | |
b8aa7dc5 | 1936 | alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; |
045e3678 YK |
1937 | |
1938 | t_alg->alg_type = template->alg_type; | |
623814c0 | 1939 | t_alg->ahash_alg.op.do_one_request = ahash_do_one_req; |
045e3678 YK |
1940 | |
1941 | return t_alg; | |
1942 | } | |
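Editor's note: caam_hash_alloc() turns one template into either the keyed or the unkeyed algorithm: it copies the matching name pair, clears .setkey for the unkeyed case, and records is_hmac so caam_hash_cra_init() knows whether shared-descriptor generation must wait for setkey(). A minimal model of that selection (the struct names here are illustrative; only the sha256 name pair is taken from the table above):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct tmpl {
	const char *name, *driver_name;
	const char *hmac_name, *hmac_driver_name;
};

struct alg {
	char cra_name[64];
	char cra_driver_name[64];
	bool is_hmac;
	bool has_setkey;
};

static void alloc_alg(const struct tmpl *t, bool keyed, struct alg *a)
{
	snprintf(a->cra_name, sizeof(a->cra_name), "%s",
		 keyed ? t->hmac_name : t->name);
	snprintf(a->cra_driver_name, sizeof(a->cra_driver_name), "%s",
		 keyed ? t->hmac_driver_name : t->driver_name);
	a->is_hmac = keyed;
	a->has_setkey = keyed;	/* unkeyed variants get .setkey = NULL */
}

int main(void)
{
	const struct tmpl sha256 = {
		"sha256", "sha256-caam", "hmac(sha256)", "hmac-sha256-caam"
	};
	struct alg a;

	alloc_alg(&sha256, true, &a);
	printf("%s / %s (setkey: %d)\n", a.cra_name, a.cra_driver_name, a.has_setkey);
	alloc_alg(&sha256, false, &a);
	printf("%s / %s (setkey: %d)\n", a.cra_name, a.cra_driver_name, a.has_setkey);
	return 0;
}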
1943 | ||
1b46c90c | 1944 | int caam_algapi_hash_init(struct device *ctrldev) |
045e3678 | 1945 | { |
045e3678 | 1946 | int i = 0, err = 0; |
1b46c90c | 1947 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
bf83490e | 1948 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
d239b10d | 1949 | u32 md_inst, md_vid; |
045e3678 | 1950 | |
bf83490e VM |
1951 | /* |
1952 | * Register crypto algorithms the device supports. First, identify | |
1953 | * presence and attributes of MD block. | |
1954 | */ | |
d239b10d | 1955 | if (priv->era < 10) { |
ae1dd17d HG |
1956 | struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon; |
1957 | ||
1958 | md_vid = (rd_reg32(&perfmon->cha_id_ls) & | |
d239b10d | 1959 | CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; |
ae1dd17d | 1960 | md_inst = (rd_reg32(&perfmon->cha_num_ls) & |
d239b10d HG |
1961 | CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; |
1962 | } else { | |
ae1dd17d | 1963 | u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha); |
d239b10d HG |
1964 | |
1965 | md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; | |
1966 | md_inst = mdha & CHA_VER_NUM_MASK; | |
1967 | } | |
bf83490e VM |
1968 | |
1969 | /* | |
1970 | * Skip registration of any hashing algorithms if MD block | |
1971 | * is not present. | |
1972 | */ | |
1b46c90c | 1973 | if (!md_inst) |
0435d47e | 1974 | return 0; |
bf83490e VM |
1975 | |
1976 | /* Limit digest size based on LP256 */ | |
d239b10d | 1977 | if (md_vid == CHA_VER_VID_MD_LP256) |
bf83490e VM |
1978 | md_limit = SHA256_DIGEST_SIZE; |
1979 | ||
cfc6f11b | 1980 | INIT_LIST_HEAD(&hash_list); |
045e3678 YK |
1981 | |
1982 | /* register crypto algorithms the device supports */ | |
1983 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | |
045e3678 | 1984 | struct caam_hash_alg *t_alg; |
bf83490e VM |
1985 | struct caam_hash_template *alg = driver_hash + i; |
1986 | ||
1987 | /* If MD size is not supported by device, skip registration */ | |
12b8567f IP |
1988 | if (is_mdha(alg->alg_type) && |
1989 | alg->template_ahash.halg.digestsize > md_limit) | |
bf83490e | 1990 | continue; |
045e3678 | 1991 | |
b0e09bae | 1992 | /* register hmac version */ |
bf83490e | 1993 | t_alg = caam_hash_alloc(alg, true); |
b0e09bae YK |
1994 | if (IS_ERR(t_alg)) { |
1995 | err = PTR_ERR(t_alg); | |
0f103b37 IP |
1996 | pr_warn("%s alg allocation failed\n", |
1997 | alg->hmac_driver_name); | |
b0e09bae YK |
1998 | continue; |
1999 | } | |
2000 | ||
623814c0 | 2001 | err = crypto_engine_register_ahash(&t_alg->ahash_alg); |
b0e09bae | 2002 | if (err) { |
6ea30f0a | 2003 | pr_warn("%s alg registration failed: %d\n", |
623814c0 | 2004 | t_alg->ahash_alg.base.halg.base.cra_driver_name, |
6ea30f0a | 2005 | err); |
b0e09bae YK |
2006 | kfree(t_alg); |
2007 | } else | |
cfc6f11b | 2008 | list_add_tail(&t_alg->entry, &hash_list); |
b0e09bae | 2009 | |
12b8567f IP |
2010 | if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) |
2011 | continue; | |
2012 | ||
b0e09bae | 2013 | /* register unkeyed version */ |
bf83490e | 2014 | t_alg = caam_hash_alloc(alg, false); |
045e3678 YK |
2015 | if (IS_ERR(t_alg)) { |
2016 | err = PTR_ERR(t_alg); | |
bf83490e | 2017 | pr_warn("%s alg allocation failed\n", alg->driver_name); |
045e3678 YK |
2018 | continue; |
2019 | } | |
2020 | ||
623814c0 | 2021 | err = crypto_engine_register_ahash(&t_alg->ahash_alg); |
045e3678 | 2022 | if (err) { |
6ea30f0a | 2023 | pr_warn("%s alg registration failed: %d\n", |
623814c0 | 2024 | t_alg->ahash_alg.base.halg.base.cra_driver_name, |
6ea30f0a | 2025 | err); |
045e3678 YK |
2026 | kfree(t_alg); |
2027 | } else | |
cfc6f11b | 2028 | list_add_tail(&t_alg->entry, &hash_list); |
045e3678 YK |
2029 | } |
2030 | ||
2031 | return err; | |
2032 | } |
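Editor's note: caam_algapi_hash_init() probes the MD block and skips any MDHA-based template whose digest size exceeds what the hardware supports (32 bytes on LP256-era parts); the AES-MAC entries are not subject to that limit. A small sketch of that registration filter, with illustrative values:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct entry { const char *name; unsigned int digestsize; bool is_mdha; };

/* Registration filter as in caam_algapi_hash_init(): MDHA algorithms whose
 * digest exceeds the block's limit are skipped. */
static bool should_register(const struct entry *e, unsigned int md_limit)
{
	return !e->is_mdha || e->digestsize <= md_limit;
}

int main(void)
{
	const struct entry algs[] = {
		{ "sha256",    32, true  },
		{ "sha512",    64, true  },
		{ "cmac(aes)", 16, false },	/* AES MAC, not an MDHA algorithm */
	};
	unsigned int md_limit = 32;		/* LP256: limited to SHA-256 */

	for (size_t i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		printf("%-10s -> %s\n", algs[i].name,
		       should_register(&algs[i], md_limit) ? "register" : "skip");
	return 0;
}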