drivers/crypto/ccree/cc_hash.c
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <crypto/algapi.h>
7#include <crypto/hash.h>
8#include <crypto/md5.h>
9#include <crypto/internal/hash.h>
10
11#include "cc_driver.h"
12#include "cc_request_mgr.h"
13#include "cc_buffer_mgr.h"
14#include "cc_hash.h"
15#include "cc_sram_mgr.h"
16
17#define CC_MAX_HASH_SEQ_LEN 12
18#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
19
20struct cc_hash_handle {
 21 cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
22 cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
23 struct list_head hash_list;
24};
25
26static const u32 digest_len_init[] = {
27 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
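/* Note: MD5's four IV words (A..D) are numerically identical to SHA-1's
 * H0..H3, so the MD5 larval digest below reuses the SHA1_H* constants,
 * listed in reverse word order like the other larval digests here.
 */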
28static const u32 md5_init[] = {
29 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
30static const u32 sha1_init[] = {
31 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
32static const u32 sha224_init[] = {
33 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
34 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
35static const u32 sha256_init[] = {
36 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
37 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
38static const u32 digest_len_sha512_init[] = {
39 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
40static u64 sha384_init[] = {
41 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
42 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
43static u64 sha512_init[] = {
44 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
45 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
46
47static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
48 unsigned int *seq_size);
49
50static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
51 unsigned int *seq_size);
52
53static const void *cc_larval_digest(struct device *dev, u32 mode);
54
55struct cc_hash_alg {
56 struct list_head entry;
57 int hash_mode;
58 int hw_mode;
59 int inter_digestsize;
60 struct cc_drvdata *drvdata;
61 struct ahash_alg ahash_alg;
62};
63
64struct hash_key_req_ctx {
65 u32 keylen;
66 dma_addr_t key_dma_addr;
67};
68
69/* hash per-session context */
70struct cc_hash_ctx {
71 struct cc_drvdata *drvdata;
 72 /* holds the original digest: the digest after "setkey" if HMAC,
 73 * the initial digest if HASH.
 74 */
75 u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
76 u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
77
78 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
79 dma_addr_t digest_buff_dma_addr;
 80 /* used for HMAC with a key larger than the mode's block size */
81 struct hash_key_req_ctx key_params;
82 int hash_mode;
83 int hw_mode;
84 int inter_digestsize;
85 struct completion setkey_comp;
86 bool is_hmac;
87};
88
89static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
90 unsigned int flow_mode, struct cc_hw_desc desc[],
91 bool is_not_last_data, unsigned int *seq_size);
92
93static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
94{
95 if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
96 mode == DRV_HASH_SHA512) {
97 set_bytes_swap(desc, 1);
98 } else {
99 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
100 }
101}
102
103static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
104 unsigned int digestsize)
105{
106 state->digest_result_dma_addr =
107 dma_map_single(dev, state->digest_result_buff,
108 digestsize, DMA_BIDIRECTIONAL);
109 if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
110 dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
111 digestsize);
112 return -ENOMEM;
113 }
114 dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
115 digestsize, state->digest_result_buff,
116 &state->digest_result_dma_addr);
117
118 return 0;
119}
120
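/*
 * cc_init_req() - reset the per-request hash state. For HMAC (other than
 * XCBC/CMAC) the state is seeded with the IPAD digest and initial length
 * precomputed at setkey time, and the OPAD digest is copied in for the
 * outer hash; for a plain hash the larval (initial) digest of the mode
 * is copied in instead.
 */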
121static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
122 struct cc_hash_ctx *ctx)
123{
124 bool is_hmac = ctx->is_hmac;
125
126 memset(state, 0, sizeof(*state));
127
128 if (is_hmac) {
129 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
130 ctx->hw_mode != DRV_CIPHER_CMAC) {
131 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
132 ctx->inter_digestsize,
133 DMA_BIDIRECTIONAL);
134
135 memcpy(state->digest_buff, ctx->digest_buff,
136 ctx->inter_digestsize);
137 if (ctx->hash_mode == DRV_HASH_SHA512 ||
138 ctx->hash_mode == DRV_HASH_SHA384)
139 memcpy(state->digest_bytes_len,
140 digest_len_sha512_init,
141 ctx->drvdata->hash_len_sz);
142 else
143 memcpy(state->digest_bytes_len, digest_len_init,
144 ctx->drvdata->hash_len_sz);
145 }
146
147 if (ctx->hash_mode != DRV_HASH_NULL) {
148 dma_sync_single_for_cpu(dev,
149 ctx->opad_tmp_keys_dma_addr,
150 ctx->inter_digestsize,
151 DMA_BIDIRECTIONAL);
152 memcpy(state->opad_digest_buff,
153 ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
154 }
155 } else { /*hash*/
156 /* Copy the initial digests if hash flow. */
157 const void *larval = cc_larval_digest(dev, ctx->hash_mode);
158
159 memcpy(state->digest_buff, larval, ctx->inter_digestsize);
160 }
161}
162
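/*
 * cc_map_req() - DMA-map the request state buffers: the intermediate
 * digest, the running byte count (except for XCBC) and, for true HMAC
 * (hash_mode != DRV_HASH_NULL), the OPAD digest. Undone by cc_unmap_req().
 */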
163static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
164 struct cc_hash_ctx *ctx)
165{
166 bool is_hmac = ctx->is_hmac;
167
168 state->digest_buff_dma_addr =
169 dma_map_single(dev, state->digest_buff,
170 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
171 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
172 dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
173 ctx->inter_digestsize, state->digest_buff);
174 return -EINVAL;
175 }
176 dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
177 ctx->inter_digestsize, state->digest_buff,
178 &state->digest_buff_dma_addr);
179
180 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
181 state->digest_bytes_len_dma_addr =
182 dma_map_single(dev, state->digest_bytes_len,
183 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
184 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
185 dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
186 HASH_MAX_LEN_SIZE, state->digest_bytes_len);
187 goto unmap_digest_buf;
188 }
189 dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
190 HASH_MAX_LEN_SIZE, state->digest_bytes_len,
191 &state->digest_bytes_len_dma_addr);
192 }
193
194 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
195 state->opad_digest_dma_addr =
196 dma_map_single(dev, state->opad_digest_buff,
197 ctx->inter_digestsize,
198 DMA_BIDIRECTIONAL);
199 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
200 dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
201 ctx->inter_digestsize,
202 state->opad_digest_buff);
203 goto unmap_digest_len;
204 }
205 dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
206 ctx->inter_digestsize, state->opad_digest_buff,
207 &state->opad_digest_dma_addr);
208 }
209
210 return 0;
211
212unmap_digest_len:
213 if (state->digest_bytes_len_dma_addr) {
214 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
215 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
216 state->digest_bytes_len_dma_addr = 0;
217 }
218unmap_digest_buf:
219 if (state->digest_buff_dma_addr) {
220 dma_unmap_single(dev, state->digest_buff_dma_addr,
221 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
222 state->digest_buff_dma_addr = 0;
223 }
224
225 return -EINVAL;
226}
227
228static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
229 struct cc_hash_ctx *ctx)
230{
231 if (state->digest_buff_dma_addr) {
232 dma_unmap_single(dev, state->digest_buff_dma_addr,
233 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
234 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
235 &state->digest_buff_dma_addr);
236 state->digest_buff_dma_addr = 0;
237 }
238 if (state->digest_bytes_len_dma_addr) {
239 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
240 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
241 dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
242 &state->digest_bytes_len_dma_addr);
243 state->digest_bytes_len_dma_addr = 0;
244 }
245 if (state->opad_digest_dma_addr) {
246 dma_unmap_single(dev, state->opad_digest_dma_addr,
247 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
248 dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
249 &state->opad_digest_dma_addr);
250 state->opad_digest_dma_addr = 0;
251 }
252}
253
254static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
255 unsigned int digestsize, u8 *result)
256{
257 if (state->digest_result_dma_addr) {
258 dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
259 DMA_BIDIRECTIONAL);
260 dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
261 state->digest_result_buff,
262 &state->digest_result_dma_addr, digestsize);
263 memcpy(result, state->digest_result_buff, digestsize);
264 }
265 state->digest_result_dma_addr = 0;
266}
267
268static void cc_update_complete(struct device *dev, void *cc_req, int err)
269{
270 struct ahash_request *req = (struct ahash_request *)cc_req;
271 struct ahash_req_ctx *state = ahash_request_ctx(req);
272 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
273 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
274
275 dev_dbg(dev, "req=%pK\n", req);
276
277 cc_unmap_hash_request(dev, state, req->src, false);
278 cc_unmap_req(dev, state, ctx);
279 req->base.complete(&req->base, err);
280}
281
282static void cc_digest_complete(struct device *dev, void *cc_req, int err)
283{
284 struct ahash_request *req = (struct ahash_request *)cc_req;
285 struct ahash_req_ctx *state = ahash_request_ctx(req);
286 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
287 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
288 u32 digestsize = crypto_ahash_digestsize(tfm);
289
290 dev_dbg(dev, "req=%pK\n", req);
291
292 cc_unmap_hash_request(dev, state, req->src, false);
293 cc_unmap_result(dev, state, digestsize, req->result);
294 cc_unmap_req(dev, state, ctx);
295 req->base.complete(&req->base, err);
296}
297
298static void cc_hash_complete(struct device *dev, void *cc_req, int err)
299{
300 struct ahash_request *req = (struct ahash_request *)cc_req;
301 struct ahash_req_ctx *state = ahash_request_ctx(req);
302 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
303 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
304 u32 digestsize = crypto_ahash_digestsize(tfm);
305
306 dev_dbg(dev, "req=%pK\n", req);
307
308 cc_unmap_hash_request(dev, state, req->src, false);
309 cc_unmap_result(dev, state, digestsize, req->result);
310 cc_unmap_req(dev, state, ctx);
311 req->base.complete(&req->base, err);
312}
313
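/* Append the descriptor that writes the final digest/MAC to
 * state->digest_result_dma_addr and marks it as the last descriptor of
 * the request.
 */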
314static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
315 int idx)
316{
317 struct ahash_req_ctx *state = ahash_request_ctx(req);
318 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
320 u32 digestsize = crypto_ahash_digestsize(tfm);
321
322 /* Get final MAC result */
323 hw_desc_init(&desc[idx]);
324 set_cipher_mode(&desc[idx], ctx->hw_mode);
325 /* TODO */
326 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
327 NS_BIT, 1);
328 set_queue_last_ind(ctx->drvdata, &desc[idx]);
329 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
330 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
331 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
332 cc_set_endianity(ctx->hash_mode, &desc[idx]);
333 idx++;
334
335 return idx;
336}
337
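/* Append the descriptors that perform the outer HMAC hash: write the
 * inner digest back to the context buffer, load the OPAD state, load the
 * digest-length constant from SRAM, then hash the inner digest.
 */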
338static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
339 int idx)
340{
341 struct ahash_req_ctx *state = ahash_request_ctx(req);
342 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
343 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
344 u32 digestsize = crypto_ahash_digestsize(tfm);
345
346 /* store the hash digest result in the context */
347 hw_desc_init(&desc[idx]);
348 set_cipher_mode(&desc[idx], ctx->hw_mode);
349 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
350 NS_BIT, 0);
351 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
352 cc_set_endianity(ctx->hash_mode, &desc[idx]);
353 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
354 idx++;
355
356 /* Loading hash opad xor key state */
357 hw_desc_init(&desc[idx]);
358 set_cipher_mode(&desc[idx], ctx->hw_mode);
359 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
360 ctx->inter_digestsize, NS_BIT);
361 set_flow_mode(&desc[idx], S_DIN_to_HASH);
362 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
363 idx++;
364
365 /* Load the hash current length */
366 hw_desc_init(&desc[idx]);
367 set_cipher_mode(&desc[idx], ctx->hw_mode);
368 set_din_sram(&desc[idx],
369 cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
370 ctx->drvdata->hash_len_sz);
371 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
372 set_flow_mode(&desc[idx], S_DIN_to_HASH);
373 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
374 idx++;
375
376 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
377 hw_desc_init(&desc[idx]);
378 set_din_no_dma(&desc[idx], 0, 0xfffff0);
379 set_dout_no_dma(&desc[idx], 0, 0, 1);
380 idx++;
381
382 /* Perform HASH update */
383 hw_desc_init(&desc[idx]);
384 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
385 digestsize, NS_BIT);
386 set_flow_mode(&desc[idx], DIN_HASH);
387 idx++;
388
389 return idx;
390}
391
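/* One-shot hash/HMAC of req->src: map the state, the result buffer and
 * the source data, build the whole descriptor sequence and hand it to
 * the request manager.
 */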
392static int cc_hash_digest(struct ahash_request *req)
393{
394 struct ahash_req_ctx *state = ahash_request_ctx(req);
395 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
396 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
397 u32 digestsize = crypto_ahash_digestsize(tfm);
398 struct scatterlist *src = req->src;
399 unsigned int nbytes = req->nbytes;
400 u8 *result = req->result;
401 struct device *dev = drvdata_to_dev(ctx->drvdata);
402 bool is_hmac = ctx->is_hmac;
403 struct cc_crypto_req cc_req = {};
404 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
405 cc_sram_addr_t larval_digest_addr =
406 cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
407 int idx = 0;
408 int rc = 0;
409 gfp_t flags = cc_gfp_flags(&req->base);
410
411 dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
412 nbytes);
413
414 cc_init_req(dev, state, ctx);
415
416 if (cc_map_req(dev, state, ctx)) {
417 dev_err(dev, "map_ahash_source() failed\n");
418 return -ENOMEM;
419 }
420
421 if (cc_map_result(dev, state, digestsize)) {
422 dev_err(dev, "map_ahash_digest() failed\n");
423 cc_unmap_req(dev, state, ctx);
424 return -ENOMEM;
425 }
426
427 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
428 flags)) {
429 dev_err(dev, "map_ahash_request_final() failed\n");
430 cc_unmap_result(dev, state, digestsize, result);
431 cc_unmap_req(dev, state, ctx);
432 return -ENOMEM;
433 }
434
435 /* Setup request structure */
436 cc_req.user_cb = cc_digest_complete;
437 cc_req.user_arg = req;
438
439 /* If HMAC then load hash IPAD xor key, if HASH then load initial
440 * digest
441 */
442 hw_desc_init(&desc[idx]);
443 set_cipher_mode(&desc[idx], ctx->hw_mode);
444 if (is_hmac) {
445 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
446 ctx->inter_digestsize, NS_BIT);
447 } else {
448 set_din_sram(&desc[idx], larval_digest_addr,
449 ctx->inter_digestsize);
450 }
451 set_flow_mode(&desc[idx], S_DIN_to_HASH);
452 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
453 idx++;
454
455 /* Load the hash current length */
456 hw_desc_init(&desc[idx]);
457 set_cipher_mode(&desc[idx], ctx->hw_mode);
458
459 if (is_hmac) {
460 set_din_type(&desc[idx], DMA_DLLI,
461 state->digest_bytes_len_dma_addr,
462 ctx->drvdata->hash_len_sz, NS_BIT);
463 } else {
464 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
465 if (nbytes)
466 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
467 else
468 set_cipher_do(&desc[idx], DO_PAD);
469 }
470 set_flow_mode(&desc[idx], S_DIN_to_HASH);
471 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
472 idx++;
473
474 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
475
476 if (is_hmac) {
477 /* HW last hash block padding (aka. "DO_PAD") */
478 hw_desc_init(&desc[idx]);
479 set_cipher_mode(&desc[idx], ctx->hw_mode);
480 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
481 ctx->drvdata->hash_len_sz, NS_BIT, 0);
482 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
483 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
484 set_cipher_do(&desc[idx], DO_PAD);
485 idx++;
486
487 idx = cc_fin_hmac(desc, req, idx);
488 }
489
490 idx = cc_fin_result(desc, req, idx);
491
492 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
493 if (rc != -EINPROGRESS && rc != -EBUSY) {
494 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
495 cc_unmap_hash_request(dev, state, src, true);
496 cc_unmap_result(dev, state, digestsize, result);
497 cc_unmap_req(dev, state, ctx);
498 }
499 return rc;
500}
501
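/* Reload the intermediate digest and running byte count into the hash
 * engine and append the data descriptors; used by update/final/finup to
 * resume a multi-part hash.
 */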
502static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
503 struct ahash_req_ctx *state, unsigned int idx)
504{
505 /* Restore hash digest */
506 hw_desc_init(&desc[idx]);
507 set_cipher_mode(&desc[idx], ctx->hw_mode);
508 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
509 ctx->inter_digestsize, NS_BIT);
510 set_flow_mode(&desc[idx], S_DIN_to_HASH);
511 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
512 idx++;
513
514 /* Restore hash current length */
515 hw_desc_init(&desc[idx]);
516 set_cipher_mode(&desc[idx], ctx->hw_mode);
517 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
518 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
519 ctx->drvdata->hash_len_sz, NS_BIT);
520 set_flow_mode(&desc[idx], S_DIN_to_HASH);
521 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
522 idx++;
523
524 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
525
526 return idx;
527}
528
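/* .update entry point: feed another chunk of data through the hash
 * engine and write the intermediate digest and byte count back to the
 * request state. Data that does not yet fill a hash block is only
 * buffered (the rc == 1 path below).
 */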
529static int cc_hash_update(struct ahash_request *req)
530{
531 struct ahash_req_ctx *state = ahash_request_ctx(req);
532 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
533 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
534 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
535 struct scatterlist *src = req->src;
536 unsigned int nbytes = req->nbytes;
537 struct device *dev = drvdata_to_dev(ctx->drvdata);
538 struct cc_crypto_req cc_req = {};
539 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
540 u32 idx = 0;
541 int rc;
542 gfp_t flags = cc_gfp_flags(&req->base);
543
544 dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
545 "hmac" : "hash", nbytes);
546
547 if (nbytes == 0) {
548 /* no real updates required */
549 return 0;
550 }
551
552 rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
553 block_size, flags);
554 if (rc) {
555 if (rc == 1) {
556 dev_dbg(dev, " data size not require HW update %x\n",
557 nbytes);
558 /* No hardware updates are required */
559 return 0;
560 }
561 dev_err(dev, "map_ahash_request_update() failed\n");
562 return -ENOMEM;
563 }
564
565 if (cc_map_req(dev, state, ctx)) {
566 dev_err(dev, "map_ahash_source() failed\n");
567 cc_unmap_hash_request(dev, state, src, true);
568 return -EINVAL;
569 }
570
571 /* Setup request structure */
572 cc_req.user_cb = cc_update_complete;
573 cc_req.user_arg = req;
574
575 idx = cc_restore_hash(desc, ctx, state, idx);
576
577 /* store the hash digest result in context */
578 hw_desc_init(&desc[idx]);
579 set_cipher_mode(&desc[idx], ctx->hw_mode);
580 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
581 ctx->inter_digestsize, NS_BIT, 0);
582 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
583 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
584 idx++;
585
586 /* store current hash length in context */
587 hw_desc_init(&desc[idx]);
588 set_cipher_mode(&desc[idx], ctx->hw_mode);
589 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
590 ctx->drvdata->hash_len_sz, NS_BIT, 1);
591 set_queue_last_ind(ctx->drvdata, &desc[idx]);
592 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
593 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
594 idx++;
595
596 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
597 if (rc != -EINPROGRESS && rc != -EBUSY) {
598 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
599 cc_unmap_hash_request(dev, state, src, true);
600 cc_unmap_req(dev, state, ctx);
601 }
602 return rc;
603}
604
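/* .finup entry point: hash whatever remains (buffered data plus
 * req->src) and produce the final digest in a single descriptor
 * sequence.
 */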
605static int cc_hash_finup(struct ahash_request *req)
606{
607 struct ahash_req_ctx *state = ahash_request_ctx(req);
608 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
609 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
610 u32 digestsize = crypto_ahash_digestsize(tfm);
611 struct scatterlist *src = req->src;
612 unsigned int nbytes = req->nbytes;
613 u8 *result = req->result;
614 struct device *dev = drvdata_to_dev(ctx->drvdata);
615 bool is_hmac = ctx->is_hmac;
616 struct cc_crypto_req cc_req = {};
617 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
618 unsigned int idx = 0;
619 int rc;
620 gfp_t flags = cc_gfp_flags(&req->base);
621
622 dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
623 nbytes);
624
625 if (cc_map_req(dev, state, ctx)) {
626 dev_err(dev, "map_ahash_source() failed\n");
627 return -EINVAL;
628 }
629
630 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
631 flags)) {
632 dev_err(dev, "map_ahash_request_final() failed\n");
633 cc_unmap_req(dev, state, ctx);
634 return -ENOMEM;
635 }
636 if (cc_map_result(dev, state, digestsize)) {
637 dev_err(dev, "map_ahash_digest() failed\n");
638 cc_unmap_hash_request(dev, state, src, true);
639 cc_unmap_req(dev, state, ctx);
640 return -ENOMEM;
641 }
642
643 /* Setup request structure */
644 cc_req.user_cb = cc_hash_complete;
645 cc_req.user_arg = req;
646
647 idx = cc_restore_hash(desc, ctx, state, idx);
648
649 if (is_hmac)
650 idx = cc_fin_hmac(desc, req, idx);
651
652 idx = cc_fin_result(desc, req, idx);
653
654 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
655 if (rc != -EINPROGRESS && rc != -EBUSY) {
656 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
657 cc_unmap_hash_request(dev, state, src, true);
658 cc_unmap_result(dev, state, digestsize, result);
659 cc_unmap_req(dev, state, ctx);
660 }
661 return rc;
662}
663
664static int cc_hash_final(struct ahash_request *req)
665{
666 struct ahash_req_ctx *state = ahash_request_ctx(req);
667 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
668 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
669 u32 digestsize = crypto_ahash_digestsize(tfm);
670 struct scatterlist *src = req->src;
671 unsigned int nbytes = req->nbytes;
672 u8 *result = req->result;
673 struct device *dev = drvdata_to_dev(ctx->drvdata);
674 bool is_hmac = ctx->is_hmac;
675 struct cc_crypto_req cc_req = {};
676 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
677 unsigned int idx = 0;
678 int rc;
679 gfp_t flags = cc_gfp_flags(&req->base);
680
681 dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
682 nbytes);
683
684 if (cc_map_req(dev, state, ctx)) {
685 dev_err(dev, "map_ahash_source() failed\n");
686 return -EINVAL;
687 }
688
689 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
690 flags)) {
691 dev_err(dev, "map_ahash_request_final() failed\n");
692 cc_unmap_req(dev, state, ctx);
693 return -ENOMEM;
694 }
695
696 if (cc_map_result(dev, state, digestsize)) {
697 dev_err(dev, "map_ahash_digest() failed\n");
698 cc_unmap_hash_request(dev, state, src, true);
699 cc_unmap_req(dev, state, ctx);
700 return -ENOMEM;
701 }
702
703 /* Setup request structure */
704 cc_req.user_cb = cc_hash_complete;
705 cc_req.user_arg = req;
706
707 idx = cc_restore_hash(desc, ctx, state, idx);
708
709 /* "DO-PAD" must be enabled only when writing current length to HW */
710 hw_desc_init(&desc[idx]);
711 set_cipher_do(&desc[idx], DO_PAD);
712 set_cipher_mode(&desc[idx], ctx->hw_mode);
713 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
714 ctx->drvdata->hash_len_sz, NS_BIT, 0);
715 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
716 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
717 idx++;
718
719 if (is_hmac)
720 idx = cc_fin_hmac(desc, req, idx);
721
722 idx = cc_fin_result(desc, req, idx);
723
724 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
725 if (rc != -EINPROGRESS && rc != -EBUSY) {
726 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
727 cc_unmap_hash_request(dev, state, src, true);
728 cc_unmap_result(dev, state, digestsize, result);
729 cc_unmap_req(dev, state, ctx);
730 }
731 return rc;
732}
733
734static int cc_hash_init(struct ahash_request *req)
735{
736 struct ahash_req_ctx *state = ahash_request_ctx(req);
737 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
738 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
739 struct device *dev = drvdata_to_dev(ctx->drvdata);
740
741 dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
742
743 cc_init_req(dev, state, ctx);
744
745 return 0;
746}
747
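/*
 * HMAC setkey: a key longer than the block size is first hashed, a
 * shorter one is zero-padded to a full block (an empty key yields an
 * all-zero block). A second descriptor run then derives the IPAD state
 * (stored in digest_buff) and the OPAD state (stored back into
 * opad_tmp_keys_buff) by hashing the key block XORed with the HMAC
 * ipad/opad constants.
 */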
748static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
749 unsigned int keylen)
750{
751 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
752 struct cc_crypto_req cc_req = {};
753 struct cc_hash_ctx *ctx = NULL;
754 int blocksize = 0;
755 int digestsize = 0;
756 int i, idx = 0, rc = 0;
757 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
758 cc_sram_addr_t larval_addr;
759 struct device *dev;
760
761 ctx = crypto_ahash_ctx(ahash);
762 dev = drvdata_to_dev(ctx->drvdata);
763 dev_dbg(dev, "start keylen: %d", keylen);
764
765 blocksize = crypto_tfm_alg_blocksize(&ahash->base);
766 digestsize = crypto_ahash_digestsize(ahash);
767
768 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
769
 770 /* A keylen of ZERO bytes selects the plain HASH flow;
 771 * any NON-ZERO keylen selects the HMAC flow.
 772 */
773 ctx->key_params.keylen = keylen;
774 ctx->key_params.key_dma_addr = 0;
775 ctx->is_hmac = true;
776
777 if (keylen) {
778 ctx->key_params.key_dma_addr =
779 dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
780 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
781 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
782 key, keylen);
783 return -ENOMEM;
784 }
785 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
786 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
787
788 if (keylen > blocksize) {
789 /* Load hash initial state */
790 hw_desc_init(&desc[idx]);
791 set_cipher_mode(&desc[idx], ctx->hw_mode);
792 set_din_sram(&desc[idx], larval_addr,
793 ctx->inter_digestsize);
794 set_flow_mode(&desc[idx], S_DIN_to_HASH);
795 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
796 idx++;
797
798 /* Load the hash current length*/
799 hw_desc_init(&desc[idx]);
800 set_cipher_mode(&desc[idx], ctx->hw_mode);
801 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
802 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
803 set_flow_mode(&desc[idx], S_DIN_to_HASH);
804 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
805 idx++;
806
807 hw_desc_init(&desc[idx]);
808 set_din_type(&desc[idx], DMA_DLLI,
809 ctx->key_params.key_dma_addr, keylen,
810 NS_BIT);
811 set_flow_mode(&desc[idx], DIN_HASH);
812 idx++;
813
814 /* Get hashed key */
815 hw_desc_init(&desc[idx]);
816 set_cipher_mode(&desc[idx], ctx->hw_mode);
817 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
818 digestsize, NS_BIT, 0);
819 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
820 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
821 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
822 cc_set_endianity(ctx->hash_mode, &desc[idx]);
823 idx++;
824
825 hw_desc_init(&desc[idx]);
826 set_din_const(&desc[idx], 0, (blocksize - digestsize));
827 set_flow_mode(&desc[idx], BYPASS);
828 set_dout_dlli(&desc[idx],
829 (ctx->opad_tmp_keys_dma_addr +
830 digestsize),
831 (blocksize - digestsize), NS_BIT, 0);
832 idx++;
833 } else {
834 hw_desc_init(&desc[idx]);
835 set_din_type(&desc[idx], DMA_DLLI,
836 ctx->key_params.key_dma_addr, keylen,
837 NS_BIT);
838 set_flow_mode(&desc[idx], BYPASS);
839 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
840 keylen, NS_BIT, 0);
841 idx++;
842
843 if ((blocksize - keylen)) {
844 hw_desc_init(&desc[idx]);
845 set_din_const(&desc[idx], 0,
846 (blocksize - keylen));
847 set_flow_mode(&desc[idx], BYPASS);
848 set_dout_dlli(&desc[idx],
849 (ctx->opad_tmp_keys_dma_addr +
850 keylen), (blocksize - keylen),
851 NS_BIT, 0);
852 idx++;
853 }
854 }
855 } else {
856 hw_desc_init(&desc[idx]);
857 set_din_const(&desc[idx], 0, blocksize);
858 set_flow_mode(&desc[idx], BYPASS);
859 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
860 blocksize, NS_BIT, 0);
861 idx++;
862 }
863
864 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
865 if (rc) {
866 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
867 goto out;
868 }
869
870 /* calc derived HMAC key */
871 for (idx = 0, i = 0; i < 2; i++) {
872 /* Load hash initial state */
873 hw_desc_init(&desc[idx]);
874 set_cipher_mode(&desc[idx], ctx->hw_mode);
875 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
876 set_flow_mode(&desc[idx], S_DIN_to_HASH);
877 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
878 idx++;
879
880 /* Load the hash current length*/
881 hw_desc_init(&desc[idx]);
882 set_cipher_mode(&desc[idx], ctx->hw_mode);
883 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
884 set_flow_mode(&desc[idx], S_DIN_to_HASH);
885 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
886 idx++;
887
888 /* Prepare ipad key */
889 hw_desc_init(&desc[idx]);
890 set_xor_val(&desc[idx], hmac_pad_const[i]);
891 set_cipher_mode(&desc[idx], ctx->hw_mode);
892 set_flow_mode(&desc[idx], S_DIN_to_HASH);
893 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
894 idx++;
895
896 /* Perform HASH update */
897 hw_desc_init(&desc[idx]);
898 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
899 blocksize, NS_BIT);
900 set_cipher_mode(&desc[idx], ctx->hw_mode);
901 set_xor_active(&desc[idx]);
902 set_flow_mode(&desc[idx], DIN_HASH);
903 idx++;
904
905 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
906 * of the first HASH "update" state)
907 */
908 hw_desc_init(&desc[idx]);
909 set_cipher_mode(&desc[idx], ctx->hw_mode);
910 if (i > 0) /* Not first iteration */
911 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
912 ctx->inter_digestsize, NS_BIT, 0);
913 else /* First iteration */
914 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
915 ctx->inter_digestsize, NS_BIT, 0);
916 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
917 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
918 idx++;
919 }
920
921 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
922
923out:
924 if (rc)
925 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
926
927 if (ctx->key_params.key_dma_addr) {
928 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
929 ctx->key_params.keylen, DMA_TO_DEVICE);
930 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
931 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
932 }
933 return rc;
934}
935
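/* AES-XCBC setkey: derive the three XCBC subkeys K1/K2/K3 by encrypting
 * the constant blocks 0x01..., 0x02... and 0x03... with the user key and
 * store them at their offsets in opad_tmp_keys_buff.
 */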
936static int cc_xcbc_setkey(struct crypto_ahash *ahash,
937 const u8 *key, unsigned int keylen)
938{
939 struct cc_crypto_req cc_req = {};
940 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
941 struct device *dev = drvdata_to_dev(ctx->drvdata);
942 int rc = 0;
943 unsigned int idx = 0;
944 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
945
946 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
947
948 switch (keylen) {
949 case AES_KEYSIZE_128:
950 case AES_KEYSIZE_192:
951 case AES_KEYSIZE_256:
952 break;
953 default:
954 return -EINVAL;
955 }
956
957 ctx->key_params.keylen = keylen;
958
959 ctx->key_params.key_dma_addr =
960 dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
961 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
962 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
963 key, keylen);
964 return -ENOMEM;
965 }
966 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
967 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
968
969 ctx->is_hmac = true;
970 /* 1. Load the AES key */
971 hw_desc_init(&desc[idx]);
972 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
973 keylen, NS_BIT);
974 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
975 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
976 set_key_size_aes(&desc[idx], keylen);
977 set_flow_mode(&desc[idx], S_DIN_to_AES);
978 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
979 idx++;
980
981 hw_desc_init(&desc[idx]);
982 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
983 set_flow_mode(&desc[idx], DIN_AES_DOUT);
984 set_dout_dlli(&desc[idx],
985 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
986 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
987 idx++;
988
989 hw_desc_init(&desc[idx]);
990 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
991 set_flow_mode(&desc[idx], DIN_AES_DOUT);
992 set_dout_dlli(&desc[idx],
993 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
994 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
995 idx++;
996
997 hw_desc_init(&desc[idx]);
998 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
999 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1000 set_dout_dlli(&desc[idx],
1001 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
1002 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1003 idx++;
1004
1005 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
1006
1007 if (rc)
1008 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1009
1010 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
1011 ctx->key_params.keylen, DMA_TO_DEVICE);
1012 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1013 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1014
1015 return rc;
1016}
1017
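/* AES-CMAC setkey: validate the AES key size and copy the key into
 * opad_tmp_keys_buff (a 192-bit key is zero-padded up to
 * CC_AES_KEY_SIZE_MAX); no descriptors are issued here.
 */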
1018static int cc_cmac_setkey(struct crypto_ahash *ahash,
1019 const u8 *key, unsigned int keylen)
1020{
1021 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1022 struct device *dev = drvdata_to_dev(ctx->drvdata);
1023
1024 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1025
1026 ctx->is_hmac = true;
1027
1028 switch (keylen) {
1029 case AES_KEYSIZE_128:
1030 case AES_KEYSIZE_192:
1031 case AES_KEYSIZE_256:
1032 break;
1033 default:
1034 return -EINVAL;
1035 }
1036
1037 ctx->key_params.keylen = keylen;
1038
1039 /* STAT_PHASE_1: Copy key to ctx */
1040
1041 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1042 keylen, DMA_TO_DEVICE);
1043
1044 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1045 if (keylen == 24) {
1046 memset(ctx->opad_tmp_keys_buff + 24, 0,
1047 CC_AES_KEY_SIZE_MAX - 24);
1048 }
1049
1050 dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1051 keylen, DMA_TO_DEVICE);
1052
1053 ctx->key_params.keylen = keylen;
1054
1055 return 0;
1056}
1057
1058static void cc_free_ctx(struct cc_hash_ctx *ctx)
1059{
1060 struct device *dev = drvdata_to_dev(ctx->drvdata);
1061
1062 if (ctx->digest_buff_dma_addr) {
1063 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1064 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1065 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1066 &ctx->digest_buff_dma_addr);
1067 ctx->digest_buff_dma_addr = 0;
1068 }
1069 if (ctx->opad_tmp_keys_dma_addr) {
1070 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1071 sizeof(ctx->opad_tmp_keys_buff),
1072 DMA_BIDIRECTIONAL);
1073 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1074 &ctx->opad_tmp_keys_dma_addr);
1075 ctx->opad_tmp_keys_dma_addr = 0;
1076 }
1077
1078 ctx->key_params.keylen = 0;
1079}
1080
1081static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1082{
1083 struct device *dev = drvdata_to_dev(ctx->drvdata);
1084
1085 ctx->key_params.keylen = 0;
1086
1087 ctx->digest_buff_dma_addr =
1088 dma_map_single(dev, (void *)ctx->digest_buff,
1089 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1090 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1091 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1092 sizeof(ctx->digest_buff), ctx->digest_buff);
1093 goto fail;
1094 }
1095 dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1096 sizeof(ctx->digest_buff), ctx->digest_buff,
1097 &ctx->digest_buff_dma_addr);
1098
1099 ctx->opad_tmp_keys_dma_addr =
1100 dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1101 sizeof(ctx->opad_tmp_keys_buff),
1102 DMA_BIDIRECTIONAL);
1103 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1104 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1105 sizeof(ctx->opad_tmp_keys_buff),
1106 ctx->opad_tmp_keys_buff);
1107 goto fail;
1108 }
1109 dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1110 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1111 &ctx->opad_tmp_keys_dma_addr);
1112
1113 ctx->is_hmac = false;
1114 return 0;
1115
1116fail:
1117 cc_free_ctx(ctx);
1118 return -ENOMEM;
1119}
1120
1121static int cc_cra_init(struct crypto_tfm *tfm)
1122{
1123 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1124 struct hash_alg_common *hash_alg_common =
1125 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1126 struct ahash_alg *ahash_alg =
1127 container_of(hash_alg_common, struct ahash_alg, halg);
1128 struct cc_hash_alg *cc_alg =
1129 container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1130
1131 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1132 sizeof(struct ahash_req_ctx));
1133
1134 ctx->hash_mode = cc_alg->hash_mode;
1135 ctx->hw_mode = cc_alg->hw_mode;
1136 ctx->inter_digestsize = cc_alg->inter_digestsize;
1137 ctx->drvdata = cc_alg->drvdata;
1138
1139 return cc_alloc_ctx(ctx);
1140}
1141
1142static void cc_cra_exit(struct crypto_tfm *tfm)
1143{
1144 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1145 struct device *dev = drvdata_to_dev(ctx->drvdata);
1146
1147 dev_dbg(dev, "cc_cra_exit");
1148 cc_free_ctx(ctx);
1149}
1150
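/* .update for XCBC/CMAC: feed another data chunk through the AES engine
 * and write the intermediate MAC state back into the request context;
 * sub-block data is only buffered.
 */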
1151static int cc_mac_update(struct ahash_request *req)
1152{
1153 struct ahash_req_ctx *state = ahash_request_ctx(req);
1154 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1155 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1156 struct device *dev = drvdata_to_dev(ctx->drvdata);
1157 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1158 struct cc_crypto_req cc_req = {};
1159 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1160 int rc;
1161 u32 idx = 0;
1162 gfp_t flags = cc_gfp_flags(&req->base);
1163
1164 if (req->nbytes == 0) {
1165 /* no real updates required */
1166 return 0;
1167 }
1168
1169 state->xcbc_count++;
1170
1171 rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1172 req->nbytes, block_size, flags);
1173 if (rc) {
1174 if (rc == 1) {
1175 dev_dbg(dev, " data size not require HW update %x\n",
1176 req->nbytes);
1177 /* No hardware updates are required */
1178 return 0;
1179 }
1180 dev_err(dev, "map_ahash_request_update() failed\n");
1181 return -ENOMEM;
1182 }
1183
1184 if (cc_map_req(dev, state, ctx)) {
1185 dev_err(dev, "map_ahash_source() failed\n");
1186 return -EINVAL;
1187 }
1188
1189 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1190 cc_setup_xcbc(req, desc, &idx);
1191 else
1192 cc_setup_cmac(req, desc, &idx);
1193
1194 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1195
1196 /* store the hash digest result in context */
1197 hw_desc_init(&desc[idx]);
1198 set_cipher_mode(&desc[idx], ctx->hw_mode);
1199 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1200 ctx->inter_digestsize, NS_BIT, 1);
1201 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1202 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1203 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1204 idx++;
1205
1206 /* Setup request structure */
1207 cc_req.user_cb = (void *)cc_update_complete;
1208 cc_req.user_arg = (void *)req;
1209
1210 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1211 if (rc != -EINPROGRESS && rc != -EBUSY) {
1212 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1213 cc_unmap_hash_request(dev, state, req->src, true);
1214 cc_unmap_req(dev, state, ctx);
1215 }
1216 return rc;
1217}
1218
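/* .final for XCBC/CMAC. If all data consumed so far was block-aligned
 * (no buffered remainder) the stored block state is first ECB-decrypted
 * back to block_state XOR M[n] so the last block can be redone as a
 * final block; an empty message uses the hardware's size-0 CMAC mode.
 * The resulting MAC is then written to the result buffer.
 */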
1219static int cc_mac_final(struct ahash_request *req)
1220{
1221 struct ahash_req_ctx *state = ahash_request_ctx(req);
1222 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1223 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1224 struct device *dev = drvdata_to_dev(ctx->drvdata);
1225 struct cc_crypto_req cc_req = {};
1226 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1227 int idx = 0;
1228 int rc = 0;
1229 u32 key_size, key_len;
1230 u32 digestsize = crypto_ahash_digestsize(tfm);
1231 gfp_t flags = cc_gfp_flags(&req->base);
1232 u32 rem_cnt = *cc_hash_buf_cnt(state);
1233
1234 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1235 key_size = CC_AES_128_BIT_KEY_SIZE;
1236 key_len = CC_AES_128_BIT_KEY_SIZE;
1237 } else {
1238 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1239 ctx->key_params.keylen;
1240 key_len = ctx->key_params.keylen;
1241 }
1242
1243 dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
1244
1245 if (cc_map_req(dev, state, ctx)) {
1246 dev_err(dev, "map_ahash_source() failed\n");
1247 return -EINVAL;
1248 }
1249
1250 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1251 req->nbytes, 0, flags)) {
1252 dev_err(dev, "map_ahash_request_final() failed\n");
1253 cc_unmap_req(dev, state, ctx);
1254 return -ENOMEM;
1255 }
1256
1257 if (cc_map_result(dev, state, digestsize)) {
1258 dev_err(dev, "map_ahash_digest() failed\n");
1259 cc_unmap_hash_request(dev, state, req->src, true);
1260 cc_unmap_req(dev, state, ctx);
1261 return -ENOMEM;
1262 }
1263
1264 /* Setup request structure */
1265 cc_req.user_cb = (void *)cc_hash_complete;
1266 cc_req.user_arg = (void *)req;
1267
1268 if (state->xcbc_count && rem_cnt == 0) {
1269 /* Load key for ECB decryption */
1270 hw_desc_init(&desc[idx]);
1271 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1272 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1273 set_din_type(&desc[idx], DMA_DLLI,
1274 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1275 key_size, NS_BIT);
1276 set_key_size_aes(&desc[idx], key_len);
1277 set_flow_mode(&desc[idx], S_DIN_to_AES);
1278 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1279 idx++;
1280
1281 /* Initiate decryption of block state to previous
1282 * block_state-XOR-M[n]
1283 */
1284 hw_desc_init(&desc[idx]);
1285 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1286 CC_AES_BLOCK_SIZE, NS_BIT);
1287 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1288 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1289 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1290 idx++;
1291
1292 /* Memory Barrier: wait for axi write to complete */
1293 hw_desc_init(&desc[idx]);
1294 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1295 set_dout_no_dma(&desc[idx], 0, 0, 1);
1296 idx++;
1297 }
1298
1299 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1300 cc_setup_xcbc(req, desc, &idx);
1301 else
1302 cc_setup_cmac(req, desc, &idx);
1303
1304 if (state->xcbc_count == 0) {
1305 hw_desc_init(&desc[idx]);
1306 set_cipher_mode(&desc[idx], ctx->hw_mode);
1307 set_key_size_aes(&desc[idx], key_len);
1308 set_cmac_size0_mode(&desc[idx]);
1309 set_flow_mode(&desc[idx], S_DIN_to_AES);
1310 idx++;
1311 } else if (rem_cnt > 0) {
1312 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1313 } else {
1314 hw_desc_init(&desc[idx]);
1315 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1316 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1317 idx++;
1318 }
1319
1320 /* Get final MAC result */
1321 hw_desc_init(&desc[idx]);
1322 /* TODO */
1323 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1324 digestsize, NS_BIT, 1);
1325 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1326 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1327 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1328 set_cipher_mode(&desc[idx], ctx->hw_mode);
1329 idx++;
1330
1331 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1332 if (rc != -EINPROGRESS && rc != -EBUSY) {
1333 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1334 cc_unmap_hash_request(dev, state, req->src, true);
1335 cc_unmap_result(dev, state, digestsize, req->result);
1336 cc_unmap_req(dev, state, ctx);
1337 }
1338 return rc;
1339}
1340
1341static int cc_mac_finup(struct ahash_request *req)
1342{
1343 struct ahash_req_ctx *state = ahash_request_ctx(req);
1344 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1345 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1346 struct device *dev = drvdata_to_dev(ctx->drvdata);
1347 struct cc_crypto_req cc_req = {};
1348 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1349 int idx = 0;
1350 int rc = 0;
1351 u32 key_len = 0;
1352 u32 digestsize = crypto_ahash_digestsize(tfm);
1353 gfp_t flags = cc_gfp_flags(&req->base);
1354
1355 dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1356 if (state->xcbc_count > 0 && req->nbytes == 0) {
1357 dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
1358 return cc_mac_final(req);
1359 }
1360
1361 if (cc_map_req(dev, state, ctx)) {
1362 dev_err(dev, "map_ahash_source() failed\n");
1363 return -EINVAL;
1364 }
1365
1366 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1367 req->nbytes, 1, flags)) {
1368 dev_err(dev, "map_ahash_request_final() failed\n");
1369 cc_unmap_req(dev, state, ctx);
1370 return -ENOMEM;
1371 }
1372 if (cc_map_result(dev, state, digestsize)) {
1373 dev_err(dev, "map_ahash_digest() failed\n");
1374 cc_unmap_hash_request(dev, state, req->src, true);
1375 cc_unmap_req(dev, state, ctx);
1376 return -ENOMEM;
1377 }
1378
1379 /* Setup request structure */
1380 cc_req.user_cb = (void *)cc_hash_complete;
1381 cc_req.user_arg = (void *)req;
1382
1383 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1384 key_len = CC_AES_128_BIT_KEY_SIZE;
1385 cc_setup_xcbc(req, desc, &idx);
1386 } else {
1387 key_len = ctx->key_params.keylen;
1388 cc_setup_cmac(req, desc, &idx);
1389 }
1390
1391 if (req->nbytes == 0) {
1392 hw_desc_init(&desc[idx]);
1393 set_cipher_mode(&desc[idx], ctx->hw_mode);
1394 set_key_size_aes(&desc[idx], key_len);
1395 set_cmac_size0_mode(&desc[idx]);
1396 set_flow_mode(&desc[idx], S_DIN_to_AES);
1397 idx++;
1398 } else {
1399 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1400 }
1401
1402 /* Get final MAC result */
1403 hw_desc_init(&desc[idx]);
1404 /* TODO */
1405 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1406 digestsize, NS_BIT, 1);
1407 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1408 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1409 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1410 set_cipher_mode(&desc[idx], ctx->hw_mode);
1411 idx++;
1412
1413 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1414 if (rc != -EINPROGRESS && rc != -EBUSY) {
1415 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1416 cc_unmap_hash_request(dev, state, req->src, true);
1417 cc_unmap_result(dev, state, digestsize, req->result);
1418 cc_unmap_req(dev, state, ctx);
1419 }
1420 return rc;
1421}
1422
1423static int cc_mac_digest(struct ahash_request *req)
1424{
1425 struct ahash_req_ctx *state = ahash_request_ctx(req);
1426 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1427 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1428 struct device *dev = drvdata_to_dev(ctx->drvdata);
1429 u32 digestsize = crypto_ahash_digestsize(tfm);
1430 struct cc_crypto_req cc_req = {};
1431 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1432 u32 key_len;
1433 unsigned int idx = 0;
1434 int rc;
1435 gfp_t flags = cc_gfp_flags(&req->base);
1436
1437 dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
1438
1439 cc_init_req(dev, state, ctx);
1440
1441 if (cc_map_req(dev, state, ctx)) {
1442 dev_err(dev, "map_ahash_source() failed\n");
1443 return -ENOMEM;
1444 }
1445 if (cc_map_result(dev, state, digestsize)) {
1446 dev_err(dev, "map_ahash_digest() failed\n");
1447 cc_unmap_req(dev, state, ctx);
1448 return -ENOMEM;
1449 }
1450
1451 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1452 req->nbytes, 1, flags)) {
1453 dev_err(dev, "map_ahash_request_final() failed\n");
1454 cc_unmap_req(dev, state, ctx);
1455 return -ENOMEM;
1456 }
1457
1458 /* Setup request structure */
1459 cc_req.user_cb = (void *)cc_digest_complete;
1460 cc_req.user_arg = (void *)req;
1461
1462 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1463 key_len = CC_AES_128_BIT_KEY_SIZE;
1464 cc_setup_xcbc(req, desc, &idx);
1465 } else {
1466 key_len = ctx->key_params.keylen;
1467 cc_setup_cmac(req, desc, &idx);
1468 }
1469
1470 if (req->nbytes == 0) {
1471 hw_desc_init(&desc[idx]);
1472 set_cipher_mode(&desc[idx], ctx->hw_mode);
1473 set_key_size_aes(&desc[idx], key_len);
1474 set_cmac_size0_mode(&desc[idx]);
1475 set_flow_mode(&desc[idx], S_DIN_to_AES);
1476 idx++;
1477 } else {
1478 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1479 }
1480
1481 /* Get final MAC result */
1482 hw_desc_init(&desc[idx]);
1483 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1484 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1485 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1486 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1487 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1488 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1489 set_cipher_mode(&desc[idx], ctx->hw_mode);
1490 idx++;
1491
1492 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1493 if (rc != -EINPROGRESS && rc != -EBUSY) {
1494 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1495 cc_unmap_hash_request(dev, state, req->src, true);
1496 cc_unmap_result(dev, state, digestsize, req->result);
1497 cc_unmap_req(dev, state, ctx);
1498 }
1499 return rc;
1500}
1501
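/*
 * Export/import format for partial hash state (see CC_STATE_SIZE()):
 *   u32 CC_EXPORT_MAGIC
 *   u8  intermediate digest[inter_digestsize]
 *   u8  digest_bytes_len[hash_len_sz]
 *   u32 buffered-byte count
 *   u8  buffered data[count]
 */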
1502static int cc_hash_export(struct ahash_request *req, void *out)
1503{
1504 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1505 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1506 struct ahash_req_ctx *state = ahash_request_ctx(req);
1507 u8 *curr_buff = cc_hash_buf(state);
1508 u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1509 const u32 tmp = CC_EXPORT_MAGIC;
1510
1511 memcpy(out, &tmp, sizeof(u32));
1512 out += sizeof(u32);
1513
1514 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1515 out += ctx->inter_digestsize;
1516
1517 memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
1518 out += ctx->drvdata->hash_len_sz;
1519
1520 memcpy(out, &curr_buff_cnt, sizeof(u32));
1521 out += sizeof(u32);
1522
1523 memcpy(out, curr_buff, curr_buff_cnt);
1524
1525 return 0;
1526}
1527
1528static int cc_hash_import(struct ahash_request *req, const void *in)
1529{
1530 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1531 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1532 struct device *dev = drvdata_to_dev(ctx->drvdata);
1533 struct ahash_req_ctx *state = ahash_request_ctx(req);
1534 u32 tmp;
1535
1536 memcpy(&tmp, in, sizeof(u32));
1537 if (tmp != CC_EXPORT_MAGIC)
1538 return -EINVAL;
1539 in += sizeof(u32);
1540
1541 cc_init_req(dev, state, ctx);
1542
1543 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1544 in += ctx->inter_digestsize;
1545
1546 memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
1547 in += ctx->drvdata->hash_len_sz;
1548
1549 /* Sanity check the data as much as possible */
1550 memcpy(&tmp, in, sizeof(u32));
1551 if (tmp > CC_MAX_HASH_BLCK_SIZE)
1552 return -EINVAL;
1553 in += sizeof(u32);
1554
1555 state->buf_cnt[0] = tmp;
1556 memcpy(state->buffers[0], in, tmp);
1557
1558 return 0;
1559}
1560
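/* Template describing one algorithm. Each entry provides the plain hash
 * names ("name"/"driver_name", left empty for the MAC-only entries) and
 * the keyed HMAC/MAC names ("mac_name"/"mac_driver_name"); min_hw_rev
 * records the first CryptoCell revision that supports the mode.
 */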
1561struct cc_hash_template {
1562 char name[CRYPTO_MAX_ALG_NAME];
1563 char driver_name[CRYPTO_MAX_ALG_NAME];
1564 char mac_name[CRYPTO_MAX_ALG_NAME];
1565 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1566 unsigned int blocksize;
1567 bool synchronize;
1568 struct ahash_alg template_ahash;
1569 int hash_mode;
1570 int hw_mode;
1571 int inter_digestsize;
1572 struct cc_drvdata *drvdata;
1573 u32 min_hw_rev;
1574};
1575
1576#define CC_STATE_SIZE(_x) \
1577 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1578
1579/* hash descriptors */
1580static struct cc_hash_template driver_hash[] = {
 1581 // Asynchronous hash templates
1582 {
1583 .name = "sha1",
1584 .driver_name = "sha1-ccree",
1585 .mac_name = "hmac(sha1)",
1586 .mac_driver_name = "hmac-sha1-ccree",
1587 .blocksize = SHA1_BLOCK_SIZE,
1588 .synchronize = false,
1589 .template_ahash = {
1590 .init = cc_hash_init,
1591 .update = cc_hash_update,
1592 .final = cc_hash_final,
1593 .finup = cc_hash_finup,
1594 .digest = cc_hash_digest,
1595 .export = cc_hash_export,
1596 .import = cc_hash_import,
1597 .setkey = cc_hash_setkey,
1598 .halg = {
1599 .digestsize = SHA1_DIGEST_SIZE,
1600 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601 },
1602 },
1603 .hash_mode = DRV_HASH_SHA1,
1604 .hw_mode = DRV_HASH_HW_SHA1,
1605 .inter_digestsize = SHA1_DIGEST_SIZE,
1606 .min_hw_rev = CC_HW_REV_630,
1607 },
1608 {
1609 .name = "sha256",
1610 .driver_name = "sha256-ccree",
1611 .mac_name = "hmac(sha256)",
1612 .mac_driver_name = "hmac-sha256-ccree",
1613 .blocksize = SHA256_BLOCK_SIZE,
1614 .template_ahash = {
1615 .init = cc_hash_init,
1616 .update = cc_hash_update,
1617 .final = cc_hash_final,
1618 .finup = cc_hash_finup,
1619 .digest = cc_hash_digest,
1620 .export = cc_hash_export,
1621 .import = cc_hash_import,
1622 .setkey = cc_hash_setkey,
1623 .halg = {
1624 .digestsize = SHA256_DIGEST_SIZE,
1625 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1626 },
1627 },
1628 .hash_mode = DRV_HASH_SHA256,
1629 .hw_mode = DRV_HASH_HW_SHA256,
1630 .inter_digestsize = SHA256_DIGEST_SIZE,
1631 .min_hw_rev = CC_HW_REV_630,
1632 },
1633 {
1634 .name = "sha224",
1635 .driver_name = "sha224-ccree",
1636 .mac_name = "hmac(sha224)",
1637 .mac_driver_name = "hmac-sha224-ccree",
1638 .blocksize = SHA224_BLOCK_SIZE,
1639 .template_ahash = {
1640 .init = cc_hash_init,
1641 .update = cc_hash_update,
1642 .final = cc_hash_final,
1643 .finup = cc_hash_finup,
1644 .digest = cc_hash_digest,
1645 .export = cc_hash_export,
1646 .import = cc_hash_import,
1647 .setkey = cc_hash_setkey,
1648 .halg = {
1649 .digestsize = SHA224_DIGEST_SIZE,
1650 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1651 },
1652 },
1653 .hash_mode = DRV_HASH_SHA224,
1654 .hw_mode = DRV_HASH_HW_SHA256,
1655 .inter_digestsize = SHA256_DIGEST_SIZE,
1656 .min_hw_rev = CC_HW_REV_630,
1657 },
1658 {
1659 .name = "sha384",
1660 .driver_name = "sha384-ccree",
1661 .mac_name = "hmac(sha384)",
1662 .mac_driver_name = "hmac-sha384-ccree",
1663 .blocksize = SHA384_BLOCK_SIZE,
1664 .template_ahash = {
1665 .init = cc_hash_init,
1666 .update = cc_hash_update,
1667 .final = cc_hash_final,
1668 .finup = cc_hash_finup,
1669 .digest = cc_hash_digest,
1670 .export = cc_hash_export,
1671 .import = cc_hash_import,
1672 .setkey = cc_hash_setkey,
1673 .halg = {
1674 .digestsize = SHA384_DIGEST_SIZE,
1675 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1676 },
1677 },
1678 .hash_mode = DRV_HASH_SHA384,
1679 .hw_mode = DRV_HASH_HW_SHA512,
1680 .inter_digestsize = SHA512_DIGEST_SIZE,
1681 .min_hw_rev = CC_HW_REV_712,
1682 },
1683 {
1684 .name = "sha512",
1685 .driver_name = "sha512-ccree",
1686 .mac_name = "hmac(sha512)",
1687 .mac_driver_name = "hmac-sha512-ccree",
1688 .blocksize = SHA512_BLOCK_SIZE,
1689 .template_ahash = {
1690 .init = cc_hash_init,
1691 .update = cc_hash_update,
1692 .final = cc_hash_final,
1693 .finup = cc_hash_finup,
1694 .digest = cc_hash_digest,
1695 .export = cc_hash_export,
1696 .import = cc_hash_import,
1697 .setkey = cc_hash_setkey,
1698 .halg = {
1699 .digestsize = SHA512_DIGEST_SIZE,
1700 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1701 },
1702 },
1703 .hash_mode = DRV_HASH_SHA512,
1704 .hw_mode = DRV_HASH_HW_SHA512,
1705 .inter_digestsize = SHA512_DIGEST_SIZE,
1706 .min_hw_rev = CC_HW_REV_712,
1707 },
1708 {
1709 .name = "md5",
1710 .driver_name = "md5-ccree",
1711 .mac_name = "hmac(md5)",
1712 .mac_driver_name = "hmac-md5-ccree",
1713 .blocksize = MD5_HMAC_BLOCK_SIZE,
1714 .template_ahash = {
1715 .init = cc_hash_init,
1716 .update = cc_hash_update,
1717 .final = cc_hash_final,
1718 .finup = cc_hash_finup,
1719 .digest = cc_hash_digest,
1720 .export = cc_hash_export,
1721 .import = cc_hash_import,
1722 .setkey = cc_hash_setkey,
1723 .halg = {
1724 .digestsize = MD5_DIGEST_SIZE,
1725 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1726 },
1727 },
1728 .hash_mode = DRV_HASH_MD5,
1729 .hw_mode = DRV_HASH_HW_MD5,
1730 .inter_digestsize = MD5_DIGEST_SIZE,
1731		.min_hw_rev = CC_HW_REV_630,
1732 },
1733 {
1734 .mac_name = "xcbc(aes)",
1735 .mac_driver_name = "xcbc-aes-ccree",
1736 .blocksize = AES_BLOCK_SIZE,
1737 .template_ahash = {
1738 .init = cc_hash_init,
1739 .update = cc_mac_update,
1740 .final = cc_mac_final,
1741 .finup = cc_mac_finup,
1742 .digest = cc_mac_digest,
1743 .setkey = cc_xcbc_setkey,
1744 .export = cc_hash_export,
1745 .import = cc_hash_import,
1746 .halg = {
1747 .digestsize = AES_BLOCK_SIZE,
1748 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1749 },
1750 },
1751 .hash_mode = DRV_HASH_NULL,
1752 .hw_mode = DRV_CIPHER_XCBC_MAC,
1753 .inter_digestsize = AES_BLOCK_SIZE,
1754		.min_hw_rev = CC_HW_REV_630,
1755 },
1756 {
1757 .mac_name = "cmac(aes)",
1758 .mac_driver_name = "cmac-aes-ccree",
1759 .blocksize = AES_BLOCK_SIZE,
1760 .template_ahash = {
1761 .init = cc_hash_init,
1762 .update = cc_mac_update,
1763 .final = cc_mac_final,
1764 .finup = cc_mac_finup,
1765 .digest = cc_mac_digest,
1766 .setkey = cc_cmac_setkey,
1767 .export = cc_hash_export,
1768 .import = cc_hash_import,
1769 .halg = {
1770 .digestsize = AES_BLOCK_SIZE,
1771 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1772 },
1773 },
1774 .hash_mode = DRV_HASH_NULL,
1775 .hw_mode = DRV_CIPHER_CMAC,
1776 .inter_digestsize = AES_BLOCK_SIZE,
1777		.min_hw_rev = CC_HW_REV_630,
1778 },
1779};
1780
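/*
 * cc_alloc_hash_alg() - allocate a cc_hash_alg instance and populate it from
 * the given template. When @keyed is true, the HMAC/MAC names are used and the
 * template's setkey is kept; otherwise the plain hash names are used and
 * setkey is cleared. Returns the new entry or ERR_PTR(-ENOMEM).
 */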
1781static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1782 struct device *dev, bool keyed)
1783{
1784 struct cc_hash_alg *t_crypto_alg;
1785 struct crypto_alg *alg;
1786 struct ahash_alg *halg;
1787
1788 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1789 if (!t_crypto_alg)
1790 return ERR_PTR(-ENOMEM);
1791
1792 t_crypto_alg->ahash_alg = template->template_ahash;
1793 halg = &t_crypto_alg->ahash_alg;
1794 alg = &halg->halg.base;
1795
1796 if (keyed) {
1797 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1798 template->mac_name);
1799 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1800 template->mac_driver_name);
1801 } else {
1802 halg->setkey = NULL;
1803 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1804 template->name);
1805 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1806 template->driver_name);
1807 }
1808 alg->cra_module = THIS_MODULE;
1809 alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1810 alg->cra_priority = CC_CRA_PRIO;
1811 alg->cra_blocksize = template->blocksize;
1812 alg->cra_alignmask = 0;
1813 alg->cra_exit = cc_cra_exit;
1814
1815 alg->cra_init = cc_cra_init;
1816 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
1817 CRYPTO_ALG_KERN_DRIVER_ONLY;
1818 alg->cra_type = &crypto_ahash_type;
1819
1820 t_crypto_alg->hash_mode = template->hash_mode;
1821 t_crypto_alg->hw_mode = template->hw_mode;
1822 t_crypto_alg->inter_digestsize = template->inter_digestsize;
1823
1824 return t_crypto_alg;
1825}
1826
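/*
 * cc_init_hash_sram() - copy the digest-length constants and the larval
 * (initial) digests for MD5/SHA1/SHA224/SHA256 into SRAM, followed by the
 * SHA384/SHA512 constants when the HW revision (>= CC_HW_REV_712) supports
 * the larger hashes. Each copy is issued as a descriptor sequence through
 * send_request_init(), and the SRAM offset is advanced past the copied data.
 */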
1827int cc_init_hash_sram(struct cc_drvdata *drvdata)
1828{
1829 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1830 cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1831 unsigned int larval_seq_len = 0;
1832 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1833	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1834 int rc = 0;
1835
1836 /* Copy-to-sram digest-len */
1837 cc_set_sram_desc(digest_len_init, sram_buff_ofs,
1838 ARRAY_SIZE(digest_len_init), larval_seq,
1839 &larval_seq_len);
1840 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1841 if (rc)
1842 goto init_digest_const_err;
1843
1844 sram_buff_ofs += sizeof(digest_len_init);
1845 larval_seq_len = 0;
1846
1847 if (large_sha_supported) {
1848 /* Copy-to-sram digest-len for sha384/512 */
1849 cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
1850 ARRAY_SIZE(digest_len_sha512_init),
1851 larval_seq, &larval_seq_len);
1852 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1853 if (rc)
1854 goto init_digest_const_err;
1855
1856 sram_buff_ofs += sizeof(digest_len_sha512_init);
1857 larval_seq_len = 0;
1858 }
1859
1860 /* The initial digests offset */
1861 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1862
1863 /* Copy-to-sram initial SHA* digests */
1864 cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
1865 larval_seq, &larval_seq_len);
1866 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1867 if (rc)
1868 goto init_digest_const_err;
1869 sram_buff_ofs += sizeof(md5_init);
1870 larval_seq_len = 0;
1871
1872 cc_set_sram_desc(sha1_init, sram_buff_ofs,
1873 ARRAY_SIZE(sha1_init), larval_seq,
1874 &larval_seq_len);
1875 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1876 if (rc)
1877 goto init_digest_const_err;
1878 sram_buff_ofs += sizeof(sha1_init);
1879 larval_seq_len = 0;
1880
1881 cc_set_sram_desc(sha224_init, sram_buff_ofs,
1882 ARRAY_SIZE(sha224_init), larval_seq,
1883 &larval_seq_len);
1884 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1885 if (rc)
1886 goto init_digest_const_err;
1887 sram_buff_ofs += sizeof(sha224_init);
1888 larval_seq_len = 0;
1889
1890 cc_set_sram_desc(sha256_init, sram_buff_ofs,
1891 ARRAY_SIZE(sha256_init), larval_seq,
1892 &larval_seq_len);
1893 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1894 if (rc)
1895 goto init_digest_const_err;
1896 sram_buff_ofs += sizeof(sha256_init);
1897 larval_seq_len = 0;
1898
1899 if (large_sha_supported) {
1900 cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
1901 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
1902 &larval_seq_len);
1903 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1904 if (rc)
1905 goto init_digest_const_err;
1906 sram_buff_ofs += sizeof(sha384_init);
1907 larval_seq_len = 0;
1908
1909 cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
1910 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
1911 &larval_seq_len);
1912 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1913 if (rc)
1914 goto init_digest_const_err;
1915 }
1916
1917init_digest_const_err:
1918 return rc;
1919}
1920
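/*
 * cc_swap_dwords() - swap each pair of adjacent 32-bit words in @buf;
 * @size is the number of 32-bit words and is expected to be even.
 */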
1921static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1922{
1923 int i;
1924 u32 tmp;
1925
1926 for (i = 0; i < size; i += 2) {
1927 tmp = buf[i];
1928 buf[i] = buf[i + 1];
1929 buf[i + 1] = tmp;
1930 }
1931}
1932
1933/*
1934 * Due to the way the HW works we need to swap every
1935 * double word in the SHA384 and SHA512 larval hashes
1936 */
1937void __init cc_hash_global_init(void)
1938{
1939 cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
1940 cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
1941}
1942
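/*
 * cc_hash_alloc() - hash subsystem init: allocate the SRAM area for the
 * digest-length constants and larval digests (sized for SHA384/SHA512 only on
 * HW rev >= 712), program it via cc_init_hash_sram() and register the ahash
 * algorithms from driver_hash[] - the keyed (HMAC/MAC) variant first, then
 * the plain hash variant for entries that are not XCBC/CMAC.
 */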
1943int cc_hash_alloc(struct cc_drvdata *drvdata)
1944{
1945 struct cc_hash_handle *hash_handle;
1946 cc_sram_addr_t sram_buff;
1947 u32 sram_size_to_alloc;
1948 struct device *dev = drvdata_to_dev(drvdata);
1949 int rc = 0;
1950 int alg;
1951
1952 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1953 if (!hash_handle)
1954 return -ENOMEM;
1955
1956 INIT_LIST_HEAD(&hash_handle->hash_list);
1957 drvdata->hash_handle = hash_handle;
1958
1959 sram_size_to_alloc = sizeof(digest_len_init) +
1960 sizeof(md5_init) +
1961 sizeof(sha1_init) +
1962 sizeof(sha224_init) +
1963 sizeof(sha256_init);
1964
1965 if (drvdata->hw_rev >= CC_HW_REV_712)
1966 sram_size_to_alloc += sizeof(digest_len_sha512_init) +
1967 sizeof(sha384_init) + sizeof(sha512_init);
1968
1969 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1970 if (sram_buff == NULL_SRAM_ADDR) {
1971 dev_err(dev, "SRAM pool exhausted\n");
1972 rc = -ENOMEM;
1973 goto fail;
1974 }
1975
1976 /* The initial digest-len offset */
1977 hash_handle->digest_len_sram_addr = sram_buff;
1978
1979	/* Must be set before the alg registration, as it is used there */
1980 rc = cc_init_hash_sram(drvdata);
1981 if (rc) {
1982 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1983 goto fail;
1984 }
1985
1986 /* ahash registration */
1987 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1988 struct cc_hash_alg *t_alg;
1989 int hw_mode = driver_hash[alg].hw_mode;
1990
1991 /* We either support both HASH and MAC or none */
1992 if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
1993 continue;
1994
1995 /* register hmac version */
1996 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
1997 if (IS_ERR(t_alg)) {
1998 rc = PTR_ERR(t_alg);
1999 dev_err(dev, "%s alg allocation failed\n",
2000 driver_hash[alg].driver_name);
2001 goto fail;
2002 }
2003 t_alg->drvdata = drvdata;
2004
2005 rc = crypto_register_ahash(&t_alg->ahash_alg);
2006 if (rc) {
2007 dev_err(dev, "%s alg registration failed\n",
2008 driver_hash[alg].driver_name);
2009 kfree(t_alg);
2010 goto fail;
2011 } else {
2012 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2013 }
2014
2015 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2016 hw_mode == DRV_CIPHER_CMAC)
2017 continue;
2018
2019 /* register hash version */
2020 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2021 if (IS_ERR(t_alg)) {
2022 rc = PTR_ERR(t_alg);
2023 dev_err(dev, "%s alg allocation failed\n",
2024 driver_hash[alg].driver_name);
2025 goto fail;
2026 }
2027 t_alg->drvdata = drvdata;
2028
2029 rc = crypto_register_ahash(&t_alg->ahash_alg);
2030 if (rc) {
2031 dev_err(dev, "%s alg registration failed\n",
2032 driver_hash[alg].driver_name);
2033 kfree(t_alg);
2034 goto fail;
2035 } else {
2036 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2037 }
2038 }
2039
2040 return 0;
2041
2042fail:
2043 kfree(drvdata->hash_handle);
2044 drvdata->hash_handle = NULL;
2045 return rc;
2046}
2047
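/*
 * cc_hash_free() - unregister all previously registered ahash algorithms and
 * release the hash handle allocated by cc_hash_alloc().
 */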
2048int cc_hash_free(struct cc_drvdata *drvdata)
2049{
2050 struct cc_hash_alg *t_hash_alg, *hash_n;
2051 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2052
2053 if (hash_handle) {
2054 list_for_each_entry_safe(t_hash_alg, hash_n,
2055 &hash_handle->hash_list, entry) {
2056 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2057 list_del(&t_hash_alg->entry);
2058 kfree(t_hash_alg);
2059 }
2060
2061 kfree(hash_handle);
2062 drvdata->hash_handle = NULL;
2063 }
2064 return 0;
2065}
2066
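/*
 * cc_setup_xcbc() - append the descriptors that load the three derived
 * AES-XCBC-MAC keys from the opad buffer (K1 into the key register, K2/K3
 * into the state registers) and then load the current MAC state from the
 * request context digest buffer. Updates *seq_size with the new length.
 */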
2067static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2068 unsigned int *seq_size)
2069{
2070 unsigned int idx = *seq_size;
2071 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2072 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2073 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2074
2075 /* Setup XCBC MAC K1 */
2076 hw_desc_init(&desc[idx]);
2077 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2078 XCBC_MAC_K1_OFFSET),
2079 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2080 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2081 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2082 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2083 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2084 set_flow_mode(&desc[idx], S_DIN_to_AES);
2085 idx++;
2086
2087 /* Setup XCBC MAC K2 */
2088 hw_desc_init(&desc[idx]);
2089 set_din_type(&desc[idx], DMA_DLLI,
2090 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2091 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2092 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2093 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2094 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2095 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2096 set_flow_mode(&desc[idx], S_DIN_to_AES);
2097 idx++;
2098
2099 /* Setup XCBC MAC K3 */
2100 hw_desc_init(&desc[idx]);
2101 set_din_type(&desc[idx], DMA_DLLI,
2102 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2103 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2104 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2105 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2106 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2107 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2108 set_flow_mode(&desc[idx], S_DIN_to_AES);
2109 idx++;
2110
2111 /* Loading MAC state */
2112 hw_desc_init(&desc[idx]);
2113 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2114 CC_AES_BLOCK_SIZE, NS_BIT);
2115 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2116 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2117 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2118 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2119 set_flow_mode(&desc[idx], S_DIN_to_AES);
2120 idx++;
2121 *seq_size = idx;
2122}
2123
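/*
 * cc_setup_cmac() - append the descriptors that load the CMAC key from the
 * opad buffer (a 24-byte key is loaded using the full AES_MAX_KEY_SIZE
 * buffer) and the current MAC state from the request context digest buffer.
 * Updates *seq_size with the new sequence length.
 */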
2124static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2125 unsigned int *seq_size)
2126{
2127 unsigned int idx = *seq_size;
2128 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2129 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2130 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2131
2132 /* Setup CMAC Key */
2133 hw_desc_init(&desc[idx]);
2134 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2135 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2136 ctx->key_params.keylen), NS_BIT);
2137 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2138 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2139 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2140 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2141 set_flow_mode(&desc[idx], S_DIN_to_AES);
2142 idx++;
2143
2144 /* Load MAC state */
2145 hw_desc_init(&desc[idx]);
2146 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2147 CC_AES_BLOCK_SIZE, NS_BIT);
2148 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2149 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2150 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2151 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2152 set_flow_mode(&desc[idx], S_DIN_to_AES);
2153 idx++;
2154 *seq_size = idx;
2155}
2156
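/*
 * cc_set_desc() - append the data-processing descriptors for the current
 * request: a single DLLI descriptor when the data is directly DMA-able, or a
 * BYPASS descriptor that copies the MLLI table into SRAM followed by an MLLI
 * processing descriptor otherwise. A NULL buffer type adds nothing. When
 * @is_not_last_data is set, the last descriptor is flagged accordingly.
 */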
2157static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2158 struct cc_hash_ctx *ctx, unsigned int flow_mode,
2159 struct cc_hw_desc desc[], bool is_not_last_data,
2160 unsigned int *seq_size)
2161{
2162 unsigned int idx = *seq_size;
2163 struct device *dev = drvdata_to_dev(ctx->drvdata);
2164
2165 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2166 hw_desc_init(&desc[idx]);
2167 set_din_type(&desc[idx], DMA_DLLI,
2168 sg_dma_address(areq_ctx->curr_sg),
2169 areq_ctx->curr_sg->length, NS_BIT);
2170 set_flow_mode(&desc[idx], flow_mode);
2171 idx++;
2172 } else {
2173 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2174 dev_dbg(dev, " NULL mode\n");
2175 /* nothing to build */
2176 return;
2177 }
2178 /* bypass */
2179 hw_desc_init(&desc[idx]);
2180 set_din_type(&desc[idx], DMA_DLLI,
2181 areq_ctx->mlli_params.mlli_dma_addr,
2182 areq_ctx->mlli_params.mlli_len, NS_BIT);
2183 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2184 areq_ctx->mlli_params.mlli_len);
2185 set_flow_mode(&desc[idx], BYPASS);
2186 idx++;
2187 /* process */
2188 hw_desc_init(&desc[idx]);
2189 set_din_type(&desc[idx], DMA_MLLI,
2190 ctx->drvdata->mlli_sram_addr,
2191 areq_ctx->mlli_nents, NS_BIT);
2192 set_flow_mode(&desc[idx], flow_mode);
2193 idx++;
2194 }
2195 if (is_not_last_data)
2196 set_din_not_last_indication(&desc[(idx - 1)]);
2197 /* return updated desc sequence size */
2198 *seq_size = idx;
2199}
2200
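/*
 * cc_larval_digest() - return a pointer to the larval (initial) digest buffer
 * for the given hash mode; falls back to the MD5 buffer (with an error
 * message) on an unknown mode.
 */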
2201static const void *cc_larval_digest(struct device *dev, u32 mode)
2202{
2203 switch (mode) {
2204 case DRV_HASH_MD5:
2205 return md5_init;
2206 case DRV_HASH_SHA1:
2207 return sha1_init;
2208 case DRV_HASH_SHA224:
2209 return sha224_init;
2210 case DRV_HASH_SHA256:
2211 return sha256_init;
2212 case DRV_HASH_SHA384:
2213 return sha384_init;
2214 case DRV_HASH_SHA512:
2215 return sha512_init;
2216 default:
2217 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2218 return md5_init;
2219 }
2220}
2221
2222/*!
2223 * Gets the address of the initial (larval) digest in SRAM
2224 * according to the given hash mode.
2225 *
2226 * \param drvdata The driver private context
2227 * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
2228 *
2229 * \return cc_sram_addr_t The address of the initial digest in SRAM
2230 */
2231cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2232{
2233 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2234 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2235 struct device *dev = drvdata_to_dev(_drvdata);
2236
2237 switch (mode) {
2238 case DRV_HASH_NULL:
2239 break; /*Ignore*/
2240 case DRV_HASH_MD5:
2241 return (hash_handle->larval_digest_sram_addr);
2242 case DRV_HASH_SHA1:
2243 return (hash_handle->larval_digest_sram_addr +
2244 sizeof(md5_init));
2245 case DRV_HASH_SHA224:
2246 return (hash_handle->larval_digest_sram_addr +
2247 sizeof(md5_init) +
2248 sizeof(sha1_init));
2249 case DRV_HASH_SHA256:
2250 return (hash_handle->larval_digest_sram_addr +
2251 sizeof(md5_init) +
2252 sizeof(sha1_init) +
2253 sizeof(sha224_init));
2254 case DRV_HASH_SHA384:
2255 return (hash_handle->larval_digest_sram_addr +
2256 sizeof(md5_init) +
2257 sizeof(sha1_init) +
2258 sizeof(sha224_init) +
2259 sizeof(sha256_init));
2260 case DRV_HASH_SHA512:
2261 return (hash_handle->larval_digest_sram_addr +
2262 sizeof(md5_init) +
2263 sizeof(sha1_init) +
2264 sizeof(sha224_init) +
2265 sizeof(sha256_init) +
2266 sizeof(sha384_init));
2267 default:
2268 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2269 }
2270
2271	/* Return a valid (though incorrect) value to avoid a kernel crash */
2272 return hash_handle->larval_digest_sram_addr;
2273}
2274
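/*
 * cc_digest_len_addr() - return the SRAM address of the digest-length
 * constant for the given hash mode; SHA384/SHA512 use the separate
 * digest_len_sha512_init entry when CC_DEV_SHA_MAX > 256.
 */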
2275cc_sram_addr_t
2276cc_digest_len_addr(void *drvdata, u32 mode)
2277{
2278 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2279 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2280 cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2281
2282 switch (mode) {
2283 case DRV_HASH_SHA1:
2284 case DRV_HASH_SHA224:
2285 case DRV_HASH_SHA256:
2286 case DRV_HASH_MD5:
2287 return digest_len_addr;
2288#if (CC_DEV_SHA_MAX > 256)
2289 case DRV_HASH_SHA384:
2290 case DRV_HASH_SHA512:
2291 return digest_len_addr + sizeof(digest_len_init);
2292#endif
2293 default:
2294		return digest_len_addr; /* to avoid a kernel crash */
2295 }
2296}