1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"
18 #define template_aead template_u.aead
20 #define MAX_AEAD_SETKEY_SEQ 12
21 #define MAX_AEAD_PROCESS_SEQ 23
23 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
24 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
26 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
28 struct cc_aead_handle {
29 cc_sram_addr_t sram_workspace_addr;
30 struct list_head aead_list;
35 u8 *ipad_opad; /* IPAD, OPAD*/
36 dma_addr_t padded_authkey_dma_addr;
37 dma_addr_t ipad_opad_dma_addr;
41 u8 *xcbc_keys; /* K1,K2,K3 */
42 dma_addr_t xcbc_keys_dma_addr;
46 struct cc_drvdata *drvdata;
47 u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
49 dma_addr_t enckey_dma_addr;
51 struct cc_hmac_s hmac;
52 struct cc_xcbc_s xcbc;
54 unsigned int enc_keylen;
55 unsigned int auth_keylen;
56 unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
57 unsigned int hash_len;
58 enum drv_cipher_mode cipher_mode;
59 enum cc_flow_mode flow_mode;
60 enum drv_hash_mode auth_mode;
63 static inline bool valid_assoclen(struct aead_request *req)
65 return ((req->assoclen == 16) || (req->assoclen == 20));
68 static void cc_aead_exit(struct crypto_aead *tfm)
70 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
71 struct device *dev = drvdata_to_dev(ctx->drvdata);
73 dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
74 crypto_tfm_alg_name(&tfm->base));
76 /* Unmap enckey buffer */
78 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
79 ctx->enckey_dma_addr);
80 dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
81 &ctx->enckey_dma_addr);
82 ctx->enckey_dma_addr = 0;
86 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
87 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
89 if (xcbc->xcbc_keys) {
90 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
92 xcbc->xcbc_keys_dma_addr);
94 dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
95 &xcbc->xcbc_keys_dma_addr);
96 xcbc->xcbc_keys_dma_addr = 0;
97 xcbc->xcbc_keys = NULL;
98 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
99 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
101 if (hmac->ipad_opad) {
102 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
104 hmac->ipad_opad_dma_addr);
105 dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
106 &hmac->ipad_opad_dma_addr);
107 hmac->ipad_opad_dma_addr = 0;
108 hmac->ipad_opad = NULL;
110 if (hmac->padded_authkey) {
111 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
112 hmac->padded_authkey,
113 hmac->padded_authkey_dma_addr);
114 dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
115 &hmac->padded_authkey_dma_addr);
116 hmac->padded_authkey_dma_addr = 0;
117 hmac->padded_authkey = NULL;
122 static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
124 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
126 return cc_get_default_hash_len(ctx->drvdata);
129 static int cc_aead_init(struct crypto_aead *tfm)
131 struct aead_alg *alg = crypto_aead_alg(tfm);
132 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
133 struct cc_crypto_alg *cc_alg =
134 container_of(alg, struct cc_crypto_alg, aead_alg);
135 struct device *dev = drvdata_to_dev(cc_alg->drvdata);
137 dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
138 crypto_tfm_alg_name(&tfm->base));
140 /* Initialize modes in instance */
141 ctx->cipher_mode = cc_alg->cipher_mode;
142 ctx->flow_mode = cc_alg->flow_mode;
143 ctx->auth_mode = cc_alg->auth_mode;
144 ctx->drvdata = cc_alg->drvdata;
145 crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
147 /* Allocate key buffer, cache line aligned */
148 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
149 &ctx->enckey_dma_addr, GFP_KERNEL);
151 dev_err(dev, "Failed allocating key buffer\n");
154 dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
157 /* Set default authlen value */
159 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
160 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
161 const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
163 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
164 /* (and temporary for user key - up to 256b) */
165 xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
166 &xcbc->xcbc_keys_dma_addr,
168 if (!xcbc->xcbc_keys) {
169 dev_err(dev, "Failed allocating buffer for XCBC keys\n");
172 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
173 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
174 const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
175 dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
177 /* Allocate dma-coherent buffer for IPAD + OPAD */
178 hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
179 &hmac->ipad_opad_dma_addr,
182 if (!hmac->ipad_opad) {
183 dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
187 dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
190 hmac->padded_authkey = dma_alloc_coherent(dev,
195 if (!hmac->padded_authkey) {
196 dev_err(dev, "failed to allocate padded_authkey\n");
200 ctx->auth_state.hmac.ipad_opad = NULL;
201 ctx->auth_state.hmac.padded_authkey = NULL;
203 ctx->hash_len = cc_get_aead_hash_len(tfm);
212 static void cc_aead_complete(struct device *dev, void *cc_req, int err)
214 struct aead_request *areq = (struct aead_request *)cc_req;
215 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
216 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
217 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
219 /* BACKLOG notification */
220 if (err == -EINPROGRESS)
223 cc_unmap_aead_request(dev, areq);
225 /* Restore ordinary iv pointer */
226 areq->iv = areq_ctx->backup_iv;
231 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
232 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
233 ctx->authsize) != 0) {
234 dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
235 ctx->authsize, ctx->cipher_mode);
236 /* In case of payload authentication failure, MUST NOT
237 * revealed the decrypted message --> zero its memory.
239 sg_zero_buffer(areq->dst, sg_nents(areq->dst),
244 } else if (areq_ctx->is_icv_fragmented) {
245 u32 skip = areq->cryptlen + areq_ctx->dst_offset;
247 cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
248 skip, (skip + ctx->authsize),
252 aead_request_complete(areq, err);
255 static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
256 struct cc_aead_ctx *ctx)
258 /* Load the AES key */
259 hw_desc_init(&desc[0]);
260 /* We are using for the source/user key the same buffer
261 * as for the output keys, * because after this key loading it
262 * is not needed anymore
264 set_din_type(&desc[0], DMA_DLLI,
265 ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
267 set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
268 set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
269 set_key_size_aes(&desc[0], ctx->auth_keylen);
270 set_flow_mode(&desc[0], S_DIN_to_AES);
271 set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
273 hw_desc_init(&desc[1]);
274 set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
275 set_flow_mode(&desc[1], DIN_AES_DOUT);
276 set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
277 AES_KEYSIZE_128, NS_BIT, 0);
279 hw_desc_init(&desc[2]);
280 set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
281 set_flow_mode(&desc[2], DIN_AES_DOUT);
282 set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
284 AES_KEYSIZE_128, NS_BIT, 0);
286 hw_desc_init(&desc[3]);
287 set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
288 set_flow_mode(&desc[3], DIN_AES_DOUT);
289 set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
290 + 2 * AES_KEYSIZE_128),
291 AES_KEYSIZE_128, NS_BIT, 0);
296 static unsigned int hmac_setkey(struct cc_hw_desc *desc,
297 struct cc_aead_ctx *ctx)
299 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
300 unsigned int digest_ofs = 0;
301 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
302 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
303 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
304 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
305 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
307 unsigned int idx = 0;
310 /* calc derived HMAC key */
311 for (i = 0; i < 2; i++) {
312 /* Load hash initial state */
313 hw_desc_init(&desc[idx]);
314 set_cipher_mode(&desc[idx], hash_mode);
315 set_din_sram(&desc[idx],
316 cc_larval_digest_addr(ctx->drvdata,
319 set_flow_mode(&desc[idx], S_DIN_to_HASH);
320 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
323 /* Load the hash current length*/
324 hw_desc_init(&desc[idx]);
325 set_cipher_mode(&desc[idx], hash_mode);
326 set_din_const(&desc[idx], 0, ctx->hash_len);
327 set_flow_mode(&desc[idx], S_DIN_to_HASH);
328 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
331 /* Prepare ipad key */
332 hw_desc_init(&desc[idx]);
333 set_xor_val(&desc[idx], hmac_pad_const[i]);
334 set_cipher_mode(&desc[idx], hash_mode);
335 set_flow_mode(&desc[idx], S_DIN_to_HASH);
336 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
339 /* Perform HASH update */
340 hw_desc_init(&desc[idx]);
341 set_din_type(&desc[idx], DMA_DLLI,
342 hmac->padded_authkey_dma_addr,
343 SHA256_BLOCK_SIZE, NS_BIT);
344 set_cipher_mode(&desc[idx], hash_mode);
345 set_xor_active(&desc[idx]);
346 set_flow_mode(&desc[idx], DIN_HASH);
350 hw_desc_init(&desc[idx]);
351 set_cipher_mode(&desc[idx], hash_mode);
352 set_dout_dlli(&desc[idx],
353 (hmac->ipad_opad_dma_addr + digest_ofs),
354 digest_size, NS_BIT, 0);
355 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
356 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
357 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
360 digest_ofs += digest_size;
366 static int validate_keys_sizes(struct cc_aead_ctx *ctx)
368 struct device *dev = drvdata_to_dev(ctx->drvdata);
370 dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
371 ctx->enc_keylen, ctx->auth_keylen);
373 switch (ctx->auth_mode) {
375 case DRV_HASH_SHA256:
377 case DRV_HASH_XCBC_MAC:
378 if (ctx->auth_keylen != AES_KEYSIZE_128 &&
379 ctx->auth_keylen != AES_KEYSIZE_192 &&
380 ctx->auth_keylen != AES_KEYSIZE_256)
383 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
384 if (ctx->auth_keylen > 0)
388 dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
391 /* Check cipher key size */
392 if (ctx->flow_mode == S_DIN_to_DES) {
393 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
394 dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
398 } else { /* Default assumed to be AES ciphers */
399 if (ctx->enc_keylen != AES_KEYSIZE_128 &&
400 ctx->enc_keylen != AES_KEYSIZE_192 &&
401 ctx->enc_keylen != AES_KEYSIZE_256) {
402 dev_err(dev, "Invalid cipher(AES) key size: %u\n",
408 return 0; /* All tests of keys sizes passed */
411 /* This function prepers the user key so it can pass to the hmac processing
412 * (copy to intenral buffer or hash in case of key longer than block
414 static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
417 dma_addr_t key_dma_addr = 0;
418 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
419 struct device *dev = drvdata_to_dev(ctx->drvdata);
420 u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
421 struct cc_crypto_req cc_req = {};
422 unsigned int blocksize;
423 unsigned int digestsize;
424 unsigned int hashmode;
425 unsigned int idx = 0;
428 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
429 dma_addr_t padded_authkey_dma_addr =
430 ctx->auth_state.hmac.padded_authkey_dma_addr;
432 switch (ctx->auth_mode) { /* auth_key required and >0 */
434 blocksize = SHA1_BLOCK_SIZE;
435 digestsize = SHA1_DIGEST_SIZE;
436 hashmode = DRV_HASH_HW_SHA1;
438 case DRV_HASH_SHA256:
440 blocksize = SHA256_BLOCK_SIZE;
441 digestsize = SHA256_DIGEST_SIZE;
442 hashmode = DRV_HASH_HW_SHA256;
447 key = kmemdup(authkey, keylen, GFP_KERNEL);
451 key_dma_addr = dma_map_single(dev, (void *)key, keylen,
453 if (dma_mapping_error(dev, key_dma_addr)) {
454 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
459 if (keylen > blocksize) {
460 /* Load hash initial state */
461 hw_desc_init(&desc[idx]);
462 set_cipher_mode(&desc[idx], hashmode);
463 set_din_sram(&desc[idx], larval_addr, digestsize);
464 set_flow_mode(&desc[idx], S_DIN_to_HASH);
465 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
468 /* Load the hash current length*/
469 hw_desc_init(&desc[idx]);
470 set_cipher_mode(&desc[idx], hashmode);
471 set_din_const(&desc[idx], 0, ctx->hash_len);
472 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
473 set_flow_mode(&desc[idx], S_DIN_to_HASH);
474 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
477 hw_desc_init(&desc[idx]);
478 set_din_type(&desc[idx], DMA_DLLI,
479 key_dma_addr, keylen, NS_BIT);
480 set_flow_mode(&desc[idx], DIN_HASH);
484 hw_desc_init(&desc[idx]);
485 set_cipher_mode(&desc[idx], hashmode);
486 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
487 digestsize, NS_BIT, 0);
488 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
489 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
490 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
491 set_cipher_config0(&desc[idx],
492 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
495 hw_desc_init(&desc[idx]);
496 set_din_const(&desc[idx], 0, (blocksize - digestsize));
497 set_flow_mode(&desc[idx], BYPASS);
498 set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
499 digestsize), (blocksize - digestsize),
503 hw_desc_init(&desc[idx]);
504 set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
506 set_flow_mode(&desc[idx], BYPASS);
507 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
511 if ((blocksize - keylen) != 0) {
512 hw_desc_init(&desc[idx]);
513 set_din_const(&desc[idx], 0,
514 (blocksize - keylen));
515 set_flow_mode(&desc[idx], BYPASS);
516 set_dout_dlli(&desc[idx],
517 (padded_authkey_dma_addr +
519 (blocksize - keylen), NS_BIT, 0);
524 hw_desc_init(&desc[idx]);
525 set_din_const(&desc[idx], 0, (blocksize - keylen));
526 set_flow_mode(&desc[idx], BYPASS);
527 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
528 blocksize, NS_BIT, 0);
532 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
534 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
537 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
544 static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
547 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
548 struct cc_crypto_req cc_req = {};
549 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
550 unsigned int seq_len = 0;
551 struct device *dev = drvdata_to_dev(ctx->drvdata);
552 const u8 *enckey, *authkey;
555 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
556 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
558 /* STAT_PHASE_0: Init and sanity checks */
560 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
561 struct crypto_authenc_keys keys;
563 rc = crypto_authenc_extractkeys(&keys, key, keylen);
566 enckey = keys.enckey;
567 authkey = keys.authkey;
568 ctx->enc_keylen = keys.enckeylen;
569 ctx->auth_keylen = keys.authkeylen;
571 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
572 /* the nonce is stored in bytes at end of key */
573 if (ctx->enc_keylen <
574 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
576 /* Copy nonce from last 4 bytes in CTR key to
577 * first 4 bytes in CTR IV
579 memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
580 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
581 /* Set CTR key size */
582 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
584 } else { /* non-authenc - has just one key */
587 ctx->enc_keylen = keylen;
588 ctx->auth_keylen = 0;
591 rc = validate_keys_sizes(ctx);
595 /* STAT_PHASE_1: Copy key to ctx */
597 /* Get key material */
598 memcpy(ctx->enckey, enckey, ctx->enc_keylen);
599 if (ctx->enc_keylen == 24)
600 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
601 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
602 memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
604 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
605 rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
610 /* STAT_PHASE_2: Create sequence */
612 switch (ctx->auth_mode) {
614 case DRV_HASH_SHA256:
615 seq_len = hmac_setkey(desc, ctx);
617 case DRV_HASH_XCBC_MAC:
618 seq_len = xcbc_setkey(desc, ctx);
620 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
621 break; /* No auth. key setup */
623 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
627 /* STAT_PHASE_3: Submit sequence to HW */
629 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
630 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
632 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
637 /* Update STAT_PHASE_3 */
641 static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
644 struct crypto_authenc_keys keys;
647 err = crypto_authenc_extractkeys(&keys, key, keylen);
651 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
652 cc_aead_setkey(aead, key, keylen);
654 memzero_explicit(&keys, sizeof(keys));
658 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
661 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
667 memcpy(ctx->ctr_nonce, key + keylen, 3);
669 return cc_aead_setkey(tfm, key, keylen);
672 static int cc_aead_setauthsize(struct crypto_aead *authenc,
673 unsigned int authsize)
675 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
676 struct device *dev = drvdata_to_dev(ctx->drvdata);
678 /* Unsupported auth. sizes */
680 authsize > crypto_aead_maxauthsize(authenc)) {
684 ctx->authsize = authsize;
685 dev_dbg(dev, "authlen=%d\n", ctx->authsize);
690 static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
691 unsigned int authsize)
702 return cc_aead_setauthsize(authenc, authsize);
705 static int cc_ccm_setauthsize(struct crypto_aead *authenc,
706 unsigned int authsize)
721 return cc_aead_setauthsize(authenc, authsize);
724 static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
725 struct cc_hw_desc desc[], unsigned int *seq_size)
727 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
728 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
729 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
730 enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
731 unsigned int idx = *seq_size;
732 struct device *dev = drvdata_to_dev(ctx->drvdata);
734 switch (assoc_dma_type) {
735 case CC_DMA_BUF_DLLI:
736 dev_dbg(dev, "ASSOC buffer type DLLI\n");
737 hw_desc_init(&desc[idx]);
738 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
739 areq_ctx->assoclen, NS_BIT);
740 set_flow_mode(&desc[idx], flow_mode);
741 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
742 areq_ctx->cryptlen > 0)
743 set_din_not_last_indication(&desc[idx]);
745 case CC_DMA_BUF_MLLI:
746 dev_dbg(dev, "ASSOC buffer type MLLI\n");
747 hw_desc_init(&desc[idx]);
748 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
749 areq_ctx->assoc.mlli_nents, NS_BIT);
750 set_flow_mode(&desc[idx], flow_mode);
751 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
752 areq_ctx->cryptlen > 0)
753 set_din_not_last_indication(&desc[idx]);
755 case CC_DMA_BUF_NULL:
757 dev_err(dev, "Invalid ASSOC buffer type\n");
763 static void cc_proc_authen_desc(struct aead_request *areq,
764 unsigned int flow_mode,
765 struct cc_hw_desc desc[],
766 unsigned int *seq_size, int direct)
768 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
769 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
770 unsigned int idx = *seq_size;
771 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
772 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
773 struct device *dev = drvdata_to_dev(ctx->drvdata);
775 switch (data_dma_type) {
776 case CC_DMA_BUF_DLLI:
778 struct scatterlist *cipher =
779 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
780 areq_ctx->dst_sgl : areq_ctx->src_sgl;
782 unsigned int offset =
783 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
784 areq_ctx->dst_offset : areq_ctx->src_offset;
785 dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
786 hw_desc_init(&desc[idx]);
787 set_din_type(&desc[idx], DMA_DLLI,
788 (sg_dma_address(cipher) + offset),
789 areq_ctx->cryptlen, NS_BIT);
790 set_flow_mode(&desc[idx], flow_mode);
793 case CC_DMA_BUF_MLLI:
795 /* DOUBLE-PASS flow (as default)
796 * assoc. + iv + data -compact in one table
797 * if assoclen is ZERO only IV perform
799 cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
800 u32 mlli_nents = areq_ctx->assoc.mlli_nents;
802 if (areq_ctx->is_single_pass) {
803 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
804 mlli_addr = areq_ctx->dst.sram_addr;
805 mlli_nents = areq_ctx->dst.mlli_nents;
807 mlli_addr = areq_ctx->src.sram_addr;
808 mlli_nents = areq_ctx->src.mlli_nents;
812 dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
813 hw_desc_init(&desc[idx]);
814 set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
816 set_flow_mode(&desc[idx], flow_mode);
819 case CC_DMA_BUF_NULL:
821 dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
827 static void cc_proc_cipher_desc(struct aead_request *areq,
828 unsigned int flow_mode,
829 struct cc_hw_desc desc[],
830 unsigned int *seq_size)
832 unsigned int idx = *seq_size;
833 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
834 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
835 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
836 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
837 struct device *dev = drvdata_to_dev(ctx->drvdata);
839 if (areq_ctx->cryptlen == 0)
840 return; /*null processing*/
842 switch (data_dma_type) {
843 case CC_DMA_BUF_DLLI:
844 dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
845 hw_desc_init(&desc[idx]);
846 set_din_type(&desc[idx], DMA_DLLI,
847 (sg_dma_address(areq_ctx->src_sgl) +
848 areq_ctx->src_offset), areq_ctx->cryptlen,
850 set_dout_dlli(&desc[idx],
851 (sg_dma_address(areq_ctx->dst_sgl) +
852 areq_ctx->dst_offset),
853 areq_ctx->cryptlen, NS_BIT, 0);
854 set_flow_mode(&desc[idx], flow_mode);
856 case CC_DMA_BUF_MLLI:
857 dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
858 hw_desc_init(&desc[idx]);
859 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
860 areq_ctx->src.mlli_nents, NS_BIT);
861 set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
862 areq_ctx->dst.mlli_nents, NS_BIT, 0);
863 set_flow_mode(&desc[idx], flow_mode);
865 case CC_DMA_BUF_NULL:
867 dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
873 static void cc_proc_digest_desc(struct aead_request *req,
874 struct cc_hw_desc desc[],
875 unsigned int *seq_size)
877 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
878 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
879 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
880 unsigned int idx = *seq_size;
881 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
882 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
883 int direct = req_ctx->gen_ctx.op_type;
885 /* Get final ICV result */
886 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
887 hw_desc_init(&desc[idx]);
888 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
889 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
890 set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
892 set_queue_last_ind(ctx->drvdata, &desc[idx]);
893 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
894 set_aes_not_hash_mode(&desc[idx]);
895 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
897 set_cipher_config0(&desc[idx],
898 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
899 set_cipher_mode(&desc[idx], hash_mode);
902 /* Get ICV out from hardware */
903 hw_desc_init(&desc[idx]);
904 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
905 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
906 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
907 ctx->authsize, NS_BIT, 1);
908 set_queue_last_ind(ctx->drvdata, &desc[idx]);
909 set_cipher_config0(&desc[idx],
910 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
911 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
912 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
913 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
914 set_aes_not_hash_mode(&desc[idx]);
916 set_cipher_mode(&desc[idx], hash_mode);
923 static void cc_set_cipher_desc(struct aead_request *req,
924 struct cc_hw_desc desc[],
925 unsigned int *seq_size)
927 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
928 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
929 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
930 unsigned int hw_iv_size = req_ctx->hw_iv_size;
931 unsigned int idx = *seq_size;
932 int direct = req_ctx->gen_ctx.op_type;
934 /* Setup cipher state */
935 hw_desc_init(&desc[idx]);
936 set_cipher_config0(&desc[idx], direct);
937 set_flow_mode(&desc[idx], ctx->flow_mode);
938 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
940 if (ctx->cipher_mode == DRV_CIPHER_CTR)
941 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
943 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
944 set_cipher_mode(&desc[idx], ctx->cipher_mode);
948 hw_desc_init(&desc[idx]);
949 set_cipher_config0(&desc[idx], direct);
950 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
951 set_flow_mode(&desc[idx], ctx->flow_mode);
952 if (ctx->flow_mode == S_DIN_to_AES) {
953 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
954 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
955 ctx->enc_keylen), NS_BIT);
956 set_key_size_aes(&desc[idx], ctx->enc_keylen);
958 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
959 ctx->enc_keylen, NS_BIT);
960 set_key_size_des(&desc[idx], ctx->enc_keylen);
962 set_cipher_mode(&desc[idx], ctx->cipher_mode);
968 static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
969 unsigned int *seq_size, unsigned int data_flow_mode)
971 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
972 int direct = req_ctx->gen_ctx.op_type;
973 unsigned int idx = *seq_size;
975 if (req_ctx->cryptlen == 0)
976 return; /*null processing*/
978 cc_set_cipher_desc(req, desc, &idx);
979 cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
980 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
981 /* We must wait for DMA to write all cipher */
982 hw_desc_init(&desc[idx]);
983 set_din_no_dma(&desc[idx], 0, 0xfffff0);
984 set_dout_no_dma(&desc[idx], 0, 0, 1);
991 static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
992 unsigned int *seq_size)
994 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
995 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
996 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
997 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
998 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
999 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1000 unsigned int idx = *seq_size;
1002 /* Loading hash ipad xor key state */
1003 hw_desc_init(&desc[idx]);
1004 set_cipher_mode(&desc[idx], hash_mode);
1005 set_din_type(&desc[idx], DMA_DLLI,
1006 ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1008 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1009 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1012 /* Load init. digest len (64 bytes) */
1013 hw_desc_init(&desc[idx]);
1014 set_cipher_mode(&desc[idx], hash_mode);
1015 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1017 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1018 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1024 static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1025 unsigned int *seq_size)
1027 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1028 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1029 unsigned int idx = *seq_size;
1031 /* Loading MAC state */
1032 hw_desc_init(&desc[idx]);
1033 set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1034 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1035 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1036 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1037 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1038 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1039 set_aes_not_hash_mode(&desc[idx]);
1042 /* Setup XCBC MAC K1 */
1043 hw_desc_init(&desc[idx]);
1044 set_din_type(&desc[idx], DMA_DLLI,
1045 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1046 AES_KEYSIZE_128, NS_BIT);
1047 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1048 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1049 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1050 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1051 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1052 set_aes_not_hash_mode(&desc[idx]);
1055 /* Setup XCBC MAC K2 */
1056 hw_desc_init(&desc[idx]);
1057 set_din_type(&desc[idx], DMA_DLLI,
1058 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1059 AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1060 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1061 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1062 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1063 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1064 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1065 set_aes_not_hash_mode(&desc[idx]);
1068 /* Setup XCBC MAC K3 */
1069 hw_desc_init(&desc[idx]);
1070 set_din_type(&desc[idx], DMA_DLLI,
1071 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1072 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1073 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1074 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1075 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1076 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1077 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1078 set_aes_not_hash_mode(&desc[idx]);
1084 static void cc_proc_header_desc(struct aead_request *req,
1085 struct cc_hw_desc desc[],
1086 unsigned int *seq_size)
1088 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1089 unsigned int idx = *seq_size;
1091 /* Hash associated data */
1092 if (areq_ctx->assoclen > 0)
1093 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/*
 * cc_proc_scheme_desc() - append HW descriptors that finalize the HMAC
 * "scheme": pad and park the inner-hash state in the SRAM workspace, read
 * back the inner digest, reload the opad-XOR-key state, and run the outer
 * hash over the intermediate digest.
 *
 * @req:      AEAD request being processed
 * @desc:     descriptor array being built
 * @seq_size: in/out running descriptor count
 *
 * NOTE(review): this copy of the file lost several lines to extraction
 * (e.g. the size argument of set_dout_sram()/set_din_sram() calls and the
 * per-descriptor idx increments). Code left byte-identical; restore from
 * the upstream driver before building.
 */
1099 static void cc_proc_scheme_desc(struct aead_request *req,
1100 struct cc_hw_desc desc[],
1101 unsigned int *seq_size)
1103 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1104 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1105 struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
/* Only SHA1/SHA256 HMAC are supported here: pick HW hash mode and size. */
1106 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1107 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1108 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1109 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1110 unsigned int idx = *seq_size;
/* Pad the inner hash and spill STATE1 to the shared SRAM workspace. */
1112 hw_desc_init(&desc[idx]);
1113 set_cipher_mode(&desc[idx], hash_mode);
1114 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1116 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1117 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1118 set_cipher_do(&desc[idx], DO_PAD);
1121 /* Get final ICV result */
1122 hw_desc_init(&desc[idx]);
1123 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1125 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1126 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1127 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1128 set_cipher_mode(&desc[idx], hash_mode);
1131 /* Loading hash opad xor key state */
1132 hw_desc_init(&desc[idx]);
1133 set_cipher_mode(&desc[idx], hash_mode);
/* opad digest lives right after the ipad digest in the ipad_opad buffer. */
1134 set_din_type(&desc[idx], DMA_DLLI,
1135 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1136 digest_size, NS_BIT);
1137 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1138 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1141 /* Load init. digest len (64 bytes) */
1142 hw_desc_init(&desc[idx]);
1143 set_cipher_mode(&desc[idx], hash_mode);
1144 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1146 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1147 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1148 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1151 /* Perform HASH update */
1152 hw_desc_init(&desc[idx]);
1153 set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1155 set_flow_mode(&desc[idx], DIN_HASH);
/*
 * cc_mlli_to_sram() - prepend a BYPASS descriptor that copies the request's
 * MLLI (scatter list link table) from host memory into the engine's SRAM,
 * when either data or assoc buffers are MLLI-mapped or the request is not
 * single-pass. No-op when no MLLI table was built (mlli_len == 0).
 *
 * NOTE(review): extraction dropped the trailing (*seq_size)++ / closing
 * braces; code left byte-identical.
 */
1161 static void cc_mlli_to_sram(struct aead_request *req,
1162 struct cc_hw_desc desc[], unsigned int *seq_size)
1164 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1165 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1166 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1167 struct device *dev = drvdata_to_dev(ctx->drvdata);
1169 if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1170 req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1171 !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1172 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1173 (unsigned int)ctx->drvdata->mlli_sram_addr,
1174 req_ctx->mlli_params.mlli_len);
1175 /* Copy MLLI table host-to-sram */
1176 hw_desc_init(&desc[*seq_size]);
1177 set_din_type(&desc[*seq_size], DMA_DLLI,
1178 req_ctx->mlli_params.mlli_dma_addr,
1179 req_ctx->mlli_params.mlli_len, NS_BIT);
1180 set_dout_sram(&desc[*seq_size],
1181 ctx->drvdata->mlli_sram_addr,
1182 req_ctx->mlli_params.mlli_len);
1183 set_flow_mode(&desc[*seq_size], BYPASS);
1188 static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1189 enum cc_flow_mode setup_flow_mode,
1190 bool is_single_pass)
1192 enum cc_flow_mode data_flow_mode;
1194 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1195 if (setup_flow_mode == S_DIN_to_AES)
1196 data_flow_mode = is_single_pass ?
1197 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1199 data_flow_mode = is_single_pass ?
1200 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1201 } else { /* Decrypt */
1202 if (setup_flow_mode == S_DIN_to_AES)
1203 data_flow_mode = is_single_pass ?
1204 AES_and_HASH : DIN_AES_DOUT;
1206 data_flow_mode = is_single_pass ?
1207 DES_and_HASH : DIN_DES_DOUT;
1210 return data_flow_mode;
/*
 * cc_hmac_authenc() - build the descriptor sequence for authenc(hmac(...))
 * requests. Single-pass requests interleave cipher and hash in one flow;
 * otherwise fall back to two passes ordered so the MAC is always computed
 * over ciphertext (encrypt-then-read-MAC on encrypt, MAC-then-decrypt on
 * decrypt).
 *
 * NOTE(review): extraction dropped closing braces and some comment lines;
 * code left byte-identical.
 */
1213 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1214 unsigned int *seq_size)
1216 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1217 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1218 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1219 int direct = req_ctx->gen_ctx.op_type;
1220 unsigned int data_flow_mode =
1221 cc_get_data_flow(direct, ctx->flow_mode,
1222 req_ctx->is_single_pass);
1224 if (req_ctx->is_single_pass) {
/* Fast path: one combined cipher+hash pass over the data. */
1228 cc_set_hmac_desc(req, desc, seq_size);
1229 cc_set_cipher_desc(req, desc, seq_size);
1230 cc_proc_header_desc(req, desc, seq_size);
1231 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1232 cc_proc_scheme_desc(req, desc, seq_size);
1233 cc_proc_digest_desc(req, desc, seq_size);
1239 * Fallback for unsupported single-pass modes,
1240 * i.e. using assoc. data of non-word-multiple
1242 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1243 /* encrypt first.. */
1244 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1245 /* authenc after..*/
1246 cc_set_hmac_desc(req, desc, seq_size);
1247 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1248 cc_proc_scheme_desc(req, desc, seq_size);
1249 cc_proc_digest_desc(req, desc, seq_size);
1251 } else { /*DECRYPT*/
1252 /* authenc first..*/
1253 cc_set_hmac_desc(req, desc, seq_size);
1254 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1255 cc_proc_scheme_desc(req, desc, seq_size);
1256 /* decrypt after.. */
1257 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1258 /* read the digest result with setting the completion bit
1259 * must be after the cipher operation
1261 cc_proc_digest_desc(req, desc, seq_size);
/*
 * cc_xcbc_authenc() - build the descriptor sequence for
 * authenc(xcbc(aes),...) requests. Mirrors cc_hmac_authenc() but with
 * AES-XCBC-MAC keys/state and no final "scheme" stage (XCBC needs no
 * opad pass).
 *
 * NOTE(review): the `static void` return-type line and closing braces were
 * lost in extraction; code left byte-identical.
 */
1266 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1267 unsigned int *seq_size)
1269 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1270 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1271 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1272 int direct = req_ctx->gen_ctx.op_type;
1273 unsigned int data_flow_mode =
1274 cc_get_data_flow(direct, ctx->flow_mode,
1275 req_ctx->is_single_pass);
1277 if (req_ctx->is_single_pass) {
/* Fast path: one combined cipher+MAC pass. */
1281 cc_set_xcbc_desc(req, desc, seq_size);
1282 cc_set_cipher_desc(req, desc, seq_size);
1283 cc_proc_header_desc(req, desc, seq_size);
1284 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1285 cc_proc_digest_desc(req, desc, seq_size);
1291 * Fallback for unsupported single-pass modes,
1292 * i.e. using assoc. data of non-word-multiple
1294 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1295 /* encrypt first.. */
1296 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1297 /* authenc after.. */
1298 cc_set_xcbc_desc(req, desc, seq_size);
1299 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1300 cc_proc_digest_desc(req, desc, seq_size);
1301 } else { /*DECRYPT*/
1302 /* authenc first.. */
1303 cc_set_xcbc_desc(req, desc, seq_size);
1304 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1305 /* decrypt after..*/
1306 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1307 /* read the digest result with setting the completion bit
1308 * must be after the cipher operation
1310 cc_proc_digest_desc(req, desc, seq_size);
1314 static int validate_data_size(struct cc_aead_ctx *ctx,
1315 enum drv_crypto_direction direct,
1316 struct aead_request *req)
1318 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1319 struct device *dev = drvdata_to_dev(ctx->drvdata);
1320 unsigned int assoclen = areq_ctx->assoclen;
1321 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1322 (req->cryptlen - ctx->authsize) : req->cryptlen;
1324 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1325 req->cryptlen < ctx->authsize)
1328 areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1330 switch (ctx->flow_mode) {
1332 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1333 !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1335 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1337 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1338 if (areq_ctx->plaintext_authenticate_only)
1339 areq_ctx->is_single_pass = false;
1343 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1344 areq_ctx->is_single_pass = false;
1346 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1347 !IS_ALIGNED(cipherlen, sizeof(u32)))
1348 areq_ctx->is_single_pass = false;
1352 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1354 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1355 areq_ctx->is_single_pass = false;
1358 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1368 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1370 unsigned int len = 0;
1372 if (header_size == 0)
1375 if (header_size < ((1UL << 16) - (1UL << 8))) {
1378 pa0_buff[0] = (header_size >> 8) & 0xFF;
1379 pa0_buff[1] = header_size & 0xFF;
1385 pa0_buff[2] = (header_size >> 24) & 0xFF;
1386 pa0_buff[3] = (header_size >> 16) & 0xFF;
1387 pa0_buff[4] = (header_size >> 8) & 0xFF;
1388 pa0_buff[5] = header_size & 0xFF;
1394 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1398 memset(block, 0, csize);
1403 else if (msglen > (1 << (8 * csize)))
1406 data = cpu_to_be32(msglen);
1407 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
/*
 * cc_ccm() - build the full AES-CCM descriptor sequence: load CTR and
 * CBC-MAC keys/states, hash the formatted assoc data, run the combined
 * cipher+MAC pass over the payload, then encrypt the CBC-MAC tag with the
 * A0 counter block to produce the final ICV.
 *
 * On decrypt the computed MAC goes to mac_buf for later comparison; on
 * encrypt it is written straight to the ICV location.
 *
 * NOTE(review): extraction dropped idx increments, some dout size/axi
 * arguments and closing braces; code left byte-identical.
 */
1412 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1413 unsigned int *seq_size)
1415 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1416 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1417 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1418 unsigned int idx = *seq_size;
1419 unsigned int cipher_flow_mode;
1420 dma_addr_t mac_result;
1422 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1423 cipher_flow_mode = AES_to_HASH_and_DOUT;
1424 mac_result = req_ctx->mac_buf_dma_addr;
1425 } else { /* Encrypt */
1426 cipher_flow_mode = AES_and_HASH;
1427 mac_result = req_ctx->icv_dma_addr;
/* Load CTR key; 192-bit keys are padded to the HW max key size. */
1431 hw_desc_init(&desc[idx]);
1432 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1433 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1434 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1435 ctx->enc_keylen), NS_BIT);
1436 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1437 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1438 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1439 set_flow_mode(&desc[idx], S_DIN_to_AES);
1442 /* load ctr state */
1443 hw_desc_init(&desc[idx]);
1444 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1445 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1446 set_din_type(&desc[idx], DMA_DLLI,
1447 req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1448 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1449 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1450 set_flow_mode(&desc[idx], S_DIN_to_AES);
/* Load the same key into the MAC engine for CBC-MAC. */
1454 hw_desc_init(&desc[idx]);
1455 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1456 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1457 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1458 ctx->enc_keylen), NS_BIT);
1459 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1460 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1461 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1462 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1463 set_aes_not_hash_mode(&desc[idx]);
1466 /* load MAC state */
1467 hw_desc_init(&desc[idx]);
1468 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1469 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1470 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1471 AES_BLOCK_SIZE, NS_BIT);
1472 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1473 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1474 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1475 set_aes_not_hash_mode(&desc[idx]);
1478 /* process assoc data */
1479 if (req_ctx->assoclen > 0) {
1480 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* No AAD: hash the preformatted B0 (+A0) config block instead. */
1482 hw_desc_init(&desc[idx]);
1483 set_din_type(&desc[idx], DMA_DLLI,
1484 sg_dma_address(&req_ctx->ccm_adata_sg),
1485 AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1486 set_flow_mode(&desc[idx], DIN_HASH);
1490 /* process the cipher */
1491 if (req_ctx->cryptlen)
1492 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1494 /* Read temporal MAC */
1495 hw_desc_init(&desc[idx]);
1496 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1497 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1499 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1500 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1501 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1502 set_aes_not_hash_mode(&desc[idx]);
1505 /* load AES-CTR state (for last MAC calculation)*/
1506 hw_desc_init(&desc[idx]);
1507 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1508 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1509 set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1510 AES_BLOCK_SIZE, NS_BIT);
1511 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1512 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1513 set_flow_mode(&desc[idx], S_DIN_to_AES);
/* Memory barrier descriptor: order MAC readback before final encrypt. */
1516 hw_desc_init(&desc[idx]);
1517 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1518 set_dout_no_dma(&desc[idx], 0, 0, 1);
1521 /* encrypt the "T" value and store MAC in mac_state */
1522 hw_desc_init(&desc[idx]);
1523 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1524 ctx->authsize, NS_BIT);
1525 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1526 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1527 set_flow_mode(&desc[idx], DIN_AES_DOUT);
/*
 * config_ccm_adata() - build the CCM control blocks (B0, A0 and the
 * counter block CTR0) in the request context per RFC 3610 / NIST SP
 * 800-38C, and validate the IV's L parameter.
 *
 * Return: 0 on success; error paths (illegal L, message length overflow)
 * return an error code — extraction dropped those return lines.
 *
 * NOTE(review): extraction also dropped the `int rc;` declaration and the
 * ENCRYPT-branch cryptlen line; code left byte-identical.
 */
1534 static int config_ccm_adata(struct aead_request *req)
1536 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1537 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1538 struct device *dev = drvdata_to_dev(ctx->drvdata);
1539 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1540 //unsigned int size_of_a = 0, rem_a_size = 0;
1541 unsigned int lp = req->iv[0];
1542 /* Note: The code assume that req->iv[0] already contains the value
1545 unsigned int l = lp + 1; /* This is L' of RFC 3610. */
1546 unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
1547 u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1548 u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1549 u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
/* On decrypt the ICV is appended to cryptlen; MAC covers payload only. */
1550 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1551 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1553 (req->cryptlen - ctx->authsize);
1556 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1557 memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1559 /* taken from crypto/ccm.c */
1560 /* 2 <= L <= 8, so 1 <= L' <= 7. */
1561 if (l < 2 || l > 8) {
1562 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1565 memcpy(b0, req->iv, AES_BLOCK_SIZE);
1567 /* format control info per RFC 3610 and
1568 * NIST Special Publication 800-38C
1570 *b0 |= (8 * ((m - 2) / 2));
1571 if (req_ctx->assoclen > 0)
1572 *b0 |= 64; /* Enable bit 6 if Adata exists. */
1574 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
1576 dev_err(dev, "message len overflow detected");
1579 /* END of "taken from crypto/ccm.c" */
1581 /* l(a) - size of associated data. */
1582 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
/* Zero the length field of the IV to form the A0 counter template. */
1584 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1587 memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1588 ctr_count_0[15] = 0;
1593 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1595 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1596 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1597 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1600 memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1601 /* For RFC 4309, always use 4 bytes for message length
1602 * (at most 2^32-1 bytes).
1604 areq_ctx->ctr_iv[0] = 3;
1606 /* In RFC 4309 there is an 11-bytes nonce+IV part,
1607 * that we build here.
1609 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1610 CCM_BLOCK_NONCE_SIZE);
1611 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1613 req->iv = areq_ctx->ctr_iv;
1614 areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
/*
 * cc_set_ghash_desc() - prepare the GHASH engine for GCM: derive the hash
 * subkey H = AES_K(0^128), load it as the GHASH key, issue the HW quirk
 * descriptor that selects GHASH mode, and zero the initial GHASH state.
 *
 * NOTE(review): extraction dropped idx increments and some axi-id
 * arguments; code left byte-identical.
 */
1617 static void cc_set_ghash_desc(struct aead_request *req,
1618 struct cc_hw_desc desc[], unsigned int *seq_size)
1620 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1621 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1622 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1623 unsigned int idx = *seq_size;
1625 /* load key to AES*/
1626 hw_desc_init(&desc[idx]);
1627 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1628 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1629 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1630 ctx->enc_keylen, NS_BIT);
1631 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1632 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1633 set_flow_mode(&desc[idx], S_DIN_to_AES);
1636 /* process one zero block to generate hkey */
1637 hw_desc_init(&desc[idx]);
1638 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1639 set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1641 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1644 /* Memory Barrier */
1645 hw_desc_init(&desc[idx]);
1646 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1647 set_dout_no_dma(&desc[idx], 0, 0, 1);
1650 /* Load GHASH subkey */
1651 hw_desc_init(&desc[idx]);
1652 set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1653 AES_BLOCK_SIZE, NS_BIT);
1654 set_dout_no_dma(&desc[idx], 0, 0, 1);
1655 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1656 set_aes_not_hash_mode(&desc[idx]);
1657 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1658 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1659 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1662 /* Configure Hash Engine to work with GHASH.
1663 * Since it was not possible to extend HASH submodes to add GHASH,
1664 * The following command is necessary in order to
1665 * select GHASH (according to HW designers)
1667 hw_desc_init(&desc[idx]);
1668 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1669 set_dout_no_dma(&desc[idx], 0, 0, 1);
1670 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1671 set_aes_not_hash_mode(&desc[idx]);
1672 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1673 set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1674 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1675 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1676 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1679 /* Load GHASH initial STATE (which is 0). (for any hash there is an
1682 hw_desc_init(&desc[idx]);
1683 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1684 set_dout_no_dma(&desc[idx], 0, 0, 1);
1685 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1686 set_aes_not_hash_mode(&desc[idx]);
1687 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1688 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1689 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
/*
 * cc_set_gctr_desc() - load the GCTR (GCM counter mode) key and, when
 * there is payload to encrypt (not RFC4543 authenticate-only), the
 * J0+2 counter state used for the data blocks.
 *
 * NOTE(review): extraction dropped idx increments and closing braces;
 * code left byte-identical.
 */
1695 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1696 unsigned int *seq_size)
1698 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1699 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1700 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1701 unsigned int idx = *seq_size;
1703 /* load key to AES*/
1704 hw_desc_init(&desc[idx]);
1705 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1706 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1707 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1708 ctx->enc_keylen, NS_BIT);
1709 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1710 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1711 set_flow_mode(&desc[idx], S_DIN_to_AES);
1714 if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1715 /* load AES/CTR initial CTR value inc by 2*/
1716 hw_desc_init(&desc[idx]);
1717 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1718 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1719 set_din_type(&desc[idx], DMA_DLLI,
1720 req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1722 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1723 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1724 set_flow_mode(&desc[idx], S_DIN_to_AES);
/*
 * cc_proc_gcm_result() - finalize GCM: GHASH the length block, read back
 * the GHASH state, then GCTR-encrypt it with the J0+1 counter to produce
 * the authentication tag. The last descriptor carries the queue-completion
 * indication.
 *
 * Tag destination: mac_buf on decrypt (compared later against the received
 * ICV), the ICV location directly on encrypt.
 *
 * NOTE(review): extraction dropped idx increments and some axi arguments;
 * code left byte-identical.
 */
1731 static void cc_proc_gcm_result(struct aead_request *req,
1732 struct cc_hw_desc desc[],
1733 unsigned int *seq_size)
1735 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1736 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1737 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1738 dma_addr_t mac_result;
1739 unsigned int idx = *seq_size;
1741 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1742 mac_result = req_ctx->mac_buf_dma_addr;
1743 } else { /* Encrypt */
1744 mac_result = req_ctx->icv_dma_addr;
1747 /* process(ghash) gcm_block_len */
1748 hw_desc_init(&desc[idx]);
1749 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1750 AES_BLOCK_SIZE, NS_BIT);
1751 set_flow_mode(&desc[idx], DIN_HASH);
1754 /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1755 hw_desc_init(&desc[idx]);
1756 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1757 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1758 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1760 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1761 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1762 set_aes_not_hash_mode(&desc[idx]);
1766 /* load AES/CTR initial CTR value inc by 1*/
1767 hw_desc_init(&desc[idx]);
1768 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1769 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1770 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1771 AES_BLOCK_SIZE, NS_BIT);
1772 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1773 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1774 set_flow_mode(&desc[idx], S_DIN_to_AES);
1777 /* Memory Barrier */
1778 hw_desc_init(&desc[idx]);
1779 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1780 set_dout_no_dma(&desc[idx], 0, 0, 1);
1783 /* process GCTR on stored GHASH and store MAC in mac_state*/
1784 hw_desc_init(&desc[idx]);
1785 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1786 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1787 AES_BLOCK_SIZE, NS_BIT);
1788 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1789 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1790 set_flow_mode(&desc[idx], DIN_AES_DOUT);
/*
 * cc_gcm() - assemble the complete GCM descriptor sequence. RFC4543
 * (authenticate-only) bypasses the cipher and only GHASHes; GCM and
 * RFC4106 hash the AAD, run GCTR+GHASH over the payload, then finalize
 * the tag via cc_proc_gcm_result().
 *
 * NOTE(review): extraction dropped return statements and closing braces;
 * code left byte-identical.
 */
1796 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1797 unsigned int *seq_size)
1799 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1800 unsigned int cipher_flow_mode;
1802 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1803 cipher_flow_mode = AES_and_HASH;
1804 } else { /* Encrypt */
1805 cipher_flow_mode = AES_to_HASH_and_DOUT;
1808 //in RFC4543 no data to encrypt. just copy data from src to dest.
1809 if (req_ctx->plaintext_authenticate_only) {
1810 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1811 cc_set_ghash_desc(req, desc, seq_size);
1812 /* process(ghash) assoc data */
1813 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1814 cc_set_gctr_desc(req, desc, seq_size);
1815 cc_proc_gcm_result(req, desc, seq_size);
1819 // for gcm and rfc4106.
1820 cc_set_ghash_desc(req, desc, seq_size);
1821 /* process(ghash) assoc data */
1822 if (req_ctx->assoclen > 0)
1823 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1824 cc_set_gctr_desc(req, desc, seq_size);
1825 /* process(gctr+ghash) */
1826 if (req_ctx->cryptlen)
1827 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1828 cc_proc_gcm_result(req, desc, seq_size);
/*
 * config_gcm_context() - precompute per-request GCM material: zero hkey
 * and mac buffers, build the J0+2 and J0+1 counter blocks from req->iv,
 * and fill the GHASH length block (bit lengths of AAD and payload; for
 * RFC4543 everything counts as AAD and len_c stays zero).
 *
 * NOTE(review): extraction dropped the `__be64 temp64;` declaration, the
 * ENCRYPT-branch cryptlen line and closing braces; code left
 * byte-identical.
 */
1833 static int config_gcm_context(struct aead_request *req)
1835 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1836 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1837 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1838 struct device *dev = drvdata_to_dev(ctx->drvdata);
1840 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1841 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1843 (req->cryptlen - ctx->authsize);
1844 __be32 counter = cpu_to_be32(2);
1846 dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1847 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1849 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1851 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
/* J0 || counter=2 : initial counter for the payload blocks. */
1853 memcpy(req->iv + 12, &counter, 4);
1854 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
/* J0 || counter=1 : counter used to encrypt the tag. */
1856 counter = cpu_to_be32(1);
1857 memcpy(req->iv + 12, &counter, 4);
1858 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1860 if (!req_ctx->plaintext_authenticate_only) {
1863 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1864 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1865 temp64 = cpu_to_be64(cryptlen * 8);
1866 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1868 /* rfc4543=> all data(AAD,IV,Plain) are considered additional
1869 * data that is nothing is encrypted.
1873 temp64 = cpu_to_be64((req_ctx->assoclen +
1874 GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1875 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1877 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1883 static void cc_proc_rfc4_gcm(struct aead_request *req)
1885 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1886 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1887 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1889 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1890 ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1891 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1892 GCM_BLOCK_RFC4_IV_SIZE);
1893 req->iv = areq_ctx->ctr_iv;
1894 areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
/*
 * cc_proc_aead() - central AEAD request processor. Validates sizes, builds
 * the per-mode IV (RFC3686 CTR / CCM / GCM), prepares CCM/GCM control
 * blocks, DMA-maps the request, builds the descriptor sequence by auth
 * mode, and pushes it to the HW queue.
 *
 * Return: -EINPROGRESS/-EBUSY on successful submission; error codes
 * otherwise (error-return lines were dropped by extraction).
 *
 * NOTE(review): extraction also dropped `int rc = -EINVAL;`,
 * `unsigned int seq_len = 0;`, several `goto exit;`/`break;` lines and
 * the closing braces; code left byte-identical.
 */
1897 static int cc_proc_aead(struct aead_request *req,
1898 enum drv_crypto_direction direct)
1902 struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1903 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1904 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1905 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1906 struct device *dev = drvdata_to_dev(ctx->drvdata);
1907 struct cc_crypto_req cc_req = {};
1909 dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
1910 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1911 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1912 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1914 /* STAT_PHASE_0: Init and sanity checks */
1916 /* Check data length according to mode */
1917 if (validate_data_size(ctx, direct, req)) {
1918 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1919 req->cryptlen, areq_ctx->assoclen);
1923 /* Setup request structure */
1924 cc_req.user_cb = (void *)cc_aead_complete;
1925 cc_req.user_arg = (void *)req;
1927 /* Setup request context */
1928 areq_ctx->gen_ctx.op_type = direct;
1929 areq_ctx->req_authsize = ctx->authsize;
1930 areq_ctx->cipher_mode = ctx->cipher_mode;
1932 /* STAT_PHASE_1: Map buffers */
1934 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1935 /* Build CTR IV - Copy nonce from last 4 bytes in
1936 * CTR key to first 4 bytes in CTR IV
1938 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1939 CTR_RFC3686_NONCE_SIZE);
1940 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1941 CTR_RFC3686_IV_SIZE);
1942 /* Initialize counter portion of counter block */
1943 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1944 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1946 /* Replace with counter iv */
1947 req->iv = areq_ctx->ctr_iv;
1948 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1949 } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1950 (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1951 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
/* RFC4309/RFC4 wrappers may already have pointed req->iv at ctr_iv. */
1952 if (areq_ctx->ctr_iv != req->iv) {
1953 memcpy(areq_ctx->ctr_iv, req->iv,
1954 crypto_aead_ivsize(tfm));
1955 req->iv = areq_ctx->ctr_iv;
1958 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1961 if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1962 rc = config_ccm_adata(req);
1964 dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
1969 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1972 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1973 rc = config_gcm_context(req);
1975 dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
1981 rc = cc_map_aead_request(ctx->drvdata, req);
1983 dev_err(dev, "map_request() failed\n");
1987 /* STAT_PHASE_2: Create sequence */
1989 /* Load MLLI tables to SRAM if necessary */
1990 cc_mlli_to_sram(req, desc, &seq_len);
1992 /*TODO: move seq len by reference */
1993 switch (ctx->auth_mode) {
1995 case DRV_HASH_SHA256:
1996 cc_hmac_authenc(req, desc, &seq_len);
1998 case DRV_HASH_XCBC_MAC:
1999 cc_xcbc_authenc(req, desc, &seq_len);
2002 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2003 cc_ccm(req, desc, &seq_len);
2004 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2005 cc_gcm(req, desc, &seq_len);
2008 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2009 cc_unmap_aead_request(dev, req);
2014 /* STAT_PHASE_3: Lock HW and push sequence */
2016 rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2018 if (rc != -EINPROGRESS && rc != -EBUSY) {
2019 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2020 cc_unmap_aead_request(dev, req);
2027 static int cc_aead_encrypt(struct aead_request *req)
2029 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2032 memset(areq_ctx, 0, sizeof(*areq_ctx));
2034 /* No generated IV required */
2035 areq_ctx->backup_iv = req->iv;
2036 areq_ctx->assoclen = req->assoclen;
2037 areq_ctx->is_gcm4543 = false;
2039 areq_ctx->plaintext_authenticate_only = false;
2041 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2042 if (rc != -EINPROGRESS && rc != -EBUSY)
2043 req->iv = areq_ctx->backup_iv;
2048 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2050 /* Very similar to cc_aead_encrypt() above. */
2052 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2053 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2054 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2055 struct device *dev = drvdata_to_dev(ctx->drvdata);
2058 if (!valid_assoclen(req)) {
2059 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2063 memset(areq_ctx, 0, sizeof(*areq_ctx));
2065 /* No generated IV required */
2066 areq_ctx->backup_iv = req->iv;
2067 areq_ctx->assoclen = req->assoclen;
2068 areq_ctx->is_gcm4543 = true;
2070 cc_proc_rfc4309_ccm(req);
2072 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2073 if (rc != -EINPROGRESS && rc != -EBUSY)
2074 req->iv = areq_ctx->backup_iv;
2079 static int cc_aead_decrypt(struct aead_request *req)
2081 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2084 memset(areq_ctx, 0, sizeof(*areq_ctx));
2086 /* No generated IV required */
2087 areq_ctx->backup_iv = req->iv;
2088 areq_ctx->assoclen = req->assoclen;
2089 areq_ctx->is_gcm4543 = false;
2091 areq_ctx->plaintext_authenticate_only = false;
2093 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2094 if (rc != -EINPROGRESS && rc != -EBUSY)
2095 req->iv = areq_ctx->backup_iv;
2100 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2102 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2103 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2104 struct device *dev = drvdata_to_dev(ctx->drvdata);
2105 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2108 if (!valid_assoclen(req)) {
2109 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2113 memset(areq_ctx, 0, sizeof(*areq_ctx));
2115 /* No generated IV required */
2116 areq_ctx->backup_iv = req->iv;
2117 areq_ctx->assoclen = req->assoclen;
2119 areq_ctx->is_gcm4543 = true;
2120 cc_proc_rfc4309_ccm(req);
2122 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2123 if (rc != -EINPROGRESS && rc != -EBUSY)
2124 req->iv = areq_ctx->backup_iv;
2130 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2131 unsigned int keylen)
2133 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2134 struct device *dev = drvdata_to_dev(ctx->drvdata);
2136 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2142 memcpy(ctx->ctr_nonce, key + keylen, 4);
2144 return cc_aead_setkey(tfm, key, keylen);
2147 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2148 unsigned int keylen)
2150 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2151 struct device *dev = drvdata_to_dev(ctx->drvdata);
2153 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2159 memcpy(ctx->ctr_nonce, key + keylen, 4);
2161 return cc_aead_setkey(tfm, key, keylen);
2164 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2165 unsigned int authsize)
2180 return cc_aead_setauthsize(authenc, authsize);
2183 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2184 unsigned int authsize)
2186 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2187 struct device *dev = drvdata_to_dev(ctx->drvdata);
2189 dev_dbg(dev, "authsize %d\n", authsize);
2200 return cc_aead_setauthsize(authenc, authsize);
2203 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2204 unsigned int authsize)
2206 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2207 struct device *dev = drvdata_to_dev(ctx->drvdata);
2209 dev_dbg(dev, "authsize %d\n", authsize);
2214 return cc_aead_setauthsize(authenc, authsize);
2217 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2219 /* Very similar to cc_aead_encrypt() above. */
2221 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2222 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2223 struct device *dev = drvdata_to_dev(ctx->drvdata);
2224 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2227 if (!valid_assoclen(req)) {
2228 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2232 memset(areq_ctx, 0, sizeof(*areq_ctx));
2234 /* No generated IV required */
2235 areq_ctx->backup_iv = req->iv;
2236 areq_ctx->assoclen = req->assoclen;
2237 areq_ctx->plaintext_authenticate_only = false;
2239 cc_proc_rfc4_gcm(req);
2240 areq_ctx->is_gcm4543 = true;
2242 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2243 if (rc != -EINPROGRESS && rc != -EBUSY)
2244 req->iv = areq_ctx->backup_iv;
2249 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2251 /* Very similar to cc_aead_encrypt() above. */
2252 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2253 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2254 struct device *dev = drvdata_to_dev(ctx->drvdata);
2255 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2258 if (!valid_assoclen(req)) {
2259 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2263 memset(areq_ctx, 0, sizeof(*areq_ctx));
2265 //plaintext is not encryped with rfc4543
2266 areq_ctx->plaintext_authenticate_only = true;
2268 /* No generated IV required */
2269 areq_ctx->backup_iv = req->iv;
2270 areq_ctx->assoclen = req->assoclen;
2272 cc_proc_rfc4_gcm(req);
2273 areq_ctx->is_gcm4543 = true;
2275 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2276 if (rc != -EINPROGRESS && rc != -EBUSY)
2277 req->iv = areq_ctx->backup_iv;
2282 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2284 /* Very similar to cc_aead_decrypt() above. */
2286 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2287 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2288 struct device *dev = drvdata_to_dev(ctx->drvdata);
2289 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2292 if (!valid_assoclen(req)) {
2293 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2297 memset(areq_ctx, 0, sizeof(*areq_ctx));
2299 /* No generated IV required */
2300 areq_ctx->backup_iv = req->iv;
2301 areq_ctx->assoclen = req->assoclen;
2302 areq_ctx->plaintext_authenticate_only = false;
2304 cc_proc_rfc4_gcm(req);
2305 areq_ctx->is_gcm4543 = true;
2307 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2308 if (rc != -EINPROGRESS && rc != -EBUSY)
2309 req->iv = areq_ctx->backup_iv;
2314 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2316 /* Very similar to cc_aead_decrypt() above. */
2317 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2318 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2319 struct device *dev = drvdata_to_dev(ctx->drvdata);
2320 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2323 if (!valid_assoclen(req)) {
2324 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2328 memset(areq_ctx, 0, sizeof(*areq_ctx));
2330 //plaintext is not decryped with rfc4543
2331 areq_ctx->plaintext_authenticate_only = true;
2333 /* No generated IV required */
2334 areq_ctx->backup_iv = req->iv;
2335 areq_ctx->assoclen = req->assoclen;
2337 cc_proc_rfc4_gcm(req);
2338 areq_ctx->is_gcm4543 = true;
2340 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2341 if (rc != -EINPROGRESS && rc != -EBUSY)
2342 req->iv = areq_ctx->backup_iv;
2348 static struct cc_alg_template aead_algs[] = {
2350 .name = "authenc(hmac(sha1),cbc(aes))",
2351 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2352 .blocksize = AES_BLOCK_SIZE,
2354 .setkey = cc_aead_setkey,
2355 .setauthsize = cc_aead_setauthsize,
2356 .encrypt = cc_aead_encrypt,
2357 .decrypt = cc_aead_decrypt,
2358 .init = cc_aead_init,
2359 .exit = cc_aead_exit,
2360 .ivsize = AES_BLOCK_SIZE,
2361 .maxauthsize = SHA1_DIGEST_SIZE,
2363 .cipher_mode = DRV_CIPHER_CBC,
2364 .flow_mode = S_DIN_to_AES,
2365 .auth_mode = DRV_HASH_SHA1,
2366 .min_hw_rev = CC_HW_REV_630,
2367 .std_body = CC_STD_NIST,
2370 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2371 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2372 .blocksize = DES3_EDE_BLOCK_SIZE,
2374 .setkey = cc_des3_aead_setkey,
2375 .setauthsize = cc_aead_setauthsize,
2376 .encrypt = cc_aead_encrypt,
2377 .decrypt = cc_aead_decrypt,
2378 .init = cc_aead_init,
2379 .exit = cc_aead_exit,
2380 .ivsize = DES3_EDE_BLOCK_SIZE,
2381 .maxauthsize = SHA1_DIGEST_SIZE,
2383 .cipher_mode = DRV_CIPHER_CBC,
2384 .flow_mode = S_DIN_to_DES,
2385 .auth_mode = DRV_HASH_SHA1,
2386 .min_hw_rev = CC_HW_REV_630,
2387 .std_body = CC_STD_NIST,
2390 .name = "authenc(hmac(sha256),cbc(aes))",
2391 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2392 .blocksize = AES_BLOCK_SIZE,
2394 .setkey = cc_aead_setkey,
2395 .setauthsize = cc_aead_setauthsize,
2396 .encrypt = cc_aead_encrypt,
2397 .decrypt = cc_aead_decrypt,
2398 .init = cc_aead_init,
2399 .exit = cc_aead_exit,
2400 .ivsize = AES_BLOCK_SIZE,
2401 .maxauthsize = SHA256_DIGEST_SIZE,
2403 .cipher_mode = DRV_CIPHER_CBC,
2404 .flow_mode = S_DIN_to_AES,
2405 .auth_mode = DRV_HASH_SHA256,
2406 .min_hw_rev = CC_HW_REV_630,
2407 .std_body = CC_STD_NIST,
2410 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2411 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2412 .blocksize = DES3_EDE_BLOCK_SIZE,
2414 .setkey = cc_des3_aead_setkey,
2415 .setauthsize = cc_aead_setauthsize,
2416 .encrypt = cc_aead_encrypt,
2417 .decrypt = cc_aead_decrypt,
2418 .init = cc_aead_init,
2419 .exit = cc_aead_exit,
2420 .ivsize = DES3_EDE_BLOCK_SIZE,
2421 .maxauthsize = SHA256_DIGEST_SIZE,
2423 .cipher_mode = DRV_CIPHER_CBC,
2424 .flow_mode = S_DIN_to_DES,
2425 .auth_mode = DRV_HASH_SHA256,
2426 .min_hw_rev = CC_HW_REV_630,
2427 .std_body = CC_STD_NIST,
2430 .name = "authenc(xcbc(aes),cbc(aes))",
2431 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2432 .blocksize = AES_BLOCK_SIZE,
2434 .setkey = cc_aead_setkey,
2435 .setauthsize = cc_aead_setauthsize,
2436 .encrypt = cc_aead_encrypt,
2437 .decrypt = cc_aead_decrypt,
2438 .init = cc_aead_init,
2439 .exit = cc_aead_exit,
2440 .ivsize = AES_BLOCK_SIZE,
2441 .maxauthsize = AES_BLOCK_SIZE,
2443 .cipher_mode = DRV_CIPHER_CBC,
2444 .flow_mode = S_DIN_to_AES,
2445 .auth_mode = DRV_HASH_XCBC_MAC,
2446 .min_hw_rev = CC_HW_REV_630,
2447 .std_body = CC_STD_NIST,
2450 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2451 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2454 .setkey = cc_aead_setkey,
2455 .setauthsize = cc_aead_setauthsize,
2456 .encrypt = cc_aead_encrypt,
2457 .decrypt = cc_aead_decrypt,
2458 .init = cc_aead_init,
2459 .exit = cc_aead_exit,
2460 .ivsize = CTR_RFC3686_IV_SIZE,
2461 .maxauthsize = SHA1_DIGEST_SIZE,
2463 .cipher_mode = DRV_CIPHER_CTR,
2464 .flow_mode = S_DIN_to_AES,
2465 .auth_mode = DRV_HASH_SHA1,
2466 .min_hw_rev = CC_HW_REV_630,
2467 .std_body = CC_STD_NIST,
2470 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2471 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2474 .setkey = cc_aead_setkey,
2475 .setauthsize = cc_aead_setauthsize,
2476 .encrypt = cc_aead_encrypt,
2477 .decrypt = cc_aead_decrypt,
2478 .init = cc_aead_init,
2479 .exit = cc_aead_exit,
2480 .ivsize = CTR_RFC3686_IV_SIZE,
2481 .maxauthsize = SHA256_DIGEST_SIZE,
2483 .cipher_mode = DRV_CIPHER_CTR,
2484 .flow_mode = S_DIN_to_AES,
2485 .auth_mode = DRV_HASH_SHA256,
2486 .min_hw_rev = CC_HW_REV_630,
2487 .std_body = CC_STD_NIST,
2490 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2491 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2494 .setkey = cc_aead_setkey,
2495 .setauthsize = cc_aead_setauthsize,
2496 .encrypt = cc_aead_encrypt,
2497 .decrypt = cc_aead_decrypt,
2498 .init = cc_aead_init,
2499 .exit = cc_aead_exit,
2500 .ivsize = CTR_RFC3686_IV_SIZE,
2501 .maxauthsize = AES_BLOCK_SIZE,
2503 .cipher_mode = DRV_CIPHER_CTR,
2504 .flow_mode = S_DIN_to_AES,
2505 .auth_mode = DRV_HASH_XCBC_MAC,
2506 .min_hw_rev = CC_HW_REV_630,
2507 .std_body = CC_STD_NIST,
2511 .driver_name = "ccm-aes-ccree",
2514 .setkey = cc_aead_setkey,
2515 .setauthsize = cc_ccm_setauthsize,
2516 .encrypt = cc_aead_encrypt,
2517 .decrypt = cc_aead_decrypt,
2518 .init = cc_aead_init,
2519 .exit = cc_aead_exit,
2520 .ivsize = AES_BLOCK_SIZE,
2521 .maxauthsize = AES_BLOCK_SIZE,
2523 .cipher_mode = DRV_CIPHER_CCM,
2524 .flow_mode = S_DIN_to_AES,
2525 .auth_mode = DRV_HASH_NULL,
2526 .min_hw_rev = CC_HW_REV_630,
2527 .std_body = CC_STD_NIST,
2530 .name = "rfc4309(ccm(aes))",
2531 .driver_name = "rfc4309-ccm-aes-ccree",
2534 .setkey = cc_rfc4309_ccm_setkey,
2535 .setauthsize = cc_rfc4309_ccm_setauthsize,
2536 .encrypt = cc_rfc4309_ccm_encrypt,
2537 .decrypt = cc_rfc4309_ccm_decrypt,
2538 .init = cc_aead_init,
2539 .exit = cc_aead_exit,
2540 .ivsize = CCM_BLOCK_IV_SIZE,
2541 .maxauthsize = AES_BLOCK_SIZE,
2543 .cipher_mode = DRV_CIPHER_CCM,
2544 .flow_mode = S_DIN_to_AES,
2545 .auth_mode = DRV_HASH_NULL,
2546 .min_hw_rev = CC_HW_REV_630,
2547 .std_body = CC_STD_NIST,
2551 .driver_name = "gcm-aes-ccree",
2554 .setkey = cc_aead_setkey,
2555 .setauthsize = cc_gcm_setauthsize,
2556 .encrypt = cc_aead_encrypt,
2557 .decrypt = cc_aead_decrypt,
2558 .init = cc_aead_init,
2559 .exit = cc_aead_exit,
2561 .maxauthsize = AES_BLOCK_SIZE,
2563 .cipher_mode = DRV_CIPHER_GCTR,
2564 .flow_mode = S_DIN_to_AES,
2565 .auth_mode = DRV_HASH_NULL,
2566 .min_hw_rev = CC_HW_REV_630,
2567 .std_body = CC_STD_NIST,
2570 .name = "rfc4106(gcm(aes))",
2571 .driver_name = "rfc4106-gcm-aes-ccree",
2574 .setkey = cc_rfc4106_gcm_setkey,
2575 .setauthsize = cc_rfc4106_gcm_setauthsize,
2576 .encrypt = cc_rfc4106_gcm_encrypt,
2577 .decrypt = cc_rfc4106_gcm_decrypt,
2578 .init = cc_aead_init,
2579 .exit = cc_aead_exit,
2580 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2581 .maxauthsize = AES_BLOCK_SIZE,
2583 .cipher_mode = DRV_CIPHER_GCTR,
2584 .flow_mode = S_DIN_to_AES,
2585 .auth_mode = DRV_HASH_NULL,
2586 .min_hw_rev = CC_HW_REV_630,
2587 .std_body = CC_STD_NIST,
2590 .name = "rfc4543(gcm(aes))",
2591 .driver_name = "rfc4543-gcm-aes-ccree",
2594 .setkey = cc_rfc4543_gcm_setkey,
2595 .setauthsize = cc_rfc4543_gcm_setauthsize,
2596 .encrypt = cc_rfc4543_gcm_encrypt,
2597 .decrypt = cc_rfc4543_gcm_decrypt,
2598 .init = cc_aead_init,
2599 .exit = cc_aead_exit,
2600 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2601 .maxauthsize = AES_BLOCK_SIZE,
2603 .cipher_mode = DRV_CIPHER_GCTR,
2604 .flow_mode = S_DIN_to_AES,
2605 .auth_mode = DRV_HASH_NULL,
2606 .min_hw_rev = CC_HW_REV_630,
2607 .std_body = CC_STD_NIST,
2611 static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2614 struct cc_crypto_alg *t_alg;
2615 struct aead_alg *alg;
2617 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2619 return ERR_PTR(-ENOMEM);
2621 alg = &tmpl->template_aead;
2623 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2624 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2626 alg->base.cra_module = THIS_MODULE;
2627 alg->base.cra_priority = CC_CRA_PRIO;
2629 alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2630 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2631 alg->init = cc_aead_init;
2632 alg->exit = cc_aead_exit;
2634 t_alg->aead_alg = *alg;
2636 t_alg->cipher_mode = tmpl->cipher_mode;
2637 t_alg->flow_mode = tmpl->flow_mode;
2638 t_alg->auth_mode = tmpl->auth_mode;
2643 int cc_aead_free(struct cc_drvdata *drvdata)
2645 struct cc_crypto_alg *t_alg, *n;
2646 struct cc_aead_handle *aead_handle =
2647 (struct cc_aead_handle *)drvdata->aead_handle;
2650 /* Remove registered algs */
2651 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2653 crypto_unregister_aead(&t_alg->aead_alg);
2654 list_del(&t_alg->entry);
2658 drvdata->aead_handle = NULL;
2664 int cc_aead_alloc(struct cc_drvdata *drvdata)
2666 struct cc_aead_handle *aead_handle;
2667 struct cc_crypto_alg *t_alg;
2670 struct device *dev = drvdata_to_dev(drvdata);
2672 aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2678 INIT_LIST_HEAD(&aead_handle->aead_list);
2679 drvdata->aead_handle = aead_handle;
2681 aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2682 MAX_HMAC_DIGEST_SIZE);
2684 if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2685 dev_err(dev, "SRAM pool exhausted\n");
2691 for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2692 if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2693 !(drvdata->std_bodies & aead_algs[alg].std_body))
2696 t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2697 if (IS_ERR(t_alg)) {
2698 rc = PTR_ERR(t_alg);
2699 dev_err(dev, "%s alg allocation failed\n",
2700 aead_algs[alg].driver_name);
2703 t_alg->drvdata = drvdata;
2704 rc = crypto_register_aead(&t_alg->aead_alg);
2706 dev_err(dev, "%s alg registration failed\n",
2707 t_alg->aead_alg.base.cra_driver_name);
2710 list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2711 dev_dbg(dev, "Registered %s\n",
2712 t_alg->aead_alg.base.cra_driver_name);
2721 cc_aead_free(drvdata);