1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <crypto/algapi.h>
7 #include <crypto/internal/aead.h>
8 #include <crypto/authenc.h>
9 #include <crypto/internal/des.h>
10 #include <linux/rtnetlink.h>
11 #include "cc_driver.h"
12 #include "cc_buffer_mgr.h"
14 #include "cc_request_mgr.h"
16 #include "cc_sram_mgr.h"
18 #define template_aead template_u.aead
20 #define MAX_AEAD_SETKEY_SEQ 12
21 #define MAX_AEAD_PROCESS_SEQ 23
23 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
24 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
26 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
/* Per-driver AEAD state: SRAM scratch area plus the list of registered
 * AEAD algorithm instances.
 * NOTE(review): lines appear elided between the struct definitions in this
 * chunk (closing braces / struct headers missing) — verify against full file.
 */
28 struct cc_aead_handle {
29 cc_sram_addr_t sram_workspace_addr;
30 struct list_head aead_list;
/* HMAC authentication state: DMA-coherent IPAD/OPAD digests and the
 * block-size-padded authentication key.
 */
35 u8 *ipad_opad; /* IPAD, OPAD*/
36 dma_addr_t padded_authkey_dma_addr;
37 dma_addr_t ipad_opad_dma_addr;
/* XCBC-MAC authentication state: derived K1/K2/K3 subkeys (DMA-coherent). */
41 u8 *xcbc_keys; /* K1,K2,K3 */
42 dma_addr_t xcbc_keys_dma_addr;
/* Per-transform AEAD context: cipher/auth configuration, key material and
 * its DMA mapping.
 */
46 struct cc_drvdata *drvdata;
47 u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
49 dma_addr_t enckey_dma_addr;
51 struct cc_hmac_s hmac;
52 struct cc_xcbc_s xcbc;
54 unsigned int enc_keylen;
55 unsigned int auth_keylen;
56 unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
57 unsigned int hash_len;
58 enum drv_cipher_mode cipher_mode;
59 enum cc_flow_mode flow_mode;
60 enum drv_hash_mode auth_mode;
/* Return true when the request's associated-data length is one of the two
 * sizes this driver supports (16 or 20 bytes).
 */
63 static inline bool valid_assoclen(struct aead_request *req)
65 return ((req->assoclen == 16) || (req->assoclen == 20));
/* Transform destructor: release all DMA-coherent buffers allocated by
 * cc_aead_init() (encryption key, and either the XCBC subkeys or the HMAC
 * ipad/opad + padded authkey buffers, depending on auth_mode).
 */
68 static void cc_aead_exit(struct crypto_aead *tfm)
70 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
71 struct device *dev = drvdata_to_dev(ctx->drvdata);
73 dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
74 crypto_tfm_alg_name(&tfm->base));
76 /* Unmap enckey buffer */
78 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
79 ctx->enckey_dma_addr);
80 dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
81 &ctx->enckey_dma_addr);
82 ctx->enckey_dma_addr = 0;
86 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
87 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
89 if (xcbc->xcbc_keys) {
90 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
92 xcbc->xcbc_keys_dma_addr);
94 dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
95 &xcbc->xcbc_keys_dma_addr);
/* Clear stale pointers so a double-exit cannot double-free. */
96 xcbc->xcbc_keys_dma_addr = 0;
97 xcbc->xcbc_keys = NULL;
98 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
99 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
101 if (hmac->ipad_opad) {
102 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
104 hmac->ipad_opad_dma_addr);
105 dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
106 &hmac->ipad_opad_dma_addr);
107 hmac->ipad_opad_dma_addr = 0;
108 hmac->ipad_opad = NULL;
110 if (hmac->padded_authkey) {
111 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
112 hmac->padded_authkey,
113 hmac->padded_authkey_dma_addr);
114 dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
115 &hmac->padded_authkey_dma_addr);
116 hmac->padded_authkey_dma_addr = 0;
117 hmac->padded_authkey = NULL;
/* Return the hardware's default hash-length field size for this device. */
122 static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
124 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
126 return cc_get_default_hash_len(ctx->drvdata);
/* Transform constructor: copy the algorithm's cipher/flow/auth modes into
 * the context and allocate the DMA-coherent buffers the HW sequences use
 * (enckey always; XCBC K1..K3 or HMAC ipad/opad + padded authkey depending
 * on auth_mode). Paired with cc_aead_exit().
 */
129 static int cc_aead_init(struct crypto_aead *tfm)
131 struct aead_alg *alg = crypto_aead_alg(tfm);
132 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
133 struct cc_crypto_alg *cc_alg =
134 container_of(alg, struct cc_crypto_alg, aead_alg);
135 struct device *dev = drvdata_to_dev(cc_alg->drvdata);
137 dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
138 crypto_tfm_alg_name(&tfm->base));
140 /* Initialize modes in instance */
141 ctx->cipher_mode = cc_alg->cipher_mode;
142 ctx->flow_mode = cc_alg->flow_mode;
143 ctx->auth_mode = cc_alg->auth_mode;
144 ctx->drvdata = cc_alg->drvdata;
145 crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
147 /* Allocate key buffer, cache line aligned */
148 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
149 &ctx->enckey_dma_addr, GFP_KERNEL);
151 dev_err(dev, "Failed allocating key buffer\n");
154 dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
157 /* Set default authlen value */
159 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
160 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
161 const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
163 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
164 /* (and temporary for user key - up to 256b) */
165 xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
166 &xcbc->xcbc_keys_dma_addr,
168 if (!xcbc->xcbc_keys) {
169 dev_err(dev, "Failed allocating buffer for XCBC keys\n")
172 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
173 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
174 const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
175 dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
177 /* Allocate dma-coherent buffer for IPAD + OPAD */
178 hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
179 &hmac->ipad_opad_dma_addr,
182 if (!hmac->ipad_opad) {
183 dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
187 dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
190 hmac->padded_authkey = dma_alloc_coherent(dev,
195 if (!hmac->padded_authkey) {
196 dev_err(dev, "failed to allocate padded_authkey\n");
/* DRV_HASH_NULL (e.g. CCM): no separate auth key buffers needed. */
200 ctx->auth_state.hmac.ipad_opad = NULL;
201 ctx->auth_state.hmac.padded_authkey = NULL;
203 ctx->hash_len = cc_get_aead_hash_len(tfm);
/* Completion callback for an AEAD request: unmap DMA buffers, restore the
 * caller's IV pointer, verify the ICV on decrypt (zeroing the plaintext on
 * mismatch), copy out a fragmented ICV on encrypt, then complete the
 * request toward the crypto API.
 */
212 static void cc_aead_complete(struct device *dev, void *cc_req, int err)
214 struct aead_request *areq = (struct aead_request *)cc_req;
215 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
216 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
217 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
219 /* BACKLOG notification */
220 if (err == -EINPROGRESS)
223 cc_unmap_aead_request(dev, areq);
225 /* Restore ordinary iv pointer */
226 areq->iv = areq_ctx->backup_iv;
231 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
/* Constant-size compare of computed MAC vs. received ICV. */
232 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
233 ctx->authsize) != 0) {
234 dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
235 ctx->authsize, ctx->cipher_mode);
236 /* In case of payload authentication failure, MUST NOT
237 * reveal the decrypted message --> zero its memory.
239 sg_zero_buffer(areq->dst, sg_nents(areq->dst),
244 } else if (areq_ctx->is_icv_fragmented) {
245 u32 skip = areq->cryptlen + areq_ctx->dst_offset;
/* ICV spans multiple SG entries: gather it from dst into mac_buf. */
247 cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
248 skip, (skip + ctx->authsize),
252 aead_request_complete(areq, err);
/* Build the 4-descriptor HW sequence that derives the AES-XCBC-MAC subkeys
 * K1/K2/K3 (per RFC 3566): load the user key into the AES engine, then
 * encrypt the constants 0x01.., 0x02.., 0x03.. into the xcbc_keys buffer.
 */
255 static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
256 struct cc_aead_ctx *ctx)
258 /* Load the AES key */
259 hw_desc_init(&desc[0]);
260 /* We are using for the source/user key the same buffer
261 * as for the output keys, * because after this key loading it
262 * is not needed anymore
264 set_din_type(&desc[0], DMA_DLLI,
265 ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
267 set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
268 set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
269 set_key_size_aes(&desc[0], ctx->auth_keylen);
270 set_flow_mode(&desc[0], S_DIN_to_AES);
271 set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
/* K1 = E(K, 0x01 repeated) */
273 hw_desc_init(&desc[1]);
274 set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
275 set_flow_mode(&desc[1], DIN_AES_DOUT);
276 set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
277 AES_KEYSIZE_128, NS_BIT, 0);
/* K2 = E(K, 0x02 repeated) */
279 hw_desc_init(&desc[2]);
280 set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
281 set_flow_mode(&desc[2], DIN_AES_DOUT);
282 set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
284 AES_KEYSIZE_128, NS_BIT, 0);
/* K3 = E(K, 0x03 repeated) */
286 hw_desc_init(&desc[3]);
287 set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
288 set_flow_mode(&desc[3], DIN_AES_DOUT);
289 set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
290 + 2 * AES_KEYSIZE_128),
291 AES_KEYSIZE_128, NS_BIT, 0);
/* Build the HW sequence that precomputes the HMAC inner/outer digests:
 * for each of IPAD then OPAD, load the hash initial state, XOR the padded
 * auth key with the pad constant, hash one block, and write the resulting
 * intermediate digest into ipad_opad[] at the proper offset.
 * Returns the number of descriptors written.
 */
296 static unsigned int hmac_setkey(struct cc_hw_desc *desc,
297 struct cc_aead_ctx *ctx)
299 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
300 unsigned int digest_ofs = 0;
301 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
302 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
303 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
304 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
305 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
307 unsigned int idx = 0;
310 /* calc derived HMAC key */
311 for (i = 0; i < 2; i++) {
312 /* Load hash initial state */
313 hw_desc_init(&desc[idx]);
314 set_cipher_mode(&desc[idx], hash_mode);
315 set_din_sram(&desc[idx],
316 cc_larval_digest_addr(ctx->drvdata,
319 set_flow_mode(&desc[idx], S_DIN_to_HASH);
320 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
323 /* Load the hash current length*/
324 hw_desc_init(&desc[idx]);
325 set_cipher_mode(&desc[idx], hash_mode);
326 set_din_const(&desc[idx], 0, ctx->hash_len);
327 set_flow_mode(&desc[idx], S_DIN_to_HASH);
328 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
331 /* Prepare ipad key */
332 hw_desc_init(&desc[idx]);
333 set_xor_val(&desc[idx], hmac_pad_const[i]);
334 set_cipher_mode(&desc[idx], hash_mode);
335 set_flow_mode(&desc[idx], S_DIN_to_HASH);
336 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
339 /* Perform HASH update */
340 hw_desc_init(&desc[idx]);
341 set_din_type(&desc[idx], DMA_DLLI,
342 hmac->padded_authkey_dma_addr,
343 SHA256_BLOCK_SIZE, NS_BIT);
344 set_cipher_mode(&desc[idx], hash_mode);
345 set_xor_active(&desc[idx]);
346 set_flow_mode(&desc[idx], DIN_HASH);
/* Write the intermediate digest out to ipad_opad[digest_ofs]. */
350 hw_desc_init(&desc[idx]);
351 set_cipher_mode(&desc[idx], hash_mode);
352 set_dout_dlli(&desc[idx],
353 (hmac->ipad_opad_dma_addr + digest_ofs),
354 digest_size, NS_BIT, 0);
355 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
356 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
357 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
360 digest_ofs += digest_size;
/* Sanity-check the auth and cipher key lengths recorded in the context
 * against the configured auth_mode and flow_mode. Returns 0 on success.
 */
366 static int validate_keys_sizes(struct cc_aead_ctx *ctx)
368 struct device *dev = drvdata_to_dev(ctx->drvdata);
370 dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
371 ctx->enc_keylen, ctx->auth_keylen);
373 switch (ctx->auth_mode) {
375 case DRV_HASH_SHA256:
377 case DRV_HASH_XCBC_MAC:
/* XCBC accepts any standard AES key size as auth key. */
378 if (ctx->auth_keylen != AES_KEYSIZE_128 &&
379 ctx->auth_keylen != AES_KEYSIZE_192 &&
380 ctx->auth_keylen != AES_KEYSIZE_256)
383 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
384 if (ctx->auth_keylen > 0)
388 dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
391 /* Check cipher key size */
392 if (ctx->flow_mode == S_DIN_to_DES) {
393 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
394 dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
398 } else { /* Default assumed to be AES ciphers */
399 if (ctx->enc_keylen != AES_KEYSIZE_128 &&
400 ctx->enc_keylen != AES_KEYSIZE_192 &&
401 ctx->enc_keylen != AES_KEYSIZE_256) {
402 dev_err(dev, "Invalid cipher(AES) key size: %u\n",
408 return 0; /* All tests of keys sizes passed */
411 /* This function prepares the user key so it can be passed to the HMAC
412 * processing (copy to internal buffer, or hash in case the key is longer
 * than the block size).
/* Normalize the user's HMAC auth key into the padded_authkey buffer:
 * keys longer than the hash block size are hashed down to digest size and
 * zero-padded; shorter keys are copied and zero-padded to block size.
 * Runs the prepared descriptor sequence synchronously on the HW queue.
 * Returns 0 on success or a negative errno.
 */
414 static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
417 dma_addr_t key_dma_addr = 0;
418 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
419 struct device *dev = drvdata_to_dev(ctx->drvdata);
420 u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
421 struct cc_crypto_req cc_req = {};
422 unsigned int blocksize;
423 unsigned int digestsize;
424 unsigned int hashmode;
425 unsigned int idx = 0;
428 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
429 dma_addr_t padded_authkey_dma_addr =
430 ctx->auth_state.hmac.padded_authkey_dma_addr;
432 switch (ctx->auth_mode) { /* auth_key required and >0 */
434 blocksize = SHA1_BLOCK_SIZE;
435 digestsize = SHA1_DIGEST_SIZE;
436 hashmode = DRV_HASH_HW_SHA1;
438 case DRV_HASH_SHA256:
440 blocksize = SHA256_BLOCK_SIZE;
441 digestsize = SHA256_DIGEST_SIZE;
442 hashmode = DRV_HASH_HW_SHA256;
/* Copy the key so it can be DMA-mapped (caller's buffer may not be). */
447 key = kmemdup(authkey, keylen, GFP_KERNEL);
451 key_dma_addr = dma_map_single(dev, (void *)key, keylen,
453 if (dma_mapping_error(dev, key_dma_addr)) {
454 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
459 if (keylen > blocksize) {
460 /* Load hash initial state */
461 hw_desc_init(&desc[idx]);
462 set_cipher_mode(&desc[idx], hashmode);
463 set_din_sram(&desc[idx], larval_addr, digestsize);
464 set_flow_mode(&desc[idx], S_DIN_to_HASH);
465 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
468 /* Load the hash current length*/
469 hw_desc_init(&desc[idx]);
470 set_cipher_mode(&desc[idx], hashmode);
471 set_din_const(&desc[idx], 0, ctx->hash_len);
472 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
473 set_flow_mode(&desc[idx], S_DIN_to_HASH);
474 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* Hash the over-length key... */
477 hw_desc_init(&desc[idx]);
478 set_din_type(&desc[idx], DMA_DLLI,
479 key_dma_addr, keylen, NS_BIT);
480 set_flow_mode(&desc[idx], DIN_HASH);
/* ...and write the digest to the start of padded_authkey. */
484 hw_desc_init(&desc[idx]);
485 set_cipher_mode(&desc[idx], hashmode);
486 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
487 digestsize, NS_BIT, 0);
488 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
489 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
490 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
491 set_cipher_config0(&desc[idx],
492 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
/* Zero-pad the remainder (blocksize - digestsize) via BYPASS DMA. */
495 hw_desc_init(&desc[idx]);
496 set_din_const(&desc[idx], 0, (blocksize - digestsize));
497 set_flow_mode(&desc[idx], BYPASS);
498 set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
499 digestsize), (blocksize - digestsize),
/* keylen <= blocksize: copy the key as-is... */
503 hw_desc_init(&desc[idx]);
504 set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
506 set_flow_mode(&desc[idx], BYPASS);
507 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
/* ...and zero-pad the tail if the key is shorter than a block. */
511 if ((blocksize - keylen) != 0) {
512 hw_desc_init(&desc[idx]);
513 set_din_const(&desc[idx], 0,
514 (blocksize - keylen));
515 set_flow_mode(&desc[idx], BYPASS);
516 set_dout_dlli(&desc[idx],
517 (padded_authkey_dma_addr +
519 (blocksize - keylen), NS_BIT, 0);
/* keylen == 0 path: fill the whole buffer with zeros. */
524 hw_desc_init(&desc[idx]);
525 set_din_const(&desc[idx], 0, (blocksize - keylen));
526 set_flow_mode(&desc[idx], BYPASS);
527 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
528 blocksize, NS_BIT, 0);
532 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
534 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
537 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
/* AEAD .setkey handler: split an authenc() blob into enc/auth keys (or use
 * the whole blob as the cipher key for non-authenc modes), extract the CTR
 * nonce for rfc3686, validate sizes, copy key material into the context,
 * then run the mode-specific key-derivation sequence on the HW.
 */
544 static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
547 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
548 struct cc_crypto_req cc_req = {};
549 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
550 unsigned int seq_len = 0;
551 struct device *dev = drvdata_to_dev(ctx->drvdata);
552 const u8 *enckey, *authkey;
555 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
556 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
558 /* STAT_PHASE_0: Init and sanity checks */
560 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
561 struct crypto_authenc_keys keys;
563 rc = crypto_authenc_extractkeys(&keys, key, keylen);
566 enckey = keys.enckey;
567 authkey = keys.authkey;
568 ctx->enc_keylen = keys.enckeylen;
569 ctx->auth_keylen = keys.authkeylen;
571 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
572 /* the nonce is stored in bytes at end of key */
574 if (ctx->enc_keylen <
575 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
577 /* Copy nonce from last 4 bytes in CTR key to
578 * first 4 bytes in CTR IV
580 memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
581 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
582 /* Set CTR key size */
583 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
585 } else { /* non-authenc - has just one key */
588 ctx->enc_keylen = keylen;
589 ctx->auth_keylen = 0;
592 rc = validate_keys_sizes(ctx);
596 /* STAT_PHASE_1: Copy key to ctx */
598 /* Get key material */
599 memcpy(ctx->enckey, enckey, ctx->enc_keylen);
/* AES-192 key: zero-extend the buffer tail the HW will read. */
600 if (ctx->enc_keylen == 24)
601 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
602 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
603 memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
605 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
606 rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
611 /* STAT_PHASE_2: Create sequence */
613 switch (ctx->auth_mode) {
615 case DRV_HASH_SHA256:
616 seq_len = hmac_setkey(desc, ctx);
618 case DRV_HASH_XCBC_MAC:
619 seq_len = xcbc_setkey(desc, ctx);
621 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
622 break; /* No auth. key setup */
624 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
629 /* STAT_PHASE_3: Submit sequence to HW */
631 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
632 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
634 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
639 /* Update STAT_PHASE_3 */
643 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* 3DES authenc .setkey wrapper: verify the DES3 enc key (weak-key check)
 * before delegating to cc_aead_setkey(). Key material copied into the
 * local struct is wiped before returning.
 */
649 static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
652 struct crypto_authenc_keys keys;
655 err = crypto_authenc_extractkeys(&keys, key, keylen);
659 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
660 cc_aead_setkey(aead, key, keylen);
662 memzero_explicit(&keys, sizeof(keys));
/* rfc4309 CCM .setkey: the last 3 key bytes are the nonce — stash them in
 * ctx->ctr_nonce and pass the remaining bytes as the cipher key.
 */
666 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
669 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
675 memcpy(ctx->ctr_nonce, key + keylen, 3);
677 return cc_aead_setkey(tfm, key, keylen);
/* AEAD .setauthsize: reject sizes above the algorithm maximum, then record
 * the (possibly truncated) ICV size in the context.
 */
680 static int cc_aead_setauthsize(struct crypto_aead *authenc,
681 unsigned int authsize)
683 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
684 struct device *dev = drvdata_to_dev(ctx->drvdata);
686 /* Unsupported auth. sizes */
688 authsize > crypto_aead_maxauthsize(authenc)) {
692 ctx->authsize = authsize;
693 dev_dbg(dev, "authlen=%d\n", ctx->authsize);
/* rfc4309 CCM .setauthsize: restrict to the RFC-allowed ICV sizes (the
 * validation lines are elided in this chunk) before delegating.
 */
698 static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
699 unsigned int authsize)
710 return cc_aead_setauthsize(authenc, authsize);
/* Plain CCM .setauthsize: restrict to CCM's even ICV sizes (validation
 * lines elided in this chunk) before delegating.
 */
713 static int cc_ccm_setauthsize(struct crypto_aead *authenc,
714 unsigned int authsize)
729 return cc_aead_setauthsize(authenc, authsize);
/* Append a descriptor that feeds the associated data into the given flow
 * (DLLI for a single contiguous buffer, MLLI for a scatter list table).
 * For XCBC with payload following, mark DIN as "not last" so the MAC state
 * stays open.
 */
732 static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
733 struct cc_hw_desc desc[], unsigned int *seq_size)
735 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
736 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
737 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
738 enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
739 unsigned int idx = *seq_size;
740 struct device *dev = drvdata_to_dev(ctx->drvdata);
742 switch (assoc_dma_type) {
743 case CC_DMA_BUF_DLLI:
744 dev_dbg(dev, "ASSOC buffer type DLLI\n");
745 hw_desc_init(&desc[idx]);
746 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
747 areq_ctx->assoclen, NS_BIT);
748 set_flow_mode(&desc[idx], flow_mode);
749 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
750 areq_ctx->cryptlen > 0)
751 set_din_not_last_indication(&desc[idx]);
753 case CC_DMA_BUF_MLLI:
754 dev_dbg(dev, "ASSOC buffer type MLLI\n");
755 hw_desc_init(&desc[idx]);
756 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
757 areq_ctx->assoc.mlli_nents, NS_BIT);
758 set_flow_mode(&desc[idx], flow_mode);
759 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
760 areq_ctx->cryptlen > 0)
761 set_din_not_last_indication(&desc[idx]);
763 case CC_DMA_BUF_NULL:
765 dev_err(dev, "Invalid ASSOC buffer type\n");
/* Append a descriptor that feeds the ciphertext/plaintext region into the
 * authentication engine. For DLLI, pick dst on encrypt / src on decrypt so
 * the MAC covers the ciphertext. For MLLI in double-pass mode a combined
 * assoc+iv+data table is used; single-pass selects the per-direction table.
 */
771 static void cc_proc_authen_desc(struct aead_request *areq,
772 unsigned int flow_mode,
773 struct cc_hw_desc desc[],
774 unsigned int *seq_size, int direct)
776 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
777 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
778 unsigned int idx = *seq_size;
779 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
780 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
781 struct device *dev = drvdata_to_dev(ctx->drvdata);
783 switch (data_dma_type) {
784 case CC_DMA_BUF_DLLI:
786 struct scatterlist *cipher =
787 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
788 areq_ctx->dst_sgl : areq_ctx->src_sgl;
790 unsigned int offset =
791 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
792 areq_ctx->dst_offset : areq_ctx->src_offset;
793 dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
794 hw_desc_init(&desc[idx]);
795 set_din_type(&desc[idx], DMA_DLLI,
796 (sg_dma_address(cipher) + offset),
797 areq_ctx->cryptlen, NS_BIT);
798 set_flow_mode(&desc[idx], flow_mode);
801 case CC_DMA_BUF_MLLI:
803 /* DOUBLE-PASS flow (as default)
804 * assoc. + iv + data -compact in one table
805 * if assoclen is ZERO only IV perform
807 cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
808 u32 mlli_nents = areq_ctx->assoc.mlli_nents;
810 if (areq_ctx->is_single_pass) {
811 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
812 mlli_addr = areq_ctx->dst.sram_addr;
813 mlli_nents = areq_ctx->dst.mlli_nents;
815 mlli_addr = areq_ctx->src.sram_addr;
816 mlli_nents = areq_ctx->src.mlli_nents;
820 dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
821 hw_desc_init(&desc[idx]);
822 set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
824 set_flow_mode(&desc[idx], flow_mode);
827 case CC_DMA_BUF_NULL:
829 dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
/* Append the DIN/DOUT descriptor that streams the payload through the
 * cipher engine (src -> dst), using DLLI or MLLI addressing as prepared by
 * the buffer manager. No-op when cryptlen is zero.
 */
835 static void cc_proc_cipher_desc(struct aead_request *areq,
836 unsigned int flow_mode,
837 struct cc_hw_desc desc[],
838 unsigned int *seq_size)
840 unsigned int idx = *seq_size;
841 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
842 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
843 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
844 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
845 struct device *dev = drvdata_to_dev(ctx->drvdata);
847 if (areq_ctx->cryptlen == 0)
848 return; /*null processing*/
850 switch (data_dma_type) {
851 case CC_DMA_BUF_DLLI:
852 dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
853 hw_desc_init(&desc[idx]);
854 set_din_type(&desc[idx], DMA_DLLI,
855 (sg_dma_address(areq_ctx->src_sgl) +
856 areq_ctx->src_offset), areq_ctx->cryptlen,
858 set_dout_dlli(&desc[idx],
859 (sg_dma_address(areq_ctx->dst_sgl) +
860 areq_ctx->dst_offset),
861 areq_ctx->cryptlen, NS_BIT, 0);
862 set_flow_mode(&desc[idx], flow_mode);
864 case CC_DMA_BUF_MLLI:
865 dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
866 hw_desc_init(&desc[idx]);
867 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
868 areq_ctx->src.mlli_nents, NS_BIT);
869 set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
870 areq_ctx->dst.mlli_nents, NS_BIT, 0);
871 set_flow_mode(&desc[idx], flow_mode);
873 case CC_DMA_BUF_NULL:
875 dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
/* Append the descriptor that writes the final ICV out of the hash/XCBC
 * engine: on encrypt straight into the request's ICV location, on decrypt
 * into mac_buf for the completion-time memcmp() against the received ICV.
 */
881 static void cc_proc_digest_desc(struct aead_request *req,
882 struct cc_hw_desc desc[],
883 unsigned int *seq_size)
885 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
886 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
887 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
888 unsigned int idx = *seq_size;
889 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
890 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
891 int direct = req_ctx->gen_ctx.op_type;
893 /* Get final ICV result */
894 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
895 hw_desc_init(&desc[idx]);
896 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
897 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
898 set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
900 set_queue_last_ind(ctx->drvdata, &desc[idx]);
901 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
902 set_aes_not_hash_mode(&desc[idx]);
903 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
905 set_cipher_config0(&desc[idx],
906 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
907 set_cipher_mode(&desc[idx], hash_mode);
910 /* Get ICV out from hardware */
911 hw_desc_init(&desc[idx]);
912 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
913 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
914 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
915 ctx->authsize, NS_BIT, 1);
916 set_queue_last_ind(ctx->drvdata, &desc[idx]);
917 set_cipher_config0(&desc[idx],
918 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
919 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
920 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
921 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
922 set_aes_not_hash_mode(&desc[idx]);
924 set_cipher_mode(&desc[idx], hash_mode);
/* Append the two cipher-setup descriptors: load the IV/counter state
 * (STATE1 for CTR, STATE0 otherwise) and then the cipher key (AES keys are
 * padded to the max size for 192-bit keys; DES uses its own key loader).
 */
931 static void cc_set_cipher_desc(struct aead_request *req,
932 struct cc_hw_desc desc[],
933 unsigned int *seq_size)
935 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
936 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
937 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
938 unsigned int hw_iv_size = req_ctx->hw_iv_size;
939 unsigned int idx = *seq_size;
940 int direct = req_ctx->gen_ctx.op_type;
942 /* Setup cipher state */
943 hw_desc_init(&desc[idx]);
944 set_cipher_config0(&desc[idx], direct);
945 set_flow_mode(&desc[idx], ctx->flow_mode);
946 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
948 if (ctx->cipher_mode == DRV_CIPHER_CTR)
949 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
951 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
952 set_cipher_mode(&desc[idx], ctx->cipher_mode);
/* Load the cipher key. */
956 hw_desc_init(&desc[idx]);
957 set_cipher_config0(&desc[idx], direct);
958 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
959 set_flow_mode(&desc[idx], ctx->flow_mode);
960 if (ctx->flow_mode == S_DIN_to_AES) {
961 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
962 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
963 ctx->enc_keylen), NS_BIT);
964 set_key_size_aes(&desc[idx], ctx->enc_keylen);
966 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
967 ctx->enc_keylen, NS_BIT);
968 set_key_size_des(&desc[idx], ctx->enc_keylen);
970 set_cipher_mode(&desc[idx], ctx->cipher_mode);
/* Run the cipher pass: set up state+key, stream the data, and on encrypt
 * append a dummy no-DMA descriptor that acts as a barrier so the hash pass
 * only starts after all ciphertext has been written.
 */
976 static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
977 unsigned int *seq_size, unsigned int data_flow_mode)
979 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
980 int direct = req_ctx->gen_ctx.op_type;
981 unsigned int idx = *seq_size;
983 if (req_ctx->cryptlen == 0)
984 return; /*null processing*/
986 cc_set_cipher_desc(req, desc, &idx);
987 cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
988 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
989 /* We must wait for DMA to write all cipher */
990 hw_desc_init(&desc[idx]);
991 set_din_no_dma(&desc[idx], 0, 0xfffff0);
992 set_dout_no_dma(&desc[idx], 0, 0, 1);
/* Load the precomputed HMAC inner (ipad) digest as the hash initial state
 * plus the initial digest length, ready for hashing assoc data + payload.
 */
999 static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
1000 unsigned int *seq_size)
1002 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1003 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1004 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1005 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1006 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1007 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1008 unsigned int idx = *seq_size;
1010 /* Loading hash ipad xor key state */
1011 hw_desc_init(&desc[idx]);
1012 set_cipher_mode(&desc[idx], hash_mode);
1013 set_din_type(&desc[idx], DMA_DLLI,
1014 ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1016 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1017 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1020 /* Load init. digest len (64 bytes) */
1021 hw_desc_init(&desc[idx]);
1022 set_cipher_mode(&desc[idx], hash_mode);
1023 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1025 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1026 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* Set up the XCBC-MAC engine: zero the MAC state, then load the derived
 * K1 (as cipher key) and K2/K3 (as STATE1/STATE2) from the xcbc_keys
 * buffer prepared by xcbc_setkey().
 */
1032 static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1033 unsigned int *seq_size)
1035 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1036 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1037 unsigned int idx = *seq_size;
1039 /* Loading MAC state */
1040 hw_desc_init(&desc[idx]);
1041 set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1042 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1043 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1044 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1045 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1046 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1047 set_aes_not_hash_mode(&desc[idx]);
1050 /* Setup XCBC MAC K1 */
1051 hw_desc_init(&desc[idx]);
1052 set_din_type(&desc[idx], DMA_DLLI,
1053 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1054 AES_KEYSIZE_128, NS_BIT);
1055 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1056 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1057 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1058 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1059 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1060 set_aes_not_hash_mode(&desc[idx]);
1063 /* Setup XCBC MAC K2 */
1064 hw_desc_init(&desc[idx]);
1065 set_din_type(&desc[idx], DMA_DLLI,
1066 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1067 AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1068 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1069 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1070 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1071 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1072 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1073 set_aes_not_hash_mode(&desc[idx]);
1076 /* Setup XCBC MAC K3 */
1077 hw_desc_init(&desc[idx]);
1078 set_din_type(&desc[idx], DMA_DLLI,
1079 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1080 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1081 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1082 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1083 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1084 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1085 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1086 set_aes_not_hash_mode(&desc[idx]);
/* Hash the associated data (header) into the auth engine, if any. */
1092 static void cc_proc_header_desc(struct aead_request *req,
1093 struct cc_hw_desc desc[],
1094 unsigned int *seq_size)
1096 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1097 unsigned int idx = *seq_size;
1099 /* Hash associated data */
1100 if (areq_ctx->assoclen > 0)
1101 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Finalize the HMAC scheme: pad and write the inner-hash result to the
 * SRAM workspace, reload the precomputed opad state and digest length,
 * then hash the inner digest to produce the outer (final) HMAC value.
 */
1107 static void cc_proc_scheme_desc(struct aead_request *req,
1108 struct cc_hw_desc desc[],
1109 unsigned int *seq_size)
1111 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1112 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1113 struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1114 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1115 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1116 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1117 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1118 unsigned int idx = *seq_size;
/* Pad the inner hash and park its state in the SRAM workspace. */
1120 hw_desc_init(&desc[idx]);
1121 set_cipher_mode(&desc[idx], hash_mode);
1122 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1124 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1125 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1126 set_cipher_do(&desc[idx], DO_PAD);
1129 /* Get final ICV result */
1130 hw_desc_init(&desc[idx]);
1131 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1133 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1134 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1135 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1136 set_cipher_mode(&desc[idx], hash_mode);
1139 /* Loading hash opad xor key state */
1140 hw_desc_init(&desc[idx]);
1141 set_cipher_mode(&desc[idx], hash_mode);
1142 set_din_type(&desc[idx], DMA_DLLI,
1143 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1144 digest_size, NS_BIT);
1145 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1146 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1149 /* Load init. digest len (64 bytes) */
1150 hw_desc_init(&desc[idx]);
1151 set_cipher_mode(&desc[idx], hash_mode);
1152 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1154 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1155 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1156 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1159 /* Perform HASH update */
1160 hw_desc_init(&desc[idx]);
1161 set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1163 set_flow_mode(&desc[idx], DIN_HASH);
/*
 * Copy the MLLI (HW scatter/gather link table) from host memory into the
 * device SRAM with a BYPASS descriptor. Needed whenever either the assoc
 * or data buffer was mapped as MLLI, or the request takes the two-pass
 * path — and only if a table was actually built (mlli_len != 0).
 */
1169 static void cc_mlli_to_sram(struct aead_request *req,
1170 struct cc_hw_desc desc[], unsigned int *seq_size)
1172 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1173 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1174 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1175 struct device *dev = drvdata_to_dev(ctx->drvdata);
1177 if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1178 req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1179 !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1180 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1181 (unsigned int)ctx->drvdata->mlli_sram_addr,
1182 req_ctx->mlli_params.mlli_len);
1183 /* Copy MLLI table host-to-sram */
1184 hw_desc_init(&desc[*seq_size]);
1185 set_din_type(&desc[*seq_size], DMA_DLLI,
1186 req_ctx->mlli_params.mlli_dma_addr,
1187 req_ctx->mlli_params.mlli_len, NS_BIT);
1188 set_dout_sram(&desc[*seq_size],
1189 ctx->drvdata->mlli_sram_addr,
1190 req_ctx->mlli_params.mlli_len);
1191 set_flow_mode(&desc[*seq_size], BYPASS);
/*
 * Pick the HW data flow mode for the payload pass.
 * Encrypt + single-pass: cipher output is fed to the hash engine while
 * being written out (*_to_HASH_and_DOUT); decrypt + single-pass: cipher
 * and hash run together on the input (*_and_HASH). Two-pass requests
 * only cipher here (DIN_*_DOUT) — authentication is a separate pass.
 * The AES vs DES engine variant follows setup_flow_mode.
 */
1196 static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1197 enum cc_flow_mode setup_flow_mode,
1198 bool is_single_pass)
1200 enum cc_flow_mode data_flow_mode;
1202 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1203 if (setup_flow_mode == S_DIN_to_AES)
1204 data_flow_mode = is_single_pass ?
1205 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1207 data_flow_mode = is_single_pass ?
1208 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1209 } else { /* Decrypt */
1210 if (setup_flow_mode == S_DIN_to_AES)
1211 data_flow_mode = is_single_pass ?
1212 AES_and_HASH : DIN_AES_DOUT;
1214 data_flow_mode = is_single_pass ?
1215 DES_and_HASH : DIN_DES_DOUT;
1218 return data_flow_mode;
/*
 * Build the descriptor sequence for HMAC-based authenc (e.g.
 * authenc(hmac(shaX),cbc(...))). Single-pass interleaves cipher and
 * hash in one walk of the data; otherwise fall back to two passes:
 * encrypt-then-MAC for encryption, MAC-then-decrypt for decryption, so
 * the MAC is always computed over the ciphertext side expected by the
 * authenc construction.
 */
1221 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1222 unsigned int *seq_size)
1224 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1225 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1226 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1227 int direct = req_ctx->gen_ctx.op_type;
1228 unsigned int data_flow_mode =
1229 cc_get_data_flow(direct, ctx->flow_mode,
1230 req_ctx->is_single_pass);
1232 if (req_ctx->is_single_pass) {
/* Single pass: set up both engines, then stream header + payload once */
1236 cc_set_hmac_desc(req, desc, seq_size);
1237 cc_set_cipher_desc(req, desc, seq_size);
1238 cc_proc_header_desc(req, desc, seq_size);
1239 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1240 cc_proc_scheme_desc(req, desc, seq_size);
1241 cc_proc_digest_desc(req, desc, seq_size);
1247 * Fallback for unsupported single-pass modes,
1248 * i.e. using assoc. data of non-word-multiple
1250 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1251 /* encrypt first.. */
1252 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1253 /* authenc after..*/
1254 cc_set_hmac_desc(req, desc, seq_size);
1255 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1256 cc_proc_scheme_desc(req, desc, seq_size);
1257 cc_proc_digest_desc(req, desc, seq_size);
1259 } else { /*DECRYPT*/
1260 /* authenc first..*/
1261 cc_set_hmac_desc(req, desc, seq_size);
1262 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1263 cc_proc_scheme_desc(req, desc, seq_size);
1264 /* decrypt after.. */
1265 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1266 /* read the digest result with setting the completion bit
1267 * must be after the cipher operation
1269 cc_proc_digest_desc(req, desc, seq_size);
/*
 * Build the descriptor sequence for XCBC-MAC-based authenc. Mirrors
 * cc_hmac_authenc() but with the XCBC state setup and no separate
 * "scheme" finalization stage: single-pass interleaves cipher and MAC;
 * the two-pass fallback MACs after encrypt / before decrypt.
 */
1274 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1275 unsigned int *seq_size)
1277 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1278 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1279 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1280 int direct = req_ctx->gen_ctx.op_type;
1281 unsigned int data_flow_mode =
1282 cc_get_data_flow(direct, ctx->flow_mode,
1283 req_ctx->is_single_pass);
1285 if (req_ctx->is_single_pass) {
/* Single pass: both engines configured, one walk over header + data */
1289 cc_set_xcbc_desc(req, desc, seq_size);
1290 cc_set_cipher_desc(req, desc, seq_size);
1291 cc_proc_header_desc(req, desc, seq_size);
1292 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1293 cc_proc_digest_desc(req, desc, seq_size);
1299 * Fallback for unsupported single-pass modes,
1300 * i.e. using assoc. data of non-word-multiple
1302 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1303 /* encrypt first.. */
1304 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1305 /* authenc after.. */
1306 cc_set_xcbc_desc(req, desc, seq_size);
1307 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1308 cc_proc_digest_desc(req, desc, seq_size);
1309 } else { /*DECRYPT*/
1310 /* authenc first.. */
1311 cc_set_xcbc_desc(req, desc, seq_size);
1312 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1313 /* decrypt after..*/
1314 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1315 /* read the digest result with setting the completion bit
1316 * must be after the cipher operation
1318 cc_proc_digest_desc(req, desc, seq_size);
/*
 * Check the request's cryptlen/assoclen against what the configured
 * cipher/flow mode supports, and decide whether the fast single-pass
 * flow can be used. Defaults to single-pass, then downgrades to the
 * two-pass fallback for non-word-aligned assoc data (and, for CTR,
 * non-word-aligned payload) or DES-block-unaligned assoc data.
 * CBC requires a block-aligned ciphertext. Rejects unsupported sizes
 * (error/return paths not fully visible in this extract).
 */
1322 static int validate_data_size(struct cc_aead_ctx *ctx,
1323 enum drv_crypto_direction direct,
1324 struct aead_request *req)
1326 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1327 struct device *dev = drvdata_to_dev(ctx->drvdata);
1328 unsigned int assoclen = areq_ctx->assoclen;
/* On decrypt the trailing ICV is not part of the ciphertext length */
1329 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1330 (req->cryptlen - ctx->authsize) : req->cryptlen;
/* Decrypt input must at least contain the ICV */
1332 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1333 req->cryptlen < ctx->authsize)
1336 areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1338 switch (ctx->flow_mode) {
1340 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1341 !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1343 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1345 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
/* rfc4543 (auth-only GCM) cannot use the single-pass flow */
1346 if (areq_ctx->plaintext_authenticate_only)
1347 areq_ctx->is_single_pass = false;
1351 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1352 areq_ctx->is_single_pass = false;
1354 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1355 !IS_ALIGNED(cipherlen, sizeof(u32)))
1356 areq_ctx->is_single_pass = false;
1360 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1362 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1363 areq_ctx->is_single_pass = false;
1366 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
/*
 * Encode the CCM associated-data length header (the a(0) field of
 * RFC 3610 / NIST SP 800-38C) into pa0_buff and return the encoded
 * header length: 0 when there is no assoc data, the 2-byte form for
 * lengths below 0xFF00, otherwise the long form carrying a 32-bit
 * length (presumably after the 0xFF,0xFE marker bytes — the else-branch
 * prefix bytes are not visible in this extract).
 */
1376 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1378 unsigned int len = 0;
1380 if (header_size == 0)
1383 if (header_size < ((1UL << 16) - (1UL << 8))) {
/* Short form: 16-bit big-endian length */
1386 pa0_buff[0] = (header_size >> 8) & 0xFF;
1387 pa0_buff[1] = header_size & 0xFF;
/* Long form: 32-bit big-endian length in bytes 2..5 */
1393 pa0_buff[2] = (header_size >> 24) & 0xFF;
1394 pa0_buff[3] = (header_size >> 16) & 0xFF;
1395 pa0_buff[4] = (header_size >> 8) & 0xFF;
1396 pa0_buff[5] = header_size & 0xFF;
/*
 * Write the CCM message-length field (taken from crypto/ccm.c): zero
 * the csize-byte field, reject a msglen that does not fit in csize
 * bytes, then store the length big-endian into the field's
 * least-significant bytes (the memcpy takes the low csize bytes of the
 * 32-bit BE value; `block` is presumably advanced past the field before
 * this copy — that line is not visible in this extract).
 */
1402 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1406 memset(block, 0, csize);
1411 else if (msglen > (1 << (8 * csize)))
1414 data = cpu_to_be32(msglen);
1415 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
/*
 * Build the complete AES-CCM descriptor sequence: load the CTR key and
 * IV for the payload, load the CBC-MAC key and running MAC state, hash
 * the pre-formatted A0 + associated data, process the payload (cipher
 * and MAC combined per cipher_flow_mode), read back the temporary
 * CBC-MAC, then encrypt it with the counter-0 block (ccm_iv0) to form
 * the final tag. On decrypt the tag lands in mac_buf (for comparison
 * later); on encrypt it is written directly to the user's ICV location.
 */
1420 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1421 unsigned int *seq_size)
1423 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1424 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1425 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1426 unsigned int idx = *seq_size;
1427 unsigned int cipher_flow_mode;
1428 dma_addr_t mac_result;
/* Decrypt: decipher then hash the plaintext; tag kept for comparison */
1430 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1431 cipher_flow_mode = AES_to_HASH_and_DOUT;
1432 mac_result = req_ctx->mac_buf_dma_addr;
1433 } else { /* Encrypt */
1434 cipher_flow_mode = AES_and_HASH;
1435 mac_result = req_ctx->icv_dma_addr;
/* Load CTR key (a 192-bit key is padded to the HW max key size) */
1439 hw_desc_init(&desc[idx]);
1440 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1441 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1442 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1443 ctx->enc_keylen), NS_BIT);
1444 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1445 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1446 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1447 set_flow_mode(&desc[idx], S_DIN_to_AES);
1450 /* load ctr state */
1451 hw_desc_init(&desc[idx]);
1452 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1453 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1454 set_din_type(&desc[idx], DMA_DLLI,
1455 req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1456 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1457 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1458 set_flow_mode(&desc[idx], S_DIN_to_AES);
/* Load the same key into the hash path, running CBC-MAC on the AES engine */
1462 hw_desc_init(&desc[idx]);
1463 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1464 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1465 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1466 ctx->enc_keylen), NS_BIT);
1467 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1468 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1469 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1470 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1471 set_aes_not_hash_mode(&desc[idx]);
1474 /* load MAC state */
1475 hw_desc_init(&desc[idx]);
1476 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1477 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1478 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1479 AES_BLOCK_SIZE, NS_BIT);
1480 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1481 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1482 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1483 set_aes_not_hash_mode(&desc[idx]);
1486 /* process assoc data */
1487 if (req_ctx->assoclen > 0) {
1488 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* No assoc data: feed only the pre-formatted B0/A0 header block */
1490 hw_desc_init(&desc[idx]);
1491 set_din_type(&desc[idx], DMA_DLLI,
1492 sg_dma_address(&req_ctx->ccm_adata_sg),
1493 AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1494 set_flow_mode(&desc[idx], DIN_HASH);
1498 /* process the cipher */
1499 if (req_ctx->cryptlen)
1500 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1502 /* Read temporal MAC */
1503 hw_desc_init(&desc[idx]);
1504 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1505 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1507 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1508 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1509 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1510 set_aes_not_hash_mode(&desc[idx]);
1513 /* load AES-CTR state (for last MAC calculation)*/
1514 hw_desc_init(&desc[idx]);
1515 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1516 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1517 set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1518 AES_BLOCK_SIZE, NS_BIT);
1519 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1520 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1521 set_flow_mode(&desc[idx], S_DIN_to_AES);
/* Dummy no-DMA descriptor acting as a memory barrier before the tag op */
1524 hw_desc_init(&desc[idx]);
1525 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1526 set_dout_no_dma(&desc[idx], 0, 0, 1);
1529 /* encrypt the "T" value and store MAC in mac_state */
1530 hw_desc_init(&desc[idx]);
1531 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1532 ctx->authsize, NS_BIT);
1533 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1534 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1535 set_flow_mode(&desc[idx], DIN_AES_DOUT);
/*
 * Pre-format the CCM control blocks in req_ctx->ccm_config: B0 (flags,
 * nonce and message length per RFC 3610), A0 (the associated-data
 * length header) and the counter-0 block later used to encrypt the MAC.
 * Validates L (derived from iv[0], must give 2..8) and rejects a
 * message length that does not fit in L bytes.
 */
1542 static int config_ccm_adata(struct aead_request *req)
1544 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1545 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1546 struct device *dev = drvdata_to_dev(ctx->drvdata);
1547 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1548 //unsigned int size_of_a = 0, rem_a_size = 0;
1549 unsigned int lp = req->iv[0];
1550 /* Note: The code assume that req->iv[0] already contains the value
1553 unsigned int l = lp + 1; /* This is L' of RFC 3610. */
1554 unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
1555 u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1556 u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1557 u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
/* On decrypt the trailing ICV is excluded from the message length */
1558 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1559 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1561 (req->cryptlen - ctx->authsize);
1564 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1565 memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1567 /* taken from crypto/ccm.c */
1568 /* 2 <= L <= 8, so 1 <= L' <= 7. */
1569 if (l < 2 || l > 8) {
1570 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1573 memcpy(b0, req->iv, AES_BLOCK_SIZE);
1575 /* format control info per RFC 3610 and
1576 * NIST Special Publication 800-38C
1578 *b0 |= (8 * ((m - 2) / 2));
1579 if (req_ctx->assoclen > 0)
1580 *b0 |= 64; /* Enable bit 6 if Adata exists. */
1582 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
1584 dev_err(dev, "message len overflow detected");
1587 /* END of "taken from crypto/ccm.c" */
1589 /* l(a) - size of associated data. */
1590 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
/* Zero the counter portion of the IV to form the A0 counter block */
1592 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1595 memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1596 ctr_count_0[15] = 0;
/*
 * Convert an RFC 4309 (CCM for IPsec) request to the generic CCM form:
 * build the full counter IV from the keyed 3-byte nonce plus the 8-byte
 * per-request IV, fix the length-field size to 4 bytes (iv[0] = 3, i.e.
 * L' for a 4-byte length), swap req->iv to the constructed block, and
 * drop the IV bytes from assoclen since they travel inside the IV here.
 */
1601 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1603 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1604 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1605 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1608 memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1609 /* For RFC 4309, always use 4 bytes for message length
1610 * (at most 2^32-1 bytes).
1612 areq_ctx->ctr_iv[0] = 3;
1614 /* In RFC 4309 there is an 11-bytes nonce+IV part,
1615 * that we build here.
1617 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1618 CCM_BLOCK_NONCE_SIZE);
1619 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1621 req->iv = areq_ctx->ctr_iv;
1622 areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
/*
 * Set up the GHASH side of GCM: load the AES key in ECB mode, encrypt a
 * zero block to derive the hash subkey H into hkey, load H as the GHASH
 * key, issue the HW-required "select GHASH" configuration descriptor,
 * and initialize the GHASH state to zero.
 */
1625 static void cc_set_ghash_desc(struct aead_request *req,
1626 struct cc_hw_desc desc[], unsigned int *seq_size)
1628 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1629 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1630 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1631 unsigned int idx = *seq_size;
1633 /* load key to AES*/
1634 hw_desc_init(&desc[idx]);
1635 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1636 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1637 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1638 ctx->enc_keylen, NS_BIT);
1639 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1640 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1641 set_flow_mode(&desc[idx], S_DIN_to_AES);
1644 /* process one zero block to generate hkey */
/* H = AES_K(0^128), the GHASH subkey */
1645 hw_desc_init(&desc[idx]);
1646 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1647 set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1649 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1652 /* Memory Barrier */
1653 hw_desc_init(&desc[idx]);
1654 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1655 set_dout_no_dma(&desc[idx], 0, 0, 1);
1658 /* Load GHASH subkey */
1659 hw_desc_init(&desc[idx]);
1660 set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1661 AES_BLOCK_SIZE, NS_BIT);
1662 set_dout_no_dma(&desc[idx], 0, 0, 1);
1663 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1664 set_aes_not_hash_mode(&desc[idx]);
1665 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1666 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1667 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1670 /* Configure Hash Engine to work with GHASH.
1671 * Since it was not possible to extend HASH submodes to add GHASH,
1672 * The following command is necessary in order to
1673 * select GHASH (according to HW designers)
1675 hw_desc_init(&desc[idx]);
1676 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1677 set_dout_no_dma(&desc[idx], 0, 0, 1);
1678 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1679 set_aes_not_hash_mode(&desc[idx]);
1680 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1681 set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1682 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1683 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1684 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1687 /* Load GHASH initial STATE (which is 0). (for any hash there is an
1690 hw_desc_init(&desc[idx]);
1691 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1692 set_dout_no_dma(&desc[idx], 0, 0, 1);
1693 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1694 set_aes_not_hash_mode(&desc[idx]);
1695 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1696 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1697 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
/*
 * Set up the GCTR (counter-mode) side of GCM: load the AES key, and —
 * when there is payload to encrypt (i.e. not the rfc4543 auth-only
 * case) — load the counter block pre-incremented by 2 (gcm_iv_inc2),
 * the value used for the first payload block per the GCM spec
 * (counter 1 is reserved for the tag).
 */
1703 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1704 unsigned int *seq_size)
1706 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1707 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1708 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1709 unsigned int idx = *seq_size;
1711 /* load key to AES*/
1712 hw_desc_init(&desc[idx]);
1713 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1714 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1715 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1716 ctx->enc_keylen, NS_BIT);
1717 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1718 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1719 set_flow_mode(&desc[idx], S_DIN_to_AES);
1722 if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1723 /* load AES/CTR initial CTR value inc by 2*/
1724 hw_desc_init(&desc[idx]);
1725 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1726 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1727 set_din_type(&desc[idx], DMA_DLLI,
1728 req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1730 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1731 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1732 set_flow_mode(&desc[idx], S_DIN_to_AES);
/*
 * Finalize GCM: GHASH the len(A)||len(C) block, store the GHASH state
 * to mac_buf, load the counter block incremented by 1 (gcm_iv_inc1 —
 * the tag counter per the GCM spec), then GCTR-encrypt the stored GHASH
 * to produce the authentication tag. On decrypt the tag goes to mac_buf
 * for later comparison; on encrypt it is written to the user's ICV
 * location. The last descriptor sets the queue-completion bit.
 */
1739 static void cc_proc_gcm_result(struct aead_request *req,
1740 struct cc_hw_desc desc[],
1741 unsigned int *seq_size)
1743 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1744 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1745 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1746 dma_addr_t mac_result;
1747 unsigned int idx = *seq_size;
1749 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1750 mac_result = req_ctx->mac_buf_dma_addr;
1751 } else { /* Encrypt */
1752 mac_result = req_ctx->icv_dma_addr;
1755 /* process(ghash) gcm_block_len */
1756 hw_desc_init(&desc[idx]);
1757 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1758 AES_BLOCK_SIZE, NS_BIT);
1759 set_flow_mode(&desc[idx], DIN_HASH);
1762 /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1763 hw_desc_init(&desc[idx]);
1764 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1765 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1766 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1768 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1769 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1770 set_aes_not_hash_mode(&desc[idx]);
1774 /* load AES/CTR initial CTR value inc by 1*/
1775 hw_desc_init(&desc[idx]);
1776 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1777 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1778 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1779 AES_BLOCK_SIZE, NS_BIT);
1780 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1781 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1782 set_flow_mode(&desc[idx], S_DIN_to_AES);
1785 /* Memory Barrier */
1786 hw_desc_init(&desc[idx]);
1787 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1788 set_dout_no_dma(&desc[idx], 0, 0, 1);
1791 /* process GCTR on stored GHASH and store MAC in mac_state*/
1792 hw_desc_init(&desc[idx]);
1793 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1794 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1795 AES_BLOCK_SIZE, NS_BIT);
1796 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1797 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1798 set_flow_mode(&desc[idx], DIN_AES_DOUT);
/*
 * Build the GCM descriptor sequence. For rfc4543 (GMAC / auth-only)
 * nothing is encrypted: payload is BYPASS-copied src->dst and GHASHed
 * together with the assoc data. For plain gcm and rfc4106: GHASH the
 * assoc data, then run GCTR+GHASH over the payload (hash ciphertext on
 * encrypt, plaintext on decrypt, via cipher_flow_mode), and finally
 * compute the tag in cc_proc_gcm_result().
 */
1804 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1805 unsigned int *seq_size)
1807 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1808 unsigned int cipher_flow_mode;
1810 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1811 cipher_flow_mode = AES_and_HASH;
1812 } else { /* Encrypt */
1813 cipher_flow_mode = AES_to_HASH_and_DOUT;
1816 //in RFC4543 no data to encrypt. just copy data from src to dest.
1817 if (req_ctx->plaintext_authenticate_only) {
1818 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1819 cc_set_ghash_desc(req, desc, seq_size);
1820 /* process(ghash) assoc data */
1821 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1822 cc_set_gctr_desc(req, desc, seq_size);
1823 cc_proc_gcm_result(req, desc, seq_size);
1827 // for gcm and rfc4106.
1828 cc_set_ghash_desc(req, desc, seq_size);
1829 /* process(ghash) assoc data */
1830 if (req_ctx->assoclen > 0)
1831 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1832 cc_set_gctr_desc(req, desc, seq_size);
1833 /* process(gctr+ghash) */
1834 if (req_ctx->cryptlen)
1835 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1836 cc_proc_gcm_result(req, desc, seq_size);
/*
 * Prepare the per-request GCM context buffers: zero hkey and mac_buf,
 * derive the two counter blocks from the IV (gcm_iv_inc2 with counter 2
 * for the payload, gcm_iv_inc1 with counter 1 for the tag), and fill
 * the len(A)||len(C) block with the bit-lengths GHASH needs. For
 * rfc4543 everything (AAD + IV + plaintext) counts as authenticated
 * data and nothing is encrypted.
 */
1841 static int config_gcm_context(struct aead_request *req)
1843 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1844 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1845 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1846 struct device *dev = drvdata_to_dev(ctx->drvdata);
/* On decrypt the trailing ICV is excluded from the ciphertext length */
1848 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1849 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1851 (req->cryptlen - ctx->authsize);
1852 __be32 counter = cpu_to_be32(2);
1854 dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1855 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1857 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1859 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
/* J0 with counter = 2: first payload counter block */
1861 memcpy(req->iv + 12, &counter, 4);
1862 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
/* J0 with counter = 1: used to encrypt the tag */
1864 counter = cpu_to_be32(1);
1865 memcpy(req->iv + 12, &counter, 4);
1866 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1868 if (!req_ctx->plaintext_authenticate_only) {
/* Bit-lengths of AAD and ciphertext for the final GHASH block */
1871 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1872 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1873 temp64 = cpu_to_be64(cryptlen * 8);
1874 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1876 /* rfc4543=> all data(AAD,IV,Plain) are considered additional
1877 * data that is nothing is encrypted.
1881 temp64 = cpu_to_be64((req_ctx->assoclen +
1882 GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1883 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1885 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
/*
 * Convert an RFC 4106/4543 request to the generic GCM form: assemble
 * the counter IV from the keyed nonce plus the 8-byte per-request IV,
 * point req->iv at it, and drop the IV bytes from assoclen since they
 * are carried in the IV here rather than as assoc data.
 */
1891 static void cc_proc_rfc4_gcm(struct aead_request *req)
1893 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1894 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1895 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1897 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1898 ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1899 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1900 GCM_BLOCK_RFC4_IV_SIZE);
1901 req->iv = areq_ctx->ctr_iv;
1902 areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
/*
 * Main AEAD request entry: validate lengths, build the HW IV for the
 * configured cipher mode (rfc3686-style CTR block, CCM/GCM contexts),
 * DMA-map the request, build the descriptor sequence for the configured
 * auth mode (HMAC, XCBC, or NULL auth with CCM/GCM), and push it to the
 * HW queue. Unmaps on submit failure; -EINPROGRESS/-EBUSY are the
 * normal async outcomes of cc_send_request().
 */
1905 static int cc_proc_aead(struct aead_request *req,
1906 enum drv_crypto_direction direct)
1910 struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1911 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1912 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1913 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1914 struct device *dev = drvdata_to_dev(ctx->drvdata);
1915 struct cc_crypto_req cc_req = {};
1917 dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
1918 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1919 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1920 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1922 /* STAT_PHASE_0: Init and sanity checks */
1924 /* Check data length according to mode */
1925 if (validate_data_size(ctx, direct, req)) {
1926 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1927 req->cryptlen, areq_ctx->assoclen);
1928 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1932 /* Setup request structure */
/* Completion callback invoked from the request manager's ISR path */
1933 cc_req.user_cb = (void *)cc_aead_complete;
1934 cc_req.user_arg = (void *)req;
1936 /* Setup request context */
1937 areq_ctx->gen_ctx.op_type = direct;
1938 areq_ctx->req_authsize = ctx->authsize;
1939 areq_ctx->cipher_mode = ctx->cipher_mode;
1941 /* STAT_PHASE_1: Map buffers */
1943 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1944 /* Build CTR IV - Copy nonce from last 4 bytes in
1945 * CTR key to first 4 bytes in CTR IV
1947 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1948 CTR_RFC3686_NONCE_SIZE);
1949 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1950 CTR_RFC3686_IV_SIZE);
1951 /* Initialize counter portion of counter block */
1952 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1953 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1955 /* Replace with counter iv */
1956 req->iv = areq_ctx->ctr_iv;
1957 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1958 } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1959 (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1960 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
/* rfc4309/rfc4 wrappers may already have pointed req->iv at ctr_iv */
1961 if (areq_ctx->ctr_iv != req->iv) {
1962 memcpy(areq_ctx->ctr_iv, req->iv,
1963 crypto_aead_ivsize(tfm));
1964 req->iv = areq_ctx->ctr_iv;
1967 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1970 if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1971 rc = config_ccm_adata(req);
1973 dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
1978 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1981 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1982 rc = config_gcm_context(req);
1984 dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
1990 rc = cc_map_aead_request(ctx->drvdata, req);
1992 dev_err(dev, "map_request() failed\n");
1996 /* STAT_PHASE_2: Create sequence */
1998 /* Load MLLI tables to SRAM if necessary */
1999 cc_mlli_to_sram(req, desc, &seq_len);
2001 /*TODO: move seq len by reference */
2002 switch (ctx->auth_mode) {
2004 case DRV_HASH_SHA256:
2005 cc_hmac_authenc(req, desc, &seq_len);
2007 case DRV_HASH_XCBC_MAC:
2008 cc_xcbc_authenc(req, desc, &seq_len);
/* NULL auth mode: the AEAD mode itself (CCM/GCM) provides the MAC */
2011 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2012 cc_ccm(req, desc, &seq_len);
2013 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2014 cc_gcm(req, desc, &seq_len);
2017 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2018 cc_unmap_aead_request(dev, req);
2023 /* STAT_PHASE_3: Lock HW and push sequence */
2025 rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2027 if (rc != -EINPROGRESS && rc != -EBUSY) {
2028 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2029 cc_unmap_aead_request(dev, req);
/*
 * Generic AEAD encrypt entry point: reset the request context, save the
 * caller's IV pointer (cc_proc_aead may redirect req->iv to an internal
 * buffer) and run the common path. The IV pointer is restored on any
 * synchronous completion (i.e. unless the request stays in flight).
 */
2036 static int cc_aead_encrypt(struct aead_request *req)
2038 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2041 memset(areq_ctx, 0, sizeof(*areq_ctx));
2043 /* No generated IV required */
2044 areq_ctx->backup_iv = req->iv;
2045 areq_ctx->assoclen = req->assoclen;
2046 areq_ctx->is_gcm4543 = false;
2048 areq_ctx->plaintext_authenticate_only = false;
2050 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2051 if (rc != -EINPROGRESS && rc != -EBUSY)
2052 req->iv = areq_ctx->backup_iv;
/*
 * RFC 4309 (IPsec CCM) encrypt: validates the assoclen values rfc4309
 * allows, rewrites the request into generic-CCM form via
 * cc_proc_rfc4309_ccm(), then runs the common encrypt path. Backs up
 * and restores req->iv as in cc_aead_encrypt().
 */
2057 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2059 /* Very similar to cc_aead_encrypt() above. */
2061 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2062 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2063 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2064 struct device *dev = drvdata_to_dev(ctx->drvdata);
2067 if (!valid_assoclen(req)) {
2068 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2072 memset(areq_ctx, 0, sizeof(*areq_ctx));
2074 /* No generated IV required */
2075 areq_ctx->backup_iv = req->iv;
2076 areq_ctx->assoclen = req->assoclen;
2077 areq_ctx->is_gcm4543 = true;
2079 cc_proc_rfc4309_ccm(req);
2081 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2082 if (rc != -EINPROGRESS && rc != -EBUSY)
2083 req->iv = areq_ctx->backup_iv;
/*
 * Generic AEAD decrypt entry point — mirror of cc_aead_encrypt():
 * reset the request context, back up req->iv, run the common path and
 * restore the IV pointer on synchronous completion.
 */
2088 static int cc_aead_decrypt(struct aead_request *req)
2090 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2093 memset(areq_ctx, 0, sizeof(*areq_ctx));
2095 /* No generated IV required */
2096 areq_ctx->backup_iv = req->iv;
2097 areq_ctx->assoclen = req->assoclen;
2098 areq_ctx->is_gcm4543 = false;
2100 areq_ctx->plaintext_authenticate_only = false;
2102 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2103 if (rc != -EINPROGRESS && rc != -EBUSY)
2104 req->iv = areq_ctx->backup_iv;
/*
 * RFC 4309 (IPsec CCM) decrypt: validate assoclen, rewrite into
 * generic-CCM form via cc_proc_rfc4309_ccm(), then run the common
 * decrypt path with the usual IV backup/restore.
 */
2109 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2111 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2112 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2113 struct device *dev = drvdata_to_dev(ctx->drvdata);
2114 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2117 if (!valid_assoclen(req)) {
2118 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2122 memset(areq_ctx, 0, sizeof(*areq_ctx));
2124 /* No generated IV required */
2125 areq_ctx->backup_iv = req->iv;
2126 areq_ctx->assoclen = req->assoclen;
2128 areq_ctx->is_gcm4543 = true;
2129 cc_proc_rfc4309_ccm(req);
2131 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2132 if (rc != -EINPROGRESS && rc != -EBUSY)
2133 req->iv = areq_ctx->backup_iv;
/*
 * RFC 4106 GCM setkey: the last 4 key bytes are the salt/nonce; stash
 * them in ctx->ctr_nonce and pass the remaining key material to the
 * common setkey (keylen is presumably reduced by 4 before this point —
 * the adjustment line is not visible in this extract).
 */
2139 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2140 unsigned int keylen)
2142 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2143 struct device *dev = drvdata_to_dev(ctx->drvdata);
2145 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2151 memcpy(ctx->ctr_nonce, key + keylen, 4);
2153 return cc_aead_setkey(tfm, key, keylen);
/*
 * RFC 4543 (GMAC) setkey: identical handling to the rfc4106 variant —
 * the trailing 4 key bytes become the nonce, the rest goes to the
 * common setkey.
 */
2156 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2157 unsigned int keylen)
2159 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2160 struct device *dev = drvdata_to_dev(ctx->drvdata);
2162 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2168 memcpy(ctx->ctr_nonce, key + keylen, 4);
2170 return cc_aead_setkey(tfm, key, keylen);
/*
 * GCM setauthsize: delegates to the common cc_aead_setauthsize()
 * (per-mode validation of the allowed tag sizes happens in the lines
 * not visible in this extract).
 */
2173 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2174 unsigned int authsize)
2189 return cc_aead_setauthsize(authenc, authsize);
/*
 * RFC 4106 setauthsize: logs the requested tag size and delegates to
 * the common cc_aead_setauthsize() (rfc4106-specific validation happens
 * in the lines not visible in this extract).
 */
2192 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2193 unsigned int authsize)
2195 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2196 struct device *dev = drvdata_to_dev(ctx->drvdata);
2198 dev_dbg(dev, "authsize %d\n", authsize);
2209 return cc_aead_setauthsize(authenc, authsize);
/*
 * RFC 4543 setauthsize: logs the requested tag size and delegates to
 * the common cc_aead_setauthsize() (rfc4543-specific validation happens
 * in the lines not visible in this extract).
 */
2212 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2213 unsigned int authsize)
2215 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2216 struct device *dev = drvdata_to_dev(ctx->drvdata);
2218 dev_dbg(dev, "authsize %d\n", authsize);
2223 return cc_aead_setauthsize(authenc, authsize);
/*
 * RFC 4106 GCM encrypt: validate assoclen, rewrite into generic-GCM
 * form via cc_proc_rfc4_gcm() (nonce||IV counter block), then run the
 * common encrypt path with the usual IV backup/restore.
 */
2226 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2228 /* Very similar to cc_aead_encrypt() above. */
2230 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2231 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2232 struct device *dev = drvdata_to_dev(ctx->drvdata);
2233 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2236 if (!valid_assoclen(req)) {
2237 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2241 memset(areq_ctx, 0, sizeof(*areq_ctx));
2243 /* No generated IV required */
2244 areq_ctx->backup_iv = req->iv;
2245 areq_ctx->assoclen = req->assoclen;
2246 areq_ctx->plaintext_authenticate_only = false;
2248 cc_proc_rfc4_gcm(req);
2249 areq_ctx->is_gcm4543 = true;
2251 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2252 if (rc != -EINPROGRESS && rc != -EBUSY)
2253 req->iv = areq_ctx->backup_iv;
/*
 * RFC 4543 (GMAC) encrypt: like the rfc4106 variant but flags the
 * request as authenticate-only — the payload is passed through
 * unencrypted and everything is GHASHed.
 */
2258 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2260 /* Very similar to cc_aead_encrypt() above. */
2261 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2262 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2263 struct device *dev = drvdata_to_dev(ctx->drvdata);
2264 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2267 if (!valid_assoclen(req)) {
2268 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2272 memset(areq_ctx, 0, sizeof(*areq_ctx));
2274 //plaintext is not encryped with rfc4543
2275 areq_ctx->plaintext_authenticate_only = true;
2277 /* No generated IV required */
2278 areq_ctx->backup_iv = req->iv;
2279 areq_ctx->assoclen = req->assoclen;
2281 cc_proc_rfc4_gcm(req);
2282 areq_ctx->is_gcm4543 = true;
2284 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2285 if (rc != -EINPROGRESS && rc != -EBUSY)
2286 req->iv = areq_ctx->backup_iv;
/*
 * rfc4106(gcm(aes)) decrypt entry point.
 * Mirror image of cc_rfc4106_gcm_encrypt(): validates assoclen
 * (16 or 20 bytes), zeroes the request context, backs up the IV
 * pointer, builds the RFC4-style GCM IV and dispatches with
 * DRV_CRYPTO_DIRECTION_DECRYPT. Restores the IV pointer on
 * synchronous completion.
 */
2291 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2293 /* Very similar to cc_aead_decrypt() above. */
2295 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2296 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2297 struct device *dev = drvdata_to_dev(ctx->drvdata);
2298 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2301 if (!valid_assoclen(req)) {
2302 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2306 memset(areq_ctx, 0, sizeof(*areq_ctx));
2308 /* No generated IV required */
2309 areq_ctx->backup_iv = req->iv;
2310 areq_ctx->assoclen = req->assoclen;
2311 areq_ctx->plaintext_authenticate_only = false;
2313 cc_proc_rfc4_gcm(req);
2314 areq_ctx->is_gcm4543 = true;
2316 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2317 if (rc != -EINPROGRESS && rc != -EBUSY)
2318 req->iv = areq_ctx->backup_iv;
/*
 * rfc4543(gcm(aes)) (GMAC) decrypt entry point.
 * Same flow as cc_rfc4543_gcm_encrypt() but dispatched with
 * DRV_CRYPTO_DIRECTION_DECRYPT: the payload is only authenticated,
 * never transformed. Validates assoclen (16 or 20 bytes), backs up
 * the caller's IV pointer and restores it on synchronous completion.
 */
2323 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2325 /* Very similar to cc_aead_decrypt() above. */
2326 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2327 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2328 struct device *dev = drvdata_to_dev(ctx->drvdata);
2329 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2332 if (!valid_assoclen(req)) {
2333 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2337 memset(areq_ctx, 0, sizeof(*areq_ctx));
2339 /* plaintext is not decrypted with rfc4543 */
2340 areq_ctx->plaintext_authenticate_only = true;
2342 /* No generated IV required */
2343 areq_ctx->backup_iv = req->iv;
2344 areq_ctx->assoclen = req->assoclen;
2346 cc_proc_rfc4_gcm(req);
2347 areq_ctx->is_gcm4543 = true;
2349 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2350 if (rc != -EINPROGRESS && rc != -EBUSY)
2351 req->iv = areq_ctx->backup_iv;
/*
 * Template descriptors for every AEAD algorithm this driver can expose.
 * cc_aead_alloc() walks this table, skips entries whose min_hw_rev is
 * newer than the probed hardware or whose std_body is not enabled, and
 * registers the rest with the crypto API via cc_create_aead_alg().
 */
2357 static struct cc_alg_template aead_algs[] = {
/* authenc: HMAC-SHA1 over AES-CBC */
2359 .name = "authenc(hmac(sha1),cbc(aes))",
2360 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2361 .blocksize = AES_BLOCK_SIZE,
2363 .setkey = cc_aead_setkey,
2364 .setauthsize = cc_aead_setauthsize,
2365 .encrypt = cc_aead_encrypt,
2366 .decrypt = cc_aead_decrypt,
2367 .init = cc_aead_init,
2368 .exit = cc_aead_exit,
2369 .ivsize = AES_BLOCK_SIZE,
2370 .maxauthsize = SHA1_DIGEST_SIZE,
2372 .cipher_mode = DRV_CIPHER_CBC,
2373 .flow_mode = S_DIN_to_AES,
2374 .auth_mode = DRV_HASH_SHA1,
2375 .min_hw_rev = CC_HW_REV_630,
2376 .std_body = CC_STD_NIST,
/* authenc: HMAC-SHA1 over 3DES-CBC (note the DES3-specific setkey) */
2379 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2380 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2381 .blocksize = DES3_EDE_BLOCK_SIZE,
2383 .setkey = cc_des3_aead_setkey,
2384 .setauthsize = cc_aead_setauthsize,
2385 .encrypt = cc_aead_encrypt,
2386 .decrypt = cc_aead_decrypt,
2387 .init = cc_aead_init,
2388 .exit = cc_aead_exit,
2389 .ivsize = DES3_EDE_BLOCK_SIZE,
2390 .maxauthsize = SHA1_DIGEST_SIZE,
2392 .cipher_mode = DRV_CIPHER_CBC,
2393 .flow_mode = S_DIN_to_DES,
2394 .auth_mode = DRV_HASH_SHA1,
2395 .min_hw_rev = CC_HW_REV_630,
2396 .std_body = CC_STD_NIST,
/* authenc: HMAC-SHA256 over AES-CBC */
2399 .name = "authenc(hmac(sha256),cbc(aes))",
2400 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2401 .blocksize = AES_BLOCK_SIZE,
2403 .setkey = cc_aead_setkey,
2404 .setauthsize = cc_aead_setauthsize,
2405 .encrypt = cc_aead_encrypt,
2406 .decrypt = cc_aead_decrypt,
2407 .init = cc_aead_init,
2408 .exit = cc_aead_exit,
2409 .ivsize = AES_BLOCK_SIZE,
2410 .maxauthsize = SHA256_DIGEST_SIZE,
2412 .cipher_mode = DRV_CIPHER_CBC,
2413 .flow_mode = S_DIN_to_AES,
2414 .auth_mode = DRV_HASH_SHA256,
2415 .min_hw_rev = CC_HW_REV_630,
2416 .std_body = CC_STD_NIST,
/* authenc: HMAC-SHA256 over 3DES-CBC */
2419 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2420 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2421 .blocksize = DES3_EDE_BLOCK_SIZE,
2423 .setkey = cc_des3_aead_setkey,
2424 .setauthsize = cc_aead_setauthsize,
2425 .encrypt = cc_aead_encrypt,
2426 .decrypt = cc_aead_decrypt,
2427 .init = cc_aead_init,
2428 .exit = cc_aead_exit,
2429 .ivsize = DES3_EDE_BLOCK_SIZE,
2430 .maxauthsize = SHA256_DIGEST_SIZE,
2432 .cipher_mode = DRV_CIPHER_CBC,
2433 .flow_mode = S_DIN_to_DES,
2434 .auth_mode = DRV_HASH_SHA256,
2435 .min_hw_rev = CC_HW_REV_630,
2436 .std_body = CC_STD_NIST,
/* authenc: AES-XCBC-MAC over AES-CBC */
2439 .name = "authenc(xcbc(aes),cbc(aes))",
2440 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2441 .blocksize = AES_BLOCK_SIZE,
2443 .setkey = cc_aead_setkey,
2444 .setauthsize = cc_aead_setauthsize,
2445 .encrypt = cc_aead_encrypt,
2446 .decrypt = cc_aead_decrypt,
2447 .init = cc_aead_init,
2448 .exit = cc_aead_exit,
2449 .ivsize = AES_BLOCK_SIZE,
2450 .maxauthsize = AES_BLOCK_SIZE,
2452 .cipher_mode = DRV_CIPHER_CBC,
2453 .flow_mode = S_DIN_to_AES,
2454 .auth_mode = DRV_HASH_XCBC_MAC,
2455 .min_hw_rev = CC_HW_REV_630,
2456 .std_body = CC_STD_NIST,
/* authenc: HMAC-SHA1 over RFC3686 AES-CTR */
2459 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2460 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2463 .setkey = cc_aead_setkey,
2464 .setauthsize = cc_aead_setauthsize,
2465 .encrypt = cc_aead_encrypt,
2466 .decrypt = cc_aead_decrypt,
2467 .init = cc_aead_init,
2468 .exit = cc_aead_exit,
2469 .ivsize = CTR_RFC3686_IV_SIZE,
2470 .maxauthsize = SHA1_DIGEST_SIZE,
2472 .cipher_mode = DRV_CIPHER_CTR,
2473 .flow_mode = S_DIN_to_AES,
2474 .auth_mode = DRV_HASH_SHA1,
2475 .min_hw_rev = CC_HW_REV_630,
2476 .std_body = CC_STD_NIST,
/* authenc: HMAC-SHA256 over RFC3686 AES-CTR */
2479 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2480 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2483 .setkey = cc_aead_setkey,
2484 .setauthsize = cc_aead_setauthsize,
2485 .encrypt = cc_aead_encrypt,
2486 .decrypt = cc_aead_decrypt,
2487 .init = cc_aead_init,
2488 .exit = cc_aead_exit,
2489 .ivsize = CTR_RFC3686_IV_SIZE,
2490 .maxauthsize = SHA256_DIGEST_SIZE,
2492 .cipher_mode = DRV_CIPHER_CTR,
2493 .flow_mode = S_DIN_to_AES,
2494 .auth_mode = DRV_HASH_SHA256,
2495 .min_hw_rev = CC_HW_REV_630,
2496 .std_body = CC_STD_NIST,
/* authenc: AES-XCBC-MAC over RFC3686 AES-CTR */
2499 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2500 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2503 .setkey = cc_aead_setkey,
2504 .setauthsize = cc_aead_setauthsize,
2505 .encrypt = cc_aead_encrypt,
2506 .decrypt = cc_aead_decrypt,
2507 .init = cc_aead_init,
2508 .exit = cc_aead_exit,
2509 .ivsize = CTR_RFC3686_IV_SIZE,
2510 .maxauthsize = AES_BLOCK_SIZE,
2512 .cipher_mode = DRV_CIPHER_CTR,
2513 .flow_mode = S_DIN_to_AES,
2514 .auth_mode = DRV_HASH_XCBC_MAC,
2515 .min_hw_rev = CC_HW_REV_630,
2516 .std_body = CC_STD_NIST,
/* AES-CCM — NOTE(review): the .name line is elided in this extract;
 * the driver_name suggests "ccm(aes)" — confirm against full source. */
2520 .driver_name = "ccm-aes-ccree",
2523 .setkey = cc_aead_setkey,
2524 .setauthsize = cc_ccm_setauthsize,
2525 .encrypt = cc_aead_encrypt,
2526 .decrypt = cc_aead_decrypt,
2527 .init = cc_aead_init,
2528 .exit = cc_aead_exit,
2529 .ivsize = AES_BLOCK_SIZE,
2530 .maxauthsize = AES_BLOCK_SIZE,
2532 .cipher_mode = DRV_CIPHER_CCM,
2533 .flow_mode = S_DIN_to_AES,
2534 .auth_mode = DRV_HASH_NULL,
2535 .min_hw_rev = CC_HW_REV_630,
2536 .std_body = CC_STD_NIST,
/* RFC4309 CCM (IPsec ESP) — uses the rfc4309-specific callbacks */
2539 .name = "rfc4309(ccm(aes))",
2540 .driver_name = "rfc4309-ccm-aes-ccree",
2543 .setkey = cc_rfc4309_ccm_setkey,
2544 .setauthsize = cc_rfc4309_ccm_setauthsize,
2545 .encrypt = cc_rfc4309_ccm_encrypt,
2546 .decrypt = cc_rfc4309_ccm_decrypt,
2547 .init = cc_aead_init,
2548 .exit = cc_aead_exit,
2549 .ivsize = CCM_BLOCK_IV_SIZE,
2550 .maxauthsize = AES_BLOCK_SIZE,
2552 .cipher_mode = DRV_CIPHER_CCM,
2553 .flow_mode = S_DIN_to_AES,
2554 .auth_mode = DRV_HASH_NULL,
2555 .min_hw_rev = CC_HW_REV_630,
2556 .std_body = CC_STD_NIST,
/* AES-GCM — NOTE(review): the .name and .ivsize lines are elided in
 * this extract; the driver_name suggests "gcm(aes)" — confirm. */
2560 .driver_name = "gcm-aes-ccree",
2563 .setkey = cc_aead_setkey,
2564 .setauthsize = cc_gcm_setauthsize,
2565 .encrypt = cc_aead_encrypt,
2566 .decrypt = cc_aead_decrypt,
2567 .init = cc_aead_init,
2568 .exit = cc_aead_exit,
2570 .maxauthsize = AES_BLOCK_SIZE,
2572 .cipher_mode = DRV_CIPHER_GCTR,
2573 .flow_mode = S_DIN_to_AES,
2574 .auth_mode = DRV_HASH_NULL,
2575 .min_hw_rev = CC_HW_REV_630,
2576 .std_body = CC_STD_NIST,
/* RFC4106 GCM (IPsec ESP) — uses the rfc4106-specific callbacks */
2579 .name = "rfc4106(gcm(aes))",
2580 .driver_name = "rfc4106-gcm-aes-ccree",
2583 .setkey = cc_rfc4106_gcm_setkey,
2584 .setauthsize = cc_rfc4106_gcm_setauthsize,
2585 .encrypt = cc_rfc4106_gcm_encrypt,
2586 .decrypt = cc_rfc4106_gcm_decrypt,
2587 .init = cc_aead_init,
2588 .exit = cc_aead_exit,
2589 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2590 .maxauthsize = AES_BLOCK_SIZE,
2592 .cipher_mode = DRV_CIPHER_GCTR,
2593 .flow_mode = S_DIN_to_AES,
2594 .auth_mode = DRV_HASH_NULL,
2595 .min_hw_rev = CC_HW_REV_630,
2596 .std_body = CC_STD_NIST,
/* RFC4543 GMAC (authentication-only GCM) */
2599 .name = "rfc4543(gcm(aes))",
2600 .driver_name = "rfc4543-gcm-aes-ccree",
2603 .setkey = cc_rfc4543_gcm_setkey,
2604 .setauthsize = cc_rfc4543_gcm_setauthsize,
2605 .encrypt = cc_rfc4543_gcm_encrypt,
2606 .decrypt = cc_rfc4543_gcm_decrypt,
2607 .init = cc_aead_init,
2608 .exit = cc_aead_exit,
2609 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2610 .maxauthsize = AES_BLOCK_SIZE,
2612 .cipher_mode = DRV_CIPHER_GCTR,
2613 .flow_mode = S_DIN_to_AES,
2614 .auth_mode = DRV_HASH_NULL,
2615 .min_hw_rev = CC_HW_REV_630,
2616 .std_body = CC_STD_NIST,
/*
 * Build a registerable cc_crypto_alg from one aead_algs[] template.
 * Allocates the wrapper, copies the template's aead_alg, fills in the
 * cra_* identity fields (name, driver name, module, priority, ctx
 * size, ASYNC|KERN_DRIVER_ONLY flags) and the common init/exit hooks,
 * then records the template's cipher/flow/auth modes on the wrapper.
 * Returns ERR_PTR(-ENOMEM) on allocation failure; on success returns
 * the new wrapper (caller owns it and frees via the aead_list).
 */
2620 static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2623 struct cc_crypto_alg *t_alg;
2624 struct aead_alg *alg;
2626 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2628 return ERR_PTR(-ENOMEM);
2630 alg = &tmpl->template_aead;
2632 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2633 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2635 alg->base.cra_module = THIS_MODULE;
2636 alg->base.cra_priority = CC_CRA_PRIO;
2638 alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2639 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2640 alg->init = cc_aead_init;
2641 alg->exit = cc_aead_exit;
2643 t_alg->aead_alg = *alg;
2645 t_alg->cipher_mode = tmpl->cipher_mode;
2646 t_alg->flow_mode = tmpl->flow_mode;
2647 t_alg->auth_mode = tmpl->auth_mode;
/*
 * Tear down the AEAD subsystem for this device instance.
 * Unregisters every algorithm still on the handle's aead_list,
 * unlinks and (in the elided lines) frees each wrapper, then clears
 * drvdata->aead_handle so a later free is a no-op.
 * Safe iteration via list_for_each_entry_safe() because entries are
 * deleted while walking.
 */
2652 int cc_aead_free(struct cc_drvdata *drvdata)
2654 struct cc_crypto_alg *t_alg, *n;
2655 struct cc_aead_handle *aead_handle =
2656 (struct cc_aead_handle *)drvdata->aead_handle;
2659 /* Remove registered algs */
2660 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2662 crypto_unregister_aead(&t_alg->aead_alg);
2663 list_del(&t_alg->entry);
2667 drvdata->aead_handle = NULL;
2673 int cc_aead_alloc(struct cc_drvdata *drvdata)
2675 struct cc_aead_handle *aead_handle;
2676 struct cc_crypto_alg *t_alg;
2679 struct device *dev = drvdata_to_dev(drvdata);
2681 aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2687 INIT_LIST_HEAD(&aead_handle->aead_list);
2688 drvdata->aead_handle = aead_handle;
2690 aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2691 MAX_HMAC_DIGEST_SIZE);
2693 if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2694 dev_err(dev, "SRAM pool exhausted\n");
2700 for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2701 if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2702 !(drvdata->std_bodies & aead_algs[alg].std_body))
2705 t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2706 if (IS_ERR(t_alg)) {
2707 rc = PTR_ERR(t_alg);
2708 dev_err(dev, "%s alg allocation failed\n",
2709 aead_algs[alg].driver_name);
2712 t_alg->drvdata = drvdata;
2713 rc = crypto_register_aead(&t_alg->aead_alg);
2715 dev_err(dev, "%s alg registration failed\n",
2716 t_alg->aead_alg.base.cra_driver_name);
2719 list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2720 dev_dbg(dev, "Registered %s\n",
2721 t_alg->aead_alg.base.cra_driver_name);
2730 cc_aead_free(drvdata);