drivers/crypto/ccree/cc_aead.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

struct cc_aead_handle {
        cc_sram_addr_t sram_workspace_addr;
        struct list_head aead_list;
};

struct cc_hmac_s {
        u8 *padded_authkey;
        u8 *ipad_opad; /* IPAD, OPAD */
        dma_addr_t padded_authkey_dma_addr;
        dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
        u8 *xcbc_keys; /* K1, K2, K3 */
        dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
        struct cc_drvdata *drvdata;
        u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
        u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct cc_hmac_s hmac;
                struct cc_xcbc_s xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (possibly reduced) size of the MAC/ICV */
        unsigned int hash_len;
        enum drv_cipher_mode cipher_mode;
        enum cc_flow_mode flow_mode;
        enum drv_hash_mode auth_mode;
};

static inline bool valid_assoclen(struct aead_request *req)
{
        return ((req->assoclen == 16) || (req->assoclen == 20));
}
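
/*
 * Editor's note: the 16/20-byte values above match the IPsec ESP
 * associated data handled by the rfc4309/rfc4543 templates, where the
 * 8-byte per-packet IV is counted as part of assoclen (a sketch of the
 * arithmetic, assuming standard ESP headers):
 *
 *      4 (SPI) + 4 (seq no) + 8 (IV) = 16
 *      4 (SPI) + 8 (ESN)    + 8 (IV) = 20
 */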

static void cc_aead_exit(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
                crypto_tfm_alg_name(&tfm->base));

        /* Unmap enckey buffer */
        if (ctx->enckey) {
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
                                  ctx->enckey_dma_addr);
                dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
                        &ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

                if (xcbc->xcbc_keys) {
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                          xcbc->xcbc_keys,
                                          xcbc->xcbc_keys_dma_addr);
                }
                dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
                        &xcbc->xcbc_keys_dma_addr);
                xcbc->xcbc_keys_dma_addr = 0;
                xcbc->xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

                if (hmac->ipad_opad) {
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                          hmac->ipad_opad,
                                          hmac->ipad_opad_dma_addr);
                        dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
                                &hmac->ipad_opad_dma_addr);
                        hmac->ipad_opad_dma_addr = 0;
                        hmac->ipad_opad = NULL;
                }
                if (hmac->padded_authkey) {
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                          hmac->padded_authkey,
                                          hmac->padded_authkey_dma_addr);
                        dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
                                &hmac->padded_authkey_dma_addr);
                        hmac->padded_authkey_dma_addr = 0;
                        hmac->padded_authkey = NULL;
                }
        }
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_alg *cc_alg =
                        container_of(alg, struct cc_crypto_alg, aead_alg);
        struct device *dev = drvdata_to_dev(cc_alg->drvdata);

        dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
                crypto_tfm_alg_name(&tfm->base));

        /* Initialize modes in instance */
        ctx->cipher_mode = cc_alg->cipher_mode;
        ctx->flow_mode = cc_alg->flow_mode;
        ctx->auth_mode = cc_alg->auth_mode;
        ctx->drvdata = cc_alg->drvdata;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                                         &ctx->enckey_dma_addr, GFP_KERNEL);
        if (!ctx->enckey) {
                dev_err(dev, "Failed allocating key buffer\n");
                goto init_failed;
        }
        dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
                ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
                const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
                                                     &xcbc->xcbc_keys_dma_addr,
                                                     GFP_KERNEL);
                if (!xcbc->xcbc_keys) {
                        dev_err(dev, "Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
                const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
                dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

                /* Allocate dma-coherent buffer for IPAD + OPAD */
                hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
                                                     &hmac->ipad_opad_dma_addr,
                                                     GFP_KERNEL);

                if (!hmac->ipad_opad) {
                        dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }

                dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
                        hmac->ipad_opad);

                hmac->padded_authkey = dma_alloc_coherent(dev,
                                                          MAX_HMAC_BLOCK_SIZE,
                                                          pkey_dma,
                                                          GFP_KERNEL);

                if (!hmac->padded_authkey) {
                        dev_err(dev, "failed to allocate padded_authkey\n");
                        goto init_failed;
                }
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }
        ctx->hash_len = cc_get_aead_hash_len(tfm);

        return 0;

init_failed:
        cc_aead_exit(tfm);
        return -ENOMEM;
}
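
/*
 * Editor's sketch (hypothetical caller, not part of the driver): the
 * init above runs when a user instantiates one of the AEAD algorithms
 * this driver registers, e.g.:
 *
 *      struct crypto_aead *tfm =
 *              crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);    // alloc (or this init) failed
 */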

static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
        struct aead_request *areq = (struct aead_request *)cc_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        /* BACKLOG notification */
        if (err == -EINPROGRESS)
                goto done;

        cc_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (err)
                goto done;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                           ctx->authsize) != 0) {
                        dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* On payload authentication failure we must not
                         * reveal the decrypted message, so zero its memory.
                         */
                        sg_zero_buffer(areq->dst, sg_nents(areq->dst),
                                       areq->cryptlen, 0);
                        err = -EBADMSG;
                }
        /*ENCRYPT*/
        } else if (areq_ctx->is_icv_fragmented) {
                u32 skip = areq->cryptlen + areq_ctx->dst_offset;

                cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
                                   skip, (skip + ctx->authsize),
                                   CC_SG_FROM_BUF);
        }
done:
        aead_request_complete(areq, err);
}
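
/*
 * Editor's note: -EBADMSG is the conventional AEAD return code for an
 * ICV/tag mismatch, and the sg_zero_buffer() call above scrubs the
 * already-written plaintext so a forged ciphertext cannot leak data to
 * a caller that ignores the error code.
 */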

static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        /* Load the AES key */
        hw_desc_init(&desc[0]);
        /* We use the same buffer for the source/user key as for the
         * output keys, because the user key is not needed anymore
         * after this key load.
         */
        set_din_type(&desc[0], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
                     NS_BIT);
        set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[0], ctx->auth_keylen);
        set_flow_mode(&desc[0], S_DIN_to_AES);
        set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

        hw_desc_init(&desc[1]);
        set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[1], DIN_AES_DOUT);
        set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[2]);
        set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[2], DIN_AES_DOUT);
        set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[3]);
        set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[3], DIN_AES_DOUT);
        set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                      AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}
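
/*
 * Editor's sketch (illustrative only, assuming a loaded "aes" cipher
 * handle): the four descriptors above are the hardware form of the
 * RFC 3566 AES-XCBC-MAC key derivation, which in software would be:
 *
 *      static const u8 c1[AES_BLOCK_SIZE] = { [0 ... 15] = 0x01 };
 *      static const u8 c2[AES_BLOCK_SIZE] = { [0 ... 15] = 0x02 };
 *      static const u8 c3[AES_BLOCK_SIZE] = { [0 ... 15] = 0x03 };
 *
 *      crypto_cipher_encrypt_one(aes, k1, c1); // K1 = AES-K(0x0101..)
 *      crypto_cipher_encrypt_one(aes, k2, c2); // K2 = AES-K(0x0202..)
 *      crypto_cipher_encrypt_one(aes, k3, c3); // K3 = AES-K(0x0303..)
 */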

static unsigned int hmac_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

        unsigned int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_sram(&desc[idx],
                             cc_larval_digest_addr(ctx->drvdata,
                                                   ctx->auth_mode),
                             digest_size);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_const(&desc[idx], 0, ctx->hash_len);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             hmac->padded_authkey_dma_addr,
                             SHA256_BLOCK_SIZE, NS_BIT);
                set_cipher_mode(&desc[idx], hash_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_dout_dlli(&desc[idx],
                              (hmac->ipad_opad_dma_addr + digest_ofs),
                              digest_size, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}
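
/*
 * Editor's sketch of what the loop above precomputes (RFC 2104): the
 * intermediate hash states H(K ^ ipad) and H(K ^ opad), stored
 * back-to-back in ipad_opad[].  The software equivalent of the XOR
 * step (illustrative; k_pad is the block-sized padded key) would be:
 *
 *      for (i = 0; i < blocksize; i++)
 *              k_pad[i] ^= (pass == 0) ? 0x36 : 0x5c;
 *
 * Per-request HMAC then only resumes from these saved states instead
 * of rehashing the key block every time.
 */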

static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if (ctx->auth_keylen != AES_KEYSIZE_128 &&
                    ctx->auth_keylen != AES_KEYSIZE_192 &&
                    ctx->auth_keylen != AES_KEYSIZE_256)
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (ctx->flow_mode == S_DIN_to_DES) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if (ctx->enc_keylen != AES_KEYSIZE_128 &&
                    ctx->enc_keylen != AES_KEYSIZE_192 &&
                    ctx->enc_keylen != AES_KEYSIZE_256) {
                        dev_err(dev, "Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All key-size checks passed */
}

/* This function prepares the user key so it can be passed to the HMAC
 * processing (copied to an internal buffer, or hashed first if the key
 * is longer than one hash block).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
                                 unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
        struct cc_crypto_req cc_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        u8 *key = NULL;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (keylen != 0) {

                key = kmemdup(authkey, keylen, GFP_KERNEL);
                if (!key)
                        return -ENOMEM;

                key_dma_addr = dma_map_single(dev, (void *)key, keylen,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        kzfree(key);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_sram(&desc[idx], larval_addr, digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_const(&desc[idx], 0, ctx->hash_len);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     key_dma_addr, keylen, NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
                                      digestsize), (blocksize - digestsize),
                                      NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
                                     keylen, NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (padded_authkey_dma_addr +
                                               keylen),
                                              (blocksize - keylen), NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, (blocksize - keylen));
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
        if (rc)
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);

        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

        kzfree(key);

        return rc;
}
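
/*
 * Editor's sketch of the RFC 2104 key-normalization rule the sequence
 * above implements in hardware (illustrative pseudo-C; H() stands for
 * the selected hash):
 *
 *      if (keylen > blocksize) {
 *              key = H(key);           // long keys are hashed first
 *              keylen = digestsize;
 *      }
 *      memcpy(padded_authkey, key, keylen);
 *      memset(padded_authkey + keylen, 0, blocksize - keylen);
 */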

static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_req cc_req = {};
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        unsigned int seq_len = 0;
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        const u8 *enckey, *authkey;
        int rc;

        dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        /* STAT_PHASE_0: Init and sanity checks */

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                struct crypto_authenc_keys keys;

                rc = crypto_authenc_extractkeys(&keys, key, keylen);
                if (rc)
                        goto badkey;
                enckey = keys.enckey;
                authkey = keys.authkey;
                ctx->enc_keylen = keys.enckeylen;
                ctx->auth_keylen = keys.authkeylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* the nonce is stored in the last bytes of the key */
                        rc = -EINVAL;
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                goto badkey;
                        /* Copy nonce from the last 4 bytes of the CTR key to
                         * the first 4 bytes of the CTR IV
                         */
                        memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
                               CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                enckey = key;
                authkey = NULL;
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (rc)
                goto badkey;

        /* STAT_PHASE_1: Copy key to ctx */

        /* Get key material */
        memcpy(ctx->enckey, enckey, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
                       ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
                if (rc)
                        goto badkey;
        }

        /* STAT_PHASE_2: Create sequence */

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                rc = -ENOTSUPP;
                goto badkey;
        }

        /* STAT_PHASE_3: Submit sequence to HW */

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
                rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        goto setkey_error;
                }
        }

        /* Update STAT_PHASE_3 */
        return rc;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
        return rc;
}
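
/*
 * Editor's note: the authenc() key blob parsed by
 * crypto_authenc_extractkeys() above is not a raw key; it is an
 * rtattr-wrapped parameter block followed by the two keys (layout
 * sketch, per the kernel's authenc convention):
 *
 *      [ rtattr: CRYPTO_AUTHENC_KEYA_PARAM { __be32 enckeylen } ]
 *      [ authkey ... ][ enckey (enckeylen bytes) ]
 */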

static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                return err;

        err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
              cc_aead_setkey(aead, key, keylen);

        memzero_explicit(&keys, sizeof(keys));
        return err;
}

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        return cc_aead_setkey(tfm, key, keylen);
}
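
/*
 * Editor's example: for rfc4309(ccm(aes)) the last 3 bytes of the user
 * key are the CCM nonce salt, so a 19-byte input is a 16-byte AES key
 * plus the salt stashed in ctx->ctr_nonce above (likewise 27/35 bytes
 * for AES-192/256).
 */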

static int cc_aead_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        /* Unsupported auth. sizes */
        if (authsize == 0 ||
            authsize > crypto_aead_maxauthsize(authenc)) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        dev_dbg(dev, "authlen=%d\n", ctx->authsize);

        return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}
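
/*
 * Editor's note: the accepted tag lengths above are the even values
 * 4..16 defined for CCM (RFC 3610), while the rfc4309 variant further
 * restricts them to the 8/12/16 ICV sizes IPsec ESP allows.
 */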

static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                              struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (assoc_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
                             areq_ctx->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "ASSOC buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
                             areq_ctx->assoc.mlli_nents, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}
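
/*
 * Editor's note on the two DMA shapes used throughout this file: DLLI
 * describes a single contiguous buffer (address + length carried in
 * the descriptor itself), while MLLI points at a multi-entry link-list
 * table that the driver first stages into SRAM for scattered buffers
 * (see cc_mlli_to_sram() below).
 */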

static void cc_proc_authen_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size, int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_sgl : areq_ctx->src_sgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_offset : areq_ctx->src_offset;
                dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(cipher) + offset),
                             areq_ctx->cryptlen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (the default): assoc. + iv + data are
                 * compacted into one table; if assoclen is zero, only the
                 * IV is processed.
                 */
                cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
                u32 mlli_nents = areq_ctx->assoc.mlli_nents;

                if (areq_ctx->is_single_pass) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
                             NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (areq_ctx->cryptlen == 0)
                return; /* null processing */

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(areq_ctx->src_sgl) +
                              areq_ctx->src_offset), areq_ctx->cryptlen,
                              NS_BIT);
                set_dout_dlli(&desc[idx],
                              (sg_dma_address(areq_ctx->dst_sgl) +
                               areq_ctx->dst_offset),
                              areq_ctx->cryptlen, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
                             areq_ctx->src.mlli_nents, NS_BIT);
                set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
                              areq_ctx->dst.mlli_nents, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_digest_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                hw_desc_init(&desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
                              NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_aes_not_hash_mode(&desc[idx]);
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        } else { /* Decrypt */
                /* Get ICV out from hardware */
                hw_desc_init(&desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
                              ctx->authsize, NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                set_cipher_config0(&desc[idx],
                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        set_aes_not_hash_mode(&desc[idx]);
                } else {
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
                               struct cc_hw_desc desc[],
                               unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
                     hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        else
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
                              ctx->enc_keylen), NS_BIT);
                set_key_size_aes(&desc[idx], ctx->enc_keylen);
        } else {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ctx->enc_keylen, NS_BIT);
                set_key_size_des(&desc[idx], ctx->enc_keylen);
        }
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
                           unsigned int *seq_size, unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /* null processing */

        cc_set_cipher_desc(req, desc, &idx);
        cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for the DMA to finish writing all the
                 * cipher text
                 */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
                     NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                     AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        *seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;

        /* Hash associated data */
        if (areq_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      ctx->hash_len);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_cipher_do(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        hw_desc_init(&desc[idx]);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      digest_size);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        set_cipher_mode(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                     digest_size, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        hw_desc_init(&desc[idx]);
        set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
                     digest_size);
        set_flow_mode(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

static void cc_mlli_to_sram(struct aead_request *req,
                            struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
            req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
            !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
                dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
                        (unsigned int)ctx->drvdata->mlli_sram_addr,
                        req_ctx->mlli_params.mlli_len);
                /* Copy MLLI table host-to-sram */
                hw_desc_init(&desc[*seq_size]);
                set_din_type(&desc[*seq_size], DMA_DLLI,
                             req_ctx->mlli_params.mlli_dma_addr,
                             req_ctx->mlli_params.mlli_len, NS_BIT);
                set_dout_sram(&desc[*seq_size],
                              ctx->drvdata->mlli_sram_addr,
                              req_ctx->mlli_params.mlli_len);
                set_flow_mode(&desc[*seq_size], BYPASS);
                (*seq_size)++;
        }
}

static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
                                          enum cc_flow_mode setup_flow_mode,
                                          bool is_single_pass)
{
        enum cc_flow_mode data_flow_mode;

        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_to_HASH_and_DOUT : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_to_HASH_and_DOUT : DIN_DES_DOUT;
        } else { /* Decrypt */
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_and_HASH : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_and_HASH : DIN_DES_DOUT;
        }

        return data_flow_mode;
}
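
/*
 * Editor's example: a single-pass AES encrypt maps to
 * AES_to_HASH_and_DOUT (the cipher output feeds both the MAC engine
 * and the output DMA in one pass), a single-pass decrypt to
 * AES_and_HASH, while the double-pass fallback always uses the plain
 * DIN_AES_DOUT / DIN_DES_DOUT cipher flow and hashes in a separate
 * pass.
 */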

static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
                            unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int data_flow_mode =
                cc_get_data_flow(direct, ctx->flow_mode,
                                 req_ctx->is_single_pass);

        if (req_ctx->is_single_pass) {
                /* Single-pass flow */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_set_cipher_desc(req, desc, seq_size);
                cc_proc_header_desc(req, desc, seq_size);
                cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);
                return;
        }

        /* Double-pass flow: fallback for the modes single-pass does not
         * support, i.e. when the assoc. data length is not a multiple of
         * a word.
         */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* encrypt first.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* authenc after.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);

        } else { /* DECRYPT */
                /* authenc first.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                /* decrypt after.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* reading the digest result (which sets the completion bit)
                 * must come after the cipher operation
                 */
                cc_proc_digest_desc(req, desc, seq_size);
        }
}
1272
1273 static void
1274 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1275                 unsigned int *seq_size)
1276 {
1277         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1278         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1279         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1280         int direct = req_ctx->gen_ctx.op_type;
1281         unsigned int data_flow_mode =
1282                 cc_get_data_flow(direct, ctx->flow_mode,
1283                                  req_ctx->is_single_pass);
1284
1285         if (req_ctx->is_single_pass) {
1286                 /*
1287                  * Single-pass flow
1288                  */
1289                 cc_set_xcbc_desc(req, desc, seq_size);
1290                 cc_set_cipher_desc(req, desc, seq_size);
1291                 cc_proc_header_desc(req, desc, seq_size);
1292                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1293                 cc_proc_digest_desc(req, desc, seq_size);
1294                 return;
1295         }
1296
1297         /*
1298          * Double-pass flow
1299          * Fallback for unsupported single-pass modes, e.g. when the
1300          * associated data length is not a multiple of a word.
1301          */
1302         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1303                 /* encrypt first.. */
1304                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1305                 /* authenc after.. */
1306                 cc_set_xcbc_desc(req, desc, seq_size);
1307                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1308                 cc_proc_digest_desc(req, desc, seq_size);
1309         } else { /*DECRYPT*/
1310                 /* authenc first.. */
1311                 cc_set_xcbc_desc(req, desc, seq_size);
1312                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1313                 /* decrypt after..*/
1314                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1315                 /* Read the digest result, setting the completion bit;
1316                  * this must come after the cipher operation.
1317                  */
1318                 cc_proc_digest_desc(req, desc, seq_size);
1319         }
1320 }
1321
1322 static int validate_data_size(struct cc_aead_ctx *ctx,
1323                               enum drv_crypto_direction direct,
1324                               struct aead_request *req)
1325 {
1326         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1327         struct device *dev = drvdata_to_dev(ctx->drvdata);
1328         unsigned int assoclen = areq_ctx->assoclen;
1329         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1330                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1331
1332         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1333             req->cryptlen < ctx->authsize)
1334                 goto data_size_err;
1335
1336         areq_ctx->is_single_pass = true; /* default to the fast flow */
1337
1338         switch (ctx->flow_mode) {
1339         case S_DIN_to_AES:
1340                 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1341                     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1342                         goto data_size_err;
1343                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1344                         break;
1345                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1346                         if (areq_ctx->plaintext_authenticate_only)
1347                                 areq_ctx->is_single_pass = false;
1348                         break;
1349                 }
1350
1351                 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1352                         areq_ctx->is_single_pass = false;
1353
1354                 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1355                     !IS_ALIGNED(cipherlen, sizeof(u32)))
1356                         areq_ctx->is_single_pass = false;
1357
1358                 break;
1359         case S_DIN_to_DES:
1360                 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1361                         goto data_size_err;
1362                 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1363                         areq_ctx->is_single_pass = false;
1364                 break;
1365         default:
1366                 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1367                 goto data_size_err;
1368         }
1369
1370         return 0;
1371
1372 data_size_err:
1373         return -EINVAL;
1374 }
1375
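/*
 * Encode the CCM associated-data length field a0 per RFC 3610 section 2.2:
 * lengths below 0xff00 use a 2-byte big-endian field; larger lengths are
 * prefixed with 0xff 0xfe and use a 4-byte big-endian field. For example,
 * header_size = 16 is encoded as 00 10 and the function returns 2.
 */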
1376 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1377 {
1378         unsigned int len = 0;
1379
1380         if (header_size == 0)
1381                 return 0;
1382
1383         if (header_size < ((1UL << 16) - (1UL << 8))) {
1384                 len = 2;
1385
1386                 pa0_buff[0] = (header_size >> 8) & 0xFF;
1387                 pa0_buff[1] = header_size & 0xFF;
1388         } else {
1389                 len = 6;
1390
1391                 pa0_buff[0] = 0xFF;
1392                 pa0_buff[1] = 0xFE;
1393                 pa0_buff[2] = (header_size >> 24) & 0xFF;
1394                 pa0_buff[3] = (header_size >> 16) & 0xFF;
1395                 pa0_buff[4] = (header_size >> 8) & 0xFF;
1396                 pa0_buff[5] = header_size & 0xFF;
1397         }
1398
1399         return len;
1400 }
1401
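/*
 * Write the CCM message-length field l(m): msglen is stored big-endian in
 * the last csize bytes of the block. For example, csize = 4 and
 * msglen = 500 (0x1f4) yield trailing bytes 00 00 01 f4 (RFC 3610,
 * section 2.2).
 */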
1402 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1403 {
1404         __be32 data;
1405
1406         memset(block, 0, csize);
1407         block += csize;
1408
1409         if (csize >= 4)
1410                 csize = 4;
1411         else if (msglen > (1 << (8 * csize)))
1412                 return -EOVERFLOW;
1413
1414         data = cpu_to_be32(msglen);
1415         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1416
1417         return 0;
1418 }
1419
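/*
 * Build the CCM descriptor sequence: load the AES-CTR and CBC-MAC keys and
 * states, hash the (formatted) associated data, run the payload through the
 * combined CTR/CBC-MAC flow, then encrypt the resulting MAC value T under
 * counter block A0.
 */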
1420 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1421                   unsigned int *seq_size)
1422 {
1423         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1424         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1425         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1426         unsigned int idx = *seq_size;
1427         unsigned int cipher_flow_mode;
1428         dma_addr_t mac_result;
1429
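        /* On decrypt, write the computed MAC to mac_buf so it can be
         * compared with the received ICV; on encrypt, write it directly
         * to the ICV position in the destination.
         */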
1430         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1431                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1432                 mac_result = req_ctx->mac_buf_dma_addr;
1433         } else { /* Encrypt */
1434                 cipher_flow_mode = AES_and_HASH;
1435                 mac_result = req_ctx->icv_dma_addr;
1436         }
1437
1438         /* load key */
1439         hw_desc_init(&desc[idx]);
1440         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1441         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1442                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1443                       ctx->enc_keylen), NS_BIT);
1444         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1445         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1446         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1447         set_flow_mode(&desc[idx], S_DIN_to_AES);
1448         idx++;
1449
1450         /* load ctr state */
1451         hw_desc_init(&desc[idx]);
1452         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1453         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1454         set_din_type(&desc[idx], DMA_DLLI,
1455                      req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1456         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1457         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1458         set_flow_mode(&desc[idx], S_DIN_to_AES);
1459         idx++;
1460
1461         /* load MAC key */
1462         hw_desc_init(&desc[idx]);
1463         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1464         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1465                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1466                       ctx->enc_keylen), NS_BIT);
1467         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1468         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1469         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1470         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1471         set_aes_not_hash_mode(&desc[idx]);
1472         idx++;
1473
1474         /* load MAC state */
1475         hw_desc_init(&desc[idx]);
1476         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1477         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1478         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1479                      AES_BLOCK_SIZE, NS_BIT);
1480         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1481         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1482         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1483         set_aes_not_hash_mode(&desc[idx]);
1484         idx++;
1485
1486         /* process assoc data */
1487         if (req_ctx->assoclen > 0) {
1488                 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1489         } else {
1490                 hw_desc_init(&desc[idx]);
1491                 set_din_type(&desc[idx], DMA_DLLI,
1492                              sg_dma_address(&req_ctx->ccm_adata_sg),
1493                              AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1494                 set_flow_mode(&desc[idx], DIN_HASH);
1495                 idx++;
1496         }
1497
1498         /* process the cipher */
1499         if (req_ctx->cryptlen)
1500                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1501
1502         /* Read temporal MAC */
1503         hw_desc_init(&desc[idx]);
1504         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1505         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1506                       NS_BIT, 0);
1507         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1508         set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1509         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1510         set_aes_not_hash_mode(&desc[idx]);
1511         idx++;
1512
1513         /* load AES-CTR state (for last MAC calculation)*/
1514         hw_desc_init(&desc[idx]);
1515         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1516         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1517         set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1518                      AES_BLOCK_SIZE, NS_BIT);
1519         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1520         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1521         set_flow_mode(&desc[idx], S_DIN_to_AES);
1522         idx++;
1523
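        /* Memory barrier (no-op descriptor, the same idiom used in the
         * GHASH setup below) before encrypting the MAC
         */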
1524         hw_desc_init(&desc[idx]);
1525         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1526         set_dout_no_dma(&desc[idx], 0, 0, 1);
1527         idx++;
1528
1529         /* encrypt the "T" value and store MAC in mac_state */
1530         hw_desc_init(&desc[idx]);
1531         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1532                      ctx->authsize, NS_BIT);
1533         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1534         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1535         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1536         idx++;
1537
1538         *seq_size = idx;
1539         return 0;
1540 }
1541
1542 static int config_ccm_adata(struct aead_request *req)
1543 {
1544         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1545         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1546         struct device *dev = drvdata_to_dev(ctx->drvdata);
1547         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1549         unsigned int lp = req->iv[0];
1550         /* Note: the code assumes that req->iv[0] already contains the
1551          * value of L' of RFC 3610.
1552          */
1553         unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1554         unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1555         u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1556         u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1557         u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1558         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1559                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1560                                 req->cryptlen :
1561                                 (req->cryptlen - ctx->authsize);
1562         int rc;
1563
1564         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1565         memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1566
1567         /* taken from crypto/ccm.c */
1568         /* 2 <= L <= 8, so 1 <= L' <= 7. */
1569         if (l < 2 || l > 8) {
1570                 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1571                 return -EINVAL;
1572         }
1573         memcpy(b0, req->iv, AES_BLOCK_SIZE);
1574
1575         /* format control info per RFC 3610 and
1576          * NIST Special Publication 800-38C
1577          */
1578         *b0 |= (8 * ((m - 2) / 2));
1579         if (req_ctx->assoclen > 0)
1580                 *b0 |= 64;  /* Enable bit 6 if Adata exists. */
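        /*
         * The B0 flags byte is now 64*Adata + 8*M' + L' (RFC 3610), where
         * M' = (M - 2) / 2 and L' = L - 1. For example, with Adata present,
         * M = 8 and L = 4: 0x40 | (8 * 3) | 3 = 0x5b.
         */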
1581
1582         rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m). */
1583         if (rc) {
1584                 dev_err(dev, "message len overflow detected\n");
1585                 return rc;
1586         }
1587         /* END of "taken from crypto/ccm.c" */
1588
1589         /* l(a) - size of associated data. */
1590         req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1591
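        /*
         * Build counter block A1: zero the L-byte counter field of the IV
         * and set the trailing octet to 1; payload encryption starts at
         * counter 1. A0 (counter = 0), derived below as ctr_count_0, is
         * used to encrypt the CBC-MAC value T.
         */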
1592         memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1593         req->iv[15] = 1;
1594
1595         memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1596         ctr_count_0[15] = 0;
1597
1598         return 0;
1599 }
1600
1601 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1602 {
1603         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1604         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1605         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1606
1607         /* L' = L - 1 */
1608         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1609         /* For RFC 4309, always use 4 bytes for message length
1610          * (at most 2^32-1 bytes).
1611          */
1612         areq_ctx->ctr_iv[0] = 3;
1613
1614         /* In RFC 4309 there is an 11-byte nonce + IV part
1615          * that we build here.
1616          */
1617         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1618                CCM_BLOCK_NONCE_SIZE);
1619         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1620                CCM_BLOCK_IV_SIZE);
1621         req->iv = areq_ctx->ctr_iv;
1622         areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
1623 }
1624
1625 static void cc_set_ghash_desc(struct aead_request *req,
1626                               struct cc_hw_desc desc[], unsigned int *seq_size)
1627 {
1628         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1629         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1630         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1631         unsigned int idx = *seq_size;
1632
1633         /* load key to AES*/
1634         hw_desc_init(&desc[idx]);
1635         set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1636         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1637         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1638                      ctx->enc_keylen, NS_BIT);
1639         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1640         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1641         set_flow_mode(&desc[idx], S_DIN_to_AES);
1642         idx++;
1643
1644         /* process one zero block to generate hkey */
1645         hw_desc_init(&desc[idx]);
1646         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1647         set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1648                       NS_BIT, 0);
1649         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1650         idx++;
1651
1652         /* Memory Barrier */
1653         hw_desc_init(&desc[idx]);
1654         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1655         set_dout_no_dma(&desc[idx], 0, 0, 1);
1656         idx++;
1657
1658         /* Load GHASH subkey */
1659         hw_desc_init(&desc[idx]);
1660         set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1661                      AES_BLOCK_SIZE, NS_BIT);
1662         set_dout_no_dma(&desc[idx], 0, 0, 1);
1663         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1664         set_aes_not_hash_mode(&desc[idx]);
1665         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1666         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1667         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1668         idx++;
1669
1670         /* Configure the hash engine to work with GHASH.
1671          * Since it was not possible to extend the HASH submodes to add
1672          * GHASH, the following command is necessary to select GHASH
1673          * (according to the HW designers).
1674          */
1675         hw_desc_init(&desc[idx]);
1676         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1677         set_dout_no_dma(&desc[idx], 0, 0, 1);
1678         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1679         set_aes_not_hash_mode(&desc[idx]);
1680         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1681         set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
1682         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1683         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1684         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1685         idx++;
1686
1687         /* Load the GHASH initial state (which is 0); every hash
1688          * operation starts from an initial state.
1689          */
1690         hw_desc_init(&desc[idx]);
1691         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1692         set_dout_no_dma(&desc[idx], 0, 0, 1);
1693         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1694         set_aes_not_hash_mode(&desc[idx]);
1695         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1696         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1697         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1698         idx++;
1699
1700         *seq_size = idx;
1701 }
1702
1703 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1704                              unsigned int *seq_size)
1705 {
1706         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1707         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1708         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1709         unsigned int idx = *seq_size;
1710
1711         /* load key to AES*/
1712         hw_desc_init(&desc[idx]);
1713         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1714         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1715         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1716                      ctx->enc_keylen, NS_BIT);
1717         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1718         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1719         set_flow_mode(&desc[idx], S_DIN_to_AES);
1720         idx++;
1721
1722         if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1723                 /* load the initial AES-CTR value, incremented by 2 */
1724                 hw_desc_init(&desc[idx]);
1725                 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1726                 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1727                 set_din_type(&desc[idx], DMA_DLLI,
1728                              req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1729                              NS_BIT);
1730                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1731                 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1732                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1733                 idx++;
1734         }
1735
1736         *seq_size = idx;
1737 }
1738
1739 static void cc_proc_gcm_result(struct aead_request *req,
1740                                struct cc_hw_desc desc[],
1741                                unsigned int *seq_size)
1742 {
1743         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1744         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1745         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1746         dma_addr_t mac_result;
1747         unsigned int idx = *seq_size;
1748
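        /* As in the CCM flow: decrypt writes the tag to mac_buf for
         * comparison with the received ICV; encrypt writes it directly to
         * the ICV position in the destination.
         */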
1749         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1750                 mac_result = req_ctx->mac_buf_dma_addr;
1751         } else { /* Encrypt */
1752                 mac_result = req_ctx->icv_dma_addr;
1753         }
1754
1755         /* process(ghash) gcm_block_len */
1756         hw_desc_init(&desc[idx]);
1757         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1758                      AES_BLOCK_SIZE, NS_BIT);
1759         set_flow_mode(&desc[idx], DIN_HASH);
1760         idx++;
1761
1762         /* Store GHASH state after GHASH(Associated Data + Cipher + LenBlock) */
1763         hw_desc_init(&desc[idx]);
1764         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1765         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1766         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1767                       NS_BIT, 0);
1768         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1769         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1770         set_aes_not_hash_mode(&desc[idx]);
1772         idx++;
1773
1774         /* load the initial AES-CTR value, incremented by 1 */
1775         hw_desc_init(&desc[idx]);
1776         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1777         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1778         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1779                      AES_BLOCK_SIZE, NS_BIT);
1780         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1781         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1782         set_flow_mode(&desc[idx], S_DIN_to_AES);
1783         idx++;
1784
1785         /* Memory Barrier */
1786         hw_desc_init(&desc[idx]);
1787         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1788         set_dout_no_dma(&desc[idx], 0, 0, 1);
1789         idx++;
1790
1791         /* process GCTR on the stored GHASH and store the MAC in mac_state */
1792         hw_desc_init(&desc[idx]);
1793         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1794         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1795                      AES_BLOCK_SIZE, NS_BIT);
1796         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1797         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1798         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1799         idx++;
1800
1801         *seq_size = idx;
1802 }
1803
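/*
 * Build the GCM descriptor sequence: set up GHASH with the hash subkey
 * H = E(K, 0^128), hash the associated data, run the payload through GCTR
 * (and GHASH), then produce the tag from the length block.
 */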
1804 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1805                   unsigned int *seq_size)
1806 {
1807         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1808         unsigned int cipher_flow_mode;
1809
1810         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1811                 cipher_flow_mode = AES_and_HASH;
1812         } else { /* Encrypt */
1813                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1814         }
1815
1816         /* In RFC 4543 there is no data to encrypt; just copy src to dst. */
1817         if (req_ctx->plaintext_authenticate_only) {
1818                 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1819                 cc_set_ghash_desc(req, desc, seq_size);
1820                 /* process(ghash) assoc data */
1821                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1822                 cc_set_gctr_desc(req, desc, seq_size);
1823                 cc_proc_gcm_result(req, desc, seq_size);
1824                 return 0;
1825         }
1826
1827         /* For GCM and RFC 4106 */
1828         cc_set_ghash_desc(req, desc, seq_size);
1829         /* process(ghash) assoc data */
1830         if (req_ctx->assoclen > 0)
1831                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1832         cc_set_gctr_desc(req, desc, seq_size);
1833         /* process(gctr+ghash) */
1834         if (req_ctx->cryptlen)
1835                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1836         cc_proc_gcm_result(req, desc, seq_size);
1837
1838         return 0;
1839 }
1840
1841 static int config_gcm_context(struct aead_request *req)
1842 {
1843         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1844         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1845         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1846         struct device *dev = drvdata_to_dev(ctx->drvdata);
1847
1848         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1849                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1850                                 req->cryptlen :
1851                                 (req->cryptlen - ctx->authsize);
1852         __be32 counter = cpu_to_be32(2);
1853
1854         dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1855                 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1856
1857         memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1858
1859         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1860
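        /*
         * With a 96-bit IV, the pre-counter block is J0 = IV || 0^31 || 1
         * (NIST SP 800-38D). gcm_iv_inc1 (counter = 1) is J0 itself, used
         * to encrypt the final GHASH into the tag; gcm_iv_inc2
         * (counter = 2) is the first payload counter block.
         */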
1861         memcpy(req->iv + 12, &counter, 4);
1862         memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1863
1864         counter = cpu_to_be32(1);
1865         memcpy(req->iv + 12, &counter, 4);
1866         memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1867
1868         if (!req_ctx->plaintext_authenticate_only) {
1869                 __be64 temp64;
1870
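                /* GHASH length block: 64-bit big-endian bit counts of the
                 * AAD (len_a) and the ciphertext (len_c).
                 */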
1871                 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1872                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1873                 temp64 = cpu_to_be64(cryptlen * 8);
1874                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1875         } else {
1876                 /* RFC 4543: all data (AAD, IV, plaintext) is treated as
1877                  * additional authenticated data, i.e. nothing is encrypted.
1878                  */
1879                 __be64 temp64;
1880
1881                 temp64 = cpu_to_be64((req_ctx->assoclen +
1882                                       GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1883                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1884                 temp64 = 0;
1885                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1886         }
1887
1888         return 0;
1889 }
1890
1891 static void cc_proc_rfc4_gcm(struct aead_request *req)
1892 {
1893         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1894         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1895         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1896
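        /* Build the 12-byte nonce || IV counter IV. The 8-byte IV arrives
         * as the tail of the AAD in the RFC 4106/4543 API, hence the
         * assoclen adjustment below.
         */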
1897         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1898                ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1899         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1900                GCM_BLOCK_RFC4_IV_SIZE);
1901         req->iv = areq_ctx->ctr_iv;
1902         areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1903 }
1904
1905 static int cc_proc_aead(struct aead_request *req,
1906                         enum drv_crypto_direction direct)
1907 {
1908         int rc = 0;
1909         int seq_len = 0;
1910         struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1911         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1912         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1913         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1914         struct device *dev = drvdata_to_dev(ctx->drvdata);
1915         struct cc_crypto_req cc_req = {};
1916
1917         dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1918                 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1919                 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1920                 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1921
1922         /* STAT_PHASE_0: Init and sanity checks */
1923
1924         /* Check data length according to mode */
1925         if (validate_data_size(ctx, direct, req)) {
1926                 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1927                         req->cryptlen, areq_ctx->assoclen);
1928                 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1929                 return -EINVAL;
1930         }
1931
1932         /* Setup request structure */
1933         cc_req.user_cb = (void *)cc_aead_complete;
1934         cc_req.user_arg = (void *)req;
1935
1936         /* Setup request context */
1937         areq_ctx->gen_ctx.op_type = direct;
1938         areq_ctx->req_authsize = ctx->authsize;
1939         areq_ctx->cipher_mode = ctx->cipher_mode;
1940
1941         /* STAT_PHASE_1: Map buffers */
1942
1943         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1944                 /* Build CTR IV - Copy nonce from last 4 bytes in
1945                  * CTR key to first 4 bytes in CTR IV
1946                  */
1947                 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1948                        CTR_RFC3686_NONCE_SIZE);
1949                 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1950                        CTR_RFC3686_IV_SIZE);
1951                 /* Initialize counter portion of counter block */
1952                 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1953                             CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1954
1955                 /* Replace with counter iv */
1956                 req->iv = areq_ctx->ctr_iv;
1957                 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1958         } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1959                    (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1960                 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1961                 if (areq_ctx->ctr_iv != req->iv) {
1962                         memcpy(areq_ctx->ctr_iv, req->iv,
1963                                crypto_aead_ivsize(tfm));
1964                         req->iv = areq_ctx->ctr_iv;
1965                 }
1966         } else {
1967                 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1968         }
1969
1970         if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1971                 rc = config_ccm_adata(req);
1972                 if (rc) {
1973                         dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1974                                 rc);
1975                         goto exit;
1976                 }
1977         } else {
1978                 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1979         }
1980
1981         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1982                 rc = config_gcm_context(req);
1983                 if (rc) {
1984                         dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1985                                 rc);
1986                         goto exit;
1987                 }
1988         }
1989
1990         rc = cc_map_aead_request(ctx->drvdata, req);
1991         if (rc) {
1992                 dev_err(dev, "map_request() failed\n");
1993                 goto exit;
1994         }
1995
1996         /* STAT_PHASE_2: Create sequence */
1997
1998         /* Load MLLI tables to SRAM if necessary */
1999         cc_mlli_to_sram(req, desc, &seq_len);
2000
2001         /* TODO: move seq_len by reference */
2002         switch (ctx->auth_mode) {
2003         case DRV_HASH_SHA1:
2004         case DRV_HASH_SHA256:
2005                 cc_hmac_authenc(req, desc, &seq_len);
2006                 break;
2007         case DRV_HASH_XCBC_MAC:
2008                 cc_xcbc_authenc(req, desc, &seq_len);
2009                 break;
2010         case DRV_HASH_NULL:
2011                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2012                         cc_ccm(req, desc, &seq_len);
2013                 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2014                         cc_gcm(req, desc, &seq_len);
2015                 break;
2016         default:
2017                 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2018                 cc_unmap_aead_request(dev, req);
2019                 rc = -ENOTSUPP;
2020                 goto exit;
2021         }
2022
2023         /* STAT_PHASE_3: Lock HW and push sequence */
2024
2025         rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2026
2027         if (rc != -EINPROGRESS && rc != -EBUSY) {
2028                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2029                 cc_unmap_aead_request(dev, req);
2030         }
2031
2032 exit:
2033         return rc;
2034 }
2035
2036 static int cc_aead_encrypt(struct aead_request *req)
2037 {
2038         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2039         int rc;
2040
2041         memset(areq_ctx, 0, sizeof(*areq_ctx));
2042
2043         /* No generated IV required */
2044         areq_ctx->backup_iv = req->iv;
2045         areq_ctx->assoclen = req->assoclen;
2046         areq_ctx->is_gcm4543 = false;
2047
2048         areq_ctx->plaintext_authenticate_only = false;
2049
2050         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2051         if (rc != -EINPROGRESS && rc != -EBUSY)
2052                 req->iv = areq_ctx->backup_iv;
2053
2054         return rc;
2055 }
2056
2057 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2058 {
2059         /* Very similar to cc_aead_encrypt() above. */
2060
2061         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2062         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2063         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2064         struct device *dev = drvdata_to_dev(ctx->drvdata);
2065         int rc = -EINVAL;
2066
2067         if (!valid_assoclen(req)) {
2068                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2069                 goto out;
2070         }
2071
2072         memset(areq_ctx, 0, sizeof(*areq_ctx));
2073
2074         /* No generated IV required */
2075         areq_ctx->backup_iv = req->iv;
2076         areq_ctx->assoclen = req->assoclen;
2077         areq_ctx->is_gcm4543 = true;
2078
2079         cc_proc_rfc4309_ccm(req);
2080
2081         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2082         if (rc != -EINPROGRESS && rc != -EBUSY)
2083                 req->iv = areq_ctx->backup_iv;
2084 out:
2085         return rc;
2086 }
2087
2088 static int cc_aead_decrypt(struct aead_request *req)
2089 {
2090         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2091         int rc;
2092
2093         memset(areq_ctx, 0, sizeof(*areq_ctx));
2094
2095         /* No generated IV required */
2096         areq_ctx->backup_iv = req->iv;
2097         areq_ctx->assoclen = req->assoclen;
2098         areq_ctx->is_gcm4543 = false;
2099
2100         areq_ctx->plaintext_authenticate_only = false;
2101
2102         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2103         if (rc != -EINPROGRESS && rc != -EBUSY)
2104                 req->iv = areq_ctx->backup_iv;
2105
2106         return rc;
2107 }
2108
2109 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2110 {
2111         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2112         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2113         struct device *dev = drvdata_to_dev(ctx->drvdata);
2114         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2115         int rc = -EINVAL;
2116
2117         if (!valid_assoclen(req)) {
2118                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2119                 goto out;
2120         }
2121
2122         memset(areq_ctx, 0, sizeof(*areq_ctx));
2123
2124         /* No generated IV required */
2125         areq_ctx->backup_iv = req->iv;
2126         areq_ctx->assoclen = req->assoclen;
2127
2128         areq_ctx->is_gcm4543 = true;
2129         cc_proc_rfc4309_ccm(req);
2130
2131         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2132         if (rc != -EINPROGRESS && rc != -EBUSY)
2133                 req->iv = areq_ctx->backup_iv;
2134
2135 out:
2136         return rc;
2137 }
2138
2139 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2140                                  unsigned int keylen)
2141 {
2142         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2143         struct device *dev = drvdata_to_dev(ctx->drvdata);
2144
2145         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2146
2147         if (keylen < 4)
2148                 return -EINVAL;
2149
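        /* The trailing 4 bytes of the key are the RFC 4106 nonce (salt) */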
2150         keylen -= 4;
2151         memcpy(ctx->ctr_nonce, key + keylen, 4);
2152
2153         return cc_aead_setkey(tfm, key, keylen);
2154 }
2155
2156 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2157                                  unsigned int keylen)
2158 {
2159         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2160         struct device *dev = drvdata_to_dev(ctx->drvdata);
2161
2162         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2163
2164         if (keylen < 4)
2165                 return -EINVAL;
2166
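        /* The trailing 4 bytes of the key are the RFC 4543 nonce (salt) */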
2167         keylen -= 4;
2168         memcpy(ctx->ctr_nonce, key + keylen, 4);
2169
2170         return cc_aead_setkey(tfm, key, keylen);
2171 }
2172
2173 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2174                               unsigned int authsize)
2175 {
2176         switch (authsize) {
2177         case 4:
2178         case 8:
2179         case 12:
2180         case 13:
2181         case 14:
2182         case 15:
2183         case 16:
2184                 break;
2185         default:
2186                 return -EINVAL;
2187         }
2188
2189         return cc_aead_setauthsize(authenc, authsize);
2190 }
2191
2192 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2193                                       unsigned int authsize)
2194 {
2195         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2196         struct device *dev = drvdata_to_dev(ctx->drvdata);
2197
2198         dev_dbg(dev, "authsize %d\n", authsize);
2199
2200         switch (authsize) {
2201         case 8:
2202         case 12:
2203         case 16:
2204                 break;
2205         default:
2206                 return -EINVAL;
2207         }
2208
2209         return cc_aead_setauthsize(authenc, authsize);
2210 }
2211
2212 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2213                                       unsigned int authsize)
2214 {
2215         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2216         struct device *dev = drvdata_to_dev(ctx->drvdata);
2217
2218         dev_dbg(dev, "authsize %d\n", authsize);
2219
2220         if (authsize != 16)
2221                 return -EINVAL;
2222
2223         return cc_aead_setauthsize(authenc, authsize);
2224 }
2225
2226 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2227 {
2228         /* Very similar to cc_aead_encrypt() above. */
2229
2230         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2231         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2232         struct device *dev = drvdata_to_dev(ctx->drvdata);
2233         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2234         int rc = -EINVAL;
2235
2236         if (!valid_assoclen(req)) {
2237                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2238                 goto out;
2239         }
2240
2241         memset(areq_ctx, 0, sizeof(*areq_ctx));
2242
2243         /* No generated IV required */
2244         areq_ctx->backup_iv = req->iv;
2245         areq_ctx->assoclen = req->assoclen;
2246         areq_ctx->plaintext_authenticate_only = false;
2247
2248         cc_proc_rfc4_gcm(req);
2249         areq_ctx->is_gcm4543 = true;
2250
2251         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2252         if (rc != -EINPROGRESS && rc != -EBUSY)
2253                 req->iv = areq_ctx->backup_iv;
2254 out:
2255         return rc;
2256 }
2257
2258 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2259 {
2260         /* Very similar to cc_aead_encrypt() above. */
2261         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2262         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2263         struct device *dev = drvdata_to_dev(ctx->drvdata);
2264         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2265         int rc = -EINVAL;
2266
2267         if (!valid_assoclen(req)) {
2268                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2269                 goto out;
2270         }
2271
2272         memset(areq_ctx, 0, sizeof(*areq_ctx));
2273
2274         /* Plaintext is not encrypted with RFC 4543 */
2275         areq_ctx->plaintext_authenticate_only = true;
2276
2277         /* No generated IV required */
2278         areq_ctx->backup_iv = req->iv;
2279         areq_ctx->assoclen = req->assoclen;
2280
2281         cc_proc_rfc4_gcm(req);
2282         areq_ctx->is_gcm4543 = true;
2283
2284         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2285         if (rc != -EINPROGRESS && rc != -EBUSY)
2286                 req->iv = areq_ctx->backup_iv;
2287 out:
2288         return rc;
2289 }
2290
2291 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2292 {
2293         /* Very similar to cc_aead_decrypt() above. */
2294
2295         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2296         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2297         struct device *dev = drvdata_to_dev(ctx->drvdata);
2298         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2299         int rc = -EINVAL;
2300
2301         if (!valid_assoclen(req)) {
2302                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2303                 goto out;
2304         }
2305
2306         memset(areq_ctx, 0, sizeof(*areq_ctx));
2307
2308         /* No generated IV required */
2309         areq_ctx->backup_iv = req->iv;
2310         areq_ctx->assoclen = req->assoclen;
2311         areq_ctx->plaintext_authenticate_only = false;
2312
2313         cc_proc_rfc4_gcm(req);
2314         areq_ctx->is_gcm4543 = true;
2315
2316         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2317         if (rc != -EINPROGRESS && rc != -EBUSY)
2318                 req->iv = areq_ctx->backup_iv;
2319 out:
2320         return rc;
2321 }
2322
2323 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2324 {
2325         /* Very similar to cc_aead_decrypt() above. */
2326         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2327         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2328         struct device *dev = drvdata_to_dev(ctx->drvdata);
2329         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2330         int rc = -EINVAL;
2331
2332         if (!valid_assoclen(req)) {
2333                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2334                 goto out;
2335         }
2336
2337         memset(areq_ctx, 0, sizeof(*areq_ctx));
2338
2339         /* Data is not decrypted with RFC 4543; it is only authenticated */
2340         areq_ctx->plaintext_authenticate_only = true;
2341
2342         /* No generated IV required */
2343         areq_ctx->backup_iv = req->iv;
2344         areq_ctx->assoclen = req->assoclen;
2345
2346         cc_proc_rfc4_gcm(req);
2347         areq_ctx->is_gcm4543 = true;
2348
2349         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2350         if (rc != -EINPROGRESS && rc != -EBUSY)
2351                 req->iv = areq_ctx->backup_iv;
2352 out:
2353         return rc;
2354 }
2355
2356 /* aead alg */
2357 static struct cc_alg_template aead_algs[] = {
2358         {
2359                 .name = "authenc(hmac(sha1),cbc(aes))",
2360                 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2361                 .blocksize = AES_BLOCK_SIZE,
2362                 .template_aead = {
2363                         .setkey = cc_aead_setkey,
2364                         .setauthsize = cc_aead_setauthsize,
2365                         .encrypt = cc_aead_encrypt,
2366                         .decrypt = cc_aead_decrypt,
2367                         .init = cc_aead_init,
2368                         .exit = cc_aead_exit,
2369                         .ivsize = AES_BLOCK_SIZE,
2370                         .maxauthsize = SHA1_DIGEST_SIZE,
2371                 },
2372                 .cipher_mode = DRV_CIPHER_CBC,
2373                 .flow_mode = S_DIN_to_AES,
2374                 .auth_mode = DRV_HASH_SHA1,
2375                 .min_hw_rev = CC_HW_REV_630,
2376                 .std_body = CC_STD_NIST,
2377         },
2378         {
2379                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2380                 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2381                 .blocksize = DES3_EDE_BLOCK_SIZE,
2382                 .template_aead = {
2383                         .setkey = cc_des3_aead_setkey,
2384                         .setauthsize = cc_aead_setauthsize,
2385                         .encrypt = cc_aead_encrypt,
2386                         .decrypt = cc_aead_decrypt,
2387                         .init = cc_aead_init,
2388                         .exit = cc_aead_exit,
2389                         .ivsize = DES3_EDE_BLOCK_SIZE,
2390                         .maxauthsize = SHA1_DIGEST_SIZE,
2391                 },
2392                 .cipher_mode = DRV_CIPHER_CBC,
2393                 .flow_mode = S_DIN_to_DES,
2394                 .auth_mode = DRV_HASH_SHA1,
2395                 .min_hw_rev = CC_HW_REV_630,
2396                 .std_body = CC_STD_NIST,
2397         },
2398         {
2399                 .name = "authenc(hmac(sha256),cbc(aes))",
2400                 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2401                 .blocksize = AES_BLOCK_SIZE,
2402                 .template_aead = {
2403                         .setkey = cc_aead_setkey,
2404                         .setauthsize = cc_aead_setauthsize,
2405                         .encrypt = cc_aead_encrypt,
2406                         .decrypt = cc_aead_decrypt,
2407                         .init = cc_aead_init,
2408                         .exit = cc_aead_exit,
2409                         .ivsize = AES_BLOCK_SIZE,
2410                         .maxauthsize = SHA256_DIGEST_SIZE,
2411                 },
2412                 .cipher_mode = DRV_CIPHER_CBC,
2413                 .flow_mode = S_DIN_to_AES,
2414                 .auth_mode = DRV_HASH_SHA256,
2415                 .min_hw_rev = CC_HW_REV_630,
2416                 .std_body = CC_STD_NIST,
2417         },
2418         {
2419                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2420                 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2421                 .blocksize = DES3_EDE_BLOCK_SIZE,
2422                 .template_aead = {
2423                         .setkey = cc_des3_aead_setkey,
2424                         .setauthsize = cc_aead_setauthsize,
2425                         .encrypt = cc_aead_encrypt,
2426                         .decrypt = cc_aead_decrypt,
2427                         .init = cc_aead_init,
2428                         .exit = cc_aead_exit,
2429                         .ivsize = DES3_EDE_BLOCK_SIZE,
2430                         .maxauthsize = SHA256_DIGEST_SIZE,
2431                 },
2432                 .cipher_mode = DRV_CIPHER_CBC,
2433                 .flow_mode = S_DIN_to_DES,
2434                 .auth_mode = DRV_HASH_SHA256,
2435                 .min_hw_rev = CC_HW_REV_630,
2436                 .std_body = CC_STD_NIST,
2437         },
2438         {
2439                 .name = "authenc(xcbc(aes),cbc(aes))",
2440                 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2441                 .blocksize = AES_BLOCK_SIZE,
2442                 .template_aead = {
2443                         .setkey = cc_aead_setkey,
2444                         .setauthsize = cc_aead_setauthsize,
2445                         .encrypt = cc_aead_encrypt,
2446                         .decrypt = cc_aead_decrypt,
2447                         .init = cc_aead_init,
2448                         .exit = cc_aead_exit,
2449                         .ivsize = AES_BLOCK_SIZE,
2450                         .maxauthsize = AES_BLOCK_SIZE,
2451                 },
2452                 .cipher_mode = DRV_CIPHER_CBC,
2453                 .flow_mode = S_DIN_to_AES,
2454                 .auth_mode = DRV_HASH_XCBC_MAC,
2455                 .min_hw_rev = CC_HW_REV_630,
2456                 .std_body = CC_STD_NIST,
2457         },
2458         {
2459                 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2460                 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2461                 .blocksize = 1,
2462                 .template_aead = {
2463                         .setkey = cc_aead_setkey,
2464                         .setauthsize = cc_aead_setauthsize,
2465                         .encrypt = cc_aead_encrypt,
2466                         .decrypt = cc_aead_decrypt,
2467                         .init = cc_aead_init,
2468                         .exit = cc_aead_exit,
2469                         .ivsize = CTR_RFC3686_IV_SIZE,
2470                         .maxauthsize = SHA1_DIGEST_SIZE,
2471                 },
2472                 .cipher_mode = DRV_CIPHER_CTR,
2473                 .flow_mode = S_DIN_to_AES,
2474                 .auth_mode = DRV_HASH_SHA1,
2475                 .min_hw_rev = CC_HW_REV_630,
2476                 .std_body = CC_STD_NIST,
2477         },
2478         {
2479                 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2480                 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2481                 .blocksize = 1,
2482                 .template_aead = {
2483                         .setkey = cc_aead_setkey,
2484                         .setauthsize = cc_aead_setauthsize,
2485                         .encrypt = cc_aead_encrypt,
2486                         .decrypt = cc_aead_decrypt,
2487                         .init = cc_aead_init,
2488                         .exit = cc_aead_exit,
2489                         .ivsize = CTR_RFC3686_IV_SIZE,
2490                         .maxauthsize = SHA256_DIGEST_SIZE,
2491                 },
2492                 .cipher_mode = DRV_CIPHER_CTR,
2493                 .flow_mode = S_DIN_to_AES,
2494                 .auth_mode = DRV_HASH_SHA256,
2495                 .min_hw_rev = CC_HW_REV_630,
2496                 .std_body = CC_STD_NIST,
2497         },
2498         {
2499                 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2500                 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2501                 .blocksize = 1,
2502                 .template_aead = {
2503                         .setkey = cc_aead_setkey,
2504                         .setauthsize = cc_aead_setauthsize,
2505                         .encrypt = cc_aead_encrypt,
2506                         .decrypt = cc_aead_decrypt,
2507                         .init = cc_aead_init,
2508                         .exit = cc_aead_exit,
2509                         .ivsize = CTR_RFC3686_IV_SIZE,
2510                         .maxauthsize = AES_BLOCK_SIZE,
2511                 },
2512                 .cipher_mode = DRV_CIPHER_CTR,
2513                 .flow_mode = S_DIN_to_AES,
2514                 .auth_mode = DRV_HASH_XCBC_MAC,
2515                 .min_hw_rev = CC_HW_REV_630,
2516                 .std_body = CC_STD_NIST,
2517         },
2518         {
2519                 .name = "ccm(aes)",
2520                 .driver_name = "ccm-aes-ccree",
2521                 .blocksize = 1,
2522                 .template_aead = {
2523                         .setkey = cc_aead_setkey,
2524                         .setauthsize = cc_ccm_setauthsize,
2525                         .encrypt = cc_aead_encrypt,
2526                         .decrypt = cc_aead_decrypt,
2527                         .init = cc_aead_init,
2528                         .exit = cc_aead_exit,
2529                         .ivsize = AES_BLOCK_SIZE,
2530                         .maxauthsize = AES_BLOCK_SIZE,
2531                 },
2532                 .cipher_mode = DRV_CIPHER_CCM,
2533                 .flow_mode = S_DIN_to_AES,
2534                 .auth_mode = DRV_HASH_NULL,
2535                 .min_hw_rev = CC_HW_REV_630,
2536                 .std_body = CC_STD_NIST,
2537         },
2538         {
2539                 .name = "rfc4309(ccm(aes))",
2540                 .driver_name = "rfc4309-ccm-aes-ccree",
2541                 .blocksize = 1,
2542                 .template_aead = {
2543                         .setkey = cc_rfc4309_ccm_setkey,
2544                         .setauthsize = cc_rfc4309_ccm_setauthsize,
2545                         .encrypt = cc_rfc4309_ccm_encrypt,
2546                         .decrypt = cc_rfc4309_ccm_decrypt,
2547                         .init = cc_aead_init,
2548                         .exit = cc_aead_exit,
2549                         .ivsize = CCM_BLOCK_IV_SIZE,
2550                         .maxauthsize = AES_BLOCK_SIZE,
2551                 },
2552                 .cipher_mode = DRV_CIPHER_CCM,
2553                 .flow_mode = S_DIN_to_AES,
2554                 .auth_mode = DRV_HASH_NULL,
2555                 .min_hw_rev = CC_HW_REV_630,
2556                 .std_body = CC_STD_NIST,
2557         },
2558         {
2559                 .name = "gcm(aes)",
2560                 .driver_name = "gcm-aes-ccree",
2561                 .blocksize = 1,
2562                 .template_aead = {
2563                         .setkey = cc_aead_setkey,
2564                         .setauthsize = cc_gcm_setauthsize,
2565                         .encrypt = cc_aead_encrypt,
2566                         .decrypt = cc_aead_decrypt,
2567                         .init = cc_aead_init,
2568                         .exit = cc_aead_exit,
2569                         .ivsize = 12,
2570                         .maxauthsize = AES_BLOCK_SIZE,
2571                 },
2572                 .cipher_mode = DRV_CIPHER_GCTR,
2573                 .flow_mode = S_DIN_to_AES,
2574                 .auth_mode = DRV_HASH_NULL,
2575                 .min_hw_rev = CC_HW_REV_630,
2576                 .std_body = CC_STD_NIST,
2577         },
2578         {
2579                 .name = "rfc4106(gcm(aes))",
2580                 .driver_name = "rfc4106-gcm-aes-ccree",
2581                 .blocksize = 1,
2582                 .template_aead = {
2583                         .setkey = cc_rfc4106_gcm_setkey,
2584                         .setauthsize = cc_rfc4106_gcm_setauthsize,
2585                         .encrypt = cc_rfc4106_gcm_encrypt,
2586                         .decrypt = cc_rfc4106_gcm_decrypt,
2587                         .init = cc_aead_init,
2588                         .exit = cc_aead_exit,
2589                         .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2590                         .maxauthsize = AES_BLOCK_SIZE,
2591                 },
2592                 .cipher_mode = DRV_CIPHER_GCTR,
2593                 .flow_mode = S_DIN_to_AES,
2594                 .auth_mode = DRV_HASH_NULL,
2595                 .min_hw_rev = CC_HW_REV_630,
2596                 .std_body = CC_STD_NIST,
2597         },
2598         {
2599                 .name = "rfc4543(gcm(aes))",
2600                 .driver_name = "rfc4543-gcm-aes-ccree",
2601                 .blocksize = 1,
2602                 .template_aead = {
2603                         .setkey = cc_rfc4543_gcm_setkey,
2604                         .setauthsize = cc_rfc4543_gcm_setauthsize,
2605                         .encrypt = cc_rfc4543_gcm_encrypt,
2606                         .decrypt = cc_rfc4543_gcm_decrypt,
2607                         .init = cc_aead_init,
2608                         .exit = cc_aead_exit,
2609                         .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2610                         .maxauthsize = AES_BLOCK_SIZE,
2611                 },
2612                 .cipher_mode = DRV_CIPHER_GCTR,
2613                 .flow_mode = S_DIN_to_AES,
2614                 .auth_mode = DRV_HASH_NULL,
2615                 .min_hw_rev = CC_HW_REV_630,
2616                 .std_body = CC_STD_NIST,
2617         },
2618 };

/*
 * cc_create_aead_alg() - Instantiate a cc_crypto_alg from a template.
 *
 * Fills in the generic crypto_alg fields of the template's aead_alg and
 * copies it by value, so every registered algorithm owns an independent
 * struct aead_alg while sharing the common init/exit hooks. The caller
 * owns the returned allocation.
 */
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &tmpl->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;

	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->init = cc_aead_init;
	alg->exit = cc_aead_exit;

	t_alg->aead_alg = *alg;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->auth_mode = tmpl->auth_mode;

	return t_alg;
}
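
/*
 * For orientation: once registered, an entry built from the table above
 * is visible through /proc/crypto, roughly as follows (fields abridged,
 * values taken from the "gcm(aes)" template; the priority value comes
 * from CC_CRA_PRIO):
 *
 *	name         : gcm(aes)
 *	driver       : gcm-aes-ccree
 *	priority     : <CC_CRA_PRIO>
 *	type         : aead
 *	async        : yes
 *	blocksize    : 1
 *	ivsize       : 12
 *	maxauthsize  : 16
 */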

/*
 * cc_aead_free() - Unregister every AEAD algorithm registered by
 * cc_aead_alloc() and release the handle. A missing handle is a no-op,
 * so this is also safe as the unwind path for a partially completed
 * cc_aead_alloc().
 */
int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle = drvdata->aead_handle;

	if (aead_handle) {
		/*
		 * Remove registered algs; the _safe iterator allows
		 * freeing entries while walking the list.
		 */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(aead_handle);
		drvdata->aead_handle = NULL;
	}

	return 0;
}

/*
 * cc_aead_alloc() - Reserve the shared SRAM workspace and register every
 * AEAD algorithm supported by the detected hardware revision and the
 * enabled standards bodies. Any failure unwinds the registrations done
 * so far via cc_aead_free().
 */
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);

	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	if (!aead_handle) {
		rc = -ENOMEM;
		goto fail0;
	}

	INIT_LIST_HEAD(&aead_handle->aead_list);
	drvdata->aead_handle = aead_handle;

	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
							 MAX_HMAC_DIGEST_SIZE);

	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail1;
	}

	/*
	 * Register algs with the Linux crypto API, skipping any that the
	 * HW revision or the enabled standards bodies do not support.
	 */
	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & aead_algs[alg].std_body))
			continue;

		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				aead_algs[alg].driver_name);
			goto fail1;
		}
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->aead_alg.base.cra_driver_name);
			goto fail2;
		}

		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		dev_dbg(dev, "Registered %s\n",
			t_alg->aead_alg.base.cra_driver_name);
	}

	return 0;

fail2:
	kfree(t_alg);
fail1:
	cc_aead_free(drvdata);
fail0:
	return rc;
}
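
/*
 * A sketch of the expected alloc/free pairing at the driver level; the
 * real call sites live in the probe/remove paths of cc_driver.c, and the
 * "aead_err" label is hypothetical, shown only for orientation:
 *
 *	rc = cc_aead_alloc(drvdata);
 *	if (rc)
 *		goto aead_err;
 *	...
 *	cc_aead_free(drvdata);
 */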