crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN
drivers/crypto/ccree/cc_aead.c
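
Context for the change: CRYPTO_TFM_RES_BAD_KEY_LEN was a tfm result flag that
->setkey() implementations raised before returning -EINVAL, to describe why a
key was rejected. Since callers did not actually consult it, the flag was
removed tree-wide and the error paths in this file now simply return the error
code. A minimal before/after sketch of the removed pattern (a hedged
reconstruction, not a quote of this diff):

        /* before: flag the bad key length, then fail */
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;

        /* after: just fail */
        return -EINVAL;
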
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

struct cc_aead_handle {
        cc_sram_addr_t sram_workspace_addr;
        struct list_head aead_list;
};

struct cc_hmac_s {
        u8 *padded_authkey;
        u8 *ipad_opad; /* IPAD, OPAD */
        dma_addr_t padded_authkey_dma_addr;
        dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
        u8 *xcbc_keys; /* K1,K2,K3 */
        dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
        struct cc_drvdata *drvdata;
        u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
        u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct cc_hmac_s hmac;
                struct cc_xcbc_s xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
        unsigned int hash_len;
        enum drv_cipher_mode cipher_mode;
        enum cc_flow_mode flow_mode;
        enum drv_hash_mode auth_mode;
};

static inline bool valid_assoclen(struct aead_request *req)
{
        return ((req->assoclen == 16) || (req->assoclen == 20));
}

static void cc_aead_exit(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
                crypto_tfm_alg_name(&tfm->base));

        /* Unmap enckey buffer */
        if (ctx->enckey) {
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
                                  ctx->enckey_dma_addr);
                dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
                        &ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

                if (xcbc->xcbc_keys) {
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                          xcbc->xcbc_keys,
                                          xcbc->xcbc_keys_dma_addr);
                }
                dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
                        &xcbc->xcbc_keys_dma_addr);
                xcbc->xcbc_keys_dma_addr = 0;
                xcbc->xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

                if (hmac->ipad_opad) {
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                          hmac->ipad_opad,
                                          hmac->ipad_opad_dma_addr);
                        dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
                                &hmac->ipad_opad_dma_addr);
                        hmac->ipad_opad_dma_addr = 0;
                        hmac->ipad_opad = NULL;
                }
                if (hmac->padded_authkey) {
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                          hmac->padded_authkey,
                                          hmac->padded_authkey_dma_addr);
                        dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
                                &hmac->padded_authkey_dma_addr);
                        hmac->padded_authkey_dma_addr = 0;
                        hmac->padded_authkey = NULL;
                }
        }
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_alg *cc_alg =
                        container_of(alg, struct cc_crypto_alg, aead_alg);
        struct device *dev = drvdata_to_dev(cc_alg->drvdata);

        dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
                crypto_tfm_alg_name(&tfm->base));

        /* Initialize modes in instance */
        ctx->cipher_mode = cc_alg->cipher_mode;
        ctx->flow_mode = cc_alg->flow_mode;
        ctx->auth_mode = cc_alg->auth_mode;
        ctx->drvdata = cc_alg->drvdata;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                                         &ctx->enckey_dma_addr, GFP_KERNEL);
        if (!ctx->enckey) {
                dev_err(dev, "Failed allocating key buffer\n");
                goto init_failed;
        }
        dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
                ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
                const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
                                                     &xcbc->xcbc_keys_dma_addr,
                                                     GFP_KERNEL);
                if (!xcbc->xcbc_keys) {
                        dev_err(dev, "Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
                const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
                dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

                /* Allocate dma-coherent buffer for IPAD + OPAD */
                hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
                                                     &hmac->ipad_opad_dma_addr,
                                                     GFP_KERNEL);

                if (!hmac->ipad_opad) {
                        dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }

                dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
                        hmac->ipad_opad);

                hmac->padded_authkey = dma_alloc_coherent(dev,
                                                          MAX_HMAC_BLOCK_SIZE,
                                                          pkey_dma,
                                                          GFP_KERNEL);

                if (!hmac->padded_authkey) {
                        dev_err(dev, "failed to allocate padded_authkey\n");
                        goto init_failed;
                }
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }
        ctx->hash_len = cc_get_aead_hash_len(tfm);

        return 0;

init_failed:
        cc_aead_exit(tfm);
        return -ENOMEM;
}

static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
        struct aead_request *areq = (struct aead_request *)cc_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        /* BACKLOG notification */
        if (err == -EINPROGRESS)
                goto done;

        cc_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (err)
                goto done;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                           ctx->authsize) != 0) {
                        dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* On payload authentication failure, the decrypted
                         * message must not be revealed --> zero its memory.
                         */
                        sg_zero_buffer(areq->dst, sg_nents(areq->dst),
                                       areq->cryptlen, 0);
                        err = -EBADMSG;
                }
        /*ENCRYPT*/
        } else if (areq_ctx->is_icv_fragmented) {
                u32 skip = areq->cryptlen + areq_ctx->dst_offset;

                cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
                                   skip, (skip + ctx->authsize),
                                   CC_SG_FROM_BUF);
        }
done:
        aead_request_complete(areq, err);
}

static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        /* Load the AES key */
        hw_desc_init(&desc[0]);
        /* We use the same buffer for the source/user key as for the
         * output keys, because after this key loading it is not
         * needed anymore.
         */
        set_din_type(&desc[0], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
                     NS_BIT);
        set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[0], ctx->auth_keylen);
        set_flow_mode(&desc[0], S_DIN_to_AES);
        set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

        hw_desc_init(&desc[1]);
        set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[1], DIN_AES_DOUT);
        set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[2]);
        set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[2], DIN_AES_DOUT);
        set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[3]);
        set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[3], DIN_AES_DOUT);
        set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}

static unsigned int hmac_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

        unsigned int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_sram(&desc[idx],
                             cc_larval_digest_addr(ctx->drvdata,
                                                   ctx->auth_mode),
                             digest_size);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_const(&desc[idx], 0, ctx->hash_len);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             hmac->padded_authkey_dma_addr,
                             SHA256_BLOCK_SIZE, NS_BIT);
                set_cipher_mode(&desc[idx], hash_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_dout_dlli(&desc[idx],
                              (hmac->ipad_opad_dma_addr + digest_ofs),
                              digest_size, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}
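
/*
 * Aside (hedged sketch, not part of this driver): the descriptor loop in
 * hmac_setkey() above offloads the standard HMAC precompute - hashing one
 * block of (padded_key XOR ipad) and one of (padded_key XOR opad) and
 * saving the two intermediate digests. In plain software the XOR step
 * looks like this; hmac_derive_pads() is a hypothetical helper name.
 */
static void __maybe_unused hmac_derive_pads(const u8 *padded_authkey,
                                            unsigned int blocksize,
                                            u8 *ipad_block, u8 *opad_block)
{
        unsigned int i;

        for (i = 0; i < blocksize; i++) {
                ipad_block[i] = padded_authkey[i] ^ 0x36; /* HMAC_IPAD_CONST byte */
                opad_block[i] = padded_authkey[i] ^ 0x5c; /* HMAC_OPAD_CONST byte */
        }
}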

static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if (ctx->auth_keylen != AES_KEYSIZE_128 &&
                    ctx->auth_keylen != AES_KEYSIZE_192 &&
                    ctx->auth_keylen != AES_KEYSIZE_256)
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (ctx->flow_mode == S_DIN_to_DES) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if (ctx->enc_keylen != AES_KEYSIZE_128 &&
                    ctx->enc_keylen != AES_KEYSIZE_192 &&
                    ctx->enc_keylen != AES_KEYSIZE_256) {
                        dev_err(dev, "Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All key-size checks passed */
}

/* This function prepares the user key so it can be passed to the HMAC
 * processing (copied to an internal buffer, or hashed if the key is
 * longer than the block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
                                 unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
        struct cc_crypto_req cc_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        u8 *key = NULL;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (keylen != 0) {

                key = kmemdup(authkey, keylen, GFP_KERNEL);
                if (!key)
                        return -ENOMEM;

                key_dma_addr = dma_map_single(dev, (void *)key, keylen,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        kzfree(key);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_sram(&desc[idx], larval_addr, digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_const(&desc[idx], 0, ctx->hash_len);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     key_dma_addr, keylen, NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
                                      digestsize), (blocksize - digestsize),
                                      NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
                                     keylen, NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (padded_authkey_dma_addr +
                                               keylen),
                                              (blocksize - keylen), NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, (blocksize - keylen));
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
        if (rc)
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);

        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

        kzfree(key);

        return rc;
}
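
/*
 * Aside (hedged sketch, not part of this driver): the descriptor sequences
 * in cc_get_plain_hmac_key() above offload the RFC 2104 key preparation.
 * A plain-software equivalent for the SHA-256 case might look like this;
 * prepare_hmac_key_sw() is a hypothetical name, and the one-shot sha256()
 * helper from <crypto/sha2.h> is assumed to be available.
 */
static void __maybe_unused prepare_hmac_key_sw(const u8 *authkey,
                                               unsigned int keylen,
                                               u8 *padded_authkey)
{
        /* RFC 2104: keys longer than the block size are hashed first */
        if (keylen > SHA256_BLOCK_SIZE) {
                sha256(authkey, keylen, padded_authkey);
                keylen = SHA256_DIGEST_SIZE;
        } else {
                memcpy(padded_authkey, authkey, keylen);
        }
        /* ...then zero-padded out to a full block */
        memset(padded_authkey + keylen, 0, SHA256_BLOCK_SIZE - keylen);
}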

static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_req cc_req = {};
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        unsigned int seq_len = 0;
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        const u8 *enckey, *authkey;
        int rc;

        dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        /* STAT_PHASE_0: Init and sanity checks */

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                struct crypto_authenc_keys keys;

                rc = crypto_authenc_extractkeys(&keys, key, keylen);
                if (rc)
                        return rc;
                enckey = keys.enckey;
                authkey = keys.authkey;
                ctx->enc_keylen = keys.enckeylen;
                ctx->auth_keylen = keys.authkeylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* the nonce is stored in the last 4 bytes of the key */
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                return -EINVAL;
                        /* Copy nonce from last 4 bytes in CTR key to
                         *  first 4 bytes in CTR IV
                         */
                        memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
                               CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                enckey = key;
                authkey = NULL;
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (rc)
                return rc;

        /* STAT_PHASE_1: Copy key to ctx */

        /* Get key material */
        memcpy(ctx->enckey, enckey, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
                       ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
                if (rc)
                        return rc;
        }

        /* STAT_PHASE_2: Create sequence */

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                return -ENOTSUPP;
        }

        /* STAT_PHASE_3: Submit sequence to HW */

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
                rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        return rc;
                }
        }

        /* Update STAT_PHASE_3 */
        return rc;
}
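
/*
 * Aside (hedged sketch, not part of this driver): the key blob that
 * crypto_authenc_extractkeys() parses in cc_aead_setkey() above is an
 * rtattr header (CRYPTO_AUTHENC_KEYA_PARAM) carrying the big-endian enc
 * key length, followed by the auth key bytes and then the enc key bytes.
 * A caller could pack one like this; pack_authenc_key() is a hypothetical
 * helper name.
 */
static int __maybe_unused pack_authenc_key(u8 *blob, unsigned int bloblen,
                                           const u8 *authkey,
                                           unsigned int authkeylen,
                                           const u8 *enckey,
                                           unsigned int enckeylen)
{
        struct rtattr *rta = (struct rtattr *)blob;
        struct crypto_authenc_key_param *param;

        if (bloblen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
                return -EINVAL;

        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        param->enckeylen = cpu_to_be32(enckeylen);

        memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
        memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
               enckey, enckeylen);
        return 0;
}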

static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                return err;

        err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
              cc_aead_setkey(aead, key, keylen);

        memzero_explicit(&keys, sizeof(keys));
        return err;
}

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        return cc_aead_setkey(tfm, key, keylen);
}
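
/*
 * Aside (hedged usage sketch): for rfc4309(ccm(aes)) the last 3 key bytes
 * are the nonce salt that cc_rfc4309_ccm_setkey() strips above, so e.g.
 * AES-128 takes a 19-byte key blob. rfc4309_setkey_example() is a
 * hypothetical caller; get_random_bytes() is from <linux/random.h>.
 */
static int __maybe_unused rfc4309_setkey_example(struct crypto_aead *tfm)
{
        u8 key[AES_KEYSIZE_128 + 3]; /* AES-128 key followed by 3-byte salt */

        get_random_bytes(key, sizeof(key));
        return crypto_aead_setkey(tfm, key, sizeof(key));
}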

static int cc_aead_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        /* Unsupported auth. sizes */
        if (authsize == 0 ||
            authsize > crypto_aead_maxauthsize(authenc)) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        dev_dbg(dev, "authlen=%d\n", ctx->authsize);

        return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                              struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (assoc_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
                             areq_ctx->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "ASSOC buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
                             areq_ctx->assoc.mlli_nents, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_authen_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size, int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_sgl : areq_ctx->src_sgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_offset : areq_ctx->src_offset;
                dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(cipher) + offset),
                             areq_ctx->cryptlen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (the default):
                 * assoc. + iv + data compacted into one table;
                 * if assoclen is zero, only the IV is processed
                 */
                cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
                u32 mlli_nents = areq_ctx->assoc.mlli_nents;

                if (areq_ctx->is_single_pass) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
                             NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (areq_ctx->cryptlen == 0)
                return; /*null processing*/

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(areq_ctx->src_sgl) +
                              areq_ctx->src_offset), areq_ctx->cryptlen,
                              NS_BIT);
                set_dout_dlli(&desc[idx],
                              (sg_dma_address(areq_ctx->dst_sgl) +
                               areq_ctx->dst_offset),
                              areq_ctx->cryptlen, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
                             areq_ctx->src.mlli_nents, NS_BIT);
                set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
                              areq_ctx->dst.mlli_nents, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_digest_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                hw_desc_init(&desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
                              NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_aes_not_hash_mode(&desc[idx]);
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        } else { /*Decrypt*/
                /* Get ICV out from hardware */
                hw_desc_init(&desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
                              ctx->authsize, NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                set_cipher_config0(&desc[idx],
                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        set_aes_not_hash_mode(&desc[idx]);
                } else {
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
                               struct cc_hw_desc desc[],
                               unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
                     hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        else
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
                              ctx->enc_keylen), NS_BIT);
                set_key_size_aes(&desc[idx], ctx->enc_keylen);
        } else {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ctx->enc_keylen, NS_BIT);
                set_key_size_des(&desc[idx], ctx->enc_keylen);
        }
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
                           unsigned int *seq_size, unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /*null processing*/

        cc_set_cipher_desc(req, desc, &idx);
        cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for DMA to finish writing the ciphertext */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
                     NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                     AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        *seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;

        /* Hash associated data */
        if (areq_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      ctx->hash_len);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_cipher_do(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        hw_desc_init(&desc[idx]);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      digest_size);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        set_cipher_mode(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                     digest_size, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        hw_desc_init(&desc[idx]);
        set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
                     digest_size);
        set_flow_mode(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

static void cc_mlli_to_sram(struct aead_request *req,
                            struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
            req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
            !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
                dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
                        (unsigned int)ctx->drvdata->mlli_sram_addr,
                        req_ctx->mlli_params.mlli_len);
                /* Copy MLLI table host-to-sram */
                hw_desc_init(&desc[*seq_size]);
                set_din_type(&desc[*seq_size], DMA_DLLI,
                             req_ctx->mlli_params.mlli_dma_addr,
                             req_ctx->mlli_params.mlli_len, NS_BIT);
                set_dout_sram(&desc[*seq_size],
                              ctx->drvdata->mlli_sram_addr,
                              req_ctx->mlli_params.mlli_len);
                set_flow_mode(&desc[*seq_size], BYPASS);
                (*seq_size)++;
        }
}

static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
                                          enum cc_flow_mode setup_flow_mode,
                                          bool is_single_pass)
{
        enum cc_flow_mode data_flow_mode;

        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_to_HASH_and_DOUT : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_to_HASH_and_DOUT : DIN_DES_DOUT;
        } else { /* Decrypt */
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_and_HASH : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_and_HASH : DIN_DES_DOUT;
        }

        return data_flow_mode;
}

static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
                            unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int data_flow_mode =
                cc_get_data_flow(direct, ctx->flow_mode,
                                 req_ctx->is_single_pass);

        if (req_ctx->is_single_pass) {
                /**
                 * Single-pass flow
                 */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_set_cipher_desc(req, desc, seq_size);
                cc_proc_header_desc(req, desc, seq_size);
                cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);
                return;
        }

        /**
         * Double-pass flow
         * Fallback for unsupported single-pass modes,
         * i.e. when the assoc. data length is not a word multiple
         */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* encrypt first.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* authenc after.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);

        } else { /*DECRYPT*/
                /* authenc first.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                /* decrypt after.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* reading the digest result with the completion bit set
                 * must come after the cipher operation
                 */
                cc_proc_digest_desc(req, desc, seq_size);
        }
}
1264
1265 static void
1266 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1267                 unsigned int *seq_size)
1268 {
1269         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1270         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1271         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1272         int direct = req_ctx->gen_ctx.op_type;
1273         unsigned int data_flow_mode =
1274                 cc_get_data_flow(direct, ctx->flow_mode,
1275                                  req_ctx->is_single_pass);
1276
1277         if (req_ctx->is_single_pass) {
1278                 /*
1279                  * Single-pass flow
1280                  */
1281                 cc_set_xcbc_desc(req, desc, seq_size);
1282                 cc_set_cipher_desc(req, desc, seq_size);
1283                 cc_proc_header_desc(req, desc, seq_size);
1284                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1285                 cc_proc_digest_desc(req, desc, seq_size);
1286                 return;
1287         }
1288
1289         /*
1290          * Double-pass flow
1291          * Fallback for unsupported single-pass modes, e.g. associated
1292          * data whose length is not a multiple of a 32-bit word.
1293          */
1294         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1295                 /* encrypt first.. */
1296                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1297                 /* authenc after.. */
1298                 cc_set_xcbc_desc(req, desc, seq_size);
1299                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1300                 cc_proc_digest_desc(req, desc, seq_size);
1301         } else { /* Decrypt */
1302                 /* authenc first.. */
1303                 cc_set_xcbc_desc(req, desc, seq_size);
1304                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1305                 /* decrypt after..*/
1306                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1307                 /* Read the digest result and set the completion bit;
1308                  * this must come after the cipher operation.
1309                  */
1310                 cc_proc_digest_desc(req, desc, seq_size);
1311         }
1312 }
1313
1314 static int validate_data_size(struct cc_aead_ctx *ctx,
1315                               enum drv_crypto_direction direct,
1316                               struct aead_request *req)
1317 {
1318         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1319         struct device *dev = drvdata_to_dev(ctx->drvdata);
1320         unsigned int assoclen = areq_ctx->assoclen;
1321         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1322                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1323
1324         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1325             req->cryptlen < ctx->authsize)
1326                 goto data_size_err;
1327
1328         areq_ctx->is_single_pass = true; /* default to the fast, single-pass flow */
1329
1330         switch (ctx->flow_mode) {
1331         case S_DIN_to_AES:
1332                 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1333                     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1334                         goto data_size_err;
1335                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1336                         break;
1337                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1338                         if (areq_ctx->plaintext_authenticate_only)
1339                                 areq_ctx->is_single_pass = false;
1340                         break;
1341                 }
1342
1343                 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1344                         areq_ctx->is_single_pass = false;
1345
1346                 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1347                     !IS_ALIGNED(cipherlen, sizeof(u32)))
1348                         areq_ctx->is_single_pass = false;
1349
1350                 break;
1351         case S_DIN_to_DES:
1352                 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1353                         goto data_size_err;
1354                 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1355                         areq_ctx->is_single_pass = false;
1356                 break;
1357         default:
1358                 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1359                 goto data_size_err;
1360         }
1361
1362         return 0;
1363
1364 data_size_err:
1365         return -EINVAL;
1366 }
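
/*
 * For example, an rfc3686(ctr(aes)) request with assoclen = 13 is
 * forced onto the double-pass flow above (13 is not a multiple of 4),
 * while word-aligned assoclen and cryptlen keep the fast single-pass
 * flow.
 */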
1367
1368 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1369 {
1370         unsigned int len = 0;
1371
1372         if (header_size == 0)
1373                 return 0;
1374
1375         if (header_size < ((1UL << 16) - (1UL << 8))) {
1376                 len = 2;
1377
1378                 pa0_buff[0] = (header_size >> 8) & 0xFF;
1379                 pa0_buff[1] = header_size & 0xFF;
1380         } else {
1381                 len = 6;
1382
1383                 pa0_buff[0] = 0xFF;
1384                 pa0_buff[1] = 0xFE;
1385                 pa0_buff[2] = (header_size >> 24) & 0xFF;
1386                 pa0_buff[3] = (header_size >> 16) & 0xFF;
1387                 pa0_buff[4] = (header_size >> 8) & 0xFF;
1388                 pa0_buff[5] = header_size & 0xFF;
1389         }
1390
1391         return len;
1392 }
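
/*
 * Examples of the RFC 3610 a0 encoding produced above: for
 * header_size = 24 the short form applies (24 < 2^16 - 2^8), so the
 * buffer receives 0x00 0x18 and the function returns 2; for
 * header_size = 65536 the long form applies and the buffer receives
 * 0xff 0xfe 0x00 0x01 0x00 0x00, returning 6.
 */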
1393
1394 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1395 {
1396         __be32 data;
1397
1398         memset(block, 0, csize);
1399         block += csize;
1400
1401         if (csize >= 4)
1402                 csize = 4;
1403         else if (msglen > (1 << (8 * csize)))
1404                 return -EOVERFLOW;
1405
1406         data = cpu_to_be32(msglen);
1407         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1408
1409         return 0;
1410 }
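
/*
 * For instance, set_msg_len(block, 0x012345, 3) zeroes the three
 * counter octets and then writes 0x01 0x23 0x45 (big-endian) into
 * them; a csize larger than 4 is clamped to the 4 significant bytes
 * of the 32-bit length.
 */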
1411
1412 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1413                   unsigned int *seq_size)
1414 {
1415         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1416         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1417         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1418         unsigned int idx = *seq_size;
1419         unsigned int cipher_flow_mode;
1420         dma_addr_t mac_result;
1421
1422         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1423                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1424                 mac_result = req_ctx->mac_buf_dma_addr;
1425         } else { /* Encrypt */
1426                 cipher_flow_mode = AES_and_HASH;
1427                 mac_result = req_ctx->icv_dma_addr;
1428         }
1429
1430         /* load key */
1431         hw_desc_init(&desc[idx]);
1432         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1433         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1434                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1435                       ctx->enc_keylen), NS_BIT);
1436         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1437         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1438         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1439         set_flow_mode(&desc[idx], S_DIN_to_AES);
1440         idx++;
1441
1442         /* load ctr state */
1443         hw_desc_init(&desc[idx]);
1444         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1445         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1446         set_din_type(&desc[idx], DMA_DLLI,
1447                      req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1448         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1449         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1450         set_flow_mode(&desc[idx], S_DIN_to_AES);
1451         idx++;
1452
1453         /* load MAC key */
1454         hw_desc_init(&desc[idx]);
1455         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1456         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1457                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1458                       ctx->enc_keylen), NS_BIT);
1459         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1460         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1461         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1462         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1463         set_aes_not_hash_mode(&desc[idx]);
1464         idx++;
1465
1466         /* load MAC state */
1467         hw_desc_init(&desc[idx]);
1468         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1469         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1470         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1471                      AES_BLOCK_SIZE, NS_BIT);
1472         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1473         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1474         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1475         set_aes_not_hash_mode(&desc[idx]);
1476         idx++;
1477
1478         /* process assoc data */
1479         if (req_ctx->assoclen > 0) {
1480                 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1481         } else {
1482                 hw_desc_init(&desc[idx]);
1483                 set_din_type(&desc[idx], DMA_DLLI,
1484                              sg_dma_address(&req_ctx->ccm_adata_sg),
1485                              AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1486                 set_flow_mode(&desc[idx], DIN_HASH);
1487                 idx++;
1488         }
1489
1490         /* process the cipher */
1491         if (req_ctx->cryptlen)
1492                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1493
1494         /* Read temporal MAC */
1495         hw_desc_init(&desc[idx]);
1496         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1497         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1498                       NS_BIT, 0);
1499         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1500         set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1501         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1502         set_aes_not_hash_mode(&desc[idx]);
1503         idx++;
1504
1505         /* load AES-CTR state (for last MAC calculation)*/
1506         hw_desc_init(&desc[idx]);
1507         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1508         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1509         set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1510                      AES_BLOCK_SIZE, NS_BIT);
1511         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1512         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1513         set_flow_mode(&desc[idx], S_DIN_to_AES);
1514         idx++;
1515
1516         hw_desc_init(&desc[idx]);
1517         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1518         set_dout_no_dma(&desc[idx], 0, 0, 1);
1519         idx++;
1520
1521         /* encrypt the "T" value and store MAC in mac_state */
1522         hw_desc_init(&desc[idx]);
1523         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1524                      ctx->authsize, NS_BIT);
1525         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1526         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1527         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1528         idx++;
1529
1530         *seq_size = idx;
1531         return 0;
1532 }
1533
1534 static int config_ccm_adata(struct aead_request *req)
1535 {
1536         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1537         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1538         struct device *dev = drvdata_to_dev(ctx->drvdata);
1539         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1541         unsigned int lp = req->iv[0];
1542         /* Note: The code assumes that req->iv[0] already contains the
1543          * value of L' of RFC 3610.
1544          */
1545         unsigned int l = lp + 1;  /* This is L of RFC 3610 (L = L' + 1). */
1546         unsigned int m = ctx->authsize;  /* This is M of RFC 3610 (tag length). */
1547         u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1548         u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1549         u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1550         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1551                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1552                                 req->cryptlen :
1553                                 (req->cryptlen - ctx->authsize);
1554         int rc;
1555
1556         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1557         memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1558
1559         /* taken from crypto/ccm.c */
1560         /* 2 <= L <= 8, so 1 <= L' <= 7. */
1561         if (l < 2 || l > 8) {
1562                 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1563                 return -EINVAL;
1564         }
1565         memcpy(b0, req->iv, AES_BLOCK_SIZE);
1566
1567         /* format control info per RFC 3610 and
1568          * NIST Special Publication 800-38C
1569          */
1570         *b0 |= (8 * ((m - 2) / 2));  /* M' = (M - 2) / 2 in bits 3..5 */
1571         if (req_ctx->assoclen > 0)
1572                 *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1573
1574         rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m). */
1575         if (rc) {
1576                 dev_err(dev, "message len overflow detected\n");
1577                 return rc;
1578         }
1579         /* END of "taken from crypto/ccm.c" */
1580
1581         /* l(a) - size of associated data. */
1582         req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1583
1584         memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1585         req->iv[15] = 1;  /* payload counter blocks start at 1 */
1586
1587         memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1588         ctr_count_0[15] = 0;  /* counter block 0 encrypts the MAC */
1589
1590         return 0;
1591 }
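
/*
 * Worked example of the B0 flags octet built above: with Adata
 * present, an 8-byte tag (M = 8) and a 4-byte length field (L = 4,
 * so iv[0] = L' = 3), the flags become
 * 0x40 | (((8 - 2) / 2) << 3) | 3 = 0x5b, matching the
 * Adata | M' | L' layout of RFC 3610.
 */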
1592
1593 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1594 {
1595         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1596         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1597         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1598
1599         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1600
1601         /* RFC 4309 always uses a 4-byte message length field
1602          * (at most 2^32 - 1 bytes), so L = 4 and L' = L - 1 = 3.
1603          */
1604         areq_ctx->ctr_iv[0] = 3;
1605
1606         /* Build the 11-byte nonce + IV portion defined by RFC 4309:
1607          * a 3-byte salt followed by the 8-byte per-packet IV.
1608          */
1609         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1610                CCM_BLOCK_NONCE_SIZE);
1611         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1612                CCM_BLOCK_IV_SIZE);
1613         req->iv = areq_ctx->ctr_iv;
1614         areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
1615 }
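
/*
 * The counter IV assembled above follows the RFC 4309 nonce format:
 * byte 0 holds L' = 3, bytes 1-3 the salt carried in the key,
 * bytes 4-11 the per-packet IV, and bytes 12-15 the block counter.
 */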
1616
1617 static void cc_set_ghash_desc(struct aead_request *req,
1618                               struct cc_hw_desc desc[], unsigned int *seq_size)
1619 {
1620         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1621         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1622         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1623         unsigned int idx = *seq_size;
1624
1625         /* Load the key into the AES engine */
1626         hw_desc_init(&desc[idx]);
1627         set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1628         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1629         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1630                      ctx->enc_keylen, NS_BIT);
1631         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1632         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1633         set_flow_mode(&desc[idx], S_DIN_to_AES);
1634         idx++;
1635
1636         /* Encrypt one zero block to derive the GHASH subkey H = E_K(0) */
1637         hw_desc_init(&desc[idx]);
1638         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1639         set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1640                       NS_BIT, 0);
1641         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1642         idx++;
1643
1644         /* Memory Barrier */
1645         hw_desc_init(&desc[idx]);
1646         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1647         set_dout_no_dma(&desc[idx], 0, 0, 1);
1648         idx++;
1649
1650         /* Load GHASH subkey */
1651         hw_desc_init(&desc[idx]);
1652         set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1653                      AES_BLOCK_SIZE, NS_BIT);
1654         set_dout_no_dma(&desc[idx], 0, 0, 1);
1655         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1656         set_aes_not_hash_mode(&desc[idx]);
1657         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1658         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1659         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1660         idx++;
1661
1662         /* Configure the hash engine to work with GHASH.
1663          * Since it was not possible to extend the HASH submodes to add
1664          * GHASH, the following command is necessary in order to select
1665          * GHASH (according to the HW designers).
1666          */
1667         hw_desc_init(&desc[idx]);
1668         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1669         set_dout_no_dma(&desc[idx], 0, 0, 1);
1670         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1671         set_aes_not_hash_mode(&desc[idx]);
1672         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1673         set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
1674         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1675         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1676         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1677         idx++;
1678
1679         /* Load the GHASH initial state, which is all zeroes (every hash
1680          * operation starts from an initial state).
1681          */
1682         hw_desc_init(&desc[idx]);
1683         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1684         set_dout_no_dma(&desc[idx], 0, 0, 1);
1685         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1686         set_aes_not_hash_mode(&desc[idx]);
1687         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1688         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1689         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1690         idx++;
1691
1692         *seq_size = idx;
1693 }
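
/*
 * Note: GHASH (NIST SP 800-38D) is polynomial hashing over GF(2^128)
 * keyed by H = E_K(0^128); the zero-block encryption above is exactly
 * that subkey derivation, parked in hkey before being loaded into the
 * hash engine.
 */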
1694
1695 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1696                              unsigned int *seq_size)
1697 {
1698         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1699         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1700         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1701         unsigned int idx = *seq_size;
1702
1703         /* Load the key into the AES engine */
1704         hw_desc_init(&desc[idx]);
1705         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1706         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1707         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1708                      ctx->enc_keylen, NS_BIT);
1709         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1710         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1711         set_flow_mode(&desc[idx], S_DIN_to_AES);
1712         idx++;
1713
1714         if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1715                 /* Load the initial AES/CTR counter value for the payload (IV || 2) */
1716                 hw_desc_init(&desc[idx]);
1717                 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1718                 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1719                 set_din_type(&desc[idx], DMA_DLLI,
1720                              req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1721                              NS_BIT);
1722                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1723                 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1724                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1725                 idx++;
1726         }
1727
1728         *seq_size = idx;
1729 }
1730
1731 static void cc_proc_gcm_result(struct aead_request *req,
1732                                struct cc_hw_desc desc[],
1733                                unsigned int *seq_size)
1734 {
1735         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1736         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1737         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1738         dma_addr_t mac_result;
1739         unsigned int idx = *seq_size;
1740
1741         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1742                 mac_result = req_ctx->mac_buf_dma_addr;
1743         } else { /* Encrypt */
1744                 mac_result = req_ctx->icv_dma_addr;
1745         }
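
        /* Per NIST SP 800-38D the tag is T = MSB_t(GCTR_K(J0, S)), with
         * S = GHASH_H(A || C || len(A) || len(C)); the descriptors below
         * finish S, reload J0 (gcm_iv_inc1) and encrypt S into mac_result.
         */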
1746
1747         /* process(ghash) gcm_block_len */
1748         hw_desc_init(&desc[idx]);
1749         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1750                      AES_BLOCK_SIZE, NS_BIT);
1751         set_flow_mode(&desc[idx], DIN_HASH);
1752         idx++;
1753
1754         /* Store GHASH state after GHASH(assoc. data || ciphertext || len block) */
1755         hw_desc_init(&desc[idx]);
1756         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1757         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1758         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1759                       NS_BIT, 0);
1760         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1761         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1762         set_aes_not_hash_mode(&desc[idx]);
1763
1764         idx++;
1765
1766         /* Load the initial AES/CTR counter value J0 (IV || 1) for the tag */
1767         hw_desc_init(&desc[idx]);
1768         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1769         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1770         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1771                      AES_BLOCK_SIZE, NS_BIT);
1772         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1773         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1774         set_flow_mode(&desc[idx], S_DIN_to_AES);
1775         idx++;
1776
1777         /* Memory Barrier */
1778         hw_desc_init(&desc[idx]);
1779         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1780         set_dout_no_dma(&desc[idx], 0, 0, 1);
1781         idx++;
1782
1783         /* Run GCTR over the stored GHASH value and store the MAC in mac_state */
1784         hw_desc_init(&desc[idx]);
1785         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1786         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1787                      AES_BLOCK_SIZE, NS_BIT);
1788         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1789         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1790         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1791         idx++;
1792
1793         *seq_size = idx;
1794 }
1795
1796 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1797                   unsigned int *seq_size)
1798 {
1799         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1800         unsigned int cipher_flow_mode;
1801
1802         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1803                 cipher_flow_mode = AES_and_HASH;
1804         } else { /* Encrypt */
1805                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1806         }
1807
1808         /* In RFC 4543 there is no data to encrypt; just copy src to dst. */
1809         if (req_ctx->plaintext_authenticate_only) {
1810                 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1811                 cc_set_ghash_desc(req, desc, seq_size);
1812                 /* process(ghash) assoc data */
1813                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1814                 cc_set_gctr_desc(req, desc, seq_size);
1815                 cc_proc_gcm_result(req, desc, seq_size);
1816                 return 0;
1817         }
1818
1819         /* For GCM and RFC 4106. */
1820         cc_set_ghash_desc(req, desc, seq_size);
1821         /* process(ghash) assoc data */
1822         if (req_ctx->assoclen > 0)
1823                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1824         cc_set_gctr_desc(req, desc, seq_size);
1825         /* process(gctr+ghash) */
1826         if (req_ctx->cryptlen)
1827                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1828         cc_proc_gcm_result(req, desc, seq_size);
1829
1830         return 0;
1831 }
1832
1833 static int config_gcm_context(struct aead_request *req)
1834 {
1835         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1836         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1837         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1838         struct device *dev = drvdata_to_dev(ctx->drvdata);
1839
1840         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1841                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1842                                 req->cryptlen :
1843                                 (req->cryptlen - ctx->authsize);
1844         __be32 counter = cpu_to_be32(2);
1845
1846         dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1847                 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1848
1849         memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1850
1851         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1852
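        /* With a 96-bit IV, SP 800-38D defines J0 = IV || 0x00000001; the
         * tag is computed with J0 (gcm_iv_inc1) while payload encryption
         * starts at inc32(J0) = IV || 0x00000002 (gcm_iv_inc2).
         */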
1853         memcpy(req->iv + 12, &counter, 4);
1854         memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1855
1856         counter = cpu_to_be32(1);
1857         memcpy(req->iv + 12, &counter, 4);
1858         memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1859
1860         if (!req_ctx->plaintext_authenticate_only) {
1861                 __be64 temp64;
1862
1863                 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1864                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1865                 temp64 = cpu_to_be64(cryptlen * 8);
1866                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1867         } else {
1868                 /* RFC 4543: all data (AAD, IV, plaintext) is treated as
1869                  * additional authenticated data, i.e. nothing is encrypted.
1870                  */
1871                 __be64 temp64;
1872
1873                 temp64 = cpu_to_be64((req_ctx->assoclen +
1874                                       GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1875                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1876                 temp64 = 0;
1877                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1878         }
1879
1880         return 0;
1881 }
1882
1883 static void cc_proc_rfc4_gcm(struct aead_request *req)
1884 {
1885         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1886         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1887         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1888
1889         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1890                ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1891         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1892                GCM_BLOCK_RFC4_IV_SIZE);
1893         req->iv = areq_ctx->ctr_iv;
1894         areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1895 }
1896
1897 static int cc_proc_aead(struct aead_request *req,
1898                         enum drv_crypto_direction direct)
1899 {
1900         int rc = 0;
1901         int seq_len = 0;
1902         struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1903         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1904         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1905         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1906         struct device *dev = drvdata_to_dev(ctx->drvdata);
1907         struct cc_crypto_req cc_req = {};
1908
1909         dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1910                 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1911                 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1912                 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1913
1914         /* STAT_PHASE_0: Init and sanity checks */
1915
1916         /* Check data length according to mode */
1917         if (validate_data_size(ctx, direct, req)) {
1918                 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1919                         req->cryptlen, areq_ctx->assoclen);
1920                 return -EINVAL;
1921         }
1922
1923         /* Setup request structure */
1924         cc_req.user_cb = (void *)cc_aead_complete;
1925         cc_req.user_arg = (void *)req;
1926
1927         /* Setup request context */
1928         areq_ctx->gen_ctx.op_type = direct;
1929         areq_ctx->req_authsize = ctx->authsize;
1930         areq_ctx->cipher_mode = ctx->cipher_mode;
1931
1932         /* STAT_PHASE_1: Map buffers */
1933
1934         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1935                 /* Build CTR IV - Copy nonce from last 4 bytes in
1936                  * CTR key to first 4 bytes in CTR IV
1937                  */
1938                 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1939                        CTR_RFC3686_NONCE_SIZE);
1940                 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1941                        CTR_RFC3686_IV_SIZE);
1942                 /* Initialize counter portion of counter block */
1943                 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1944                             CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1945
1946                 /* Replace with counter iv */
1947                 req->iv = areq_ctx->ctr_iv;
1948                 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1949         } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1950                    (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1951                 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1952                 if (areq_ctx->ctr_iv != req->iv) {
1953                         memcpy(areq_ctx->ctr_iv, req->iv,
1954                                crypto_aead_ivsize(tfm));
1955                         req->iv = areq_ctx->ctr_iv;
1956                 }
1957         } else {
1958                 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1959         }
1960
1961         if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1962                 rc = config_ccm_adata(req);
1963                 if (rc) {
1964                         dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1965                                 rc);
1966                         goto exit;
1967                 }
1968         } else {
1969                 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1970         }
1971
1972         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1973                 rc = config_gcm_context(req);
1974                 if (rc) {
1975                         dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1976                                 rc);
1977                         goto exit;
1978                 }
1979         }
1980
1981         rc = cc_map_aead_request(ctx->drvdata, req);
1982         if (rc) {
1983                 dev_err(dev, "map_request() failed\n");
1984                 goto exit;
1985         }
1986
1987         /* STAT_PHASE_2: Create sequence */
1988
1989         /* Load MLLI tables to SRAM if necessary */
1990         cc_mlli_to_sram(req, desc, &seq_len);
1991
1992         /* TODO: move seq len by reference */
1993         switch (ctx->auth_mode) {
1994         case DRV_HASH_SHA1:
1995         case DRV_HASH_SHA256:
1996                 cc_hmac_authenc(req, desc, &seq_len);
1997                 break;
1998         case DRV_HASH_XCBC_MAC:
1999                 cc_xcbc_authenc(req, desc, &seq_len);
2000                 break;
2001         case DRV_HASH_NULL:
2002                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2003                         cc_ccm(req, desc, &seq_len);
2004                 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2005                         cc_gcm(req, desc, &seq_len);
2006                 break;
2007         default:
2008                 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2009                 cc_unmap_aead_request(dev, req);
2010                 rc = -ENOTSUPP;
2011                 goto exit;
2012         }
2013
2014         /* STAT_PHASE_3: Lock HW and push sequence */
2015
2016         rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2017
2018         if (rc != -EINPROGRESS && rc != -EBUSY) {
2019                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2020                 cc_unmap_aead_request(dev, req);
2021         }
2022
2023 exit:
2024         return rc;
2025 }
2026
2027 static int cc_aead_encrypt(struct aead_request *req)
2028 {
2029         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2030         int rc;
2031
2032         memset(areq_ctx, 0, sizeof(*areq_ctx));
2033
2034         /* No generated IV required */
2035         areq_ctx->backup_iv = req->iv;
2036         areq_ctx->assoclen = req->assoclen;
2037         areq_ctx->is_gcm4543 = false;
2038
2039         areq_ctx->plaintext_authenticate_only = false;
2040
2041         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2042         if (rc != -EINPROGRESS && rc != -EBUSY)
2043                 req->iv = areq_ctx->backup_iv;
2044
2045         return rc;
2046 }
2047
2048 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2049 {
2050         /* Very similar to cc_aead_encrypt() above. */
2051
2052         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2053         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2054         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2055         struct device *dev = drvdata_to_dev(ctx->drvdata);
2056         int rc = -EINVAL;
2057
2058         if (!valid_assoclen(req)) {
2059                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2060                 goto out;
2061         }
2062
2063         memset(areq_ctx, 0, sizeof(*areq_ctx));
2064
2065         /* No generated IV required */
2066         areq_ctx->backup_iv = req->iv;
2067         areq_ctx->assoclen = req->assoclen;
2068         areq_ctx->is_gcm4543 = true;
2069
2070         cc_proc_rfc4309_ccm(req);
2071
2072         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2073         if (rc != -EINPROGRESS && rc != -EBUSY)
2074                 req->iv = areq_ctx->backup_iv;
2075 out:
2076         return rc;
2077 }
2078
2079 static int cc_aead_decrypt(struct aead_request *req)
2080 {
2081         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2082         int rc;
2083
2084         memset(areq_ctx, 0, sizeof(*areq_ctx));
2085
2086         /* No generated IV required */
2087         areq_ctx->backup_iv = req->iv;
2088         areq_ctx->assoclen = req->assoclen;
2089         areq_ctx->is_gcm4543 = false;
2090
2091         areq_ctx->plaintext_authenticate_only = false;
2092
2093         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2094         if (rc != -EINPROGRESS && rc != -EBUSY)
2095                 req->iv = areq_ctx->backup_iv;
2096
2097         return rc;
2098 }
2099
2100 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2101 {
2102         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2103         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2104         struct device *dev = drvdata_to_dev(ctx->drvdata);
2105         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2106         int rc = -EINVAL;
2107
2108         if (!valid_assoclen(req)) {
2109                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2110                 goto out;
2111         }
2112
2113         memset(areq_ctx, 0, sizeof(*areq_ctx));
2114
2115         /* No generated IV required */
2116         areq_ctx->backup_iv = req->iv;
2117         areq_ctx->assoclen = req->assoclen;
2118
2119         areq_ctx->is_gcm4543 = true;
2120         cc_proc_rfc4309_ccm(req);
2121
2122         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2123         if (rc != -EINPROGRESS && rc != -EBUSY)
2124                 req->iv = areq_ctx->backup_iv;
2125
2126 out:
2127         return rc;
2128 }
2129
2130 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2131                                  unsigned int keylen)
2132 {
2133         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2134         struct device *dev = drvdata_to_dev(ctx->drvdata);
2135
2136         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2137
2138         if (keylen < 4)
2139                 return -EINVAL;
2140
2141         keylen -= 4;
2142         memcpy(ctx->ctr_nonce, key + keylen, 4);
2143
2144         return cc_aead_setkey(tfm, key, keylen);
2145 }
2146
2147 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2148                                  unsigned int keylen)
2149 {
2150         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2151         struct device *dev = drvdata_to_dev(ctx->drvdata);
2152
2153         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2154
2155         if (keylen < 4)
2156                 return -EINVAL;
2157
2158         keylen -= 4;
2159         memcpy(ctx->ctr_nonce, key + keylen, 4);
2160
2161         return cc_aead_setkey(tfm, key, keylen);
2162 }
2163
2164 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2165                               unsigned int authsize)
2166 {
2167         switch (authsize) {
2168         case 4:
2169         case 8:
2170         case 12:
2171         case 13:
2172         case 14:
2173         case 15:
2174         case 16:
2175                 break;
2176         default:
2177                 return -EINVAL;
2178         }
2179
2180         return cc_aead_setauthsize(authenc, authsize);
2181 }
2182
2183 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2184                                       unsigned int authsize)
2185 {
2186         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2187         struct device *dev = drvdata_to_dev(ctx->drvdata);
2188
2189         dev_dbg(dev, "authsize %d\n", authsize);
2190
2191         switch (authsize) {
2192         case 8:
2193         case 12:
2194         case 16:
2195                 break;
2196         default:
2197                 return -EINVAL;
2198         }
2199
2200         return cc_aead_setauthsize(authenc, authsize);
2201 }
2202
2203 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2204                                       unsigned int authsize)
2205 {
2206         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2207         struct device *dev = drvdata_to_dev(ctx->drvdata);
2208
2209         dev_dbg(dev, "authsize %d\n", authsize);
2210
2211         if (authsize != 16)
2212                 return -EINVAL;
2213
2214         return cc_aead_setauthsize(authenc, authsize);
2215 }
2216
2217 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2218 {
2219         /* Very similar to cc_aead_encrypt() above. */
2220
2221         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2222         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2223         struct device *dev = drvdata_to_dev(ctx->drvdata);
2224         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2225         int rc = -EINVAL;
2226
2227         if (!valid_assoclen(req)) {
2228                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2229                 goto out;
2230         }
2231
2232         memset(areq_ctx, 0, sizeof(*areq_ctx));
2233
2234         /* No generated IV required */
2235         areq_ctx->backup_iv = req->iv;
2236         areq_ctx->assoclen = req->assoclen;
2237         areq_ctx->plaintext_authenticate_only = false;
2238
2239         cc_proc_rfc4_gcm(req);
2240         areq_ctx->is_gcm4543 = true;
2241
2242         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2243         if (rc != -EINPROGRESS && rc != -EBUSY)
2244                 req->iv = areq_ctx->backup_iv;
2245 out:
2246         return rc;
2247 }
2248
2249 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2250 {
2251         /* Very similar to cc_aead_encrypt() above. */
2252         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2253         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2254         struct device *dev = drvdata_to_dev(ctx->drvdata);
2255         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2256         int rc = -EINVAL;
2257
2258         if (!valid_assoclen(req)) {
2259                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2260                 goto out;
2261         }
2262
2263         memset(areq_ctx, 0, sizeof(*areq_ctx));
2264
2265         /* The plaintext is not encrypted with RFC 4543 */
2266         areq_ctx->plaintext_authenticate_only = true;
2267
2268         /* No generated IV required */
2269         areq_ctx->backup_iv = req->iv;
2270         areq_ctx->assoclen = req->assoclen;
2271
2272         cc_proc_rfc4_gcm(req);
2273         areq_ctx->is_gcm4543 = true;
2274
2275         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2276         if (rc != -EINPROGRESS && rc != -EBUSY)
2277                 req->iv = areq_ctx->backup_iv;
2278 out:
2279         return rc;
2280 }
2281
2282 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2283 {
2284         /* Very similar to cc_aead_decrypt() above. */
2285
2286         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2287         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2288         struct device *dev = drvdata_to_dev(ctx->drvdata);
2289         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2290         int rc = -EINVAL;
2291
2292         if (!valid_assoclen(req)) {
2293                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2294                 goto out;
2295         }
2296
2297         memset(areq_ctx, 0, sizeof(*areq_ctx));
2298
2299         /* No generated IV required */
2300         areq_ctx->backup_iv = req->iv;
2301         areq_ctx->assoclen = req->assoclen;
2302         areq_ctx->plaintext_authenticate_only = false;
2303
2304         cc_proc_rfc4_gcm(req);
2305         areq_ctx->is_gcm4543 = true;
2306
2307         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2308         if (rc != -EINPROGRESS && rc != -EBUSY)
2309                 req->iv = areq_ctx->backup_iv;
2310 out:
2311         return rc;
2312 }
2313
2314 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2315 {
2316         /* Very similar to cc_aead_decrypt() above. */
2317         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2318         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2319         struct device *dev = drvdata_to_dev(ctx->drvdata);
2320         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2321         int rc = -EINVAL;
2322
2323         if (!valid_assoclen(req)) {
2324                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2325                 goto out;
2326         }
2327
2328         memset(areq_ctx, 0, sizeof(*areq_ctx));
2329
2330         /* The payload is not decrypted with RFC 4543 */
2331         areq_ctx->plaintext_authenticate_only = true;
2332
2333         /* No generated IV required */
2334         areq_ctx->backup_iv = req->iv;
2335         areq_ctx->assoclen = req->assoclen;
2336
2337         cc_proc_rfc4_gcm(req);
2338         areq_ctx->is_gcm4543 = true;
2339
2340         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2341         if (rc != -EINPROGRESS && rc != -EBUSY)
2342                 req->iv = areq_ctx->backup_iv;
2343 out:
2344         return rc;
2345 }
2346
2347 /* aead alg */
2348 static struct cc_alg_template aead_algs[] = {
2349         {
2350                 .name = "authenc(hmac(sha1),cbc(aes))",
2351                 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2352                 .blocksize = AES_BLOCK_SIZE,
2353                 .template_aead = {
2354                         .setkey = cc_aead_setkey,
2355                         .setauthsize = cc_aead_setauthsize,
2356                         .encrypt = cc_aead_encrypt,
2357                         .decrypt = cc_aead_decrypt,
2358                         .init = cc_aead_init,
2359                         .exit = cc_aead_exit,
2360                         .ivsize = AES_BLOCK_SIZE,
2361                         .maxauthsize = SHA1_DIGEST_SIZE,
2362                 },
2363                 .cipher_mode = DRV_CIPHER_CBC,
2364                 .flow_mode = S_DIN_to_AES,
2365                 .auth_mode = DRV_HASH_SHA1,
2366                 .min_hw_rev = CC_HW_REV_630,
2367                 .std_body = CC_STD_NIST,
2368         },
2369         {
2370                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2371                 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2372                 .blocksize = DES3_EDE_BLOCK_SIZE,
2373                 .template_aead = {
2374                         .setkey = cc_des3_aead_setkey,
2375                         .setauthsize = cc_aead_setauthsize,
2376                         .encrypt = cc_aead_encrypt,
2377                         .decrypt = cc_aead_decrypt,
2378                         .init = cc_aead_init,
2379                         .exit = cc_aead_exit,
2380                         .ivsize = DES3_EDE_BLOCK_SIZE,
2381                         .maxauthsize = SHA1_DIGEST_SIZE,
2382                 },
2383                 .cipher_mode = DRV_CIPHER_CBC,
2384                 .flow_mode = S_DIN_to_DES,
2385                 .auth_mode = DRV_HASH_SHA1,
2386                 .min_hw_rev = CC_HW_REV_630,
2387                 .std_body = CC_STD_NIST,
2388         },
2389         {
2390                 .name = "authenc(hmac(sha256),cbc(aes))",
2391                 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2392                 .blocksize = AES_BLOCK_SIZE,
2393                 .template_aead = {
2394                         .setkey = cc_aead_setkey,
2395                         .setauthsize = cc_aead_setauthsize,
2396                         .encrypt = cc_aead_encrypt,
2397                         .decrypt = cc_aead_decrypt,
2398                         .init = cc_aead_init,
2399                         .exit = cc_aead_exit,
2400                         .ivsize = AES_BLOCK_SIZE,
2401                         .maxauthsize = SHA256_DIGEST_SIZE,
2402                 },
2403                 .cipher_mode = DRV_CIPHER_CBC,
2404                 .flow_mode = S_DIN_to_AES,
2405                 .auth_mode = DRV_HASH_SHA256,
2406                 .min_hw_rev = CC_HW_REV_630,
2407                 .std_body = CC_STD_NIST,
2408         },
2409         {
2410                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2411                 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2412                 .blocksize = DES3_EDE_BLOCK_SIZE,
2413                 .template_aead = {
2414                         .setkey = cc_des3_aead_setkey,
2415                         .setauthsize = cc_aead_setauthsize,
2416                         .encrypt = cc_aead_encrypt,
2417                         .decrypt = cc_aead_decrypt,
2418                         .init = cc_aead_init,
2419                         .exit = cc_aead_exit,
2420                         .ivsize = DES3_EDE_BLOCK_SIZE,
2421                         .maxauthsize = SHA256_DIGEST_SIZE,
2422                 },
2423                 .cipher_mode = DRV_CIPHER_CBC,
2424                 .flow_mode = S_DIN_to_DES,
2425                 .auth_mode = DRV_HASH_SHA256,
2426                 .min_hw_rev = CC_HW_REV_630,
2427                 .std_body = CC_STD_NIST,
2428         },
2429         {
2430                 .name = "authenc(xcbc(aes),cbc(aes))",
2431                 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2432                 .blocksize = AES_BLOCK_SIZE,
2433                 .template_aead = {
2434                         .setkey = cc_aead_setkey,
2435                         .setauthsize = cc_aead_setauthsize,
2436                         .encrypt = cc_aead_encrypt,
2437                         .decrypt = cc_aead_decrypt,
2438                         .init = cc_aead_init,
2439                         .exit = cc_aead_exit,
2440                         .ivsize = AES_BLOCK_SIZE,
2441                         .maxauthsize = AES_BLOCK_SIZE,
2442                 },
2443                 .cipher_mode = DRV_CIPHER_CBC,
2444                 .flow_mode = S_DIN_to_AES,
2445                 .auth_mode = DRV_HASH_XCBC_MAC,
2446                 .min_hw_rev = CC_HW_REV_630,
2447                 .std_body = CC_STD_NIST,
2448         },
2449         {
2450                 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2451                 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2452                 .blocksize = 1,
2453                 .template_aead = {
2454                         .setkey = cc_aead_setkey,
2455                         .setauthsize = cc_aead_setauthsize,
2456                         .encrypt = cc_aead_encrypt,
2457                         .decrypt = cc_aead_decrypt,
2458                         .init = cc_aead_init,
2459                         .exit = cc_aead_exit,
2460                         .ivsize = CTR_RFC3686_IV_SIZE,
2461                         .maxauthsize = SHA1_DIGEST_SIZE,
2462                 },
2463                 .cipher_mode = DRV_CIPHER_CTR,
2464                 .flow_mode = S_DIN_to_AES,
2465                 .auth_mode = DRV_HASH_SHA1,
2466                 .min_hw_rev = CC_HW_REV_630,
2467                 .std_body = CC_STD_NIST,
2468         },
2469         {
2470                 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2471                 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2472                 .blocksize = 1,
2473                 .template_aead = {
2474                         .setkey = cc_aead_setkey,
2475                         .setauthsize = cc_aead_setauthsize,
2476                         .encrypt = cc_aead_encrypt,
2477                         .decrypt = cc_aead_decrypt,
2478                         .init = cc_aead_init,
2479                         .exit = cc_aead_exit,
2480                         .ivsize = CTR_RFC3686_IV_SIZE,
2481                         .maxauthsize = SHA256_DIGEST_SIZE,
2482                 },
2483                 .cipher_mode = DRV_CIPHER_CTR,
2484                 .flow_mode = S_DIN_to_AES,
2485                 .auth_mode = DRV_HASH_SHA256,
2486                 .min_hw_rev = CC_HW_REV_630,
2487                 .std_body = CC_STD_NIST,
2488         },
2489         {
2490                 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2491                 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2492                 .blocksize = 1,
2493                 .template_aead = {
2494                         .setkey = cc_aead_setkey,
2495                         .setauthsize = cc_aead_setauthsize,
2496                         .encrypt = cc_aead_encrypt,
2497                         .decrypt = cc_aead_decrypt,
2498                         .init = cc_aead_init,
2499                         .exit = cc_aead_exit,
2500                         .ivsize = CTR_RFC3686_IV_SIZE,
2501                         .maxauthsize = AES_BLOCK_SIZE,
2502                 },
2503                 .cipher_mode = DRV_CIPHER_CTR,
2504                 .flow_mode = S_DIN_to_AES,
2505                 .auth_mode = DRV_HASH_XCBC_MAC,
2506                 .min_hw_rev = CC_HW_REV_630,
2507                 .std_body = CC_STD_NIST,
2508         },
2509         {
2510                 .name = "ccm(aes)",
2511                 .driver_name = "ccm-aes-ccree",
2512                 .blocksize = 1,
2513                 .template_aead = {
2514                         .setkey = cc_aead_setkey,
2515                         .setauthsize = cc_ccm_setauthsize,
2516                         .encrypt = cc_aead_encrypt,
2517                         .decrypt = cc_aead_decrypt,
2518                         .init = cc_aead_init,
2519                         .exit = cc_aead_exit,
2520                         .ivsize = AES_BLOCK_SIZE,
2521                         .maxauthsize = AES_BLOCK_SIZE,
2522                 },
2523                 .cipher_mode = DRV_CIPHER_CCM,
2524                 .flow_mode = S_DIN_to_AES,
2525                 .auth_mode = DRV_HASH_NULL,
2526                 .min_hw_rev = CC_HW_REV_630,
2527                 .std_body = CC_STD_NIST,
2528         },
2529         {
2530                 .name = "rfc4309(ccm(aes))",
2531                 .driver_name = "rfc4309-ccm-aes-ccree",
2532                 .blocksize = 1,
2533                 .template_aead = {
2534                         .setkey = cc_rfc4309_ccm_setkey,
2535                         .setauthsize = cc_rfc4309_ccm_setauthsize,
2536                         .encrypt = cc_rfc4309_ccm_encrypt,
2537                         .decrypt = cc_rfc4309_ccm_decrypt,
2538                         .init = cc_aead_init,
2539                         .exit = cc_aead_exit,
2540                         .ivsize = CCM_BLOCK_IV_SIZE,
2541                         .maxauthsize = AES_BLOCK_SIZE,
2542                 },
2543                 .cipher_mode = DRV_CIPHER_CCM,
2544                 .flow_mode = S_DIN_to_AES,
2545                 .auth_mode = DRV_HASH_NULL,
2546                 .min_hw_rev = CC_HW_REV_630,
2547                 .std_body = CC_STD_NIST,
2548         },
2549         {
2550                 .name = "gcm(aes)",
2551                 .driver_name = "gcm-aes-ccree",
2552                 .blocksize = 1,
2553                 .template_aead = {
2554                         .setkey = cc_aead_setkey,
2555                         .setauthsize = cc_gcm_setauthsize,
2556                         .encrypt = cc_aead_encrypt,
2557                         .decrypt = cc_aead_decrypt,
2558                         .init = cc_aead_init,
2559                         .exit = cc_aead_exit,
2560                         .ivsize = 12,
2561                         .maxauthsize = AES_BLOCK_SIZE,
2562                 },
2563                 .cipher_mode = DRV_CIPHER_GCTR,
2564                 .flow_mode = S_DIN_to_AES,
2565                 .auth_mode = DRV_HASH_NULL,
2566                 .min_hw_rev = CC_HW_REV_630,
2567                 .std_body = CC_STD_NIST,
2568         },
2569         {
2570                 .name = "rfc4106(gcm(aes))",
2571                 .driver_name = "rfc4106-gcm-aes-ccree",
2572                 .blocksize = 1,
2573                 .template_aead = {
2574                         .setkey = cc_rfc4106_gcm_setkey,
2575                         .setauthsize = cc_rfc4106_gcm_setauthsize,
2576                         .encrypt = cc_rfc4106_gcm_encrypt,
2577                         .decrypt = cc_rfc4106_gcm_decrypt,
2578                         .init = cc_aead_init,
2579                         .exit = cc_aead_exit,
2580                         .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2581                         .maxauthsize = AES_BLOCK_SIZE,
2582                 },
2583                 .cipher_mode = DRV_CIPHER_GCTR,
2584                 .flow_mode = S_DIN_to_AES,
2585                 .auth_mode = DRV_HASH_NULL,
2586                 .min_hw_rev = CC_HW_REV_630,
2587                 .std_body = CC_STD_NIST,
2588         },
2589         {
2590                 .name = "rfc4543(gcm(aes))",
2591                 .driver_name = "rfc4543-gcm-aes-ccree",
2592                 .blocksize = 1,
2593                 .template_aead = {
2594                         .setkey = cc_rfc4543_gcm_setkey,
2595                         .setauthsize = cc_rfc4543_gcm_setauthsize,
2596                         .encrypt = cc_rfc4543_gcm_encrypt,
2597                         .decrypt = cc_rfc4543_gcm_decrypt,
2598                         .init = cc_aead_init,
2599                         .exit = cc_aead_exit,
2600                         .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2601                         .maxauthsize = AES_BLOCK_SIZE,
2602                 },
2603                 .cipher_mode = DRV_CIPHER_GCTR,
2604                 .flow_mode = S_DIN_to_AES,
2605                 .auth_mode = DRV_HASH_NULL,
2606                 .min_hw_rev = CC_HW_REV_630,
2607                 .std_body = CC_STD_NIST,
2608         },
2609 };
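
/*
 * A minimal sketch of how one of these algorithms is consumed through
 * the kernel AEAD API (illustrative only: error handling and DMA-safe
 * buffer setup are elided, and the buffer/length names are made up):
 *
 *   struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *   struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *   struct scatterlist sg;
 *
 *   crypto_aead_setkey(tfm, key, 16);
 *   crypto_aead_setauthsize(tfm, 16);
 *   sg_init_one(&sg, buf, assoclen + cryptlen + 16);
 *   aead_request_set_ad(req, assoclen);
 *   aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
 *   crypto_aead_encrypt(req);
 */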
2610
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
                                                struct device *dev)
{
        struct cc_crypto_alg *t_alg;
        struct aead_alg *alg;

        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg)
                return ERR_PTR(-ENOMEM);

        alg = &tmpl->template_aead;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 tmpl->driver_name);
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CC_CRA_PRIO;

        alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
        alg->init = cc_aead_init;
        alg->exit = cc_aead_exit;

        t_alg->aead_alg = *alg;

        t_alg->cipher_mode = tmpl->cipher_mode;
        t_alg->flow_mode = tmpl->flow_mode;
        t_alg->auth_mode = tmpl->auth_mode;

        return t_alg;
}
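
/**
 * cc_aead_free() - Unregister and free all registered AEAD algorithms.
 * @drvdata: Driver private context.
 *
 * Walks the handle's algorithm list, unregistering and freeing each entry,
 * then frees the handle itself and clears drvdata->aead_handle.
 *
 * Return: Always 0.
 */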
int cc_aead_free(struct cc_drvdata *drvdata)
{
        struct cc_crypto_alg *t_alg, *n;
        struct cc_aead_handle *aead_handle = drvdata->aead_handle;

        if (aead_handle) {
                /* Remove registered algs */
                list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
                                         entry) {
                        crypto_unregister_aead(&t_alg->aead_alg);
                        list_del(&t_alg->entry);
                        kfree(t_alg);
                }
                kfree(aead_handle);
                drvdata->aead_handle = NULL;
        }

        return 0;
}
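
/**
 * cc_aead_alloc() - Allocate the AEAD handle and register supported algs.
 * @drvdata: Driver private context.
 *
 * Allocates the AEAD handle, carves a workspace out of the SRAM pool, and
 * registers every entry of aead_algs[] that is supported by the HW revision
 * and the enabled standards bodies. On any failure, everything registered so
 * far is unwound through cc_aead_free().
 *
 * Return: 0 on success, negative errno otherwise.
 */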
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
        struct cc_aead_handle *aead_handle;
        struct cc_crypto_alg *t_alg;
        int rc = -ENOMEM;
        int alg;
        struct device *dev = drvdata_to_dev(drvdata);

        aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
        if (!aead_handle) {
                rc = -ENOMEM;
                goto fail0;
        }

        INIT_LIST_HEAD(&aead_handle->aead_list);
        drvdata->aead_handle = aead_handle;
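
        /* Carve out an SRAM workspace, sized for the largest HMAC digest */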
        aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
                                                         MAX_HMAC_DIGEST_SIZE);

        if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
                dev_err(dev, "SRAM pool exhausted\n");
                rc = -ENOMEM;
                goto fail1;
        }

        /* Register the supported algorithms with the Linux crypto API */
        for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
                if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
                    !(drvdata->std_bodies & aead_algs[alg].std_body))
                        continue;

                t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
                if (IS_ERR(t_alg)) {
                        rc = PTR_ERR(t_alg);
                        dev_err(dev, "%s alg allocation failed\n",
                                aead_algs[alg].driver_name);
                        goto fail1;
                }
                t_alg->drvdata = drvdata;
                rc = crypto_register_aead(&t_alg->aead_alg);
                if (rc) {
                        dev_err(dev, "%s alg registration failed\n",
                                t_alg->aead_alg.base.cra_driver_name);
                        goto fail2;
                }

                list_add_tail(&t_alg->entry, &aead_handle->aead_list);
                dev_dbg(dev, "Registered %s\n",
                        t_alg->aead_alg.base.cra_driver_name);
        }

        return 0;

fail2:
        kfree(t_alg);
fail1:
        cc_aead_free(drvdata);
fail0:
        return rc;
}
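
/*
 * For orientation only -- a sketch of how these entry points are expected
 * to be wired up by the driver core (the real sequencing lives in
 * cc_driver.c, and "post_err" below is a hypothetical unwind label):
 *
 *	rc = cc_aead_alloc(drvdata);
 *	if (rc) {
 *		dev_err(dev, "cc_aead_alloc failed\n");
 *		goto post_err;
 *	}
 *	...
 *	cc_aead_free(drvdata);
 */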