// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead	template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

struct cc_aead_handle {
	cc_sram_addr_t sram_workspace_addr;
	struct list_head aead_list;
};

struct cc_hmac_s {
	u8 *padded_authkey;
	u8 *ipad_opad; /* IPAD, OPAD */
	dma_addr_t padded_authkey_dma_addr;
	dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
	u8 *xcbc_keys; /* K1, K2, K3 */
	dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
	struct cc_drvdata *drvdata;
	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
	u8 *enckey;
	dma_addr_t enckey_dma_addr;
	union {
		struct cc_hmac_s hmac;
		struct cc_xcbc_s xcbc;
	} auth_state;
	unsigned int enc_keylen;
	unsigned int auth_keylen;
	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
	unsigned int hash_len;
	enum drv_cipher_mode cipher_mode;
	enum cc_flow_mode flow_mode;
	enum drv_hash_mode auth_mode;
};

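/* The assoclen of the rfc4106/rfc4309/rfc4543 templates covers the 8- or
 * 12-byte ESP AAD (SPI + 32/64-bit sequence number) plus the 8-byte IV
 * appended to it, hence the two valid values of 16 and 20.
 */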
static inline bool valid_assoclen(struct aead_request *req)
{
	return ((req->assoclen == 16) || (req->assoclen == 20));
}

static void cc_aead_exit(struct crypto_aead *tfm)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
		crypto_tfm_alg_name(&tfm->base));

	/* Unmap enckey buffer */
	if (ctx->enckey) {
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
				  ctx->enckey_dma_addr);
		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
			&ctx->enckey_dma_addr);
		ctx->enckey_dma_addr = 0;
		ctx->enckey = NULL;
	}

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

		if (xcbc->xcbc_keys) {
			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
					  xcbc->xcbc_keys,
					  xcbc->xcbc_keys_dma_addr);
		}
		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
			&xcbc->xcbc_keys_dma_addr);
		xcbc->xcbc_keys_dma_addr = 0;
		xcbc->xcbc_keys = NULL;
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

		if (hmac->ipad_opad) {
			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
					  hmac->ipad_opad,
					  hmac->ipad_opad_dma_addr);
			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
				&hmac->ipad_opad_dma_addr);
			hmac->ipad_opad_dma_addr = 0;
			hmac->ipad_opad = NULL;
		}
		if (hmac->padded_authkey) {
			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
					  hmac->padded_authkey,
					  hmac->padded_authkey_dma_addr);
			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
				&hmac->padded_authkey_dma_addr);
			hmac->padded_authkey_dma_addr = 0;
			hmac->padded_authkey = NULL;
		}
	}
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, aead_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
		crypto_tfm_alg_name(&tfm->base));

	/* Initialize modes in instance */
	ctx->cipher_mode = cc_alg->cipher_mode;
	ctx->flow_mode = cc_alg->flow_mode;
	ctx->auth_mode = cc_alg->auth_mode;
	ctx->drvdata = cc_alg->drvdata;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

	/* Allocate key buffer, cache line aligned */
	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
					 &ctx->enckey_dma_addr, GFP_KERNEL);
	if (!ctx->enckey) {
		dev_err(dev, "Failed allocating key buffer\n");
		goto init_failed;
	}
	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
		ctx->enckey);

	/* Set default authlen value */

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
		/* (and temporary for user key - up to 256b) */
		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
						     &xcbc->xcbc_keys_dma_addr,
						     GFP_KERNEL);
		if (!xcbc->xcbc_keys) {
			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
			goto init_failed;
		}
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

		/* Allocate dma-coherent buffer for IPAD + OPAD */
		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
						     &hmac->ipad_opad_dma_addr,
						     GFP_KERNEL);

		if (!hmac->ipad_opad) {
			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
			goto init_failed;
		}

		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
			hmac->ipad_opad);

		hmac->padded_authkey = dma_alloc_coherent(dev,
							  MAX_HMAC_BLOCK_SIZE,
							  pkey_dma,
							  GFP_KERNEL);

		if (!hmac->padded_authkey) {
			dev_err(dev, "failed to allocate padded_authkey\n");
			goto init_failed;
		}
	} else {
		ctx->auth_state.hmac.ipad_opad = NULL;
		ctx->auth_state.hmac.padded_authkey = NULL;
	}
	ctx->hash_len = cc_get_aead_hash_len(tfm);

	return 0;

init_failed:
	cc_aead_exit(tfm);
	return -ENOMEM;
}

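/* Request completion callback. For decryption, the MAC computed by the
 * engine (mac_buf) is compared in software against the ICV carried in the
 * request; on mismatch the plaintext is zeroed and -EBADMSG is returned.
 */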
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
	struct aead_request *areq = (struct aead_request *)cc_req;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cc_unmap_aead_request(dev, areq);

	/* Restore ordinary iv pointer */
	areq->iv = areq_ctx->backup_iv;

	if (err)
		goto done;

	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
			   ctx->authsize) != 0) {
			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
				ctx->authsize, ctx->cipher_mode);
			/* In case of payload authentication failure, the
			 * decrypted message MUST NOT be revealed --> zero
			 * its memory.
			 */
			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
			err = -EBADMSG;
		}
	} else { /*ENCRYPT*/
		if (areq_ctx->is_icv_fragmented) {
			u32 skip = areq->cryptlen + areq_ctx->dst_offset;

			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
					   areq_ctx->dst_sgl, skip,
					   (skip + ctx->authsize),
					   CC_SG_FROM_BUF);
		}

		/* If an IV was generated, copy it back to the user provided
		 * buffer.
		 */
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CTR_RFC3686_NONCE_SIZE,
				       CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
		}
	}
done:
	aead_request_complete(areq, err);
}

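/* Derive the three AES-XCBC-MAC subkeys per RFC 3566: K1, K2 and K3 are
 * the encryptions of the constant blocks 0x01..01, 0x02..02 and 0x03..03
 * under the user key, written back over the same key buffer.
 */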
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
				struct cc_aead_ctx *ctx)
{
	/* Load the AES key */
	hw_desc_init(&desc[0]);
	/* We use the same buffer for the source/user key and the output
	 * keys, because after this key load the user key is no longer
	 * needed.
	 */
	set_din_type(&desc[0], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
		     NS_BIT);
	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[0], ctx->auth_keylen);
	set_flow_mode(&desc[0], S_DIN_to_AES);
	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

	hw_desc_init(&desc[1]);
	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[1], DIN_AES_DOUT);
	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[2]);
	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[2], DIN_AES_DOUT);
	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
				 + AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[3]);
	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[3], DIN_AES_DOUT);
	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
				 + 2 * AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	return 4;
}

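/* Precompute the HMAC inner and outer digest states per RFC 2104: for each
 * of the ipad/opad constants, hash one block of (padded_authkey XOR pad)
 * and store the intermediate digest into the ipad_opad buffer.
 */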
static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	unsigned int digest_ofs = 0;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

	unsigned int idx = 0;
	int i;

	/* calc derived HMAC key */
	for (i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_sram(&desc[idx],
			     cc_larval_digest_addr(ctx->drvdata,
						   ctx->auth_mode),
			     digest_size);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     hmac->padded_authkey_dma_addr,
			     SHA256_BLOCK_SIZE, NS_BIT);
		set_cipher_mode(&desc[idx], hash_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the digest */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_dout_dlli(&desc[idx],
			      (hmac->ipad_opad_dma_addr + digest_ofs),
			      digest_size, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		idx++;

		digest_ofs += digest_size;
	}

	return idx;
}

static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
		ctx->enc_keylen, ctx->auth_keylen);

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		break;
	case DRV_HASH_XCBC_MAC:
		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
		    ctx->auth_keylen != AES_KEYSIZE_192 &&
		    ctx->auth_keylen != AES_KEYSIZE_256)
			return -ENOTSUPP;
		break;
	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
		if (ctx->auth_keylen > 0)
			return -EINVAL;
		break;
	default:
		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
		return -EINVAL;
	}
	/* Check cipher key size */
	if (ctx->flow_mode == S_DIN_to_DES) {
		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	} else { /* Default assumed to be AES ciphers */
		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
		    ctx->enc_keylen != AES_KEYSIZE_192 &&
		    ctx->enc_keylen != AES_KEYSIZE_256) {
			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	}

	return 0; /* All tests of keys sizes passed */
}

/* This function prepares the user key so it can be passed to the HMAC
 * processing (copied to an internal buffer, or hashed in case the key is
 * longer than the block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	dma_addr_t key_dma_addr = 0;
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
	struct cc_crypto_req cc_req = {};
	unsigned int blocksize;
	unsigned int digestsize;
	unsigned int hashmode;
	unsigned int idx = 0;
	int rc = 0;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	dma_addr_t padded_authkey_dma_addr =
		ctx->auth_state.hmac.padded_authkey_dma_addr;

	switch (ctx->auth_mode) { /* auth_key required and >0 */
	case DRV_HASH_SHA1:
		blocksize = SHA1_BLOCK_SIZE;
		digestsize = SHA1_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA1;
		break;
	case DRV_HASH_SHA256:
	default:
		blocksize = SHA256_BLOCK_SIZE;
		digestsize = SHA256_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA256;
	}

	if (keylen != 0) {
		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_sram(&desc[idx], larval_addr, digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     key_dma_addr, keylen, NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
				      digestsize), (blocksize - digestsize),
				      NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
				     keylen, NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (padded_authkey_dma_addr +
					       keylen),
					      (blocksize - keylen), NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, (blocksize - keylen));
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc)
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);

	if (key_dma_addr)
		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

	return rc;
}

static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	unsigned int seq_len = 0;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	const u8 *enckey, *authkey;
	int rc;

	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
		struct crypto_authenc_keys keys;

		rc = crypto_authenc_extractkeys(&keys, key, keylen);
		if (rc)
			goto badkey;
		enckey = keys.enckey;
		authkey = keys.authkey;
		ctx->enc_keylen = keys.enckeylen;
		ctx->auth_keylen = keys.authkeylen;

		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			/* the nonce is stored in the last bytes of the key */
			rc = -EINVAL;
			if (ctx->enc_keylen <
			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
				goto badkey;
			/* Copy nonce from last 4 bytes in CTR key to
			 * first 4 bytes in CTR IV
			 */
			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
			/* Set CTR key size */
			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
		}
	} else { /* non-authenc - has just one key */
		enckey = key;
		authkey = NULL;
		ctx->enc_keylen = keylen;
		ctx->auth_keylen = 0;
	}

	rc = validate_keys_sizes(ctx);
	if (rc)
		goto badkey;

	/* STAT_PHASE_1: Copy key to ctx */

	/* Get key material */
	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
	if (ctx->enc_keylen == 24)
		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
		       ctx->auth_keylen);
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
		if (rc)
			goto badkey;
	}

	/* STAT_PHASE_2: Create sequence */

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		seq_len = hmac_setkey(desc, ctx);
		break;
	case DRV_HASH_XCBC_MAC:
		seq_len = xcbc_setkey(desc, ctx);
		break;
	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
		break; /* No auth. key setup */
	default:
		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
		rc = -ENOTSUPP;
		goto badkey;
	}

	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
		if (rc) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto setkey_error;
		}
	}

	/* Update STAT_PHASE_3 */
	return rc;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
	return rc;
}

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 3)
		return -EINVAL;

	keylen -= 3;
	memcpy(ctx->ctr_nonce, key + keylen, 3);

	return cc_aead_setkey(tfm, key, keylen);
}

static int cc_aead_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	/* Unsupported auth. sizes */
	if (authsize == 0 ||
	    authsize > crypto_aead_maxauthsize(authenc)) {
		return -ENOTSUPP;
	}

	ctx->authsize = authsize;
	dev_dbg(dev, "authlen=%d\n", ctx->authsize);

	return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
			      unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (assoc_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "ASSOC buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
			     areq->assoclen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "ASSOC buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
			     areq_ctx->assoc.mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "Invalid ASSOC buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_authen_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size, int direct)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	unsigned int idx = *seq_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
	{
		struct scatterlist *cipher =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_sgl : areq_ctx->src_sgl;

		unsigned int offset =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_offset : areq_ctx->src_offset;
		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(cipher) + offset),
			     areq_ctx->cryptlen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_MLLI:
	{
		/* DOUBLE-PASS flow (the default):
		 * assoc. + iv + data compacted into one table;
		 * if assoclen is ZERO, only the IV is processed
		 */
		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		u32 mlli_nents = areq_ctx->assoc.mlli_nents;

		if (areq_ctx->is_single_pass) {
			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
				mlli_addr = areq_ctx->dst.sram_addr;
				mlli_nents = areq_ctx->dst.mlli_nents;
			} else {
				mlli_addr = areq_ctx->src.sram_addr;
				mlli_nents = areq_ctx->src.mlli_nents;
			}
		}

		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
			     NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->cryptlen == 0)
		return; /* null processing */

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(areq_ctx->src_sgl) +
			      areq_ctx->src_offset), areq_ctx->cryptlen,
			     NS_BIT);
		set_dout_dlli(&desc[idx],
			      (sg_dma_address(areq_ctx->dst_sgl) +
			       areq_ctx->dst_offset),
			      areq_ctx->cryptlen, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
			     areq_ctx->src.mlli_nents, NS_BIT);
		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_digest_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	int direct = req_ctx->gen_ctx.op_type;

	/* Get final ICV result */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		hw_desc_init(&desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
			      NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_aes_not_hash_mode(&desc[idx]);
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
		} else {
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			set_cipher_mode(&desc[idx], hash_mode);
		}
	} else { /*Decrypt*/
		/* Get ICV out from hardware */
		hw_desc_init(&desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
			      ctx->authsize, NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		set_cipher_config0(&desc[idx],
				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
			set_aes_not_hash_mode(&desc[idx]);
		} else {
			set_cipher_mode(&desc[idx], hash_mode);
		}
	}

	*seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
			       struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = req_ctx->hw_iv_size;
	unsigned int idx = *seq_size;
	int direct = req_ctx->gen_ctx.op_type;

	/* Setup cipher state */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
		     hw_iv_size, NS_BIT);
	if (ctx->cipher_mode == DRV_CIPHER_CTR)
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	else
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	/* Setup enc. key */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	if (ctx->flow_mode == S_DIN_to_AES) {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
			      ctx->enc_keylen), NS_BIT);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
	} else {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ctx->enc_keylen, NS_BIT);
		set_key_size_des(&desc[idx], ctx->enc_keylen);
	}
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	*seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
			   unsigned int *seq_size, unsigned int data_flow_mode)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int idx = *seq_size;

	if (req_ctx->cryptlen == 0)
		return; /* null processing */

	cc_set_cipher_desc(req, desc, &idx);
	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* We must wait for DMA to write all cipher */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	*seq_size = idx;
}

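/* Restore the precomputed inner (ipad) digest state and the initial digest
 * length, so that hashing the data continues the HMAC inner hash.
 */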
static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	/* Loading hash ipad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
		     NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->hash_len);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	*seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int idx = *seq_size;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		     AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	*seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	/* Hash associated data */
	if (req->assoclen > 0)
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

	/* Hash IV */
	*seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      ctx->hash_len);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_cipher_do(&desc[idx], DO_PAD);
	idx++;

	/* Get final ICV result */
	hw_desc_init(&desc[idx]);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      digest_size);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_cipher_mode(&desc[idx], hash_mode);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
		     digest_size, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
		     digest_size);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	*seq_size = idx;
}

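/* The DMA engine fetches MLLI tables from internal SRAM, so the table
 * built in host memory is copied into SRAM with a BYPASS descriptor
 * whenever an MLLI buffer is in use.
 */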
static void cc_mlli_to_sram(struct aead_request *req,
			    struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
	    !req_ctx->is_single_pass) {
		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
			(unsigned int)ctx->drvdata->mlli_sram_addr,
			req_ctx->mlli_params.mlli_len);
		/* Copy MLLI table host-to-sram */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI,
			     req_ctx->mlli_params.mlli_dma_addr,
			     req_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[*seq_size],
			      ctx->drvdata->mlli_sram_addr,
			      req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;
	}
}

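/* Select the data flow mode: in single-pass the data is fed to the cipher
 * and hash engines in one pass, while double-pass runs a plain
 * DIN->cipher->DOUT flow and hashes the data separately.
 */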
static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
					  enum cc_flow_mode setup_flow_mode,
					  bool is_single_pass)
{
	enum cc_flow_mode data_flow_mode;

	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
	} else { /* Decrypt */
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_and_HASH : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_and_HASH : DIN_DES_DOUT;
	}

	return data_flow_mode;
}

static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
			    unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/**
		 * Single-pass flow
		 */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/**
	 * Double-pass flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. when using assoc. data whose length is not a word multiple
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after.. */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);

	} else { /*DECRYPT*/
		/* authenc first.. */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		/* decrypt after.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit
		 * must be after the cipher operation
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}

static void
cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
		unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/**
		 * Single-pass flow
		 */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/**
	 * Double-pass flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. when using assoc. data whose length is not a word multiple
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after.. */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_digest_desc(req, desc, seq_size);
	} else { /*DECRYPT*/
		/* authenc first.. */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		/* decrypt after.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit
		 * must be after the cipher operation
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}

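/* Check the data size constraints and choose single- vs double-pass:
 * a non-word-aligned assoclen (and, for CTR, cryptlen) forces the
 * double-pass fallback, while CBC and DES require block-aligned data.
 */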
static int validate_data_size(struct cc_aead_ctx *ctx,
			      enum drv_crypto_direction direct,
			      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int assoclen = req->assoclen;
	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
			(req->cryptlen - ctx->authsize) : req->cryptlen;

	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->cryptlen < ctx->authsize)
		goto data_size_err;

	areq_ctx->is_single_pass = true; /* defaulted to fast flow */

	switch (ctx->flow_mode) {
	case S_DIN_to_AES:
		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
			goto data_size_err;
		if (ctx->cipher_mode == DRV_CIPHER_CCM)
			break;
		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
			if (areq_ctx->plaintext_authenticate_only)
				areq_ctx->is_single_pass = false;
			break;
		}

		if (!IS_ALIGNED(assoclen, sizeof(u32)))
			areq_ctx->is_single_pass = false;

		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
		    !IS_ALIGNED(cipherlen, sizeof(u32)))
			areq_ctx->is_single_pass = false;

		break;
	case S_DIN_to_DES:
		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
			goto data_size_err;
		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
			areq_ctx->is_single_pass = false;
		break;
	default:
		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
		goto data_size_err;
	}

	return 0;

data_size_err:
	return -EINVAL;
}

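/* Encode the associated-data length into the A0 header per RFC 3610
 * section 2.2: two bytes for lengths below 2^16 - 2^8, otherwise the
 * 0xFF 0xFE marker followed by a four-byte length (the ten-byte form for
 * lengths >= 2^32 is not needed here).
 */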
static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
	unsigned int len = 0;

	if (header_size == 0)
		return 0;

	if (header_size < ((1UL << 16) - (1UL << 8))) {
		len = 2;

		pa0_buff[0] = (header_size >> 8) & 0xFF;
		pa0_buff[1] = header_size & 0xFF;
	} else {
		len = 6;

		pa0_buff[0] = 0xFF;
		pa0_buff[1] = 0xFE;
		pa0_buff[2] = (header_size >> 24) & 0xFF;
		pa0_buff[3] = (header_size >> 16) & 0xFF;
		pa0_buff[4] = (header_size >> 8) & 0xFF;
		pa0_buff[5] = header_size & 0xFF;
	}

	return len;
}

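/* Write the message length into the last csize octets of the B0 block in
 * big-endian order, as CCM requires; fail if the length does not fit.
 */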
static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
		  unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int cipher_flow_mode;
	dma_addr_t mac_result;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_to_HASH_and_DOUT;
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		cipher_flow_mode = AES_and_HASH;
		mac_result = req_ctx->icv_dma_addr;
	}

	/* load key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load ctr state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI,
		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load MAC key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load MAC state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* process assoc data */
	if (req->assoclen > 0) {
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(&req_ctx->ccm_adata_sg),
			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* process the cipher */
	if (req_ctx->cryptlen)
		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);

	/* Read temporal MAC */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load AES-CTR state (for last MAC calculation) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* encrypt the "T" value and store MAC in mac_state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     ctx->authsize, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
	return 0;
}

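/* Build the three CCM control blocks from the request: B0 (flags, nonce
 * and message length), A0 (the encoded assoclen prefix of the Adata) and
 * the initial counter block CTR0.
 */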
static int config_ccm_adata(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int lp = req->iv[0];
	/* Note: The code assumes that req->iv[0] already contains the value
	 * of L' of RFC 3610
	 */
	unsigned int l = lp + 1; /* This is L of RFC 3610. */
	unsigned int m = ctx->authsize; /* This is M of RFC 3610. */
	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - ctx->authsize);
	int rc;

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);

	/* taken from crypto/ccm.c */
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (l < 2 || l > 8) {
		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
		return -EINVAL;
	}
	memcpy(b0, req->iv, AES_BLOCK_SIZE);

	/* format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	*b0 |= (8 * ((m - 2) / 2));
	if (req->assoclen > 0)
		*b0 |= 64; /* Enable bit 6 if Adata exists. */

	rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
	if (rc) {
		dev_err(dev, "message len overflow detected");
		return rc;
	}
	/* END of "taken from crypto/ccm.c" */

	/* l(a) - size of associated data. */
	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);

	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
	req->iv[15] = 1;

	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
	ctr_count_0[15] = 0;

	return 0;
}

static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

	/* L' */
	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
	/* For RFC 4309, always use 4 bytes for message length
	 * (at most 2^32-1 bytes).
	 */
	areq_ctx->ctr_iv[0] = 3;

	/* In RFC 4309 there is an 11-byte nonce+IV part
	 * that we build here.
	 */
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
	       CCM_BLOCK_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
	       CCM_BLOCK_IV_SIZE);
	req->iv = areq_ctx->ctr_iv;
	req->assoclen -= CCM_BLOCK_IV_SIZE;
}

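/* Set up GHASH: the subkey H is derived by encrypting an all-zero block
 * with the AES key (H = E(K, 0^128)), then loaded into the hash engine
 * together with an all-zero initial state.
 */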
static void cc_set_ghash_desc(struct aead_request *req,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* process one zero block to generate hkey */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Load GHASH subkey */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Configure the hash engine to work with GHASH.
	 * Since it was not possible to extend the HASH submodes to add GHASH,
	 * the following command is necessary in order to
	 * select GHASH (according to the HW designers).
	 */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Load GHASH initial STATE (which is 0). (for any hash there is an
	 * initial state)
	 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	*seq_size = idx;
}

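/* Set up GCTR for the payload: the counter block incremented by 2
 * (gcm_iv_inc2) drives the data encryption, while the inc-by-1 value is
 * used later for the tag computation in cc_proc_gcm_result().
 */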
static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
		/* load AES/CTR initial CTR value inc by 2 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
		set_din_type(&desc[idx], DMA_DLLI,
			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
			     NS_BIT);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	}

	*seq_size = idx;
}

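/* Finalize GCM: GHASH the lengths block, read back the resulting GHASH
 * state, then encrypt it with GCTR over the initial counter to produce
 * the authentication tag.
 */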
1724static void cc_proc_gcm_result(struct aead_request *req,
1725 struct cc_hw_desc desc[],
1726 unsigned int *seq_size)
1727{
1728 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1729 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1730 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1731 dma_addr_t mac_result;
1732 unsigned int idx = *seq_size;
1733
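	/* Note (added): on decrypt the computed tag goes to mac_buf so it
	 * can later be compared against the received ICV; on encrypt it is
	 * written straight to the ICV location in the destination buffer.
	 */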
1734 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1735 mac_result = req_ctx->mac_buf_dma_addr;
1736 } else { /* Encrypt */
1737 mac_result = req_ctx->icv_dma_addr;
1738 }
1739
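	/* Note (added): GCM finalizes the tag as
	 * T = GCTR_K(J0, GHASH_H(A || C || len(A) || len(C))):
	 * hash the length block, dump the GHASH state, then encrypt it
	 * with the counter-1 block.
	 */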
1740 /* process(ghash) gcm_block_len */
1741 hw_desc_init(&desc[idx]);
1742 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1743 AES_BLOCK_SIZE, NS_BIT);
1744 set_flow_mode(&desc[idx], DIN_HASH);
1745 idx++;
1746
1747	/* Store GHASH state after GHASH(Associated Data + Ciphertext + LenBlock) */
1748 hw_desc_init(&desc[idx]);
1749 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1750 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1751 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1752 NS_BIT, 0);
1753 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1754 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1755 set_aes_not_hash_mode(&desc[idx]);
1756
1757 idx++;
1758
1759	/* Load the AES/CTR initial CTR value, incremented by 1 */
1760 hw_desc_init(&desc[idx]);
1761 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1762 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1763 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1764 AES_BLOCK_SIZE, NS_BIT);
1765 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1766 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1767 set_flow_mode(&desc[idx], S_DIN_to_AES);
1768 idx++;
1769
1770 /* Memory Barrier */
1771 hw_desc_init(&desc[idx]);
1772 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1773 set_dout_no_dma(&desc[idx], 0, 0, 1);
1774 idx++;
1775
1776	/* Process GCTR on the stored GHASH and store the MAC in mac_result */
1777 hw_desc_init(&desc[idx]);
1778 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1779 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1780 AES_BLOCK_SIZE, NS_BIT);
1781 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1782	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1783 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1784 idx++;
1785
1786 *seq_size = idx;
1787}
1788
1789static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1790 unsigned int *seq_size)
1791{
1792 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1793 unsigned int cipher_flow_mode;
1794
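	/* Note (added): GHASH is always computed over the ciphertext, so on
	 * decrypt the input feeds the AES and HASH engines in parallel,
	 * while on encrypt the AES output is forked to both HASH and DOUT.
	 */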
1795 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1796 cipher_flow_mode = AES_and_HASH;
1797 } else { /* Encrypt */
1798 cipher_flow_mode = AES_to_HASH_and_DOUT;
1799 }
1800
1801	/* In RFC4543 there is no data to encrypt; just copy data from src to dest. */
1802 if (req_ctx->plaintext_authenticate_only) {
1803 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1804 cc_set_ghash_desc(req, desc, seq_size);
1805 /* process(ghash) assoc data */
1806 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1807 cc_set_gctr_desc(req, desc, seq_size);
1808 cc_proc_gcm_result(req, desc, seq_size);
1809 return 0;
1810 }
1811
1812	/* For GCM and RFC4106 */
1813 cc_set_ghash_desc(req, desc, seq_size);
1814 /* process(ghash) assoc data */
1815 if (req->assoclen > 0)
1816 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1817 cc_set_gctr_desc(req, desc, seq_size);
1818 /* process(gctr+ghash) */
1819 if (req_ctx->cryptlen)
1820 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1821 cc_proc_gcm_result(req, desc, seq_size);
1822
1823 return 0;
1824}
1825
1826static int config_gcm_context(struct aead_request *req)
1827{
1828 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1829 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1830 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1831 struct device *dev = drvdata_to_dev(ctx->drvdata);
1832
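	/* Note (added): on decrypt, req->cryptlen includes the
	 * authsize-byte tag, so the actual ciphertext length excludes it.
	 */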
1833 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1834 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1835 req->cryptlen :
1836 (req->cryptlen - ctx->authsize);
1837 __be32 counter = cpu_to_be32(2);
1838
1839 dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
1840 __func__, cryptlen, req->assoclen, ctx->authsize);
1841
1842 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1843
1844 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1845
1846 memcpy(req->iv + 12, &counter, 4);
1847 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1848
1849 counter = cpu_to_be32(1);
1850 memcpy(req->iv + 12, &counter, 4);
1851 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1852
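	/* Note (added): the GCM length block is len(A) || len(C), both
	 * 64-bit big-endian *bit* counts (hence the multiplication by 8).
	 */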
1853 if (!req_ctx->plaintext_authenticate_only) {
1854 __be64 temp64;
1855
1856 temp64 = cpu_to_be64(req->assoclen * 8);
1857 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1858 temp64 = cpu_to_be64(cryptlen * 8);
1859 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1860 } else {
1861		/* RFC4543: all data (AAD, IV, plaintext) is considered
1862		 * additional data, i.e. nothing is encrypted.
1863		 */
1864 __be64 temp64;
1865
1866 temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
1867 cryptlen) * 8);
1868 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1869 temp64 = 0;
1870 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1871 }
1872
1873 return 0;
1874}
1875
1876static void cc_proc_rfc4_gcm(struct aead_request *req)
1877{
1878 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1879 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1880 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1881
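	/* Note (added): RFC4106/RFC4543 build the 12-byte GCM nonce as the
	 * 4-byte salt saved from the tail of the key at setkey time,
	 * followed by the 8-byte per-request IV from req->iv.
	 */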
1882 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1883 ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1884 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1885 GCM_BLOCK_RFC4_IV_SIZE);
1886 req->iv = areq_ctx->ctr_iv;
1887 req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1888}
1889
1890static int cc_proc_aead(struct aead_request *req,
1891 enum drv_crypto_direction direct)
1892{
1893 int rc = 0;
1894 int seq_len = 0;
1895 struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1896 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1897 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1898 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1899 struct device *dev = drvdata_to_dev(ctx->drvdata);
1900 struct cc_crypto_req cc_req = {};
1901
1902	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1903 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1904 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1905 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1906
1907 /* STAT_PHASE_0: Init and sanity checks */
1908
1909 /* Check data length according to mode */
1910 if (validate_data_size(ctx, direct, req)) {
1911 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1912 req->cryptlen, req->assoclen);
1913 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1914 return -EINVAL;
1915 }
1916
1917 /* Setup request structure */
1918 cc_req.user_cb = (void *)cc_aead_complete;
1919 cc_req.user_arg = (void *)req;
1920
1921 /* Setup request context */
1922 areq_ctx->gen_ctx.op_type = direct;
1923 areq_ctx->req_authsize = ctx->authsize;
1924 areq_ctx->cipher_mode = ctx->cipher_mode;
1925
1926 /* STAT_PHASE_1: Map buffers */
1927
1928 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
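		/* Note (added): RFC3686 counter block layout is
		 * 4-byte nonce || 8-byte IV || 4-byte big-endian block
		 * counter starting at 1.
		 */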
1929 /* Build CTR IV - Copy nonce from last 4 bytes in
1930 * CTR key to first 4 bytes in CTR IV
1931 */
1932 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1933 CTR_RFC3686_NONCE_SIZE);
1934		if (!areq_ctx->backup_giv) /* User-provided (not generated) IV */
1935 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1936 req->iv, CTR_RFC3686_IV_SIZE);
1937 /* Initialize counter portion of counter block */
1938 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1939 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1940
1941 /* Replace with counter iv */
1942 req->iv = areq_ctx->ctr_iv;
1943 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1944 } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1945 (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1946 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1947 if (areq_ctx->ctr_iv != req->iv) {
1948 memcpy(areq_ctx->ctr_iv, req->iv,
1949 crypto_aead_ivsize(tfm));
1950 req->iv = areq_ctx->ctr_iv;
1951 }
1952 } else {
1953 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1954 }
1955
1956 if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1957 rc = config_ccm_adata(req);
1958 if (rc) {
1959			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1960				rc);
1961 goto exit;
1962 }
1963 } else {
1964 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1965 }
1966
1967 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1968 rc = config_gcm_context(req);
1969 if (rc) {
1970			dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1971				rc);
1972 goto exit;
1973 }
1974 }
1975
1976 rc = cc_map_aead_request(ctx->drvdata, req);
1977 if (rc) {
1978 dev_err(dev, "map_request() failed\n");
1979 goto exit;
1980 }
1981
1982 /* do we need to generate IV? */
1983 if (areq_ctx->backup_giv) {
1984 /* set the DMA mapped IV address*/
1985 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1986 cc_req.ivgen_dma_addr[0] =
1987 areq_ctx->gen_ctx.iv_dma_addr +
1988 CTR_RFC3686_NONCE_SIZE;
1989 cc_req.ivgen_dma_addr_len = 1;
1990 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1991			/* In CCM, the IV needs to exist both inside B0 and
1992			 * inside the counter. It is also copied to iv_dma_addr
1993			 * for other reasons (such as returning it to the user),
1994			 * so three (identical) IV outputs are used.
1995			 */
1996 cc_req.ivgen_dma_addr[0] =
1997 areq_ctx->gen_ctx.iv_dma_addr +
1998 CCM_BLOCK_IV_OFFSET;
1999 cc_req.ivgen_dma_addr[1] =
2000 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2001 CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
2002 cc_req.ivgen_dma_addr[2] =
2003 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2004 CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2005 cc_req.ivgen_dma_addr_len = 3;
2006 } else {
2007 cc_req.ivgen_dma_addr[0] =
2008 areq_ctx->gen_ctx.iv_dma_addr;
2009 cc_req.ivgen_dma_addr_len = 1;
2010 }
2011
2012		/* Set the IV size (8 or 16 bytes) */
2013 cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2014 }
2015
2016 /* STAT_PHASE_2: Create sequence */
2017
2018 /* Load MLLI tables to SRAM if necessary */
2019 cc_mlli_to_sram(req, desc, &seq_len);
2020
2021 /*TODO: move seq len by reference */
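	/* Note (added): CCM and GCM are registered with auth_mode
	 * DRV_HASH_NULL, so they are dispatched by cipher_mode here rather
	 * than by hash type.
	 */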
2022 switch (ctx->auth_mode) {
2023 case DRV_HASH_SHA1:
2024 case DRV_HASH_SHA256:
2025 cc_hmac_authenc(req, desc, &seq_len);
2026 break;
2027 case DRV_HASH_XCBC_MAC:
2028 cc_xcbc_authenc(req, desc, &seq_len);
2029 break;
2030 case DRV_HASH_NULL:
2031 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2032 cc_ccm(req, desc, &seq_len);
2033 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2034 cc_gcm(req, desc, &seq_len);
2035 break;
2036 default:
2037 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2038 cc_unmap_aead_request(dev, req);
2039 rc = -ENOTSUPP;
2040 goto exit;
2041 }
2042
2043 /* STAT_PHASE_3: Lock HW and push sequence */
2044
2045 rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2046
2047 if (rc != -EINPROGRESS && rc != -EBUSY) {
2048 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2049 cc_unmap_aead_request(dev, req);
2050 }
2051
2052exit:
2053 return rc;
2054}
2055
2056static int cc_aead_encrypt(struct aead_request *req)
2057{
2058 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2059 int rc;
2060
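	/* Note (added): the caller's IV pointer is saved because
	 * cc_proc_aead() may redirect req->iv to the driver-built counter
	 * IV; it is restored on any synchronous completion path.
	 */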
2061 /* No generated IV required */
2062 areq_ctx->backup_iv = req->iv;
2063 areq_ctx->backup_giv = NULL;
2064 areq_ctx->is_gcm4543 = false;
2065
2066 areq_ctx->plaintext_authenticate_only = false;
2067
2068 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2069 if (rc != -EINPROGRESS && rc != -EBUSY)
2070 req->iv = areq_ctx->backup_iv;
2071
2072 return rc;
2073}
2074
2075static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2076{
2077 /* Very similar to cc_aead_encrypt() above. */
2078
2079 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2080 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2081 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2082 struct device *dev = drvdata_to_dev(ctx->drvdata);
2083 int rc = -EINVAL;
2084
2085 if (!valid_assoclen(req)) {
2086 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2087 goto out;
2088 }
2089
2090 /* No generated IV required */
2091 areq_ctx->backup_iv = req->iv;
2092 areq_ctx->backup_giv = NULL;
2093 areq_ctx->is_gcm4543 = true;
2094
2095 cc_proc_rfc4309_ccm(req);
2096
2097 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2098 if (rc != -EINPROGRESS && rc != -EBUSY)
2099 req->iv = areq_ctx->backup_iv;
2100out:
2101 return rc;
2102}
2103
2104static int cc_aead_decrypt(struct aead_request *req)
2105{
2106 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2107 int rc;
2108
2109 /* No generated IV required */
2110 areq_ctx->backup_iv = req->iv;
2111 areq_ctx->backup_giv = NULL;
2112 areq_ctx->is_gcm4543 = false;
2113
2114 areq_ctx->plaintext_authenticate_only = false;
2115
2116 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2117 if (rc != -EINPROGRESS && rc != -EBUSY)
2118 req->iv = areq_ctx->backup_iv;
2119
2120 return rc;
2121}
2122
2123static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2124{
2125 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2126 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2127 struct device *dev = drvdata_to_dev(ctx->drvdata);
2128 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2129 int rc = -EINVAL;
2130
2131 if (!valid_assoclen(req)) {
2132 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2133 goto out;
2134 }
2135
2136 /* No generated IV required */
2137 areq_ctx->backup_iv = req->iv;
2138 areq_ctx->backup_giv = NULL;
2139
2140 areq_ctx->is_gcm4543 = true;
2141 cc_proc_rfc4309_ccm(req);
2142
2143 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2144 if (rc != -EINPROGRESS && rc != -EBUSY)
2145 req->iv = areq_ctx->backup_iv;
2146
2147out:
2148 return rc;
2149}
2150
2151static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2152 unsigned int keylen)
2153{
2154 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2155 struct device *dev = drvdata_to_dev(ctx->drvdata);
2156
2157 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2158
2159 if (keylen < 4)
2160 return -EINVAL;
2161
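	/* Note (added): per RFC4106, the last 4 bytes of the key material
	 * are the nonce salt; split them off into ctx->ctr_nonce and hand
	 * the remaining AES key to the generic setkey.
	 */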
2162 keylen -= 4;
2163 memcpy(ctx->ctr_nonce, key + keylen, 4);
2164
2165 return cc_aead_setkey(tfm, key, keylen);
2166}
2167
2168static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2169 unsigned int keylen)
2170{
2171 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2172 struct device *dev = drvdata_to_dev(ctx->drvdata);
2173
2174 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2175
2176 if (keylen < 4)
2177 return -EINVAL;
2178
2179 keylen -= 4;
2180 memcpy(ctx->ctr_nonce, key + keylen, 4);
2181
2182 return cc_aead_setkey(tfm, key, keylen);
2183}
2184
2185static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2186 unsigned int authsize)
2187{
2188 switch (authsize) {
2189 case 4:
2190 case 8:
2191 case 12:
2192 case 13:
2193 case 14:
2194 case 15:
2195 case 16:
2196 break;
2197 default:
2198 return -EINVAL;
2199 }
2200
2201 return cc_aead_setauthsize(authenc, authsize);
2202}
2203
2204static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2205 unsigned int authsize)
2206{
2207 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2208 struct device *dev = drvdata_to_dev(ctx->drvdata);
2209
2210 dev_dbg(dev, "authsize %d\n", authsize);
2211
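	/* Note (added): RFC4106 permits ICV lengths of 8, 12, or 16 octets only. */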
2212 switch (authsize) {
2213 case 8:
2214 case 12:
2215 case 16:
2216 break;
2217 default:
2218 return -EINVAL;
2219 }
2220
2221 return cc_aead_setauthsize(authenc, authsize);
2222}
2223
2224static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2225 unsigned int authsize)
2226{
2227 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2228 struct device *dev = drvdata_to_dev(ctx->drvdata);
2229
2230 dev_dbg(dev, "authsize %d\n", authsize);
2231
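	/* Note (added): RFC4543 (GMAC) mandates a full 16-octet ICV. */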
2232 if (authsize != 16)
2233 return -EINVAL;
2234
2235 return cc_aead_setauthsize(authenc, authsize);
2236}
2237
2238static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2239{
2240 /* Very similar to cc_aead_encrypt() above. */
2241
2242 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2243 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2244 struct device *dev = drvdata_to_dev(ctx->drvdata);
2245 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2246 int rc = -EINVAL;
2247
2248 if (!valid_assoclen(req)) {
2249 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2250 goto out;
2251 }
2252
2253 /* No generated IV required */
2254 areq_ctx->backup_iv = req->iv;
2255 areq_ctx->backup_giv = NULL;
2256
2257 areq_ctx->plaintext_authenticate_only = false;
2258
2259 cc_proc_rfc4_gcm(req);
2260 areq_ctx->is_gcm4543 = true;
2261
2262 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2263 if (rc != -EINPROGRESS && rc != -EBUSY)
2264 req->iv = areq_ctx->backup_iv;
2265out:
2266 return rc;
2267}
2268
2269static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2270{
2271 /* Very similar to cc_aead_encrypt() above. */
2272
2273 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2274 int rc;
2275
2276	/* Plaintext is not encrypted with RFC4543 */
2277 areq_ctx->plaintext_authenticate_only = true;
2278
2279 /* No generated IV required */
2280 areq_ctx->backup_iv = req->iv;
2281 areq_ctx->backup_giv = NULL;
2282
2283 cc_proc_rfc4_gcm(req);
2284 areq_ctx->is_gcm4543 = true;
2285
2286 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2287 if (rc != -EINPROGRESS && rc != -EBUSY)
2288 req->iv = areq_ctx->backup_iv;
2289
2290 return rc;
2291}
2292
2293static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2294{
2295 /* Very similar to cc_aead_decrypt() above. */
2296
2297 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2298 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2299 struct device *dev = drvdata_to_dev(ctx->drvdata);
2300 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2301 int rc = -EINVAL;
2302
2303 if (!valid_assoclen(req)) {
2304 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2305 goto out;
2306 }
2307
2308 /* No generated IV required */
2309 areq_ctx->backup_iv = req->iv;
2310 areq_ctx->backup_giv = NULL;
2311
2312 areq_ctx->plaintext_authenticate_only = false;
2313
2314 cc_proc_rfc4_gcm(req);
2315 areq_ctx->is_gcm4543 = true;
2316
2317 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2318 if (rc != -EINPROGRESS && rc != -EBUSY)
2319 req->iv = areq_ctx->backup_iv;
2320out:
2321 return rc;
2322}
2323
2324static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2325{
2326 /* Very similar to cc_aead_decrypt() above. */
2327
2328 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2329 int rc;
2330
2331	/* Plaintext is not decrypted with RFC4543 */
2332 areq_ctx->plaintext_authenticate_only = true;
2333
2334 /* No generated IV required */
2335 areq_ctx->backup_iv = req->iv;
2336 areq_ctx->backup_giv = NULL;
2337
2338 cc_proc_rfc4_gcm(req);
2339 areq_ctx->is_gcm4543 = true;
2340
2341 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2342 if (rc != -EINPROGRESS && rc != -EBUSY)
2343 req->iv = areq_ctx->backup_iv;
2344
2345 return rc;
2346}
2347
2348/* aead alg */
2349static struct cc_alg_template aead_algs[] = {
2350 {
2351 .name = "authenc(hmac(sha1),cbc(aes))",
2352 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2353 .blocksize = AES_BLOCK_SIZE,
2354 .template_aead = {
2355 .setkey = cc_aead_setkey,
2356 .setauthsize = cc_aead_setauthsize,
2357 .encrypt = cc_aead_encrypt,
2358 .decrypt = cc_aead_decrypt,
2359 .init = cc_aead_init,
2360 .exit = cc_aead_exit,
2361 .ivsize = AES_BLOCK_SIZE,
2362 .maxauthsize = SHA1_DIGEST_SIZE,
2363 },
2364 .cipher_mode = DRV_CIPHER_CBC,
2365 .flow_mode = S_DIN_to_AES,
2366 .auth_mode = DRV_HASH_SHA1,
2367		.min_hw_rev = CC_HW_REV_630,
2368		.std_body = CC_STD_NIST,
2369 },
2370 {
2371 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2372 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2373 .blocksize = DES3_EDE_BLOCK_SIZE,
2374 .template_aead = {
2375 .setkey = cc_aead_setkey,
2376 .setauthsize = cc_aead_setauthsize,
2377 .encrypt = cc_aead_encrypt,
2378 .decrypt = cc_aead_decrypt,
2379 .init = cc_aead_init,
2380 .exit = cc_aead_exit,
2381 .ivsize = DES3_EDE_BLOCK_SIZE,
2382 .maxauthsize = SHA1_DIGEST_SIZE,
2383 },
2384 .cipher_mode = DRV_CIPHER_CBC,
2385 .flow_mode = S_DIN_to_DES,
2386 .auth_mode = DRV_HASH_SHA1,
2387		.min_hw_rev = CC_HW_REV_630,
2388		.std_body = CC_STD_NIST,
2389 },
2390 {
2391 .name = "authenc(hmac(sha256),cbc(aes))",
2392 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2393 .blocksize = AES_BLOCK_SIZE,
2394 .template_aead = {
2395 .setkey = cc_aead_setkey,
2396 .setauthsize = cc_aead_setauthsize,
2397 .encrypt = cc_aead_encrypt,
2398 .decrypt = cc_aead_decrypt,
2399 .init = cc_aead_init,
2400 .exit = cc_aead_exit,
2401 .ivsize = AES_BLOCK_SIZE,
2402 .maxauthsize = SHA256_DIGEST_SIZE,
2403 },
2404 .cipher_mode = DRV_CIPHER_CBC,
2405 .flow_mode = S_DIN_to_AES,
2406 .auth_mode = DRV_HASH_SHA256,
2407		.min_hw_rev = CC_HW_REV_630,
2408		.std_body = CC_STD_NIST,
2409 },
2410 {
2411 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2412 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2413 .blocksize = DES3_EDE_BLOCK_SIZE,
2414 .template_aead = {
2415 .setkey = cc_aead_setkey,
2416 .setauthsize = cc_aead_setauthsize,
2417 .encrypt = cc_aead_encrypt,
2418 .decrypt = cc_aead_decrypt,
2419 .init = cc_aead_init,
2420 .exit = cc_aead_exit,
2421 .ivsize = DES3_EDE_BLOCK_SIZE,
2422 .maxauthsize = SHA256_DIGEST_SIZE,
2423 },
2424 .cipher_mode = DRV_CIPHER_CBC,
2425 .flow_mode = S_DIN_to_DES,
2426 .auth_mode = DRV_HASH_SHA256,
2427		.min_hw_rev = CC_HW_REV_630,
2428		.std_body = CC_STD_NIST,
2429 },
2430 {
2431 .name = "authenc(xcbc(aes),cbc(aes))",
2432 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2433 .blocksize = AES_BLOCK_SIZE,
2434 .template_aead = {
2435 .setkey = cc_aead_setkey,
2436 .setauthsize = cc_aead_setauthsize,
2437 .encrypt = cc_aead_encrypt,
2438 .decrypt = cc_aead_decrypt,
2439 .init = cc_aead_init,
2440 .exit = cc_aead_exit,
2441 .ivsize = AES_BLOCK_SIZE,
2442 .maxauthsize = AES_BLOCK_SIZE,
2443 },
2444 .cipher_mode = DRV_CIPHER_CBC,
2445 .flow_mode = S_DIN_to_AES,
2446 .auth_mode = DRV_HASH_XCBC_MAC,
2447		.min_hw_rev = CC_HW_REV_630,
2448		.std_body = CC_STD_NIST,
2449 },
2450 {
2451 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2452 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2453 .blocksize = 1,
2454 .template_aead = {
2455 .setkey = cc_aead_setkey,
2456 .setauthsize = cc_aead_setauthsize,
2457 .encrypt = cc_aead_encrypt,
2458 .decrypt = cc_aead_decrypt,
2459 .init = cc_aead_init,
2460 .exit = cc_aead_exit,
2461 .ivsize = CTR_RFC3686_IV_SIZE,
2462 .maxauthsize = SHA1_DIGEST_SIZE,
2463 },
2464 .cipher_mode = DRV_CIPHER_CTR,
2465 .flow_mode = S_DIN_to_AES,
2466 .auth_mode = DRV_HASH_SHA1,
2467		.min_hw_rev = CC_HW_REV_630,
2468		.std_body = CC_STD_NIST,
2469 },
2470 {
2471 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2472 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2473 .blocksize = 1,
2474 .template_aead = {
2475 .setkey = cc_aead_setkey,
2476 .setauthsize = cc_aead_setauthsize,
2477 .encrypt = cc_aead_encrypt,
2478 .decrypt = cc_aead_decrypt,
2479 .init = cc_aead_init,
2480 .exit = cc_aead_exit,
2481 .ivsize = CTR_RFC3686_IV_SIZE,
2482 .maxauthsize = SHA256_DIGEST_SIZE,
2483 },
2484 .cipher_mode = DRV_CIPHER_CTR,
2485 .flow_mode = S_DIN_to_AES,
2486 .auth_mode = DRV_HASH_SHA256,
2487		.min_hw_rev = CC_HW_REV_630,
2488		.std_body = CC_STD_NIST,
2489 },
2490 {
2491 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2492 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2493 .blocksize = 1,
2494 .template_aead = {
2495 .setkey = cc_aead_setkey,
2496 .setauthsize = cc_aead_setauthsize,
2497 .encrypt = cc_aead_encrypt,
2498 .decrypt = cc_aead_decrypt,
2499 .init = cc_aead_init,
2500 .exit = cc_aead_exit,
2501 .ivsize = CTR_RFC3686_IV_SIZE,
2502 .maxauthsize = AES_BLOCK_SIZE,
2503 },
2504 .cipher_mode = DRV_CIPHER_CTR,
2505 .flow_mode = S_DIN_to_AES,
2506 .auth_mode = DRV_HASH_XCBC_MAC,
2507		.min_hw_rev = CC_HW_REV_630,
2508		.std_body = CC_STD_NIST,
2509 },
2510 {
2511 .name = "ccm(aes)",
2512 .driver_name = "ccm-aes-ccree",
2513 .blocksize = 1,
2514 .template_aead = {
2515 .setkey = cc_aead_setkey,
2516 .setauthsize = cc_ccm_setauthsize,
2517 .encrypt = cc_aead_encrypt,
2518 .decrypt = cc_aead_decrypt,
2519 .init = cc_aead_init,
2520 .exit = cc_aead_exit,
2521 .ivsize = AES_BLOCK_SIZE,
2522 .maxauthsize = AES_BLOCK_SIZE,
2523 },
2524 .cipher_mode = DRV_CIPHER_CCM,
2525 .flow_mode = S_DIN_to_AES,
2526 .auth_mode = DRV_HASH_NULL,
2527		.min_hw_rev = CC_HW_REV_630,
2528		.std_body = CC_STD_NIST,
2529 },
2530 {
2531 .name = "rfc4309(ccm(aes))",
2532 .driver_name = "rfc4309-ccm-aes-ccree",
2533 .blocksize = 1,
2534 .template_aead = {
2535 .setkey = cc_rfc4309_ccm_setkey,
2536 .setauthsize = cc_rfc4309_ccm_setauthsize,
2537 .encrypt = cc_rfc4309_ccm_encrypt,
2538 .decrypt = cc_rfc4309_ccm_decrypt,
2539 .init = cc_aead_init,
2540 .exit = cc_aead_exit,
2541 .ivsize = CCM_BLOCK_IV_SIZE,
2542 .maxauthsize = AES_BLOCK_SIZE,
2543 },
2544 .cipher_mode = DRV_CIPHER_CCM,
2545 .flow_mode = S_DIN_to_AES,
2546 .auth_mode = DRV_HASH_NULL,
2547		.min_hw_rev = CC_HW_REV_630,
2548		.std_body = CC_STD_NIST,
2549 },
2550 {
2551 .name = "gcm(aes)",
2552 .driver_name = "gcm-aes-ccree",
2553 .blocksize = 1,
2554 .template_aead = {
2555 .setkey = cc_aead_setkey,
2556 .setauthsize = cc_gcm_setauthsize,
2557 .encrypt = cc_aead_encrypt,
2558 .decrypt = cc_aead_decrypt,
2559 .init = cc_aead_init,
2560 .exit = cc_aead_exit,
2561 .ivsize = 12,
2562 .maxauthsize = AES_BLOCK_SIZE,
2563 },
2564 .cipher_mode = DRV_CIPHER_GCTR,
2565 .flow_mode = S_DIN_to_AES,
2566 .auth_mode = DRV_HASH_NULL,
2567		.min_hw_rev = CC_HW_REV_630,
2568		.std_body = CC_STD_NIST,
2569 },
2570 {
2571 .name = "rfc4106(gcm(aes))",
2572 .driver_name = "rfc4106-gcm-aes-ccree",
2573 .blocksize = 1,
2574 .template_aead = {
2575 .setkey = cc_rfc4106_gcm_setkey,
2576 .setauthsize = cc_rfc4106_gcm_setauthsize,
2577 .encrypt = cc_rfc4106_gcm_encrypt,
2578 .decrypt = cc_rfc4106_gcm_decrypt,
2579 .init = cc_aead_init,
2580 .exit = cc_aead_exit,
2581 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2582 .maxauthsize = AES_BLOCK_SIZE,
2583 },
2584 .cipher_mode = DRV_CIPHER_GCTR,
2585 .flow_mode = S_DIN_to_AES,
2586 .auth_mode = DRV_HASH_NULL,
2587		.min_hw_rev = CC_HW_REV_630,
2588		.std_body = CC_STD_NIST,
2589 },
2590 {
2591 .name = "rfc4543(gcm(aes))",
2592 .driver_name = "rfc4543-gcm-aes-ccree",
2593 .blocksize = 1,
2594 .template_aead = {
2595 .setkey = cc_rfc4543_gcm_setkey,
2596 .setauthsize = cc_rfc4543_gcm_setauthsize,
2597 .encrypt = cc_rfc4543_gcm_encrypt,
2598 .decrypt = cc_rfc4543_gcm_decrypt,
2599 .init = cc_aead_init,
2600 .exit = cc_aead_exit,
2601 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2602 .maxauthsize = AES_BLOCK_SIZE,
2603 },
2604 .cipher_mode = DRV_CIPHER_GCTR,
2605 .flow_mode = S_DIN_to_AES,
2606 .auth_mode = DRV_HASH_NULL,
2607		.min_hw_rev = CC_HW_REV_630,
2608		.std_body = CC_STD_NIST,
2609 },
2610};
2611
2612static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2613 struct device *dev)
2614{
2615 struct cc_crypto_alg *t_alg;
2616 struct aead_alg *alg;
2617
2618 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2619 if (!t_alg)
2620 return ERR_PTR(-ENOMEM);
2621
2622 alg = &tmpl->template_aead;
2623
2624 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2625 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2626 tmpl->driver_name);
2627 alg->base.cra_module = THIS_MODULE;
2628 alg->base.cra_priority = CC_CRA_PRIO;
2629
2630 alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2631	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2632 alg->init = cc_aead_init;
2633 alg->exit = cc_aead_exit;
2634
2635 t_alg->aead_alg = *alg;
2636
2637 t_alg->cipher_mode = tmpl->cipher_mode;
2638 t_alg->flow_mode = tmpl->flow_mode;
2639 t_alg->auth_mode = tmpl->auth_mode;
2640
2641 return t_alg;
2642}
2643
2644int cc_aead_free(struct cc_drvdata *drvdata)
2645{
2646 struct cc_crypto_alg *t_alg, *n;
2647 struct cc_aead_handle *aead_handle =
2648 (struct cc_aead_handle *)drvdata->aead_handle;
2649
2650 if (aead_handle) {
2651 /* Remove registered algs */
2652 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2653 entry) {
2654 crypto_unregister_aead(&t_alg->aead_alg);
2655 list_del(&t_alg->entry);
2656 kfree(t_alg);
2657 }
2658 kfree(aead_handle);
2659 drvdata->aead_handle = NULL;
2660 }
2661
2662 return 0;
2663}
2664
2665int cc_aead_alloc(struct cc_drvdata *drvdata)
2666{
2667 struct cc_aead_handle *aead_handle;
2668 struct cc_crypto_alg *t_alg;
2669 int rc = -ENOMEM;
2670 int alg;
2671 struct device *dev = drvdata_to_dev(drvdata);
2672
2673 aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2674 if (!aead_handle) {
2675 rc = -ENOMEM;
2676 goto fail0;
2677 }
2678
2679 INIT_LIST_HEAD(&aead_handle->aead_list);
2680 drvdata->aead_handle = aead_handle;
2681
2682 aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2683 MAX_HMAC_DIGEST_SIZE);
2684
2685 if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2686 dev_err(dev, "SRAM pool exhausted\n");
2687 rc = -ENOMEM;
2688 goto fail1;
2689 }
2690
2691 /* Linux crypto */
2692 for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2693 if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2694 !(drvdata->std_bodies & aead_algs[alg].std_body))
2695 continue;
2696
2697 t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2698 if (IS_ERR(t_alg)) {
2699 rc = PTR_ERR(t_alg);
2700 dev_err(dev, "%s alg allocation failed\n",
2701 aead_algs[alg].driver_name);
2702 goto fail1;
2703 }
2704 t_alg->drvdata = drvdata;
2705 rc = crypto_register_aead(&t_alg->aead_alg);
2706 if (rc) {
2707 dev_err(dev, "%s alg registration failed\n",
2708 t_alg->aead_alg.base.cra_driver_name);
2709 goto fail2;
2710 } else {
2711 list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2712 dev_dbg(dev, "Registered %s\n",
2713 t_alg->aead_alg.base.cra_driver_name);
2714 }
2715 }
2716
2717 return 0;
2718
2719fail2:
2720 kfree(t_alg);
2721fail1:
2722 cc_aead_free(drvdata);
2723fail0:
2724 return rc;
2725}