drivers/staging/ccree/ssi_hash.c
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"

#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
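
/*
 * The opad/tmp-keys buffer doubles as storage for either an HMAC opad
 * block (up to one hash block) or the three XCBC-MAC derived keys
 * (3 * AES_BLOCK_SIZE), hence the MAX() above.
 */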

struct ssi_hash_handle {
        ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
        ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
        struct list_head hash_list;
        struct completion init_comp;
};

static const u32 digest_len_init[] = {
        0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 md5_init[] = {
        SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha1_init[] = {
        SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
        SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
        SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
        SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
        SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
        0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const u64 sha384_init[] = {
        SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
        SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const u64 sha512_init[] = {
        SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
        SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
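
/*
 * Notes on the tables above:
 * - The initial ("larval") digest words are listed high word first,
 *   reversed relative to <crypto/sha.h>; this appears to match the
 *   order in which the CryptoCell HW loads its state registers.
 * - md5_init[] reuses SHA1_H3..SHA1_H0 because MD5's four IV words are
 *   numerically identical to the first four SHA-1 constants.
 * - digest_len_init is 0x40 (64) and digest_len_sha512_init is 0x80
 *   (128): for HMAC these seed the running length with one already
 *   consumed ipad block (i.e. the hash block size in bytes).
 */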

static void ssi_hash_create_xcbc_setup(
        struct ahash_request *areq,
        struct cc_hw_desc desc[],
        unsigned int *seq_size);

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
                                       struct cc_hw_desc desc[],
                                       unsigned int *seq_size);

struct ssi_hash_alg {
        struct list_head entry;
        int hash_mode;
        int hw_mode;
        int inter_digestsize;
        struct ssi_drvdata *drvdata;
        struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
        u32 keylen;
        dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct ssi_hash_ctx {
        struct ssi_drvdata *drvdata;
        /* holds the origin digest; the digest after "setkey" if HMAC,
         * the initial digest if HASH.
         */
        u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
        u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;

        dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
        dma_addr_t digest_buff_dma_addr;
        /* used for HMAC with a key larger than the mode's block size */
        struct hash_key_req_ctx key_params;
        int hash_mode;
        int hw_mode;
        int inter_digestsize;
        struct completion setkey_comp;
        bool is_hmac;
};

static void ssi_hash_create_data_desc(
        struct ahash_req_ctx *areq_ctx,
        struct ssi_hash_ctx *ctx,
        unsigned int flow_mode, struct cc_hw_desc desc[],
        bool is_not_last_data,
        unsigned int *seq_size);

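/*
 * MD5 is little-endian and SHA-384/512 use 64-bit state words, so for
 * those modes the HW result is byte-swapped on the way out; for the
 * remaining (32-bit big-endian) SHA variants the result is written as
 * little-endian words. This reading is inferred from the flag names.
 */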
static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
        if (unlikely((mode == DRV_HASH_MD5) ||
                     (mode == DRV_HASH_SHA384) ||
                     (mode == DRV_HASH_SHA512))) {
                set_bytes_swap(desc, 1);
        } else {
                set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        }
}

static int ssi_hash_map_result(struct device *dev,
                               struct ahash_req_ctx *state,
                               unsigned int digestsize)
{
        state->digest_result_dma_addr =
                dma_map_single(dev, (void *)state->digest_result_buff,
                               digestsize,
                               DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
                dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
                        digestsize);
                return -ENOMEM;
        }
        dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
                digestsize, state->digest_result_buff,
                &state->digest_result_dma_addr);

        return 0;
}

static int ssi_hash_map_request(struct device *dev,
                                struct ahash_req_ctx *state,
                                struct ssi_hash_ctx *ctx)
{
        bool is_hmac = ctx->is_hmac;
        ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc;
        int rc = -ENOMEM;

        state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->buff0)
                goto fail0;

        state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->buff1)
                goto fail_buff0;

        state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
        if (!state->digest_result_buff)
                goto fail_buff1;

        state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
        if (!state->digest_buff)
                goto fail_digest_result_buff;

        dev_dbg(dev, "Allocated digest-buffer in context state->digest_buff=@%p\n",
                state->digest_buff);
        if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
                state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
                if (!state->digest_bytes_len)
                        goto fail1;

                dev_dbg(dev, "Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n",
                        state->digest_bytes_len);
        } else {
                state->digest_bytes_len = NULL;
        }

        state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
        if (!state->opad_digest_buff)
                goto fail2;

        dev_dbg(dev, "Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n",
                state->opad_digest_buff);

        state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
                dev_err(dev, "Mapping digest buffer %d B at va=%pK for DMA failed\n",
                        ctx->inter_digestsize, state->digest_buff);
                goto fail3;
        }
        dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
                ctx->inter_digestsize, state->digest_buff,
                &state->digest_buff_dma_addr);

        if (is_hmac) {
                dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
                        memset(state->digest_buff, 0, ctx->inter_digestsize);
                } else { /*sha*/
                        memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
                        if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
                                memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
                        else
                                memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#else
                        memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#endif
                }
                dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);

                if (ctx->hash_mode != DRV_HASH_NULL) {
                        dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                        memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
                }
        } else { /*hash*/
                /* Copy the initial digests if hash flow. The SRAM contains the
                 * initial digests in the expected order for all SHA*
                 */
                hw_desc_init(&desc);
                set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
                set_dout_dlli(&desc, state->digest_buff_dma_addr,
                              ctx->inter_digestsize, NS_BIT, 0);
                set_flow_mode(&desc, BYPASS);

                rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
                if (unlikely(rc != 0)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        goto fail4;
                }
        }

        if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
                state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
                        dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
                                HASH_LEN_SIZE, state->digest_bytes_len);
                        goto fail4;
                }
                dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
                        HASH_LEN_SIZE, state->digest_bytes_len,
                        &state->digest_bytes_len_dma_addr);
        } else {
                state->digest_bytes_len_dma_addr = 0;
        }

        if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
                state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
                        dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
                                ctx->inter_digestsize,
                                state->opad_digest_buff);
                        goto fail5;
                }
                dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
                        ctx->inter_digestsize, state->opad_digest_buff,
                        &state->opad_digest_dma_addr);
        } else {
                state->opad_digest_dma_addr = 0;
        }
        state->buff0_cnt = 0;
        state->buff1_cnt = 0;
        state->buff_index = 0;
        state->mlli_params.curr_pool = NULL;

        return 0;

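/*
 * Error unwind: each label releases, in reverse order, everything that
 * was successfully allocated or mapped before the failing step.
 */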
fail5:
        if (state->digest_bytes_len_dma_addr != 0) {
                dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                state->digest_bytes_len_dma_addr = 0;
        }
fail4:
        if (state->digest_buff_dma_addr != 0) {
                dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                state->digest_buff_dma_addr = 0;
        }
fail3:
        kfree(state->opad_digest_buff);
fail2:
        kfree(state->digest_bytes_len);
fail1:
        kfree(state->digest_buff);
fail_digest_result_buff:
        kfree(state->digest_result_buff);
        state->digest_result_buff = NULL;
fail_buff1:
        kfree(state->buff1);
        state->buff1 = NULL;
fail_buff0:
        kfree(state->buff0);
        state->buff0 = NULL;
fail0:
        return rc;
}

static void ssi_hash_unmap_request(struct device *dev,
                                   struct ahash_req_ctx *state,
                                   struct ssi_hash_ctx *ctx)
{
        if (state->digest_buff_dma_addr != 0) {
                dma_unmap_single(dev, state->digest_buff_dma_addr,
                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
                        &state->digest_buff_dma_addr);
                state->digest_buff_dma_addr = 0;
        }
        if (state->digest_bytes_len_dma_addr != 0) {
                dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
                                 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
                dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
                        &state->digest_bytes_len_dma_addr);
                state->digest_bytes_len_dma_addr = 0;
        }
        if (state->opad_digest_dma_addr != 0) {
                dma_unmap_single(dev, state->opad_digest_dma_addr,
                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
                dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
                        &state->opad_digest_dma_addr);
                state->opad_digest_dma_addr = 0;
        }

        kfree(state->opad_digest_buff);
        kfree(state->digest_bytes_len);
        kfree(state->digest_buff);
        kfree(state->digest_result_buff);
        kfree(state->buff1);
        kfree(state->buff0);
}

static void ssi_hash_unmap_result(struct device *dev,
                                  struct ahash_req_ctx *state,
                                  unsigned int digestsize, u8 *result)
{
        if (state->digest_result_dma_addr != 0) {
                dma_unmap_single(dev,
                                 state->digest_result_dma_addr,
                                 digestsize,
                                 DMA_BIDIRECTIONAL);
                dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
                        state->digest_result_buff,
                        &state->digest_result_dma_addr, digestsize);
                memcpy(result,
                       state->digest_result_buff,
                       digestsize);
        }
        state->digest_result_dma_addr = 0;
}

static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);

        dev_dbg(dev, "req=%pK\n", req);

        ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
        req->base.complete(&req->base, 0);
}

static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        u32 digestsize = crypto_ahash_digestsize(tfm);

        dev_dbg(dev, "req=%pK\n", req);

        ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
        ssi_hash_unmap_result(dev, state, digestsize, req->result);
        ssi_hash_unmap_request(dev, state, ctx);
        req->base.complete(&req->base, 0);
}

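/*
 * Note: ssi_hash_complete() below is currently identical to
 * ssi_hash_digest_complete(); the two are presumably kept separate so
 * the final/finup and digest flows can diverge independently.
 */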
static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct ahash_request *req = (struct ahash_request *)ssi_req;
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        u32 digestsize = crypto_ahash_digestsize(tfm);

        dev_dbg(dev, "req=%pK\n", req);

        ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
        ssi_hash_unmap_result(dev, state, digestsize, req->result);
        ssi_hash_unmap_request(dev, state, ctx);
        req->base.complete(&req->base, 0);
}

static int ssi_hash_digest(struct ahash_req_ctx *state,
                           struct ssi_hash_ctx *ctx,
                           unsigned int digestsize,
                           struct scatterlist *src,
                           unsigned int nbytes, u8 *result,
                           void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);
        int idx = 0;
        int rc = 0;

        dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
                nbytes);

        if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
                dev_err(dev, "map_ahash_source() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
                dev_err(dev, "map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
                dev_err(dev, "map_ahash_request_final() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_digest_complete;
                ssi_req.user_arg = (void *)async_req;
        }

        /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        if (is_hmac) {
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
        } else {
                set_din_sram(&desc[idx], larval_digest_addr,
                             ctx->inter_digestsize);
        }
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load the hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);

        if (is_hmac) {
                set_din_type(&desc[idx], DMA_DLLI,
                             state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
                             NS_BIT);
        } else {
                set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
                if (likely(nbytes != 0))
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                else
                        set_cipher_do(&desc[idx], DO_PAD);
        }
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

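        /*
         * HMAC finalization: pad and write out the inner hash, reload
         * the opad-xored key as the new hash state, seed the length
         * with one block already consumed, and hash the inner digest.
         */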
        if (is_hmac) {
                /* HW last hash block padding (aka. "DO_PAD") */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              HASH_LEN_SIZE, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
                set_cipher_do(&desc[idx], DO_PAD);
                idx++;

                /* store the hash digest result in the context */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              digestsize, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash opad xor key state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx],
                             ssi_ahash_get_initial_digest_len_sram_addr(
                                     ctx->drvdata, ctx->hash_mode),
                             HASH_LEN_SIZE);
                set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             digestsize, NS_BIT);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        /* TODO */
        set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
                      NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                }
                ssi_hash_unmap_result(dev, state, digestsize, result);
                ssi_hash_unmap_request(dev, state, ctx);
        }
        return rc;
}

static int ssi_hash_update(struct ahash_req_ctx *state,
                           struct ssi_hash_ctx *ctx,
                           unsigned int block_size,
                           struct scatterlist *src,
                           unsigned int nbytes,
                           void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        u32 idx = 0;
        int rc;

        dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
                "hmac" : "hash", nbytes);

        if (nbytes == 0) {
                /* no real updates required */
                return 0;
        }

        rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
        if (unlikely(rc)) {
                if (rc == 1) {
                        dev_dbg(dev, "data size %x does not require HW update\n",
                                nbytes);
                        /* No hardware updates are required */
                        return 0;
                }
                dev_err(dev, "map_ahash_request_update() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_update_complete;
                ssi_req.user_arg = async_req;
        }

        /* Restore hash digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                     ctx->inter_digestsize, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;
        /* Restore hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
                     HASH_LEN_SIZE, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        /* store the hash digest result in context */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                      ctx->inter_digestsize, NS_BIT, 0);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        idx++;

        /* store current hash length in context */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
                      HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                }
        }
        return rc;
}

static int ssi_hash_finup(struct ahash_req_ctx *state,
                          struct ssi_hash_ctx *ctx,
                          unsigned int digestsize,
                          struct scatterlist *src,
                          unsigned int nbytes,
                          u8 *result,
                          void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc;

        dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
                nbytes);

        if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
                dev_err(dev, "map_ahash_request_final() failed\n");
                return -ENOMEM;
        }
        if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
                dev_err(dev, "map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_complete;
                ssi_req.user_arg = async_req;
        }

        /* Restore hash digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                     ctx->inter_digestsize, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Restore hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
                     HASH_LEN_SIZE, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

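        /* As in ssi_hash_digest(): write out the inner digest, switch the
         * HW state to the opad key and hash the inner digest once more.
         */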
        if (is_hmac) {
                /* Store the hash digest result in the context */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              digestsize, NS_BIT, 0);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash OPAD xor key state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx],
                             ssi_ahash_get_initial_digest_len_sram_addr(
                                     ctx->drvdata, ctx->hash_mode),
                             HASH_LEN_SIZE);
                set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update on last digest */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             digestsize, NS_BIT);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        hw_desc_init(&desc[idx]);
        /* TODO */
        set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
                      NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        }
        return rc;
}

static int ssi_hash_final(struct ahash_req_ctx *state,
                          struct ssi_hash_ctx *ctx,
                          unsigned int digestsize,
                          struct scatterlist *src,
                          unsigned int nbytes,
                          u8 *result,
                          void *async_req)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        bool is_hmac = ctx->is_hmac;
        struct ssi_crypto_req ssi_req = {};
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc;

        dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
                nbytes);

        if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
                dev_err(dev, "map_ahash_request_final() failed\n");
                return -ENOMEM;
        }

        if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
                dev_err(dev, "map_ahash_digest() failed\n");
                return -ENOMEM;
        }

        if (async_req) {
                /* Setup DX request structure */
                ssi_req.user_cb = (void *)ssi_hash_complete;
                ssi_req.user_arg = async_req;
        }

        /* Restore hash digest */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                     ctx->inter_digestsize, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Restore hash current length */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
                     HASH_LEN_SIZE, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

        /* "DO-PAD" must be enabled only when writing current length to HW */
        hw_desc_init(&desc[idx]);
        set_cipher_do(&desc[idx], DO_PAD);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
                      HASH_LEN_SIZE, NS_BIT, 0);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        idx++;

        if (is_hmac) {
                /* Store the hash digest result in the context */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
                              digestsize, NS_BIT, 0);
                ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;

                /* Loading hash OPAD xor key state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
                             ctx->inter_digestsize, NS_BIT);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx],
                             ssi_ahash_get_initial_digest_len_sram_addr(
                                     ctx->drvdata, ctx->hash_mode),
                             HASH_LEN_SIZE);
                set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;

                /* Perform HASH update on last digest */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
                             digestsize, NS_BIT);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;
        }

        /* Get final MAC result */
        hw_desc_init(&desc[idx]);
        set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
                      NS_BIT, (async_req ? 1 : 0));
        if (async_req)
                set_queue_last_ind(&desc[idx]);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
        set_cipher_mode(&desc[idx], ctx->hw_mode);
        idx++;

        if (async_req) {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
                if (unlikely(rc != -EINPROGRESS)) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                }
        } else {
                rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
                if (rc != 0) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                } else {
                        ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
                        ssi_hash_unmap_result(dev, state, digestsize, result);
                        ssi_hash_unmap_request(dev, state, ctx);
                }
        }
        return rc;
}

static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        state->xcbc_count = 0;

        return ssi_hash_map_request(dev, state, ctx);
}

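/*
 * Note: the 'synchronize' parameter is currently unused. 'hash' is
 * taken as a void pointer, apparently so the same helper can back
 * several crypto_ahash wrappers.
 */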
static int ssi_hash_setkey(void *hash,
                           const u8 *key,
                           unsigned int keylen,
                           bool synchronize)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        struct ssi_crypto_req ssi_req = {};
        struct ssi_hash_ctx *ctx = NULL;
        int blocksize = 0;
        int digestsize = 0;
        int i, idx = 0, rc = 0;
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
        ssi_sram_addr_t larval_addr;
        struct device *dev;

        ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
        dev = drvdata_to_dev(ctx->drvdata);
        dev_dbg(dev, "start keylen: %d\n", keylen);

        blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
        digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));

        larval_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->hash_mode);

        /* The keylen value distinguishes the flows: a keylen of ZERO bytes
         * means plain HASH; any NON-ZERO value selects the HMAC flow.
         */
        ctx->key_params.keylen = keylen;
        ctx->key_params.key_dma_addr = 0;
        ctx->is_hmac = true;

        if (keylen != 0) {
                ctx->key_params.key_dma_addr = dma_map_single(
                                                dev, (void *)key,
                                                keylen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev,
                                               ctx->key_params.key_dma_addr))) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        return -ENOMEM;
                }
                dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
                        &ctx->key_params.key_dma_addr, ctx->key_params.keylen);

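                /* Per RFC 2104: a key longer than the block size is first
                 * hashed down to digestsize and zero-padded to a full
                 * block; a shorter key is copied and zero-padded.
                 */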
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], ctx->hw_mode);
                        set_din_sram(&desc[idx], larval_addr,
                                     ctx->inter_digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], ctx->hw_mode);
                        set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     ctx->key_params.key_dma_addr, keylen,
                                     NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], ctx->hw_mode);
                        set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
                                                   digestsize),
                                      (blocksize - digestsize), NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     ctx->key_params.key_dma_addr, keylen,
                                     NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (ctx->opad_tmp_keys_dma_addr +
                                               keylen), (blocksize - keylen),
                                              NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, blocksize);
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
        if (unlikely(rc != 0)) {
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                goto out;
        }

        /* calc derived HMAC key */
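        /* Two passes over the padded key block: i == 0 XORs it with the
         * ipad constant and stores the resulting intermediate digest in
         * digest_buff; i == 1 does the same with the opad constant into
         * opad_tmp_keys_buff.
         */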
        for (idx = 0, i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
                             blocksize, NS_BIT);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the IPAD/OPAD xor key (Note, IPAD is the initial
                 * digest of the first HASH "update" state)
                 */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], ctx->hw_mode);
                if (i > 0) /* Not first iteration */
                        set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
                                      ctx->inter_digestsize, NS_BIT, 0);
                else /* First iteration */
                        set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
                                      ctx->inter_digestsize, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                idx++;
        }

        rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

out:
        if (rc)
                crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);

        if (ctx->key_params.key_dma_addr) {
                dma_unmap_single(dev, ctx->key_params.key_dma_addr,
                                 ctx->key_params.keylen, DMA_TO_DEVICE);
                dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
                        &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
        }
        return rc;
}

static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
                           const u8 *key, unsigned int keylen)
{
        struct ssi_crypto_req ssi_req = {};
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        int idx = 0, rc = 0;
        struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];

        dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

        switch (keylen) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                break;
        default:
                return -EINVAL;
        }

        ctx->key_params.keylen = keylen;

        ctx->key_params.key_dma_addr = dma_map_single(
                                        dev, (void *)key,
                                        keylen, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
                dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                        key, keylen);
                return -ENOMEM;
        }
        dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
                &ctx->key_params.key_dma_addr, ctx->key_params.keylen);

        ctx->is_hmac = true;
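        /* Derive the XCBC K1/K2/K3 subkeys (RFC 3566) by AES-encrypting
         * the constants 0x01../0x02../0x03.. under the user key; the
         * results are cached at fixed offsets in opad_tmp_keys_buff.
         */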
1180         /* 1. Load the AES key */
1181         hw_desc_init(&desc[idx]);
1182         set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
1183                      keylen, NS_BIT);
1184         set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1185         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1186         set_key_size_aes(&desc[idx], keylen);
1187         set_flow_mode(&desc[idx], S_DIN_to_AES);
1188         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1189         idx++;
1190
1191         hw_desc_init(&desc[idx]);
1192         set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
1193         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1194         set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1195                                            XCBC_MAC_K1_OFFSET),
1196                               CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1197         idx++;
1198
1199         hw_desc_init(&desc[idx]);
1200         set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
1201         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1202         set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1203                                            XCBC_MAC_K2_OFFSET),
1204                               CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1205         idx++;
1206
1207         hw_desc_init(&desc[idx]);
1208         set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
1209         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1210         set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1211                                            XCBC_MAC_K3_OFFSET),
1212                                CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1213         idx++;
1214
1215         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1216
1217         if (rc != 0)
1218                 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1219
1220         dma_unmap_single(dev, ctx->key_params.key_dma_addr,
1221                          ctx->key_params.keylen, DMA_TO_DEVICE);
1222         dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1223                 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1224
1225         return rc;
1226 }
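
/*
 * The three constant-encryption descriptors above implement the RFC 3566
 * XCBC subkey derivation in hardware: K1 = AES-K(0x0101..01),
 * K2 = AES-K(0x0202..02) and K3 = AES-K(0x0303..03), parked at
 * XCBC_MAC_K{1,2,3}_OFFSET in the opad_tmp_keys buffer for the setup
 * descriptors built in ssi_hash_create_xcbc_setup(). For reference only,
 * a minimal software sketch of the same derivation (hypothetical helper;
 * the driver itself always derives the subkeys on the CryptoCell):
 */
static int __maybe_unused ssi_xcbc_derive_subkeys_sw(const u8 *key,
                                                     unsigned int keylen,
                                                     u8 *k1, u8 *k2, u8 *k3)
{
        struct crypto_cipher *aes = crypto_alloc_cipher("aes", 0, 0);
        int rc;

        if (IS_ERR(aes))
                return PTR_ERR(aes);

        rc = crypto_cipher_setkey(aes, key, keylen);
        if (!rc) {
                memset(k1, 0x01, AES_BLOCK_SIZE);
                memset(k2, 0x02, AES_BLOCK_SIZE);
                memset(k3, 0x03, AES_BLOCK_SIZE);
                /* one ECB block encryption per subkey, as in the HW flow */
                crypto_cipher_encrypt_one(aes, k1, k1);
                crypto_cipher_encrypt_one(aes, k2, k2);
                crypto_cipher_encrypt_one(aes, k3, k3);
        }
        crypto_free_cipher(aes);
        return rc;
}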
1227
1228 #if SSI_CC_HAS_CMAC
1229 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1230                            const u8 *key, unsigned int keylen)
1231 {
1232         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1233         struct device *dev = drvdata_to_dev(ctx->drvdata);
1234
1235         dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1236
1237         ctx->is_hmac = true;
1238
1239         switch (keylen) {
1240         case AES_KEYSIZE_128:
1241         case AES_KEYSIZE_192:
1242         case AES_KEYSIZE_256:
1243                 break;
1244         default:
1245                 return -EINVAL;
1246         }
1247
1248         ctx->key_params.keylen = keylen;
1249
1250         /* STAT_PHASE_1: Copy key to ctx */
1251
1252         dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1253                                 keylen, DMA_TO_DEVICE);
1254
1255         memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1256         if (keylen == 24)
1257                 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1258
1259         dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1260                                    keylen, DMA_TO_DEVICE);
1261
1264         return 0;
1265 }
1266 #endif
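
/*
 * Unlike XCBC above, the CMAC subkeys (K1/K2 of NIST SP 800-38B) are
 * derived inside the CryptoCell, so ssi_cmac_setkey() only has to stage
 * the raw AES key in the DMA-mapped context buffer. A minimal caller-side
 * sketch (hypothetical helper; a real user would keep the tfm around
 * instead of freeing it right away):
 */
static int __maybe_unused ssi_cmac_setkey_sketch(const u8 *key,
                                                 unsigned int keylen)
{
        struct crypto_ahash *tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
        int rc;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* dispatches to ssi_cmac_setkey() when this driver wins priority */
        rc = crypto_ahash_setkey(tfm, key, keylen);
        crypto_free_ahash(tfm);
        return rc;
}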
1267
1268 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1269 {
1270         struct device *dev = drvdata_to_dev(ctx->drvdata);
1271
1272         if (ctx->digest_buff_dma_addr != 0) {
1273                 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1274                                  sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1275                 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1276                         &ctx->digest_buff_dma_addr);
1277                 ctx->digest_buff_dma_addr = 0;
1278         }
1279         if (ctx->opad_tmp_keys_dma_addr != 0) {
1280                 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1281                                  sizeof(ctx->opad_tmp_keys_buff),
1282                                  DMA_BIDIRECTIONAL);
1283                 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1284                         &ctx->opad_tmp_keys_dma_addr);
1285                 ctx->opad_tmp_keys_dma_addr = 0;
1286         }
1287
1288         ctx->key_params.keylen = 0;
1289 }
1290
1291 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1292 {
1293         struct device *dev = drvdata_to_dev(ctx->drvdata);
1294
1295         ctx->key_params.keylen = 0;
1296
1297         ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1298         if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1299                 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1300                         sizeof(ctx->digest_buff), ctx->digest_buff);
1301                 goto fail;
1302         }
1303         dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1304                 sizeof(ctx->digest_buff), ctx->digest_buff,
1305                 &ctx->digest_buff_dma_addr);
1306
1307         ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1308         if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1309                 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1310                         sizeof(ctx->opad_tmp_keys_buff),
1311                         ctx->opad_tmp_keys_buff);
1312                 goto fail;
1313         }
1314         dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1315                 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1316                 &ctx->opad_tmp_keys_dma_addr);
1317
1318         ctx->is_hmac = false;
1319         return 0;
1320
1321 fail:
1322         ssi_hash_free_ctx(ctx);
1323         return -ENOMEM;
1324 }
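
/*
 * Note that both context buffers stay DMA-mapped (bidirectionally) for
 * the whole lifetime of the tfm; CPU-side updates such as the key copy
 * in ssi_cmac_setkey() therefore bracket their accesses with
 * dma_sync_single_for_cpu()/dma_sync_single_for_device() instead of
 * remapping the buffers on every request.
 */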
1325
1326 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1327 {
1328         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1329         struct hash_alg_common *hash_alg_common =
1330                 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1331         struct ahash_alg *ahash_alg =
1332                 container_of(hash_alg_common, struct ahash_alg, halg);
1333         struct ssi_hash_alg *ssi_alg =
1334                         container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1335
1336         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1337                                  sizeof(struct ahash_req_ctx));
1338
1339         ctx->hash_mode = ssi_alg->hash_mode;
1340         ctx->hw_mode = ssi_alg->hw_mode;
1341         ctx->inter_digestsize = ssi_alg->inter_digestsize;
1342         ctx->drvdata = ssi_alg->drvdata;
1343
1344         return ssi_hash_alloc_ctx(ctx);
1345 }
1346
1347 static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
1348 {
1349         struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1350         struct device *dev = drvdata_to_dev(ctx->drvdata);
1351
1352         dev_dbg(dev, "ssi_hash_cra_exit\n");
1353         ssi_hash_free_ctx(ctx);
1354 }
1355
1356 static int ssi_mac_update(struct ahash_request *req)
1357 {
1358         struct ahash_req_ctx *state = ahash_request_ctx(req);
1359         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1360         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1361         struct device *dev = drvdata_to_dev(ctx->drvdata);
1362         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1363         struct ssi_crypto_req ssi_req = {};
1364         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1365         int rc;
1366         u32 idx = 0;
1367
1368         if (req->nbytes == 0) {
1369                 /* no real updates required */
1370                 return 0;
1371         }
1372
1373         state->xcbc_count++;
1374
1375         rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
1376         if (unlikely(rc)) {
1377                 if (rc == 1) {
1378                         dev_dbg(dev, "data size does not require HW update %x\n",
1379                                 req->nbytes);
1380                         /* No hardware updates are required */
1381                         return 0;
1382                 }
1383                 dev_err(dev, "map_ahash_request_update() failed\n");
1384                 return -ENOMEM;
1385         }
1386
1387         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1388                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1389         else
1390                 ssi_hash_create_cmac_setup(req, desc, &idx);
1391
1392         ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1393
1394         /* store the hash digest result in context */
1395         hw_desc_init(&desc[idx]);
1396         set_cipher_mode(&desc[idx], ctx->hw_mode);
1397         set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1398                       ctx->inter_digestsize, NS_BIT, 1);
1399         set_queue_last_ind(&desc[idx]);
1400         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1401         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1402         idx++;
1403
1404         /* Setup DX request structure */
1405         ssi_req.user_cb = (void *)ssi_hash_update_complete;
1406         ssi_req.user_arg = (void *)req;
1407
1408         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1409         if (unlikely(rc != -EINPROGRESS)) {
1410                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1411                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1412         }
1413         return rc;
1414 }
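
/*
 * state->xcbc_count counts the non-empty update() calls seen so far; the
 * final()/finup() paths below use it to tell a MAC over no data at all
 * (xcbc_count == 0) apart from a block-aligned message whose last block
 * was already absorbed by a previous update.
 */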
1415
1416 static int ssi_mac_final(struct ahash_request *req)
1417 {
1418         struct ahash_req_ctx *state = ahash_request_ctx(req);
1419         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1420         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1421         struct device *dev = drvdata_to_dev(ctx->drvdata);
1422         struct ssi_crypto_req ssi_req = {};
1423         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1424         int idx = 0;
1425         int rc = 0;
1426         u32 key_size, key_len;
1427         u32 digestsize = crypto_ahash_digestsize(tfm);
1428
1429         u32 rem_cnt = state->buff_index ? state->buff1_cnt :
1430                         state->buff0_cnt;
1431
1432         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1433                 key_size = CC_AES_128_BIT_KEY_SIZE;
1434                 key_len  = CC_AES_128_BIT_KEY_SIZE;
1435         } else {
1436                 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1437                         ctx->key_params.keylen;
1438                 key_len =  ctx->key_params.keylen;
1439         }
1440
1441         dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1442
1443         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
1444                 dev_err(dev, "map_ahash_request_final() failed\n");
1445                 return -ENOMEM;
1446         }
1447
1448         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1449                 dev_err(dev, "map_ahash_digest() failed\n");
                     ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1450                 return -ENOMEM;
1451         }
1452
1453         /* Setup DX request structure */
1454         ssi_req.user_cb = (void *)ssi_hash_complete;
1455         ssi_req.user_arg = (void *)req;
1456
1457         if (state->xcbc_count && (rem_cnt == 0)) {
1458                 /* Load key for ECB decryption */
1459                 hw_desc_init(&desc[idx]);
1460                 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1461                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1462                 set_din_type(&desc[idx], DMA_DLLI,
1463                              (ctx->opad_tmp_keys_dma_addr +
1464                               XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
1465                 set_key_size_aes(&desc[idx], key_len);
1466                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1467                 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1468                 idx++;
1469
1470                 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1471                 hw_desc_init(&desc[idx]);
1472                 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1473                              CC_AES_BLOCK_SIZE, NS_BIT);
1474                 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1475                               CC_AES_BLOCK_SIZE, NS_BIT, 0);
1476                 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1477                 idx++;
1478
1479                 /* Memory Barrier: wait for axi write to complete */
1480                 hw_desc_init(&desc[idx]);
1481                 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1482                 set_dout_no_dma(&desc[idx], 0, 0, 1);
1483                 idx++;
1484         }
1485
1486         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1487                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1488         else
1489                 ssi_hash_create_cmac_setup(req, desc, &idx);
1490
1491         if (state->xcbc_count == 0) {
1492                 hw_desc_init(&desc[idx]);
1493                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1494                 set_key_size_aes(&desc[idx], key_len);
1495                 set_cmac_size0_mode(&desc[idx]);
1496                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1497                 idx++;
1498         } else if (rem_cnt > 0) {
1499                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1500         } else {
1501                 hw_desc_init(&desc[idx]);
1502                 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1503                 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1504                 idx++;
1505         }
1506
1507         /* Get final MAC result */
1508         hw_desc_init(&desc[idx]);
1510         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1511                       digestsize, NS_BIT, 1);
1512         set_queue_last_ind(&desc[idx]);
1513         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1514         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1515         set_cipher_mode(&desc[idx], ctx->hw_mode);
1516         idx++;
1517
1518         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1519         if (unlikely(rc != -EINPROGRESS)) {
1520                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1521                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1522                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1523         }
1524         return rc;
1525 }
1526
1527 static int ssi_mac_finup(struct ahash_request *req)
1528 {
1529         struct ahash_req_ctx *state = ahash_request_ctx(req);
1530         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1531         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1532         struct device *dev = drvdata_to_dev(ctx->drvdata);
1533         struct ssi_crypto_req ssi_req = {};
1534         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1535         int idx = 0;
1536         int rc = 0;
1537         u32 key_len = 0;
1538         u32 digestsize = crypto_ahash_digestsize(tfm);
1539
1540         dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1541         if (state->xcbc_count > 0 && req->nbytes == 0) {
1542                 dev_dbg(dev, "No data to update. Calling ssi_mac_final()\n");
1543                 return ssi_mac_final(req);
1544         }
1545
1546         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1547                 dev_err(dev, "map_ahash_request_final() failed\n");
1548                 return -ENOMEM;
1549         }
1550         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1551                 dev_err(dev, "map_ahash_digest() failed\n");
                     ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1552                 return -ENOMEM;
1553         }
1554
1555         /* Setup DX request structure */
1556         ssi_req.user_cb = (void *)ssi_hash_complete;
1557         ssi_req.user_arg = (void *)req;
1558
1559         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1560                 key_len = CC_AES_128_BIT_KEY_SIZE;
1561                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1562         } else {
1563                 key_len = ctx->key_params.keylen;
1564                 ssi_hash_create_cmac_setup(req, desc, &idx);
1565         }
1566
1567         if (req->nbytes == 0) {
1568                 hw_desc_init(&desc[idx]);
1569                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1570                 set_key_size_aes(&desc[idx], key_len);
1571                 set_cmac_size0_mode(&desc[idx]);
1572                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1573                 idx++;
1574         } else {
1575                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1576         }
1577
1578         /* Get final MAC result */
1579         hw_desc_init(&desc[idx]);
1581         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1582                       digestsize, NS_BIT, 1);
1583         set_queue_last_ind(&desc[idx]);
1584         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1585         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1586         set_cipher_mode(&desc[idx], ctx->hw_mode);
1587         idx++;
1588
1589         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1590         if (unlikely(rc != -EINPROGRESS)) {
1591                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1592                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1593                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1594         }
1595         return rc;
1596 }
1597
1598 static int ssi_mac_digest(struct ahash_request *req)
1599 {
1600         struct ahash_req_ctx *state = ahash_request_ctx(req);
1601         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1602         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1603         struct device *dev = drvdata_to_dev(ctx->drvdata);
1604         u32 digestsize = crypto_ahash_digestsize(tfm);
1605         struct ssi_crypto_req ssi_req = {};
1606         struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1607         u32 key_len;
1608         int idx = 0;
1609         int rc;
1610
1611         dev_dbg(dev, "===== digest mac (%d) ====\n", req->nbytes);
1612
1613         if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
1614                 dev_err(dev, "map_ahash_source() failed\n");
1615                 return -ENOMEM;
1616         }
1617         if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1618                 dev_err(dev, "map_ahash_digest() failed\n");
                     ssi_hash_unmap_request(dev, state, ctx);
1619                 return -ENOMEM;
1620         }
1621
1622         if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1623                 dev_err(dev, "map_ahash_request_final() failed\n");
                     ssi_hash_unmap_result(dev, state, digestsize, req->result);
                     ssi_hash_unmap_request(dev, state, ctx);
1624                 return -ENOMEM;
1625         }
1626
1627         /* Setup DX request structure */
1628         ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1629         ssi_req.user_arg = (void *)req;
1630
1631         if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1632                 key_len = CC_AES_128_BIT_KEY_SIZE;
1633                 ssi_hash_create_xcbc_setup(req, desc, &idx);
1634         } else {
1635                 key_len = ctx->key_params.keylen;
1636                 ssi_hash_create_cmac_setup(req, desc, &idx);
1637         }
1638
1639         if (req->nbytes == 0) {
1640                 hw_desc_init(&desc[idx]);
1641                 set_cipher_mode(&desc[idx], ctx->hw_mode);
1642                 set_key_size_aes(&desc[idx], key_len);
1643                 set_cmac_size0_mode(&desc[idx]);
1644                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1645                 idx++;
1646         } else {
1647                 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1648         }
1649
1650         /* Get final MAC result */
1651         hw_desc_init(&desc[idx]);
1652         set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1653                       CC_AES_BLOCK_SIZE, NS_BIT, 1);
1654         set_queue_last_ind(&desc[idx]);
1655         set_flow_mode(&desc[idx], S_AES_to_DOUT);
1656         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1657         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1658         set_cipher_mode(&desc[idx], ctx->hw_mode);
1659         idx++;
1660
1661         rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1662         if (unlikely(rc != -EINPROGRESS)) {
1663                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1664                 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1665                 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1666                 ssi_hash_unmap_request(dev, state, ctx);
1667         }
1668         return rc;
1669 }
1670
1671 /* ahash wrapper functions */
1672 static int ssi_ahash_digest(struct ahash_request *req)
1673 {
1674         struct ahash_req_ctx *state = ahash_request_ctx(req);
1675         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1676         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1677         u32 digestsize = crypto_ahash_digestsize(tfm);
1678
1679         return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1680 }
1681
1682 static int ssi_ahash_update(struct ahash_request *req)
1683 {
1684         struct ahash_req_ctx *state = ahash_request_ctx(req);
1685         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1686         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1687         unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1688
1689         return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
1690 }
1691
1692 static int ssi_ahash_finup(struct ahash_request *req)
1693 {
1694         struct ahash_req_ctx *state = ahash_request_ctx(req);
1695         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1696         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1697         u32 digestsize = crypto_ahash_digestsize(tfm);
1698
1699         return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1700 }
1701
1702 static int ssi_ahash_final(struct ahash_request *req)
1703 {
1704         struct ahash_req_ctx *state = ahash_request_ctx(req);
1705         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1706         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1707         u32 digestsize = crypto_ahash_digestsize(tfm);
1708
1709         return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1710 }
1711
1712 static int ssi_ahash_init(struct ahash_request *req)
1713 {
1714         struct ahash_req_ctx *state = ahash_request_ctx(req);
1715         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1716         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1717         struct device *dev = drvdata_to_dev(ctx->drvdata);
1718
1719         dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
1720
1721         return ssi_hash_init(state, ctx);
1722 }
1723
1724 static int ssi_ahash_export(struct ahash_request *req, void *out)
1725 {
1726         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1727         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1728         struct device *dev = drvdata_to_dev(ctx->drvdata);
1729         struct ahash_req_ctx *state = ahash_request_ctx(req);
1730         u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
1731         u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
1732                                 state->buff0_cnt;
1733         const u32 tmp = CC_EXPORT_MAGIC;
1734
1735         memcpy(out, &tmp, sizeof(u32));
1736         out += sizeof(u32);
1737
1738         dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1739                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1740         memcpy(out, state->digest_buff, ctx->inter_digestsize);
1741         out += ctx->inter_digestsize;
1742
1743         if (state->digest_bytes_len_dma_addr) {
1744                 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1745                                         HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1746                 memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
1747         } else {
1748                 /* Poison the unused exported digest len field. */
1749                 memset(out, 0x5F, HASH_LEN_SIZE);
1750         }
1751         out += HASH_LEN_SIZE;
1752
1753         memcpy(out, &curr_buff_cnt, sizeof(u32));
1754         out += sizeof(u32);
1755
1756         memcpy(out, curr_buff, curr_buff_cnt);
1757
1758         /* No sync for device is needed since we did not change the data;
1759          * we only copied it out.
1760          */
1761
1762         return 0;
1763 }
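
/*
 * Layout of the exported state produced above (and parsed back by
 * ssi_ahash_import() below):
 *
 *      u32 magic;                    - CC_EXPORT_MAGIC
 *      u8  digest[inter_digestsize]; - intermediate digest / MAC state
 *      u8  len[HASH_LEN_SIZE];       - hashed-bytes counter,
 *                                      0x5F-poisoned for MAC modes
 *      u32 curr_buff_cnt;            - bytes pending in the staging buffer
 *      u8  buff[curr_buff_cnt];      - the pending bytes themselves
 *
 * which is what the CC_STATE_SIZE() based .statesize values below
 * account for.
 */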
1764
1765 static int ssi_ahash_import(struct ahash_request *req, const void *in)
1766 {
1767         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1768         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1769         struct device *dev = drvdata_to_dev(ctx->drvdata);
1770         struct ahash_req_ctx *state = ahash_request_ctx(req);
1771         u32 tmp;
1772         int rc;
1773
1774         memcpy(&tmp, in, sizeof(u32));
1775         if (tmp != CC_EXPORT_MAGIC) {
1776                 rc = -EINVAL;
1777                 goto out;
1778         }
1779         in += sizeof(u32);
1780
1781         rc = ssi_hash_init(state, ctx);
1782         if (rc)
1783                 goto out;
1784
1785         dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1786                                 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1787         memcpy(state->digest_buff, in, ctx->inter_digestsize);
1788         in += ctx->inter_digestsize;
1789
1790         if (state->digest_bytes_len_dma_addr) {
1791                 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1792                                         HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1793                 memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
1794         }
1795         in += HASH_LEN_SIZE;
1796
1797         dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
1798                                    ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1799
1800         if (state->digest_bytes_len_dma_addr)
1801                 dma_sync_single_for_device(dev,
1802                                            state->digest_bytes_len_dma_addr,
1803                                            HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1804
1805         state->buff_index = 0;
1806
1807         /* Sanity check the data as much as possible */
1808         memcpy(&tmp, in, sizeof(u32));
1809         if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
1810                 rc = -EINVAL;
1811                 goto out;
1812         }
1813         in += sizeof(u32);
1814
1815         state->buff0_cnt = tmp;
1816         memcpy(state->buff0, in, state->buff0_cnt);
1817
1818 out:
1819         return rc;
1820 }
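
/*
 * A minimal round-trip through the two functions above via the generic
 * ahash API (hypothetical helper; 'req' is an initialized request on one
 * of the tfms registered below):
 */
static int __maybe_unused ssi_ahash_save_restore_sketch(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        u8 *state;
        int rc;

        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        rc = crypto_ahash_export(req, state); /* ends up in ssi_ahash_export() */
        if (!rc)
                rc = crypto_ahash_import(req, state);

        kfree(state);
        return rc;
}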
1821
1822 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1823                             const u8 *key, unsigned int keylen)
1824 {
1825         return ssi_hash_setkey((void *)ahash, key, keylen, false);
1826 }
1827
1828 struct ssi_hash_template {
1829         char name[CRYPTO_MAX_ALG_NAME];
1830         char driver_name[CRYPTO_MAX_ALG_NAME];
1831         char mac_name[CRYPTO_MAX_ALG_NAME];
1832         char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1833         unsigned int blocksize;
1834         bool synchronize;
1835         struct ahash_alg template_ahash;
1836         int hash_mode;
1837         int hw_mode;
1838         int inter_digestsize;
1839         struct ssi_drvdata *drvdata;
1840 };
1841
1842 #define CC_STATE_SIZE(_x) \
1843         ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1844
1845 /* hash descriptors */
1846 static struct ssi_hash_template driver_hash[] = {
1847         /* asynchronous hash templates */
1848         {
1849                 .name = "sha1",
1850                 .driver_name = "sha1-dx",
1851                 .mac_name = "hmac(sha1)",
1852                 .mac_driver_name = "hmac-sha1-dx",
1853                 .blocksize = SHA1_BLOCK_SIZE,
1854                 .synchronize = false,
1855                 .template_ahash = {
1856                         .init = ssi_ahash_init,
1857                         .update = ssi_ahash_update,
1858                         .final = ssi_ahash_final,
1859                         .finup = ssi_ahash_finup,
1860                         .digest = ssi_ahash_digest,
1861                         .export = ssi_ahash_export,
1862                         .import = ssi_ahash_import,
1863                         .setkey = ssi_ahash_setkey,
1864                         .halg = {
1865                                 .digestsize = SHA1_DIGEST_SIZE,
1866                                 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1867                         },
1868                 },
1869                 .hash_mode = DRV_HASH_SHA1,
1870                 .hw_mode = DRV_HASH_HW_SHA1,
1871                 .inter_digestsize = SHA1_DIGEST_SIZE,
1872         },
1873         {
1874                 .name = "sha256",
1875                 .driver_name = "sha256-dx",
1876                 .mac_name = "hmac(sha256)",
1877                 .mac_driver_name = "hmac-sha256-dx",
1878                 .blocksize = SHA256_BLOCK_SIZE,
1879                 .template_ahash = {
1880                         .init = ssi_ahash_init,
1881                         .update = ssi_ahash_update,
1882                         .final = ssi_ahash_final,
1883                         .finup = ssi_ahash_finup,
1884                         .digest = ssi_ahash_digest,
1885                         .export = ssi_ahash_export,
1886                         .import = ssi_ahash_import,
1887                         .setkey = ssi_ahash_setkey,
1888                         .halg = {
1889                                 .digestsize = SHA256_DIGEST_SIZE,
1890                                 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1891                         },
1892                 },
1893                 .hash_mode = DRV_HASH_SHA256,
1894                 .hw_mode = DRV_HASH_HW_SHA256,
1895                 .inter_digestsize = SHA256_DIGEST_SIZE,
1896         },
1897         {
1898                 .name = "sha224",
1899                 .driver_name = "sha224-dx",
1900                 .mac_name = "hmac(sha224)",
1901                 .mac_driver_name = "hmac-sha224-dx",
1902                 .blocksize = SHA224_BLOCK_SIZE,
1903                 .template_ahash = {
1904                         .init = ssi_ahash_init,
1905                         .update = ssi_ahash_update,
1906                         .final = ssi_ahash_final,
1907                         .finup = ssi_ahash_finup,
1908                         .digest = ssi_ahash_digest,
1909                         .export = ssi_ahash_export,
1910                         .import = ssi_ahash_import,
1911                         .setkey = ssi_ahash_setkey,
1912                         .halg = {
1913                                 .digestsize = SHA224_DIGEST_SIZE,
1914                                 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1915                         },
1916                 },
1917                 .hash_mode = DRV_HASH_SHA224,
1918                 .hw_mode = DRV_HASH_HW_SHA256,
1919                 .inter_digestsize = SHA256_DIGEST_SIZE,
1920         },
1921 #if (DX_DEV_SHA_MAX > 256)
1922         {
1923                 .name = "sha384",
1924                 .driver_name = "sha384-dx",
1925                 .mac_name = "hmac(sha384)",
1926                 .mac_driver_name = "hmac-sha384-dx",
1927                 .blocksize = SHA384_BLOCK_SIZE,
1928                 .template_ahash = {
1929                         .init = ssi_ahash_init,
1930                         .update = ssi_ahash_update,
1931                         .final = ssi_ahash_final,
1932                         .finup = ssi_ahash_finup,
1933                         .digest = ssi_ahash_digest,
1934                         .export = ssi_ahash_export,
1935                         .import = ssi_ahash_import,
1936                         .setkey = ssi_ahash_setkey,
1937                         .halg = {
1938                                 .digestsize = SHA384_DIGEST_SIZE,
1939                                 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1940                         },
1941                 },
1942                 .hash_mode = DRV_HASH_SHA384,
1943                 .hw_mode = DRV_HASH_HW_SHA512,
1944                 .inter_digestsize = SHA512_DIGEST_SIZE,
1945         },
1946         {
1947                 .name = "sha512",
1948                 .driver_name = "sha512-dx",
1949                 .mac_name = "hmac(sha512)",
1950                 .mac_driver_name = "hmac-sha512-dx",
1951                 .blocksize = SHA512_BLOCK_SIZE,
1952                 .template_ahash = {
1953                         .init = ssi_ahash_init,
1954                         .update = ssi_ahash_update,
1955                         .final = ssi_ahash_final,
1956                         .finup = ssi_ahash_finup,
1957                         .digest = ssi_ahash_digest,
1958                         .export = ssi_ahash_export,
1959                         .import = ssi_ahash_import,
1960                         .setkey = ssi_ahash_setkey,
1961                         .halg = {
1962                                 .digestsize = SHA512_DIGEST_SIZE,
1963                                 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1964                         },
1965                 },
1966                 .hash_mode = DRV_HASH_SHA512,
1967                 .hw_mode = DRV_HASH_HW_SHA512,
1968                 .inter_digestsize = SHA512_DIGEST_SIZE,
1969         },
1970 #endif
1971         {
1972                 .name = "md5",
1973                 .driver_name = "md5-dx",
1974                 .mac_name = "hmac(md5)",
1975                 .mac_driver_name = "hmac-md5-dx",
1976                 .blocksize = MD5_HMAC_BLOCK_SIZE,
1977                 .template_ahash = {
1978                         .init = ssi_ahash_init,
1979                         .update = ssi_ahash_update,
1980                         .final = ssi_ahash_final,
1981                         .finup = ssi_ahash_finup,
1982                         .digest = ssi_ahash_digest,
1983                         .export = ssi_ahash_export,
1984                         .import = ssi_ahash_import,
1985                         .setkey = ssi_ahash_setkey,
1986                         .halg = {
1987                                 .digestsize = MD5_DIGEST_SIZE,
1988                                 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1989                         },
1990                 },
1991                 .hash_mode = DRV_HASH_MD5,
1992                 .hw_mode = DRV_HASH_HW_MD5,
1993                 .inter_digestsize = MD5_DIGEST_SIZE,
1994         },
1995         {
1996                 .mac_name = "xcbc(aes)",
1997                 .mac_driver_name = "xcbc-aes-dx",
1998                 .blocksize = AES_BLOCK_SIZE,
1999                 .template_ahash = {
2000                         .init = ssi_ahash_init,
2001                         .update = ssi_mac_update,
2002                         .final = ssi_mac_final,
2003                         .finup = ssi_mac_finup,
2004                         .digest = ssi_mac_digest,
2005                         .setkey = ssi_xcbc_setkey,
2006                         .export = ssi_ahash_export,
2007                         .import = ssi_ahash_import,
2008                         .halg = {
2009                                 .digestsize = AES_BLOCK_SIZE,
2010                                 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2011                         },
2012                 },
2013                 .hash_mode = DRV_HASH_NULL,
2014                 .hw_mode = DRV_CIPHER_XCBC_MAC,
2015                 .inter_digestsize = AES_BLOCK_SIZE,
2016         },
2017 #if SSI_CC_HAS_CMAC
2018         {
2019                 .mac_name = "cmac(aes)",
2020                 .mac_driver_name = "cmac-aes-dx",
2021                 .blocksize = AES_BLOCK_SIZE,
2022                 .template_ahash = {
2023                         .init = ssi_ahash_init,
2024                         .update = ssi_mac_update,
2025                         .final = ssi_mac_final,
2026                         .finup = ssi_mac_finup,
2027                         .digest = ssi_mac_digest,
2028                         .setkey = ssi_cmac_setkey,
2029                         .export = ssi_ahash_export,
2030                         .import = ssi_ahash_import,
2031                         .halg = {
2032                                 .digestsize = AES_BLOCK_SIZE,
2033                                 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2034                         },
2035                 },
2036                 .hash_mode = DRV_HASH_NULL,
2037                 .hw_mode = DRV_CIPHER_CMAC,
2038                 .inter_digestsize = AES_BLOCK_SIZE,
2039         },
2040 #endif
2041
2042 };
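
/*
 * Minimal sketch of driving one of the algs registered from this table
 * through the async hash API (hypothetical helper; a real caller must
 * also be prepared for -EINPROGRESS/-EBUSY from crypto_ahash_digest()
 * and wait for the completion callback):
 */
static int __maybe_unused ssi_sha256_digest_sketch(struct scatterlist *sg,
                                                   unsigned int nbytes, u8 *out)
{
        struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
        struct ahash_request *req;
        int rc;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        ahash_request_set_callback(req, 0, NULL, NULL);
        ahash_request_set_crypt(req, sg, out, nbytes);
        rc = crypto_ahash_digest(req); /* lands in ssi_ahash_digest() */

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return rc;
}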
2043
2044 static struct ssi_hash_alg *
2045 ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
2046                     bool keyed)
2047 {
2048         struct ssi_hash_alg *t_crypto_alg;
2049         struct crypto_alg *alg;
2050         struct ahash_alg *halg;
2051
2052         t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
2053         if (!t_crypto_alg)
2054                 return ERR_PTR(-ENOMEM);
2055
2057         t_crypto_alg->ahash_alg = template->template_ahash;
2058         halg = &t_crypto_alg->ahash_alg;
2059         alg = &halg->halg.base;
2060
2061         if (keyed) {
2062                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2063                          template->mac_name);
2064                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2065                          template->mac_driver_name);
2066         } else {
2067                 halg->setkey = NULL;
2068                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2069                          template->name);
2070                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2071                          template->driver_name);
2072         }
2073         alg->cra_module = THIS_MODULE;
2074         alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2075         alg->cra_priority = SSI_CRA_PRIO;
2076         alg->cra_blocksize = template->blocksize;
2077         alg->cra_alignmask = 0;
2078         alg->cra_exit = ssi_hash_cra_exit;
2079
2080         alg->cra_init = ssi_ahash_cra_init;
2081         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2082                         CRYPTO_ALG_KERN_DRIVER_ONLY;
2083         alg->cra_type = &crypto_ahash_type;
2084
2085         t_crypto_alg->hash_mode = template->hash_mode;
2086         t_crypto_alg->hw_mode = template->hw_mode;
2087         t_crypto_alg->inter_digestsize = template->inter_digestsize;
2088
2089         return t_crypto_alg;
2090 }
2091
2092 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2093 {
2094         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2095         ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2096         unsigned int larval_seq_len = 0;
2097         struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
2098         struct device *dev = drvdata_to_dev(drvdata);
2099         int rc = 0;
2100 #if (DX_DEV_SHA_MAX > 256)
2101         int i;
2102 #endif
2103
2104         /* Copy-to-sram digest-len */
2105         ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
2106                                      ARRAY_SIZE(digest_len_init),
2107                                      larval_seq, &larval_seq_len);
2108         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2109         if (unlikely(rc != 0))
2110                 goto init_digest_const_err;
2111
2112         sram_buff_ofs += sizeof(digest_len_init);
2113         larval_seq_len = 0;
2114
2115 #if (DX_DEV_SHA_MAX > 256)
2116         /* Copy-to-sram digest-len for sha384/512 */
2117         ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
2118                                      ARRAY_SIZE(digest_len_sha512_init),
2119                                      larval_seq, &larval_seq_len);
2120         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2121         if (unlikely(rc != 0))
2122                 goto init_digest_const_err;
2123
2124         sram_buff_ofs += sizeof(digest_len_sha512_init);
2125         larval_seq_len = 0;
2126 #endif
2127
2128         /* The initial digests offset */
2129         hash_handle->larval_digest_sram_addr = sram_buff_ofs;
2130
2131         /* Copy-to-sram initial SHA* digests */
2132         ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
2133                                      ARRAY_SIZE(md5_init), larval_seq,
2134                                      &larval_seq_len);
2135         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2136         if (unlikely(rc != 0))
2137                 goto init_digest_const_err;
2138         sram_buff_ofs += sizeof(md5_init);
2139         larval_seq_len = 0;
2140
2141         ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
2142                                      ARRAY_SIZE(sha1_init), larval_seq,
2143                                      &larval_seq_len);
2144         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2145         if (unlikely(rc != 0))
2146                 goto init_digest_const_err;
2147         sram_buff_ofs += sizeof(sha1_init);
2148         larval_seq_len = 0;
2149
2150         ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
2151                                      ARRAY_SIZE(sha224_init), larval_seq,
2152                                      &larval_seq_len);
2153         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2154         if (unlikely(rc != 0))
2155                 goto init_digest_const_err;
2156         sram_buff_ofs += sizeof(sha224_init);
2157         larval_seq_len = 0;
2158
2159         ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
2160                                      ARRAY_SIZE(sha256_init), larval_seq,
2161                                      &larval_seq_len);
2162         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2163         if (unlikely(rc != 0))
2164                 goto init_digest_const_err;
2165         sram_buff_ofs += sizeof(sha256_init);
2166         larval_seq_len = 0;
2167
2168 #if (DX_DEV_SHA_MAX > 256)
2169         /* each 64-bit larval digest word must be split and word-swapped before copying to SRAM */
2170         for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
2171                 const u32 const0 = ((u32 *)&sha384_init[i])[1];
2172                 const u32 const1 = ((u32 *)&sha384_init[i])[0];
2173
2174                 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2175                                              larval_seq, &larval_seq_len);
2176                 sram_buff_ofs += sizeof(u32);
2177                 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2178                                              larval_seq, &larval_seq_len);
2179                 sram_buff_ofs += sizeof(u32);
2180         }
2181         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2182         if (unlikely(rc != 0)) {
2183                 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2184                 goto init_digest_const_err;
2185         }
2186         larval_seq_len = 0;
2187
2188         for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2189                 const u32 const0 = ((u32 *)&sha512_init[i])[1];
2190                 const u32 const1 = ((u32 *)&sha512_init[i])[0];
2191
2192                 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2193                                              larval_seq, &larval_seq_len);
2194                 sram_buff_ofs += sizeof(u32);
2195                 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2196                                              larval_seq, &larval_seq_len);
2197                 sram_buff_ofs += sizeof(u32);
2198         }
2199         rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2200         if (unlikely(rc != 0)) {
2201                 dev_err(dev, "send_request() failed (rc = %d)\n", rc);
2202                 goto init_digest_const_err;
2203         }
2204 #endif
2205
2206 init_digest_const_err:
2207         return rc;
2208 }
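
/*
 * Resulting SRAM layout, starting at hash_handle->digest_len_sram_addr
 * (bracketed entries only when DX_DEV_SHA_MAX > 256):
 *
 *      digest_len_init | [digest_len_sha512_init] |
 *      md5_init | sha1_init | sha224_init | sha256_init |
 *      [sha384_init | sha512_init]
 *
 * ssi_ahash_get_larval_digest_sram_addr() below computes offsets by
 * walking the same order, so the two must be kept in sync.
 */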
2209
2210 int ssi_hash_alloc(struct ssi_drvdata *drvdata)
2211 {
2212         struct ssi_hash_handle *hash_handle;
2213         ssi_sram_addr_t sram_buff;
2214         u32 sram_size_to_alloc;
2215         struct device *dev = drvdata_to_dev(drvdata);
2216         int rc = 0;
2217         int alg;
2218
2219         hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2220         if (!hash_handle)
2221                 return -ENOMEM;
2222
2223         INIT_LIST_HEAD(&hash_handle->hash_list);
2224         drvdata->hash_handle = hash_handle;
2225
2226         sram_size_to_alloc = sizeof(digest_len_init) +
2227 #if (DX_DEV_SHA_MAX > 256)
2228                         sizeof(digest_len_sha512_init) +
2229                         sizeof(sha384_init) +
2230                         sizeof(sha512_init) +
2231 #endif
2232                         sizeof(md5_init) +
2233                         sizeof(sha1_init) +
2234                         sizeof(sha224_init) +
2235                         sizeof(sha256_init);
2236
2237         sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
2238         if (sram_buff == NULL_SRAM_ADDR) {
2239                 dev_err(dev, "SRAM pool exhausted\n");
2240                 rc = -ENOMEM;
2241                 goto fail;
2242         }
2243
2244         /* The initial digest-len offset */
2245         hash_handle->digest_len_sram_addr = sram_buff;
2246
2247         /* must be set before the alg registration as it is used there */
2248         rc = ssi_hash_init_sram_digest_consts(drvdata);
2249         if (unlikely(rc != 0)) {
2250                 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2251                 goto fail;
2252         }
2253
2254         /* ahash registration */
2255         for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2256                 struct ssi_hash_alg *t_alg;
2257                 int hw_mode = driver_hash[alg].hw_mode;
2258
2259                 /* register hmac version */
2260                 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
2261                 if (IS_ERR(t_alg)) {
2262                         rc = PTR_ERR(t_alg);
2263                         dev_err(dev, "%s alg allocation failed\n",
2264                                 driver_hash[alg].driver_name);
2265                         goto fail;
2266                 }
2267                 t_alg->drvdata = drvdata;
2268
2269                 rc = crypto_register_ahash(&t_alg->ahash_alg);
2270                 if (unlikely(rc)) {
2271                         dev_err(dev, "%s alg registration failed\n",
2272                                 driver_hash[alg].driver_name);
2273                         kfree(t_alg);
2274                         goto fail;
2275                 } else {
2276                         list_add_tail(&t_alg->entry,
2277                                       &hash_handle->hash_list);
2278                 }
2279
2280                 if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
2281                     (hw_mode == DRV_CIPHER_CMAC))
2282                         continue;
2283
2284                 /* register hash version */
2285                 t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
2286                 if (IS_ERR(t_alg)) {
2287                         rc = PTR_ERR(t_alg);
2288                         dev_err(dev, "%s alg allocation failed\n",
2289                                 driver_hash[alg].driver_name);
2290                         goto fail;
2291                 }
2292                 t_alg->drvdata = drvdata;
2293
2294                 rc = crypto_register_ahash(&t_alg->ahash_alg);
2295                 if (unlikely(rc)) {
2296                         dev_err(dev, "%s alg registration failed\n",
2297                                 driver_hash[alg].driver_name);
2298                         kfree(t_alg);
2299                         goto fail;
2300                 } else {
2301                         list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2302                 }
2303         }
2304
2305         return 0;
2306
2307 fail:
2308         /* also unregisters any algs already added to hash_list */
2309         ssi_hash_free(drvdata);
2310         return rc;
2311 }
2312
2313 int ssi_hash_free(struct ssi_drvdata *drvdata)
2314 {
2315         struct ssi_hash_alg *t_hash_alg, *hash_n;
2316         struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2317
2318         if (hash_handle) {
2319                 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2320                         crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2321                         list_del(&t_hash_alg->entry);
2322                         kfree(t_hash_alg);
2323                 }
2324
2325                 kfree(hash_handle);
2326                 drvdata->hash_handle = NULL;
2327         }
2328         return 0;
2329 }
2330
2331 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2332                                        struct cc_hw_desc desc[],
2333                                        unsigned int *seq_size)
2334 {
2335         unsigned int idx = *seq_size;
2336         struct ahash_req_ctx *state = ahash_request_ctx(areq);
2337         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2338         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2339
2340         /* Setup XCBC MAC K1 */
2341         hw_desc_init(&desc[idx]);
2342         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2343                                             XCBC_MAC_K1_OFFSET),
2344                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2345         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2346         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2347         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2348         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2349         set_flow_mode(&desc[idx], S_DIN_to_AES);
2350         idx++;
2351
2352         /* Setup XCBC MAC K2 */
2353         hw_desc_init(&desc[idx]);
2354         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2355                                             XCBC_MAC_K2_OFFSET),
2356                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2357         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2358         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2359         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2360         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2361         set_flow_mode(&desc[idx], S_DIN_to_AES);
2362         idx++;
2363
2364         /* Setup XCBC MAC K3 */
2365         hw_desc_init(&desc[idx]);
2366         set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2367                                             XCBC_MAC_K3_OFFSET),
2368                      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2369         set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2370         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2371         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2372         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2373         set_flow_mode(&desc[idx], S_DIN_to_AES);
2374         idx++;
2375
2376         /* Loading MAC state */
2377         hw_desc_init(&desc[idx]);
2378         set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2379                      CC_AES_BLOCK_SIZE, NS_BIT);
2380         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2381         set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2382         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2383         set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2384         set_flow_mode(&desc[idx], S_DIN_to_AES);
2385         idx++;
2386         *seq_size = idx;
2387 }
2388
2389 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2390                                        struct cc_hw_desc desc[],
2391                                        unsigned int *seq_size)
2392 {
2393         unsigned int idx = *seq_size;
2394         struct ahash_req_ctx *state = ahash_request_ctx(areq);
2395         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2396         struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2397
2398         /* Setup CMAC Key */
2399         hw_desc_init(&desc[idx]);
2400         set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2401                      ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2402                       ctx->key_params.keylen), NS_BIT);
2403         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2404         set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2405         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2406         set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2407         set_flow_mode(&desc[idx], S_DIN_to_AES);
2408         idx++;
2409
2410         /* Load MAC state */
2411         hw_desc_init(&desc[idx]);
2412         set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2413                      CC_AES_BLOCK_SIZE, NS_BIT);
2414         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2415         set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2416         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2417         set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2418         set_flow_mode(&desc[idx], S_DIN_to_AES);
2419         idx++;
2420         *seq_size = idx;
2421 }
2422
2423 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2424                                       struct ssi_hash_ctx *ctx,
2425                                       unsigned int flow_mode,
2426                                       struct cc_hw_desc desc[],
2427                                       bool is_not_last_data,
2428                                       unsigned int *seq_size)
2429 {
2430         unsigned int idx = *seq_size;
2431         struct device *dev = drvdata_to_dev(ctx->drvdata);
2432
2433         if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2434                 hw_desc_init(&desc[idx]);
2435                 set_din_type(&desc[idx], DMA_DLLI,
2436                              sg_dma_address(areq_ctx->curr_sg),
2437                              areq_ctx->curr_sg->length, NS_BIT);
2438                 set_flow_mode(&desc[idx], flow_mode);
2439                 idx++;
2440         } else {
2441                 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2442                         dev_dbg(dev, " NULL mode\n");
2443                         /* nothing to build */
2444                         return;
2445                 }
2446                 /* bypass */
2447                 hw_desc_init(&desc[idx]);
2448                 set_din_type(&desc[idx], DMA_DLLI,
2449                              areq_ctx->mlli_params.mlli_dma_addr,
2450                              areq_ctx->mlli_params.mlli_len, NS_BIT);
2451                 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2452                               areq_ctx->mlli_params.mlli_len);
2453                 set_flow_mode(&desc[idx], BYPASS);
2454                 idx++;
2455                 /* process */
2456                 hw_desc_init(&desc[idx]);
2457                 set_din_type(&desc[idx], DMA_MLLI,
2458                              ctx->drvdata->mlli_sram_addr,
2459                              areq_ctx->mlli_nents, NS_BIT);
2460                 set_flow_mode(&desc[idx], flow_mode);
2461                 idx++;
2462         }
2463         if (is_not_last_data)
2464                 set_din_not_last_indication(&desc[(idx - 1)]);
2465         /* return updated desc sequence size */
2466         *seq_size = idx;
2467 }
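
/*
 * The two branches above correspond to the engine's two DMA descriptor
 * flavors: DLLI feeds a single contiguous buffer straight into the flow,
 * while the MLLI path first BYPASS-copies the link-list table into the
 * engine's SRAM (mlli_sram_addr) and then lets the flow pull the data
 * through that table.
 */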
2468
2469 /*!
2470  * Gets the address of the initial digest in SRAM
2471  * according to the given hash mode
2472  *
2473  * \param drvdata
2474  * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256, plus SHA384/SHA512 when (DX_DEV_SHA_MAX > 256)
2475  *
2476  * \return u32 The address of the initial digest in SRAM
2477  */
2478 ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
2479 {
2480         struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2481         struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2482         struct device *dev = drvdata_to_dev(_drvdata);
2483
2484         switch (mode) {
2485         case DRV_HASH_NULL:
2486                 break; /*Ignore*/
2487         case DRV_HASH_MD5:
2488                 return (hash_handle->larval_digest_sram_addr);
2489         case DRV_HASH_SHA1:
2490                 return (hash_handle->larval_digest_sram_addr +
2491                         sizeof(md5_init));
2492         case DRV_HASH_SHA224:
2493                 return (hash_handle->larval_digest_sram_addr +
2494                         sizeof(md5_init) +
2495                         sizeof(sha1_init));
2496         case DRV_HASH_SHA256:
2497                 return (hash_handle->larval_digest_sram_addr +
2498                         sizeof(md5_init) +
2499                         sizeof(sha1_init) +
2500                         sizeof(sha224_init));
2501 #if (DX_DEV_SHA_MAX > 256)
2502         case DRV_HASH_SHA384:
2503                 return (hash_handle->larval_digest_sram_addr +
2504                         sizeof(md5_init) +
2505                         sizeof(sha1_init) +
2506                         sizeof(sha224_init) +
2507                         sizeof(sha256_init));
2508         case DRV_HASH_SHA512:
2509                 return (hash_handle->larval_digest_sram_addr +
2510                         sizeof(md5_init) +
2511                         sizeof(sha1_init) +
2512                         sizeof(sha224_init) +
2513                         sizeof(sha256_init) +
2514                         sizeof(sha384_init));
2515 #endif
2516         default:
2517                 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2518         }
2519
2520         /* fall back to a valid, if incorrect, address to avoid a kernel crash */
2521         return hash_handle->larval_digest_sram_addr;
2522 }
2523
2524 ssi_sram_addr_t
2525 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
2526 {
2527         struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2528         struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2529         ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2530
2531         switch (mode) {
2532         case DRV_HASH_SHA1:
2533         case DRV_HASH_SHA224:
2534         case DRV_HASH_SHA256:
2535         case DRV_HASH_MD5:
2536                 return digest_len_addr;
2537 #if (DX_DEV_SHA_MAX > 256)
2538         case DRV_HASH_SHA384:
2539         case DRV_HASH_SHA512:
2540                 return digest_len_addr + sizeof(digest_len_init);
2541 #endif
2542         default:
2543                 return digest_len_addr; /* to avoid a kernel crash */
2544         }
2545 }
2546