/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

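/* AES key-schedule round constants (Rcon), held in the most significant byte */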
static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err);

static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

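/*
 * Count the SG entries needed to cover @reqlen bytes of @sg, skipping the
 * first @skip bytes, with each entry limited to @entlen bytes.
 */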
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

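/*
 * Software verification of the AEAD authentication tag using the hardware
 * response; sets *err to -EBADMSG on mismatch, 0 otherwise.
 */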
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {

                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                authsize, req->assoclen +
                                req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
        int err = 0;

        spin_lock_bh(&dev->lock_chcr_dev);
        if (dev->state == CHCR_DETACH)
                err = 1;
        else
                atomic_inc(&dev->inflight);

        spin_unlock_bh(&dev->lock_chcr_dev);

        return err;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_dev *dev = a_ctx(tfm)->dev;

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);

        return err;
}

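/*
 * Derive the AES decryption key: run the key expansion and copy out the
 * final Nk round-key words in reverse order, which is the form used when
 * programming the key context for decrypt operations.
 */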
static void get_aes_decrypt_key(unsigned char *dec_key,
                                       const unsigned char *key,
                                       unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8  nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
                j--;
                if (j < 0)
                        j += nk;
        }
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

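/*
 * Run a single block (the HMAC ipad/opad) through the software shash and
 * export its intermediate state; used to precompute the inner and outer
 * partial hashes that are loaded into hardware.
 */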
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

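/* Convert the exported hash state words to the big-endian order used by HW */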
static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);
        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                   struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

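/*
 * Append a scatterlist to the destination DSGL, skipping the first @skip
 * bytes and splitting entries larger than CHCR_DST_SG_SIZE.
 */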
static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
                           struct scatterlist *sg,
                              unsigned int slen,
                              unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                      offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                              ULPTX_NSGE_V(walk->nents));
}


static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                        size_t size,
                                        dma_addr_t addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

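/*
 * Append a scatterlist to the ULPTX source SGL, skipping the first @skip
 * bytes; the first entry fills len0/addr0 and every entry is capped at
 * CHCR_SRC_SG_SIZE.
 */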
static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
                                        struct scatterlist *sg,
                               unsigned int len,
                               unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                                                        CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}

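/*
 * Walk the source and destination lists in step and return how many payload
 * bytes fit in the remaining work-request space (@space), accounting for the
 * per-entry overheads in sgl_ent_len[] and dsgl_ent_len[].
 */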
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                                CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }

        }
        return min(srclen, dstlen);
}

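/*
 * Hand the request to the software skcipher fallback; used whenever the
 * hardware path cannot process the remaining data (for example when a CTR
 * counter-overflow split leaves nothing to send in this pass).
 */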
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

        skcipher_request_set_sync_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;

}
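
/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR and ULPTX header fields that are common
 * to cipher, hash and AEAD work requests.
 */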
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];


        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: cipher WR parameters: the cipher request, the ingress qid
 *                where the response to this WR should be received, and the
 *                number of bytes to process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                                  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                                     (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 1, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                        ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->info, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        int err = 0;

        crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
                                CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
                                cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
                crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
                CRYPTO_TFM_RES_MASK;
        return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
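
/*
 * Add @add to the big-endian counter at the end of the IV, propagating the
 * carry towards the more significant words.
 */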
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }

}

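/*
 * Limit @bytes so that the low 32-bit big-endian counter word in @iv does
 * not wrap within this chunk of the request.
 */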
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}

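/*
 * Recompute the XTS tweak for the next chunk: encrypt the IV with the second
 * half of the key, advance it with gf128mul_x_ble() once per block already
 * processed, and decrypt it back unless this is the final chunk.
 */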
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_cipher *cipher;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        cipher = ablkctx->aes_generic;
        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out;
        crypto_cipher_encrypt_one(cipher, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                crypto_cipher_decrypt_one(cipher, iv, iv);
out:
        return ret;
}

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /*Updated before sending last WR*/
                        memcpy(iv, req->info, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;

}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */

static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /*Already updated for Decrypt*/
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);

        }
        return ret;

}

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct  cipher_wr_param wrparam;
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        int bytes;

        if (err)
                goto unmap;
        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes  = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes  = req->nbytes - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                     req->base.flags,
                                     req->src,
                                     req->dst,
                                     req->nbytes,
                                     req->info,
                                     reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
}

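/*
 * Common encrypt/decrypt path: validate the request, DMA-map it, decide
 * between immediate data and SGLs, set up the IV for the cipher mode and
 * build the first work request.
 */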
static int process_cipher(struct ablkcipher_request *req,
                                  unsigned short qid,
                                  struct sk_buff **skb,
                                  unsigned short op_type)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct  cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;
        if (!req->info)
                goto error;
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }

        err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (err)
                goto error;
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                            AES_MIN_KEY_SIZE +
                                            sizeof(struct cpl_rx_phys_dsgl) +
                                        /*Min dsgl size*/
                                            32))) {
                /* Can be sent as Imm*/
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->nbytes;

        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes  = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->nbytes;
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->info, bytes);
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                                CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

        } else {

                memcpy(reqctx->iv, req->info, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           reqctx->iv,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct sk_buff *skb = NULL;
        int err, isfull = 0;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

        err = chcr_inc_wrcount(dev);
        if (err)
                return -ENXIO;
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -ENOSPC;
                        goto error;
                }
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_ENCRYPT_OP);
        if (err || !skb)
                return  err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return isfull ? -EBUSY : -EINPROGRESS;
error:
        chcr_dec_wrcount(dev);
        return err;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct sk_buff *skb = NULL;
        int err, isfull = 0;

        err = chcr_inc_wrcount(dev);
        if (err)
                return -ENXIO;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -ENOSPC;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_DECRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return isfull ? -EBUSY : -EINPROGRESS;
}

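/*
 * Bind the tfm context to a chcr device and pick its TX/RX queue indices,
 * spreading contexts across the adapter's channels.
 */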
static int chcr_device_init(struct chcr_context *ctx)
{
        struct uld_ctx *u_ctx = NULL;
        struct adapter *adap;
        unsigned int id;
        int txq_perchan, txq_idx, ntxq;
        int err = 0, rxq_perchan, rxq_idx;

        id = smp_processor_id();
        if (!ctx->dev) {
                u_ctx = assign_chcr_device();
                if (!u_ctx) {
                        err = -ENXIO;
                        pr_err("chcr device assignment fails\n");
                        goto out;
                }
                ctx->dev = &u_ctx->dev;
                adap = padap(ctx->dev);
                ntxq = u_ctx->lldi.ntxq;
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
                ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                spin_unlock(&ctx->dev->lock_chcr_dev);
                rxq_idx = ctx->tx_chan_id * rxq_perchan;
                rxq_idx += id % rxq_perchan;
                txq_idx = ctx->tx_chan_id * txq_perchan;
                txq_idx += id % txq_perchan;
                ctx->rx_qidx = rxq_idx;
                ctx->tx_qidx = txq_idx;
1399                 /* Channel ID used by the SGE to forward packets to the
1400                  * host. FW must use the same value in the RSS_CH field
1401                  * of cpl_fw6_pld. The driver programs the PCI channel ID
1402                  * used by FW at queue-allocation time via "pi->tx_chan".
1403                  */
1404                 ctx->pci_chan_id = txq_idx / txq_perchan;
1405         }
1406 out:
1407         return err;
1408 }
1409
1410 static int chcr_cra_init(struct crypto_tfm *tfm)
1411 {
1412         struct crypto_alg *alg = tfm->__crt_alg;
1413         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1414         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1415
1416         ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1417                                 CRYPTO_ALG_NEED_FALLBACK);
1418         if (IS_ERR(ablkctx->sw_cipher)) {
1419                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1420                 return PTR_ERR(ablkctx->sw_cipher);
1421         }
1422
1423         if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1424                 /* To update the tweak */
1425                 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1426                 if (IS_ERR(ablkctx->aes_generic)) {
1427                         pr_err("failed to allocate aes cipher for tweak\n");
1428                         return PTR_ERR(ablkctx->aes_generic);
1429                 }
1430         } else
1431                 ablkctx->aes_generic = NULL;
1432
1433         tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1434         return chcr_device_init(crypto_tfm_ctx(tfm));
1435 }
1436
1437 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1438 {
1439         struct crypto_alg *alg = tfm->__crt_alg;
1440         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1441         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1442
1443         /* RFC 3686 initialises the IV counter to 1, so rfc3686(ctr(aes))
1444          * cannot be used as the fallback in chcr_handle_cipher_response.
1445          */
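        /* For reference, an RFC 3686 counter block is laid out as
         * nonce[4] | per-request IV[8] | be32 block counter, with the
         * counter starting at 1 for the first block. A sketch (variable
         * names illustrative only):
         *
         *      memcpy(cb, nonce, CTR_RFC3686_NONCE_SIZE);
         *      memcpy(cb + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
         *      *(__be32 *)(cb + CTR_RFC3686_NONCE_SIZE +
         *                  CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
         */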
1446         ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1447                                 CRYPTO_ALG_NEED_FALLBACK);
1448         if (IS_ERR(ablkctx->sw_cipher)) {
1449                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1450                 return PTR_ERR(ablkctx->sw_cipher);
1451         }
1452         tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1453         return chcr_device_init(crypto_tfm_ctx(tfm));
1454 }
1455
1456
1457 static void chcr_cra_exit(struct crypto_tfm *tfm)
1458 {
1459         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1460         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1461
1462         crypto_free_sync_skcipher(ablkctx->sw_cipher);
1463         if (ablkctx->aes_generic)
1464                 crypto_free_cipher(ablkctx->aes_generic);
1465 }
1466
1467 static int get_alg_config(struct algo_param *params,
1468                           unsigned int auth_size)
1469 {
1470         switch (auth_size) {
1471         case SHA1_DIGEST_SIZE:
1472                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1473                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1474                 params->result_size = SHA1_DIGEST_SIZE;
1475                 break;
1476         case SHA224_DIGEST_SIZE:
1477                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1478                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1479                 params->result_size = SHA256_DIGEST_SIZE;
1480                 break;
1481         case SHA256_DIGEST_SIZE:
1482                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1483                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1484                 params->result_size = SHA256_DIGEST_SIZE;
1485                 break;
1486         case SHA384_DIGEST_SIZE:
1487                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1488                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1489                 params->result_size = SHA512_DIGEST_SIZE;
1490                 break;
1491         case SHA512_DIGEST_SIZE:
1492                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1493                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1494                 params->result_size = SHA512_DIGEST_SIZE;
1495                 break;
1496         default:
1497                 pr_err("chcr : ERROR, unsupported digest size\n");
1498                 return -EINVAL;
1499         }
1500         return 0;
1501 }
1502
1503 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1504 {
1505         crypto_free_shash(base_hash);
1506 }
1507
1508 /**
1509  *      create_hash_wr - Create hash work request
1510  *      @req: ahash request
1511  */
1512 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1513                                       struct hash_wr_param *param)
1514 {
1515         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1516         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1517         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1518         struct sk_buff *skb = NULL;
1519         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1520         struct chcr_wr *chcr_req;
1521         struct ulptx_sgl *ulptx;
1522         unsigned int nents = 0, transhdr_len;
1523         unsigned int temp = 0;
1524         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1525                 GFP_ATOMIC;
1526         struct adapter *adap = padap(h_ctx(tfm)->dev);
1527         int error = 0;
1528
1529         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1530         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1531                                 param->sg_len) <= SGE_MAX_WR_LEN;
1532         nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1533                       CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1534         nents += param->bfr_len ? 1 : 0;
1535         transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1536                                 param->sg_len, 16) : (sgl_len(nents) * 8);
1537         transhdr_len = roundup(transhdr_len, 16);
1538
1539         skb = alloc_skb(transhdr_len, flags);
1540         if (!skb)
1541                 return ERR_PTR(-ENOMEM);
1542         chcr_req = __skb_put_zero(skb, transhdr_len);
1543
1544         chcr_req->sec_cpl.op_ivinsrtofst =
1545                 FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1546         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1547
1548         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1549                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1550         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1551                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1552         chcr_req->sec_cpl.seqno_numivs =
1553                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1554                                          param->opad_needed, 0);
1555
1556         chcr_req->sec_cpl.ivgen_hdrlen =
1557                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1558
1559         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1560                param->alg_prm.result_size);
1561
1562         if (param->opad_needed)
1563                 memcpy(chcr_req->key_ctx.key +
1564                        ((param->alg_prm.result_size <= 32) ? 32 :
1565                         CHCR_HASH_MAX_DIGEST_SIZE),
1566                        hmacctx->opad, param->alg_prm.result_size);
1567
1568         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1569                                             param->alg_prm.mk_size, 0,
1570                                             param->opad_needed,
1571                                             ((param->kctx_len +
1572                                              sizeof(chcr_req->key_ctx)) >> 4));
1573         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1574         ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1575                                      DUMMY_BYTES);
1576         if (param->bfr_len != 0) {
1577                 req_ctx->hctx_wr.dma_addr =
1578                         dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1579                                        param->bfr_len, DMA_TO_DEVICE);
1580                 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1581                                        req_ctx->hctx_wr.dma_addr)) {
1582                         error = -ENOMEM;
1583                         goto err;
1584                 }
1585                 req_ctx->hctx_wr.dma_len = param->bfr_len;
1586         } else {
1587                 req_ctx->hctx_wr.dma_addr = 0;
1588         }
1589         chcr_add_hash_src_ent(req, ulptx, param);
1590         /* Request up to the max WR size */
1591         temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1592                                 (param->sg_len + param->bfr_len) : 0);
1593         atomic_inc(&adap->chcr_stats.digest_rqst);
1594         create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1595                     param->hash_size, transhdr_len,
1596                     temp,  0);
1597         req_ctx->hctx_wr.skb = skb;
1598         return skb;
1599 err:
1600         kfree_skb(skb);
1601         return  ERR_PTR(error);
1602 }
1603
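/* Hash updates are accumulated a block at a time: anything shorter than
 * the block size is buffered in reqbfr and prepended to the next update,
 * so the hardware only ever sees whole blocks until the final request.
 */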
1604 static int chcr_ahash_update(struct ahash_request *req)
1605 {
1606         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1607         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1608         struct uld_ctx *u_ctx = NULL;
1609         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1610         struct sk_buff *skb;
1611         u8 remainder = 0, bs;
1612         unsigned int nbytes = req->nbytes;
1613         struct hash_wr_param params;
1614         int error, isfull = 0;
1615
1616         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1617         u_ctx = ULD_CTX(h_ctx(rtfm));
1618
1619         if (nbytes + req_ctx->reqlen >= bs) {
1620                 remainder = (nbytes + req_ctx->reqlen) % bs;
1621                 nbytes = nbytes + req_ctx->reqlen - remainder;
1622         } else {
1623                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1624                                    + req_ctx->reqlen, nbytes, 0);
1625                 req_ctx->reqlen += nbytes;
1626                 return 0;
1627         }
1628         error = chcr_inc_wrcount(dev);
1629         if (error)
1630                 return -ENXIO;
1631         /* Detached state means lldi or padap has been freed; holding an
1632          * inflight count on dev guarantees that lldi and padap stay valid.
1633          */
1634         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1635                                             h_ctx(rtfm)->tx_qidx))) {
1636                 isfull = 1;
1637                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1638                         error = -ENOSPC;
1639                         goto err;
1640                 }
1641         }
1642
1643         chcr_init_hctx_per_wr(req_ctx);
1644         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1645         if (error) {
1646                 error = -ENOMEM;
1647                 goto err;
1648         }
1649         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1650         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1651         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1652                                      HASH_SPACE_LEFT(params.kctx_len), 0);
1653         if (params.sg_len > req->nbytes)
1654                 params.sg_len = req->nbytes;
1655         params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1656                         req_ctx->reqlen;
1657         params.opad_needed = 0;
1658         params.more = 1;
1659         params.last = 0;
1660         params.bfr_len = req_ctx->reqlen;
1661         params.scmd1 = 0;
1662         req_ctx->hctx_wr.srcsg = req->src;
1663
1664         params.hash_size = params.alg_prm.result_size;
1665         req_ctx->data_len += params.sg_len + params.bfr_len;
1666         skb = create_hash_wr(req, &params);
1667         if (IS_ERR(skb)) {
1668                 error = PTR_ERR(skb);
1669                 goto unmap;
1670         }
1671
1672         req_ctx->hctx_wr.processed += params.sg_len;
1673         if (remainder) {
1674                 /* Swap buffers */
1675                 swap(req_ctx->reqbfr, req_ctx->skbfr);
1676                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1677                                    req_ctx->reqbfr, remainder, req->nbytes -
1678                                    remainder);
1679         }
1680         req_ctx->reqlen = remainder;
1681         skb->dev = u_ctx->lldi.ports[0];
1682         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1683         chcr_send_wr(skb);
1684
1685         return isfull ? -EBUSY : -EINPROGRESS;
1686 unmap:
1687         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1688 err:
1689         chcr_dec_wrcount(dev);
1690         return error;
1691 }
1692
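/* Build the final padded block for the "no trailing data" case: a 0x80 byte
 * followed by zeroes, with the total message length in bits placed in the
 * last 8 bytes of the block (offset 56 for 64-byte blocks, 120 for 128-byte
 * blocks), matching MD-style padding for SHA-1/SHA-2.
 */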
1693 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1694 {
1695         memset(bfr_ptr, 0, bs);
1696         *bfr_ptr = 0x80;
1697         if (bs == 64)
1698                 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1699         else
1700                 *(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1701 }
1702
1703 static int chcr_ahash_final(struct ahash_request *req)
1704 {
1705         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1706         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1707         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1708         struct hash_wr_param params;
1709         struct sk_buff *skb;
1710         struct uld_ctx *u_ctx = NULL;
1711         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1712         int error = -EINVAL;
1713
1714         error = chcr_inc_wrcount(dev);
1715         if (error)
1716                 return -ENXIO;
1717
1718         chcr_init_hctx_per_wr(req_ctx);
1719         u_ctx = ULD_CTX(h_ctx(rtfm));
1720         if (is_hmac(crypto_ahash_tfm(rtfm)))
1721                 params.opad_needed = 1;
1722         else
1723                 params.opad_needed = 0;
1724         params.sg_len = 0;
1725         req_ctx->hctx_wr.isfinal = 1;
1726         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1727         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1728         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1729                 params.opad_needed = 1;
1730                 params.kctx_len *= 2;
1731         } else {
1732                 params.opad_needed = 0;
1733         }
1734
1735         req_ctx->hctx_wr.result = 1;
1736         params.bfr_len = req_ctx->reqlen;
1737         req_ctx->data_len += params.bfr_len + params.sg_len;
1738         req_ctx->hctx_wr.srcsg = req->src;
1739         if (req_ctx->reqlen == 0) {
1740                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1741                 params.last = 0;
1742                 params.more = 1;
1743                 params.scmd1 = 0;
1744                 params.bfr_len = bs;
1745
1746         } else {
1747                 params.scmd1 = req_ctx->data_len;
1748                 params.last = 1;
1749                 params.more = 0;
1750         }
1751         params.hash_size = crypto_ahash_digestsize(rtfm);
1752         skb = create_hash_wr(req, &params);
1753         if (IS_ERR(skb)) {
1754                 error = PTR_ERR(skb);
1755                 goto err;
1756         }
1757         req_ctx->reqlen = 0;
1758         skb->dev = u_ctx->lldi.ports[0];
1759         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1760         chcr_send_wr(skb);
1761         return -EINPROGRESS;
1762 err:
1763         chcr_dec_wrcount(dev);
1764         return error;
1765 }
1766
1767 static int chcr_ahash_finup(struct ahash_request *req)
1768 {
1769         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1770         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1771         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1772         struct uld_ctx *u_ctx = NULL;
1773         struct sk_buff *skb;
1774         struct hash_wr_param params;
1775         u8  bs;
1776         int error, isfull = 0;
1777
1778         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1779         u_ctx = ULD_CTX(h_ctx(rtfm));
1780         error = chcr_inc_wrcount(dev);
1781         if (error)
1782                 return -ENXIO;
1783
1784         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1785                                             h_ctx(rtfm)->tx_qidx))) {
1786                 isfull = 1;
1787                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1788                         error = -ENOSPC;
1789                         goto err;
1790                 }
1791         }
1792         chcr_init_hctx_per_wr(req_ctx);
1793         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1794         if (error) {
1795                 error = -ENOMEM;
1796                 goto err;
1797         }
1798
1799         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1800         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1801         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1802                 params.kctx_len *= 2;
1803                 params.opad_needed = 1;
1804         } else {
1805                 params.opad_needed = 0;
1806         }
1807
1808         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1809                                     HASH_SPACE_LEFT(params.kctx_len), 0);
1810         if (params.sg_len < req->nbytes) {
1811                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1812                         params.kctx_len /= 2;
1813                         params.opad_needed = 0;
1814                 }
1815                 params.last = 0;
1816                 params.more = 1;
1817                 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1818                                         - req_ctx->reqlen;
1819                 params.hash_size = params.alg_prm.result_size;
1820                 params.scmd1 = 0;
1821         } else {
1822                 params.last = 1;
1823                 params.more = 0;
1824                 params.sg_len = req->nbytes;
1825                 params.hash_size = crypto_ahash_digestsize(rtfm);
1826                 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1827                                 params.sg_len;
1828         }
1829         params.bfr_len = req_ctx->reqlen;
1830         req_ctx->data_len += params.bfr_len + params.sg_len;
1831         req_ctx->hctx_wr.result = 1;
1832         req_ctx->hctx_wr.srcsg = req->src;
1833         if ((req_ctx->reqlen + req->nbytes) == 0) {
1834                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1835                 params.last = 0;
1836                 params.more = 1;
1837                 params.scmd1 = 0;
1838                 params.bfr_len = bs;
1839         }
1840         skb = create_hash_wr(req, &params);
1841         if (IS_ERR(skb)) {
1842                 error = PTR_ERR(skb);
1843                 goto unmap;
1844         }
1845         req_ctx->reqlen = 0;
1846         req_ctx->hctx_wr.processed += params.sg_len;
1847         skb->dev = u_ctx->lldi.ports[0];
1848         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1849         chcr_send_wr(skb);
1850
1851         return isfull ? -EBUSY : -EINPROGRESS;
1852 unmap:
1853         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1854 err:
1855         chcr_dec_wrcount(dev);
1856         return error;
1857 }
1858
1859 static int chcr_ahash_digest(struct ahash_request *req)
1860 {
1861         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1862         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1863         struct chcr_dev *dev = h_ctx(rtfm)->dev;
1864         struct uld_ctx *u_ctx = NULL;
1865         struct sk_buff *skb;
1866         struct hash_wr_param params;
1867         u8  bs;
1868         int error, isfull = 0;
1869
1870         rtfm->init(req);
1871         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1872         error = chcr_inc_wrcount(dev);
1873         if (error)
1874                 return -ENXIO;
1875
1876         u_ctx = ULD_CTX(h_ctx(rtfm));
1877         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1878                                             h_ctx(rtfm)->tx_qidx))) {
1879                 isfull = 1;
1880                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1881                         error = -ENOSPC;
1882                         goto err;
1883                 }
1884         }
1885
1886         chcr_init_hctx_per_wr(req_ctx);
1887         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1888         if (error) {
1889                 error = -ENOMEM;
1890                 goto err;
1891         }
1892
1893         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1894         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1895         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1896                 params.kctx_len *= 2;
1897                 params.opad_needed = 1;
1898         } else {
1899                 params.opad_needed = 0;
1900         }
1901         params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1902                                 HASH_SPACE_LEFT(params.kctx_len), 0);
1903         if (params.sg_len < req->nbytes) {
1904                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1905                         params.kctx_len /= 2;
1906                         params.opad_needed = 0;
1907                 }
1908                 params.last = 0;
1909                 params.more = 1;
1910                 params.scmd1 = 0;
1911                 params.sg_len = rounddown(params.sg_len, bs);
1912                 params.hash_size = params.alg_prm.result_size;
1913         } else {
1914                 params.sg_len = req->nbytes;
1915                 params.hash_size = crypto_ahash_digestsize(rtfm);
1916                 params.last = 1;
1917                 params.more = 0;
1918                 params.scmd1 = req->nbytes + req_ctx->data_len;
1919
1920         }
1921         params.bfr_len = 0;
1922         req_ctx->hctx_wr.result = 1;
1923         req_ctx->hctx_wr.srcsg = req->src;
1924         req_ctx->data_len += params.bfr_len + params.sg_len;
1925
1926         if (req->nbytes == 0) {
1927                 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1928                 params.more = 1;
1929                 params.bfr_len = bs;
1930         }
1931
1932         skb = create_hash_wr(req, &params);
1933         if (IS_ERR(skb)) {
1934                 error = PTR_ERR(skb);
1935                 goto unmap;
1936         }
1937         req_ctx->hctx_wr.processed += params.sg_len;
1938         skb->dev = u_ctx->lldi.ports[0];
1939         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1940         chcr_send_wr(skb);
1941         return isfull ? -EBUSY : -EINPROGRESS;
1942 unmap:
1943         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1944 err:
1945         chcr_dec_wrcount(dev);
1946         return error;
1947 }
1948
1949 static int chcr_ahash_continue(struct ahash_request *req)
1950 {
1951         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1952         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1953         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1954         struct uld_ctx *u_ctx = NULL;
1955         struct sk_buff *skb;
1956         struct hash_wr_param params;
1957         u8  bs;
1958         int error;
1959
1960         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1961         u_ctx = ULD_CTX(h_ctx(rtfm));
1962         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1963         params.kctx_len = roundup(params.alg_prm.result_size, 16);
1964         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1965                 params.kctx_len *= 2;
1966                 params.opad_needed = 1;
1967         } else {
1968                 params.opad_needed = 0;
1969         }
1970         params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1971                                             HASH_SPACE_LEFT(params.kctx_len),
1972                                             hctx_wr->src_ofst);
1973         if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1974                 params.sg_len = req->nbytes - hctx_wr->processed;
1975         if (!hctx_wr->result ||
1976             ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1977                 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1978                         params.kctx_len /= 2;
1979                         params.opad_needed = 0;
1980                 }
1981                 params.last = 0;
1982                 params.more = 1;
1983                 params.sg_len = rounddown(params.sg_len, bs);
1984                 params.hash_size = params.alg_prm.result_size;
1985                 params.scmd1 = 0;
1986         } else {
1987                 params.last = 1;
1988                 params.more = 0;
1989                 params.hash_size = crypto_ahash_digestsize(rtfm);
1990                 params.scmd1 = reqctx->data_len + params.sg_len;
1991         }
1992         params.bfr_len = 0;
1993         reqctx->data_len += params.sg_len;
1994         skb = create_hash_wr(req, &params);
1995         if (IS_ERR(skb)) {
1996                 error = PTR_ERR(skb);
1997                 goto err;
1998         }
1999         hctx_wr->processed += params.sg_len;
2000         skb->dev = u_ctx->lldi.ports[0];
2001         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
2002         chcr_send_wr(skb);
2003         return 0;
2004 err:
2005         return error;
2006 }
2007
2008 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2009                                           unsigned char *input,
2010                                           int err)
2011 {
2012         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2013         struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2014         int digestsize, updated_digestsize;
2015         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2016         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2017         struct chcr_dev *dev = h_ctx(tfm)->dev;
2018
2019         if (input == NULL)
2020                 goto out;
2021         digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2022         updated_digestsize = digestsize;
2023         if (digestsize == SHA224_DIGEST_SIZE)
2024                 updated_digestsize = SHA256_DIGEST_SIZE;
2025         else if (digestsize == SHA384_DIGEST_SIZE)
2026                 updated_digestsize = SHA512_DIGEST_SIZE;
2027
2028         if (hctx_wr->dma_addr) {
2029                 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2030                                  hctx_wr->dma_len, DMA_TO_DEVICE);
2031                 hctx_wr->dma_addr = 0;
2032         }
2033         if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2034                                  req->nbytes)) {
2035                 if (hctx_wr->result == 1) {
2036                         hctx_wr->result = 0;
2037                         memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2038                                digestsize);
2039                 } else {
2040                         memcpy(reqctx->partial_hash,
2041                                input + sizeof(struct cpl_fw6_pld),
2042                                updated_digestsize);
2043
2044                 }
2045                 goto unmap;
2046         }
2047         memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2048                updated_digestsize);
2049
2050         err = chcr_ahash_continue(req);
2051         if (err)
2052                 goto unmap;
2053         return;
2054 unmap:
2055         if (hctx_wr->is_sg_map)
2056                 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2057
2058
2059 out:
2060         chcr_dec_wrcount(dev);
2061         req->base.complete(&req->base, err);
2062 }
2063
2064 /*
2065  *      chcr_handle_resp - Handle the completion of a crypto request
2066  *      @req: crypto request
2067  */
2068 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2069                          int err)
2070 {
2071         struct crypto_tfm *tfm = req->tfm;
2072         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2073         struct adapter *adap = padap(ctx->dev);
2074
2075         switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2076         case CRYPTO_ALG_TYPE_AEAD:
2077                 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2078                 break;
2079
2080         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2081                  chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2082                                                input, err);
2083                 break;
2084         case CRYPTO_ALG_TYPE_AHASH:
2085                 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2086         }
2087         atomic_inc(&adap->chcr_stats.complete);
2088         return err;
2089 }
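
/* export/import copy the partial digest, the running length and any
 * buffered, not-yet-hashed bytes so that a hash state can be saved and
 * resumed via the standard ahash export()/import() interface.
 */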
2090 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2091 {
2092         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2093         struct chcr_ahash_req_ctx *state = out;
2094
2095         state->reqlen = req_ctx->reqlen;
2096         state->data_len = req_ctx->data_len;
2097         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2098         memcpy(state->partial_hash, req_ctx->partial_hash,
2099                CHCR_HASH_MAX_DIGEST_SIZE);
2100         chcr_init_hctx_per_wr(state);
2101         return 0;
2102 }
2103
2104 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2105 {
2106         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2107         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2108
2109         req_ctx->reqlen = state->reqlen;
2110         req_ctx->data_len = state->data_len;
2111         req_ctx->reqbfr = req_ctx->bfr1;
2112         req_ctx->skbfr = req_ctx->bfr2;
2113         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2114         memcpy(req_ctx->partial_hash, state->partial_hash,
2115                CHCR_HASH_MAX_DIGEST_SIZE);
2116         chcr_init_hctx_per_wr(req_ctx);
2117         return 0;
2118 }
2119
2120 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2121                              unsigned int keylen)
2122 {
2123         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2124         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2125         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2126         unsigned int i, err = 0, updated_digestsize;
2127
2128         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2129
2130         /* Use the key to calculate the ipad and opad. The ipad is sent
2131          * with the first request's data; the opad is sent with the final
2132          * hash result. They are kept in hmacctx->ipad and hmacctx->opad.
2133          */
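        /* This follows the standard HMAC construction,
         *      HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
         * with ipad/opad being the usual 0x36/0x5c byte patterns over one
         * block; only the partial hashes of (K ^ ipad) and (K ^ opad) are
         * kept, and the hardware completes the two passes per request.
         */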
2134         shash->tfm = hmacctx->base_hash;
2135         shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
2136         if (keylen > bs) {
2137                 err = crypto_shash_digest(shash, key, keylen,
2138                                           hmacctx->ipad);
2139                 if (err)
2140                         goto out;
2141                 keylen = digestsize;
2142         } else {
2143                 memcpy(hmacctx->ipad, key, keylen);
2144         }
2145         memset(hmacctx->ipad + keylen, 0, bs - keylen);
2146         memcpy(hmacctx->opad, hmacctx->ipad, bs);
2147
2148         for (i = 0; i < bs / sizeof(int); i++) {
2149                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2150                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2151         }
2152
2153         updated_digestsize = digestsize;
2154         if (digestsize == SHA224_DIGEST_SIZE)
2155                 updated_digestsize = SHA256_DIGEST_SIZE;
2156         else if (digestsize == SHA384_DIGEST_SIZE)
2157                 updated_digestsize = SHA512_DIGEST_SIZE;
2158         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2159                                         hmacctx->ipad, digestsize);
2160         if (err)
2161                 goto out;
2162         chcr_change_order(hmacctx->ipad, updated_digestsize);
2163
2164         err = chcr_compute_partial_hash(shash, hmacctx->opad,
2165                                         hmacctx->opad, digestsize);
2166         if (err)
2167                 goto out;
2168         chcr_change_order(hmacctx->opad, updated_digestsize);
2169 out:
2170         return err;
2171 }
2172
2173 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2174                                unsigned int key_len)
2175 {
2176         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2177         unsigned short context_size = 0;
2178         int err;
2179
2180         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2181         if (err)
2182                 goto badkey_err;
2183
2184         memcpy(ablkctx->key, key, key_len);
2185         ablkctx->enckey_len = key_len;
2186         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2187         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
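        /* An XTS key is two AES keys back to back, so a 32-byte key_len
         * (AES_KEYSIZE_256) means two 128-bit keys and selects
         * CHCR_KEYCTX_CIPHER_KEY_SIZE_128; a 64-byte key selects the
         * 256-bit key size.
         */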
2188         ablkctx->key_ctx_hdr =
2189                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2190                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2191                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2192                                  CHCR_KEYCTX_NO_KEY, 1,
2193                                  0, context_size);
2194         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2195         return 0;
2196 badkey_err:
2197         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2198         ablkctx->enckey_len = 0;
2199
2200         return err;
2201 }
2202
2203 static int chcr_sha_init(struct ahash_request *areq)
2204 {
2205         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2206         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2207         int digestsize =  crypto_ahash_digestsize(tfm);
2208
2209         req_ctx->data_len = 0;
2210         req_ctx->reqlen = 0;
2211         req_ctx->reqbfr = req_ctx->bfr1;
2212         req_ctx->skbfr = req_ctx->bfr2;
2213         copy_hash_init_values(req_ctx->partial_hash, digestsize);
2214
2215         return 0;
2216 }
2217
2218 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2219 {
2220         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2221                                  sizeof(struct chcr_ahash_req_ctx));
2222         return chcr_device_init(crypto_tfm_ctx(tfm));
2223 }
2224
2225 static int chcr_hmac_init(struct ahash_request *areq)
2226 {
2227         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2228         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2229         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2230         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2231         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2232
2233         chcr_sha_init(areq);
2234         req_ctx->data_len = bs;
2235         if (is_hmac(crypto_ahash_tfm(rtfm))) {
2236                 if (digestsize == SHA224_DIGEST_SIZE)
2237                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2238                                SHA256_DIGEST_SIZE);
2239                 else if (digestsize == SHA384_DIGEST_SIZE)
2240                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2241                                SHA512_DIGEST_SIZE);
2242                 else
2243                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
2244                                digestsize);
2245         }
2246         return 0;
2247 }
2248
2249 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2250 {
2251         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2252         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2253         unsigned int digestsize =
2254                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2255
2256         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2257                                  sizeof(struct chcr_ahash_req_ctx));
2258         hmacctx->base_hash = chcr_alloc_shash(digestsize);
2259         if (IS_ERR(hmacctx->base_hash))
2260                 return PTR_ERR(hmacctx->base_hash);
2261         return chcr_device_init(crypto_tfm_ctx(tfm));
2262 }
2263
2264 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2265 {
2266         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2267         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2268
2269         if (hmacctx->base_hash) {
2270                 chcr_free_shash(hmacctx->base_hash);
2271                 hmacctx->base_hash = NULL;
2272         }
2273 }
2274
2275 inline void chcr_aead_common_exit(struct aead_request *req)
2276 {
2277         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2278         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2279         struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2280
2281         chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2282 }
2283
2284 static int chcr_aead_common_init(struct aead_request *req)
2285 {
2286         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2287         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2288         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2289         unsigned int authsize = crypto_aead_authsize(tfm);
2290         int error = -EINVAL;
2291
2292         /* validate key size */
2293         if (aeadctx->enckey_len == 0)
2294                 goto err;
2295         if (reqctx->op && req->cryptlen < authsize)
2296                 goto err;
2297         if (reqctx->b0_len)
2298                 reqctx->scratch_pad = reqctx->iv + IV;
2299         else
2300                 reqctx->scratch_pad = NULL;
2301
2302         error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2303                                   reqctx->op);
2304         if (error) {
2305                 error = -ENOMEM;
2306                 goto err;
2307         }
2308
2309         return 0;
2310 err:
2311         return error;
2312 }
2313
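/* Decide whether an AEAD request must be punted to the software fallback:
 * no cipher payload left after subtracting the tag on decrypt, a destination
 * SG list longer than the DSGL limit, more AAD than the hardware accepts,
 * or a work request that would exceed SGE_MAX_WR_LEN.
 */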
2314 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2315                                    int aadmax, int wrlen,
2316                                    unsigned short op_type)
2317 {
2318         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2319
2320         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2321             dst_nents > MAX_DSGL_ENT ||
2322             (req->assoclen > aadmax) ||
2323             (wrlen > SGE_MAX_WR_LEN))
2324                 return 1;
2325         return 0;
2326 }
2327
2328 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2329 {
2330         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2331         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2332         struct aead_request *subreq = aead_request_ctx(req);
2333
2334         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2335         aead_request_set_callback(subreq, req->base.flags,
2336                                   req->base.complete, req->base.data);
2337         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2338                                  req->iv);
2339         aead_request_set_ad(subreq, req->assoclen);
2340         return op_type ? crypto_aead_decrypt(subreq) :
2341                 crypto_aead_encrypt(subreq);
2342 }
2343
2344 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2345                                          unsigned short qid,
2346                                          int size)
2347 {
2348         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2349         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2350         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2351         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2352         struct sk_buff *skb = NULL;
2353         struct chcr_wr *chcr_req;
2354         struct cpl_rx_phys_dsgl *phys_cpl;
2355         struct ulptx_sgl *ulptx;
2356         unsigned int transhdr_len;
2357         unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2358         unsigned int   kctx_len = 0, dnents, snents;
2359         unsigned int  authsize = crypto_aead_authsize(tfm);
2360         int error = -EINVAL;
2361         u8 *ivptr;
2362         int null = 0;
2363         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2364                 GFP_ATOMIC;
2365         struct adapter *adap = padap(a_ctx(tfm)->dev);
2366
2367         if (req->cryptlen == 0)
2368                 return NULL;
2369
2370         reqctx->b0_len = 0;
2371         error = chcr_aead_common_init(req);
2372         if (error)
2373                 return ERR_PTR(error);
2374
2375         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2376                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2377                 null = 1;
2378         }
2379         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2380                 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2381         dnents += MIN_AUTH_SG;        /* For IV */
2382         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2383                                CHCR_SRC_SG_SIZE, 0);
2384         dst_size = get_space_for_phys_dsgl(dnents);
2385         kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2386                 - sizeof(chcr_req->key_ctx);
2387         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2388         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2389                         SGE_MAX_WR_LEN;
2390         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2391                         : (sgl_len(snents) * 8);
2392         transhdr_len += temp;
2393         transhdr_len = roundup(transhdr_len, 16);
2394
2395         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2396                                     transhdr_len, reqctx->op)) {
2397                 atomic_inc(&adap->chcr_stats.fallback);
2398                 chcr_aead_common_exit(req);
2399                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2400         }
2401         skb = alloc_skb(transhdr_len, flags);
2402         if (!skb) {
2403                 error = -ENOMEM;
2404                 goto err;
2405         }
2406
2407         chcr_req = __skb_put_zero(skb, transhdr_len);
2408
2409         temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2410
2411         /*
2412          * Input order is AAD, IV and payload, where the IV is included
2413          * as part of the authdata. All other fields are filled according
2414          * to the hardware spec.
2415          */
2416         chcr_req->sec_cpl.op_ivinsrtofst =
2417                 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2418         chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2419         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2420                                         null ? 0 : 1 + IV,
2421                                         null ? 0 : IV + req->assoclen,
2422                                         req->assoclen + IV + 1,
2423                                         (temp & 0x1F0) >> 4);
2424         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2425                                         temp & 0xF,
2426                                         null ? 0 : req->assoclen + IV + 1,
2427                                         temp, temp);
2428         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2429             subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2430                 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2431         else
2432                 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2433         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2434                                         (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2435                                         temp,
2436                                         actx->auth_mode, aeadctx->hmac_ctrl,
2437                                         IV >> 1);
2438         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2439                                          0, 0, dst_size);
2440
2441         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2442         if (reqctx->op == CHCR_ENCRYPT_OP ||
2443                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2444                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2445                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2446                        aeadctx->enckey_len);
2447         else
2448                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2449                        aeadctx->enckey_len);
2450
2451         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2452                actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2453         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2454         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2455         ulptx = (struct ulptx_sgl *)(ivptr + IV);
2456         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2457             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2458                 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2459                 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2460                                 CTR_RFC3686_IV_SIZE);
2461                 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2462                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2463         } else {
2464                 memcpy(ivptr, req->iv, IV);
2465         }
2466         chcr_add_aead_dst_ent(req, phys_cpl, qid);
2467         chcr_add_aead_src_ent(req, ulptx);
2468         atomic_inc(&adap->chcr_stats.cipher_rqst);
2469         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2470                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2471         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2472                    transhdr_len, temp, 0);
2473         reqctx->skb = skb;
2474
2475         return skb;
2476 err:
2477         chcr_aead_common_exit(req);
2478
2479         return ERR_PTR(error);
2480 }
2481
2482 int chcr_aead_dma_map(struct device *dev,
2483                       struct aead_request *req,
2484                       unsigned short op_type)
2485 {
2486         int error;
2487         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2488         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2489         unsigned int authsize = crypto_aead_authsize(tfm);
2490         int dst_size;
2491
2492         dst_size = req->assoclen + req->cryptlen + (op_type ?
2493                                 -authsize : authsize);
2494         if (!req->cryptlen || !dst_size)
2495                 return 0;
2496         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2497                                         DMA_BIDIRECTIONAL);
2498         if (dma_mapping_error(dev, reqctx->iv_dma))
2499                 return -ENOMEM;
2500         if (reqctx->b0_len)
2501                 reqctx->b0_dma = reqctx->iv_dma + IV;
2502         else
2503                 reqctx->b0_dma = 0;
2504         if (req->src == req->dst) {
2505                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2506                                    DMA_BIDIRECTIONAL);
2507                 if (!error)
2508                         goto err;
2509         } else {
2510                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2511                                    DMA_TO_DEVICE);
2512                 if (!error)
2513                         goto err;
2514                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2515                                    DMA_FROM_DEVICE);
2516                 if (!error) {
2517                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2518                                    DMA_TO_DEVICE);
2519                         goto err;
2520                 }
2521         }
2522
2523         return 0;
2524 err:
2525         dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
                                             DMA_BIDIRECTIONAL);
2526         return -ENOMEM;
2527 }
2528
2529 void chcr_aead_dma_unmap(struct device *dev,
2530                          struct aead_request *req,
2531                          unsigned short op_type)
2532 {
2533         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2534         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2535         unsigned int authsize = crypto_aead_authsize(tfm);
2536         int dst_size;
2537
2538         dst_size = req->assoclen + req->cryptlen + (op_type ?
2539                                         -authsize : authsize);
2540         if (!req->cryptlen || !dst_size)
2541                 return;
2542
2543         dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2544                                         DMA_BIDIRECTIONAL);
2545         if (req->src == req->dst) {
2546                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2547                                    DMA_BIDIRECTIONAL);
2548         } else {
2549                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2550                                    DMA_TO_DEVICE);
2551                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2552                                    DMA_FROM_DEVICE);
2553         }
2554 }
2555
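/* Source data is either copied inline into the work request (reqctx->imm)
 * or described by a ULPTX scatter/gather list for the hardware to DMA,
 * with the CCM B0 block (when present) placed ahead of the AAD/payload.
 */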
2556 void chcr_add_aead_src_ent(struct aead_request *req,
2557                            struct ulptx_sgl *ulptx)
2558 {
2559         struct ulptx_walk ulp_walk;
2560         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2561
2562         if (reqctx->imm) {
2563                 u8 *buf = (u8 *)ulptx;
2564
2565                 if (reqctx->b0_len) {
2566                         memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2567                         buf += reqctx->b0_len;
2568                 }
2569                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2570                                    buf, req->cryptlen + req->assoclen, 0);
2571         } else {
2572                 ulptx_walk_init(&ulp_walk, ulptx);
2573                 if (reqctx->b0_len)
2574                         ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2575                                             reqctx->b0_dma);
2576                 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2577                                   req->assoclen,  0);
2578                 ulptx_walk_end(&ulp_walk);
2579         }
2580 }
2581
2582 void chcr_add_aead_dst_ent(struct aead_request *req,
2583                            struct cpl_rx_phys_dsgl *phys_cpl,
2584                            unsigned short qid)
2585 {
2586         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2587         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2588         struct dsgl_walk dsgl_walk;
2589         unsigned int authsize = crypto_aead_authsize(tfm);
2590         struct chcr_context *ctx = a_ctx(tfm);
2591         u32 temp;
2592
2593         dsgl_walk_init(&dsgl_walk, phys_cpl);
2594         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2595         temp = req->assoclen + req->cryptlen +
2596                 (reqctx->op ? -authsize : authsize);
2597         dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2598         dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2599 }
2600
2601 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2602                              void *ulptx,
2603                              struct  cipher_wr_param *wrparam)
2604 {
2605         struct ulptx_walk ulp_walk;
2606         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2607         u8 *buf = ulptx;
2608
2609         memcpy(buf, reqctx->iv, IV);
2610         buf += IV;
2611         if (reqctx->imm) {
2612                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2613                                    buf, wrparam->bytes, reqctx->processed);
2614         } else {
2615                 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2616                 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2617                                   reqctx->src_ofst);
2618                 reqctx->srcsg = ulp_walk.last_sg;
2619                 reqctx->src_ofst = ulp_walk.last_sg_len;
2620                 ulptx_walk_end(&ulp_walk);
2621         }
2622 }
2623
2624 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2625                              struct cpl_rx_phys_dsgl *phys_cpl,
2626                              struct  cipher_wr_param *wrparam,
2627                              unsigned short qid)
2628 {
2629         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2630         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2631         struct chcr_context *ctx = c_ctx(tfm);
2632         struct dsgl_walk dsgl_walk;
2633
2634         dsgl_walk_init(&dsgl_walk, phys_cpl);
2635         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2636                          reqctx->dst_ofst);
2637         reqctx->dstsg = dsgl_walk.last_sg;
2638         reqctx->dst_ofst = dsgl_walk.last_sg_len;
2639
2640         dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2641 }
2642
2643 void chcr_add_hash_src_ent(struct ahash_request *req,
2644                            struct ulptx_sgl *ulptx,
2645                            struct hash_wr_param *param)
2646 {
2647         struct ulptx_walk ulp_walk;
2648         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2649
2650         if (reqctx->hctx_wr.imm) {
2651                 u8 *buf = (u8 *)ulptx;
2652
2653                 if (param->bfr_len) {
2654                         memcpy(buf, reqctx->reqbfr, param->bfr_len);
2655                         buf += param->bfr_len;
2656                 }
2657
2658                 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2659                                    sg_nents(reqctx->hctx_wr.srcsg), buf,
2660                                    param->sg_len, 0);
2661         } else {
2662                 ulptx_walk_init(&ulp_walk, ulptx);
2663                 if (param->bfr_len)
2664                         ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2665                                             reqctx->hctx_wr.dma_addr);
2666                 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2667                                   param->sg_len, reqctx->hctx_wr.src_ofst);
2668                 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2669                 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2670                 ulptx_walk_end(&ulp_walk);
2671         }
2672 }
2673
2674 int chcr_hash_dma_map(struct device *dev,
2675                       struct ahash_request *req)
2676 {
2677         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2678         int error = 0;
2679
2680         if (!req->nbytes)
2681                 return 0;
2682         error = dma_map_sg(dev, req->src, sg_nents(req->src),
2683                            DMA_TO_DEVICE);
2684         if (!error)
2685                 return -ENOMEM;
2686         req_ctx->hctx_wr.is_sg_map = 1;
2687         return 0;
2688 }
2689
2690 void chcr_hash_dma_unmap(struct device *dev,
2691                          struct ahash_request *req)
2692 {
2693         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2694
2695         if (!req->nbytes)
2696                 return;
2697
2698         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2699                            DMA_TO_DEVICE);
2700         req_ctx->hctx_wr.is_sg_map = 0;
2701
2702 }
2703
2704 int chcr_cipher_dma_map(struct device *dev,
2705                         struct ablkcipher_request *req)
2706 {
2707         int error;
2708
2709         if (req->src == req->dst) {
2710                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2711                                    DMA_BIDIRECTIONAL);
2712                 if (!error)
2713                         goto err;
2714         } else {
2715                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2716                                    DMA_TO_DEVICE);
2717                 if (!error)
2718                         goto err;
2719                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2720                                    DMA_FROM_DEVICE);
2721                 if (!error) {
2722                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2723                                    DMA_TO_DEVICE);
2724                         goto err;
2725                 }
2726         }
2727
2728         return 0;
2729 err:
2730         return -ENOMEM;
2731 }
2732
2733 void chcr_cipher_dma_unmap(struct device *dev,
2734                            struct ablkcipher_request *req)
2735 {
2736         if (req->src == req->dst) {
2737                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2738                                    DMA_BIDIRECTIONAL);
2739         } else {
2740                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2741                                    DMA_TO_DEVICE);
2742                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2743                                    DMA_FROM_DEVICE);
2744         }
2745 }
2746
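/*
 * Encode msglen big-endian into the trailing octets of a csize-byte
 * length field, as used for the last L octets of the CCM B0 block
 * (RFC 3610).  Fails with -EOVERFLOW if the message does not fit.
 */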
2747 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2748 {
2749         __be32 data;
2750
2751         memset(block, 0, csize);
2752         block += csize;
2753
2754         if (csize >= 4)
2755                 csize = 4;
2756         else if (msglen > (unsigned int)(1 << (8 * csize)))
2757                 return -EOVERFLOW;
2758
2759         data = cpu_to_be32(msglen);
2760         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2761
2762         return 0;
2763 }
2764
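/*
 * Build the CCM B0 block in the request scratch pad from the already
 * formatted counter-block IV: set the flags octet (Adata bit, encoded
 * tag length M, length-field size L) and write the message length into
 * the trailing L octets (RFC 3610, section 2.2).
 */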
2765 static int generate_b0(struct aead_request *req, u8 *ivptr,
2766                         unsigned short op_type)
2767 {
2768         unsigned int l, lp, m;
2770         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2771         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2772         u8 *b0 = reqctx->scratch_pad;
2773
2774         m = crypto_aead_authsize(aead);
2775
2776         memcpy(b0, ivptr, 16);
2777
2778         lp = b0[0];
2779         l = lp + 1;
2780
2781         /* set m, bits 3-5 */
2782         *b0 |= (8 * ((m - 2) / 2));
2783
2784         /* set adata, bit 6, if associated data is used */
2785         if (req->assoclen)
2786                 *b0 |= 64;
2787         return set_msg_len(b0 + 16 - l,
2788                            (op_type == CHCR_DECRYPT_OP) ?
2789                            req->cryptlen - m : req->cryptlen, l);
2790 }
2791
2792 static inline int crypto_ccm_check_iv(const u8 *iv)
2793 {
2794         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2795         if (iv[0] < 1 || iv[0] > 7)
2796                 return -EINVAL;
2797
2798         return 0;
2799 }
2800
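/*
 * Format the 16-byte CCM counter block (A0) at ivptr.  For
 * rfc4309(ccm(aes)) the nonce is the 3-byte salt saved at setkey time
 * followed by the 8-byte per-request IV, with the flags octet fixed to
 * 3 (L' = 3 per RFC 4309); otherwise the caller's 16-byte IV is used
 * as-is.  The AAD length, when present, is stored right after B0 in the
 * scratch pad, and the trailing counter octets are cleared to form A0.
 */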
2801 static int ccm_format_packet(struct aead_request *req,
2802                              u8 *ivptr,
2803                              unsigned int sub_type,
2804                              unsigned short op_type,
2805                              unsigned int assoclen)
2806 {
2807         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2808         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2809         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2810         int rc = 0;
2811
2812         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2813                 ivptr[0] = 3;
2814                 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2815                 memcpy(ivptr + 4, req->iv, 8);
2816                 memset(ivptr + 12, 0, 4);
2817         } else {
2818                 memcpy(ivptr, req->iv, 16);
2819         }
2820         if (assoclen)
2821                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2822                                 htons(assoclen);
2823
2824         rc = generate_b0(req, ivptr, op_type);
2825         /* zero the ctr value */
2826         memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2827         return rc;
2828 }
2829
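/*
 * Fill the SEC_PDU CPL for a CCM request.  Every start/stop offset below
 * is shifted by ccm_xtra, the extra bytes that CCM prepends to the AAD:
 * the 16-byte B0 block plus, when AAD is present, its 2-byte length
 * field.
 */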
2830 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2831                                   unsigned int dst_size,
2832                                   struct aead_request *req,
2833                                   unsigned short op_type)
2834 {
2835         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2836         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2837         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2838         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2839         unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2840         unsigned int ccm_xtra;
2841         unsigned char tag_offset = 0, auth_offset = 0;
2842         unsigned int assoclen;
2843
2844         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2845                 assoclen = req->assoclen - 8;
2846         else
2847                 assoclen = req->assoclen;
2848         ccm_xtra = CCM_B0_SIZE +
2849                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2850
2851         auth_offset = req->cryptlen ?
2852                 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2853         if (op_type == CHCR_DECRYPT_OP) {
2854                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2855                         tag_offset = crypto_aead_authsize(tfm);
2856                 else
2857                         auth_offset = 0;
2858         }
2859
2861         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2862                                          2, 1);
2863         sec_cpl->pldlen =
2864                 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2865         /* For CCM there will be b0 always. So AAD start will be 1 always */
2866         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2867                                 1 + IV, IV + assoclen + ccm_xtra,
2868                                 req->assoclen + IV + 1 + ccm_xtra, 0);
2869
2870         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2871                                         auth_offset, tag_offset,
2872                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2873                                         crypto_aead_authsize(tfm));
2874         sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2875                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2876                                         cipher_mode, mac_mode,
2877                                         aeadctx->hmac_ctrl, IV >> 1);
2878
2879         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2880                                         0, dst_size);
2881 }
2882
2883 static int aead_ccm_validate_input(unsigned short op_type,
2884                                    struct aead_request *req,
2885                                    struct chcr_aead_ctx *aeadctx,
2886                                    unsigned int sub_type)
2887 {
2888         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2889                 if (crypto_ccm_check_iv(req->iv)) {
2890                         pr_err("CCM: IV check fails\n");
2891                         return -EINVAL;
2892                 }
2893         } else {
2894                 if (req->assoclen != 16 && req->assoclen != 20) {
2895                         pr_err("RFC4309: Invalid AAD length %u\n",
2896                                req->assoclen);
2897                         return -EINVAL;
2898                 }
2899         }
2900         return 0;
2901 }
2902
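/*
 * Build the work request for ccm(aes)/rfc4309(ccm(aes)).  The skb is laid
 * out as: chcr_wr (SEC CPL + key context, with the AES key stored twice,
 * matching CCM's use of one key for both CTR encryption and the CBC-MAC) |
 * destination phys DSGL | 16-byte IV | ULPTX SGL or immediate source data.
 * Oversized requests are handed to the software fallback AEAD.
 */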
2903 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2904                                           unsigned short qid,
2905                                           int size)
2906 {
2907         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2908         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2909         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2910         struct sk_buff *skb = NULL;
2911         struct chcr_wr *chcr_req;
2912         struct cpl_rx_phys_dsgl *phys_cpl;
2913         struct ulptx_sgl *ulptx;
2914         unsigned int transhdr_len;
2915         unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2916         unsigned int sub_type, assoclen = req->assoclen;
2917         unsigned int authsize = crypto_aead_authsize(tfm);
2918         int error = -EINVAL;
2919         u8 *ivptr;
2920         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2921                 GFP_ATOMIC;
2922         struct adapter *adap = padap(a_ctx(tfm)->dev);
2923
2924         sub_type = get_aead_subtype(tfm);
2925         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2926                 assoclen -= 8;
2927         reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2928         error = chcr_aead_common_init(req);
2929         if (error)
2930                 return ERR_PTR(error);
2931
2932         error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2933         if (error)
2934                 goto err;
2935         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2936                         + (reqctx->op ? -authsize : authsize),
2937                         CHCR_DST_SG_SIZE, 0);
2938         dnents += MIN_CCM_SG; // For IV and B0
2939         dst_size = get_space_for_phys_dsgl(dnents);
2940         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2941                                CHCR_SRC_SG_SIZE, 0);
2942         snents += MIN_CCM_SG; // For B0
2943         kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2944         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2945         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2946                        reqctx->b0_len) <= SGE_MAX_WR_LEN;
2947         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2948                                      reqctx->b0_len, 16) :
2949                 (sgl_len(snents) *  8);
2950         transhdr_len += temp;
2951         transhdr_len = roundup(transhdr_len, 16);
2952
2953         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2954                                 reqctx->b0_len, transhdr_len, reqctx->op)) {
2955                 atomic_inc(&adap->chcr_stats.fallback);
2956                 chcr_aead_common_exit(req);
2957                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2958         }
2959         skb = alloc_skb(transhdr_len,  flags);
2960
2961         if (!skb) {
2962                 error = -ENOMEM;
2963                 goto err;
2964         }
2965
2966         chcr_req = __skb_put_zero(skb, transhdr_len);
2967
2968         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2969
2970         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2971         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2972         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2973                         aeadctx->key, aeadctx->enckey_len);
2974
2975         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2976         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2977         ulptx = (struct ulptx_sgl *)(ivptr + IV);
2978         error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2979         if (error)
2980                 goto dstmap_fail;
2981         chcr_add_aead_dst_ent(req, phys_cpl, qid);
2982         chcr_add_aead_src_ent(req, ulptx);
2983
2984         atomic_inc(&adap->chcr_stats.aead_rqst);
2985         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2986                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2987                 reqctx->b0_len) : 0);
2988         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2989                     transhdr_len, temp, 0);
2990         reqctx->skb = skb;
2991
2992         return skb;
2993 dstmap_fail:
2994         kfree_skb(skb);
2995 err:
2996         chcr_aead_common_exit(req);
2997         return ERR_PTR(error);
2998 }
2999
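/*
 * Build the work request for gcm(aes)/rfc4106(gcm(aes)).  The layout
 * mirrors create_aead_ccm_wr, except that the key context holds the AES
 * key followed by the GHASH subkey H, and the IV slot carries the
 * initial counter block J0 = salt/IV | 0x00000001 (96-bit IV case).
 */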
3000 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3001                                      unsigned short qid,
3002                                      int size)
3003 {
3004         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3005         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3006         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3007         struct sk_buff *skb = NULL;
3008         struct chcr_wr *chcr_req;
3009         struct cpl_rx_phys_dsgl *phys_cpl;
3010         struct ulptx_sgl *ulptx;
3011         unsigned int transhdr_len, dnents = 0, snents;
3012         unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3013         unsigned int authsize = crypto_aead_authsize(tfm);
3014         int error = -EINVAL;
3015         u8 *ivptr;
3016         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3017                 GFP_ATOMIC;
3018         struct adapter *adap = padap(a_ctx(tfm)->dev);
3019
3020         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3021                 assoclen = req->assoclen - 8;
3022
3023         reqctx->b0_len = 0;
3024         error = chcr_aead_common_init(req);
3025         if (error)
3026                 return ERR_PTR(error);
3027         dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3028                                 (reqctx->op ? -authsize : authsize),
3029                                 CHCR_DST_SG_SIZE, 0);
3030         snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3031                                CHCR_SRC_SG_SIZE, 0);
3032         dnents += MIN_GCM_SG; // For IV
3033         dst_size = get_space_for_phys_dsgl(dnents);
3034         kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3035         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3036         reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3037                         SGE_MAX_WR_LEN;
3038         temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3039                 (sgl_len(snents) * 8);
3040         transhdr_len += temp;
3041         transhdr_len = roundup(transhdr_len, 16);
3042         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3043                             transhdr_len, reqctx->op)) {
3045                 atomic_inc(&adap->chcr_stats.fallback);
3046                 chcr_aead_common_exit(req);
3047                 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3048         }
3049         skb = alloc_skb(transhdr_len, flags);
3050         if (!skb) {
3051                 error = -ENOMEM;
3052                 goto err;
3053         }
3054
3055         chcr_req = __skb_put_zero(skb, transhdr_len);
3056
3057         // Offset of tag from end
3058         temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3059         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3060                                         a_ctx(tfm)->tx_chan_id, 2, 1);
3061         chcr_req->sec_cpl.pldlen =
3062                 htonl(req->assoclen + IV + req->cryptlen);
3063         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3064                                         assoclen ? 1 + IV : 0,
3065                                         assoclen ? IV + assoclen : 0,
3066                                         req->assoclen + IV + 1, 0);
3067         chcr_req->sec_cpl.cipherstop_lo_authinsert =
3068                         FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3069                                                 temp, temp);
3070         chcr_req->sec_cpl.seqno_numivs =
3071                         FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3072                                         CHCR_ENCRYPT_OP) ? 1 : 0,
3073                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
3074                                         CHCR_SCMD_AUTH_MODE_GHASH,
3075                                         aeadctx->hmac_ctrl, IV >> 1);
3076         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3077                                         0, 0, dst_size);
3078         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3079         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3080         memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3081                GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3082
3083         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3084         ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3085         /* Prepare the 16-byte IV: the GCM initial counter block J0 */
3086         /* SALT | IV | 0x00000001 */
3087         if (get_aead_subtype(tfm) ==
3088             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3089                 memcpy(ivptr, aeadctx->salt, 4);
3090                 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3091         } else {
3092                 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3093         }
3094         *((unsigned int *)(ivptr + 12)) = htonl(0x01);
3095
3096         ulptx = (struct ulptx_sgl *)(ivptr + 16);
3097
3098         chcr_add_aead_dst_ent(req, phys_cpl, qid);
3099         chcr_add_aead_src_ent(req, ulptx);
3100         atomic_inc(&adap->chcr_stats.aead_rqst);
3101         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3102                 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3103         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3104                     transhdr_len, temp, reqctx->verify);
3105         reqctx->skb = skb;
3106         return skb;
3107
3108 err:
3109         chcr_aead_common_exit(req);
3110         return ERR_PTR(error);
3111 }
3112
3113
3114
3115 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3116 {
3117         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3118         struct aead_alg *alg = crypto_aead_alg(tfm);
3119
3120         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3121                                                CRYPTO_ALG_NEED_FALLBACK |
3122                                                CRYPTO_ALG_ASYNC);
3123         if  (IS_ERR(aeadctx->sw_cipher))
3124                 return PTR_ERR(aeadctx->sw_cipher);
3125         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3126                                  sizeof(struct aead_request) +
3127                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
3128         return chcr_device_init(a_ctx(tfm));
3129 }
3130
3131 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3132 {
3133         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3134
3135         crypto_free_aead(aeadctx->sw_cipher);
3136 }
3137
3138 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3139                                         unsigned int authsize)
3140 {
3141         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3142
3143         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3144         aeadctx->mayverify = VERIFY_HW;
3145         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3146 }
3147 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3148                                     unsigned int authsize)
3149 {
3150         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3151         u32 maxauth = crypto_aead_maxauthsize(tfm);
3152
3153         /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3154          * does not hold for SHA1, so the authsize == 12 check must come
3155          * before the authsize == (maxauth >> 1) check.
3156          */
3157         if (authsize == ICV_4) {
3158                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3159                 aeadctx->mayverify = VERIFY_HW;
3160         } else if (authsize == ICV_6) {
3161                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3162                 aeadctx->mayverify = VERIFY_HW;
3163         } else if (authsize == ICV_10) {
3164                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3165                 aeadctx->mayverify = VERIFY_HW;
3166         } else if (authsize == ICV_12) {
3167                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3168                 aeadctx->mayverify = VERIFY_HW;
3169         } else if (authsize == ICV_14) {
3170                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3171                 aeadctx->mayverify = VERIFY_HW;
3172         } else if (authsize == (maxauth >> 1)) {
3173                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3174                 aeadctx->mayverify = VERIFY_HW;
3175         } else if (authsize == maxauth) {
3176                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3177                 aeadctx->mayverify = VERIFY_HW;
3178         } else {
3179                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3180                 aeadctx->mayverify = VERIFY_SW;
3181         }
3182         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3183 }
3184
3185
3186 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3187 {
3188         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3189
3190         switch (authsize) {
3191         case ICV_4:
3192                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3193                 aeadctx->mayverify = VERIFY_HW;
3194                 break;
3195         case ICV_8:
3196                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3197                 aeadctx->mayverify = VERIFY_HW;
3198                 break;
3199         case ICV_12:
3200                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3201                 aeadctx->mayverify = VERIFY_HW;
3202                 break;
3203         case ICV_14:
3204                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3205                 aeadctx->mayverify = VERIFY_HW;
3206                 break;
3207         case ICV_16:
3208                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3209                 aeadctx->mayverify = VERIFY_HW;
3210                 break;
3211         case ICV_13:
3212         case ICV_15:
3213                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3214                 aeadctx->mayverify = VERIFY_SW;
3215                 break;
3216         default:
3218                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3219                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3220                 return -EINVAL;
3221         }
3222         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3223 }
3224
3225 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3226                                           unsigned int authsize)
3227 {
3228         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3229
3230         switch (authsize) {
3231         case ICV_8:
3232                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3233                 aeadctx->mayverify = VERIFY_HW;
3234                 break;
3235         case ICV_12:
3236                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3237                 aeadctx->mayverify = VERIFY_HW;
3238                 break;
3239         case ICV_16:
3240                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3241                 aeadctx->mayverify = VERIFY_HW;
3242                 break;
3243         default:
3244                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3245                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3246                 return -EINVAL;
3247         }
3248         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3249 }
3250
3251 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3252                                 unsigned int authsize)
3253 {
3254         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3255
3256         switch (authsize) {
3257         case ICV_4:
3258                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3259                 aeadctx->mayverify = VERIFY_HW;
3260                 break;
3261         case ICV_6:
3262                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3263                 aeadctx->mayverify = VERIFY_HW;
3264                 break;
3265         case ICV_8:
3266                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3267                 aeadctx->mayverify = VERIFY_HW;
3268                 break;
3269         case ICV_10:
3270                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3271                 aeadctx->mayverify = VERIFY_HW;
3272                 break;
3273         case ICV_12:
3274                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3275                 aeadctx->mayverify = VERIFY_HW;
3276                 break;
3277         case ICV_14:
3278                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3279                 aeadctx->mayverify = VERIFY_HW;
3280                 break;
3281         case ICV_16:
3282                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3283                 aeadctx->mayverify = VERIFY_HW;
3284                 break;
3285         default:
3286                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3287                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3288                 return -EINVAL;
3289         }
3290         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3291 }
3292
3293 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3294                                 const u8 *key,
3295                                 unsigned int keylen)
3296 {
3297         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3298         unsigned char ck_size, mk_size;
3299         int key_ctx_size = 0;
3300
3301         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3302         if (keylen == AES_KEYSIZE_128) {
3303                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3304                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3305         } else if (keylen == AES_KEYSIZE_192) {
3306                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3307                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3308         } else if (keylen == AES_KEYSIZE_256) {
3309                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3310                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3311         } else {
3312                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3313                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3314                 aeadctx->enckey_len = 0;
3315                 return  -EINVAL;
3316         }
3317         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3318                                                 key_ctx_size >> 4);
3319         memcpy(aeadctx->key, key, keylen);
3320         aeadctx->enckey_len = keylen;
3321
3322         return 0;
3323 }
3324
3325 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3326                                 const u8 *key,
3327                                 unsigned int keylen)
3328 {
3329         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3330         int error;
3331
3332         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3333         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3334                               CRYPTO_TFM_REQ_MASK);
3335         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3336         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3337         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3338                               CRYPTO_TFM_RES_MASK);
3339         if (error)
3340                 return error;
3341         return chcr_ccm_common_setkey(aead, key, keylen);
3342 }
3343
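/*
 * rfc4309(ccm(aes)): the last three bytes of the key material are the
 * implicit nonce salt (RFC 4309).  Strip them off, keep them for IV
 * construction, and program the remaining bytes as a normal CCM key.
 */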
3344 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3345                                     unsigned int keylen)
3346 {
3347         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3348         int error;
3349
3350         if (keylen < 3) {
3351                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3352                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3353                 aeadctx->enckey_len = 0;
3354                 return  -EINVAL;
3355         }
3356         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3357         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3358                               CRYPTO_TFM_REQ_MASK);
3359         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3360         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3361         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3362                               CRYPTO_TFM_RES_MASK);
3363         if (error)
3364                 return error;
3365         keylen -= 3;
3366         memcpy(aeadctx->salt, key + keylen, 3);
3367         return chcr_ccm_common_setkey(aead, key, keylen);
3368 }
3369
3370 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3371                            unsigned int keylen)
3372 {
3373         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3374         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3375         struct crypto_cipher *cipher;
3376         unsigned int ck_size;
3377         int ret = 0, key_ctx_size = 0;
3378
3379         aeadctx->enckey_len = 0;
3380         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3381         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3382                               & CRYPTO_TFM_REQ_MASK);
3383         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3384         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3385         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3386                               CRYPTO_TFM_RES_MASK);
3387         if (ret)
3388                 goto out;
3389
3390         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3391             keylen > 3) {
3392                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3393                 memcpy(aeadctx->salt, key + keylen, 4);
3394         }
3395         if (keylen == AES_KEYSIZE_128) {
3396                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3397         } else if (keylen == AES_KEYSIZE_192) {
3398                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3399         } else if (keylen == AES_KEYSIZE_256) {
3400                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3401         } else {
3402                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3403                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3404                 pr_err("GCM: Invalid key length %u\n", keylen);
3405                 ret = -EINVAL;
3406                 goto out;
3407         }
3408
3409         memcpy(aeadctx->key, key, keylen);
3410         aeadctx->enckey_len = keylen;
3411         key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3412                 AEAD_H_SIZE;
3413         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3414                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3415                                                 0, 0,
3416                                                 key_ctx_size >> 4);
3417         /* Calculate the GHASH subkey H = CIPH_K(0^128).
3418          * It is stored in the key context after the AES key.
3419          */
3420         cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3421         if (IS_ERR(cipher)) {
3422                 aeadctx->enckey_len = 0;
3423                 ret = -ENOMEM;
3424                 goto out;
3425         }
3426
3427         ret = crypto_cipher_setkey(cipher, key, keylen);
3428         if (ret) {
3429                 aeadctx->enckey_len = 0;
3430                 goto out1;
3431         }
3432         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3433         crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3434
3435 out1:
3436         crypto_free_cipher(cipher);
3437 out:
3438         return ret;
3439 }
3440
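/*
 * authenc(hmac(shaX),cbc/ctr(aes)) setkey.  The cipher key is programmed
 * directly, while for the HMAC side only the partial digests of
 * key XOR ipad and key XOR opad (the usual 0x36/0x5c HMAC pads) are
 * precomputed and kept in h_iopad, so the raw auth key itself is never
 * stored in the key context.
 */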
3441 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3442                                    unsigned int keylen)
3443 {
3444         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3445         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3446         /* the key blob contains both the auth and the cipher key */
3447         struct crypto_authenc_keys keys;
3448         unsigned int bs, subtype;
3449         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3450         int err = 0, i, key_ctx_len = 0;
3451         unsigned char ck_size = 0;
3452         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3453         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3454         struct algo_param param;
3455         int align;
3456         u8 *o_ptr = NULL;
3457
3458         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3459         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3460                               & CRYPTO_TFM_REQ_MASK);
3461         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3462         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3463         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3464                               & CRYPTO_TFM_RES_MASK);
3465         if (err)
3466                 goto out;
3467
3468         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3469                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3470                 goto out;
3471         }
3472
3473         if (get_alg_config(&param, max_authsize)) {
3474                 pr_err("chcr : Unsupported digest size\n");
3475                 goto out;
3476         }
3477         subtype = get_aead_subtype(authenc);
3478         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3479                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3480                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3481                         goto out;
3482                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3483                 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3484                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3485         }
3486         if (keys.enckeylen == AES_KEYSIZE_128) {
3487                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3488         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3489                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3490         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3491                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3492         } else {
3493                 pr_err("chcr : Unsupported cipher key\n");
3494                 goto out;
3495         }
3496
3497         /* Copy only the encryption key. The auth key is only used below to
3498          * derive h(ipad) and h(opad), so it does not need to be stored;
3499          * typically its length equals the hash digest size.
3500          */
3501         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3502         aeadctx->enckey_len = keys.enckeylen;
3503         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3504                 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3505
3506                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3507                             aeadctx->enckey_len << 3);
3508         }
3509         base_hash  = chcr_alloc_shash(max_authsize);
3510         if (IS_ERR(base_hash)) {
3511                 pr_err("chcr : Base driver cannot be loaded\n");
3512                 aeadctx->enckey_len = 0;
3513                 memzero_explicit(&keys, sizeof(keys));
3514                 return -EINVAL;
3515         }
3516         {
3517                 SHASH_DESC_ON_STACK(shash, base_hash);
3518
3519                 shash->tfm = base_hash;
3520                 shash->flags = crypto_shash_get_flags(base_hash);
3521                 bs = crypto_shash_blocksize(base_hash);
3522                 align = KEYCTX_ALIGN_PAD(max_authsize);
3523                 o_ptr =  actx->h_iopad + param.result_size + align;
3524
3525                 if (keys.authkeylen > bs) {
3526                         err = crypto_shash_digest(shash, keys.authkey,
3527                                                   keys.authkeylen,
3528                                                   o_ptr);
3529                         if (err) {
3530                                 pr_err("chcr : Digest of auth key failed\n");
3531                                 goto out;
3532                         }
3533                         keys.authkeylen = max_authsize;
3534                 } else
3535                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
3536
3537                 /* Compute the ipad digest */
3538                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3539                 memcpy(pad, o_ptr, keys.authkeylen);
3540                 for (i = 0; i < bs >> 2; i++)
3541                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3542
3543                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3544                                               max_authsize))
3545                         goto out;
3546                 /* Compute the opad-digest */
3547                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3548                 memcpy(pad, o_ptr, keys.authkeylen);
3549                 for (i = 0; i < bs >> 2; i++)
3550                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3551
3552                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3553                         goto out;
3554
3555                 /* convert the ipad and opad digest to network order */
3556                 chcr_change_order(actx->h_iopad, param.result_size);
3557                 chcr_change_order(o_ptr, param.result_size);
3558                 key_ctx_len = sizeof(struct _key_ctx) +
3559                         roundup(keys.enckeylen, 16) +
3560                         (param.result_size + align) * 2;
3561                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3562                                                 0, 1, key_ctx_len >> 4);
3563                 actx->auth_mode = param.auth_mode;
3564                 chcr_free_shash(base_hash);
3565
3566                 memzero_explicit(&keys, sizeof(keys));
3567                 return 0;
3568         }
3569 out:
3570         aeadctx->enckey_len = 0;
3571         memzero_explicit(&keys, sizeof(keys));
3572         if (!IS_ERR(base_hash))
3573                 chcr_free_shash(base_hash);
3574         return -EINVAL;
3575 }
3576
3577 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3578                                         const u8 *key, unsigned int keylen)
3579 {
3580         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3581         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3582         struct crypto_authenc_keys keys;
3583         int err;
3584         /* the key blob contains both the auth and the cipher key */
3585         unsigned int subtype;
3586         int key_ctx_len = 0;
3587         unsigned char ck_size = 0;
3588
3589         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3590         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3591                               & CRYPTO_TFM_REQ_MASK);
3592         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3593         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3594         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3595                               & CRYPTO_TFM_RES_MASK);
3596         if (err)
3597                 goto out;
3598
3599         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3600                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3601                 goto out;
3602         }
3603         subtype = get_aead_subtype(authenc);
3604         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3605             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3606                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3607                         goto out;
3608                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3609                         - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3610                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3611         }
3612         if (keys.enckeylen == AES_KEYSIZE_128) {
3613                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3614         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3615                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3616         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3617                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3618         } else {
3619                 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3620                 goto out;
3621         }
3622         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3623         aeadctx->enckey_len = keys.enckeylen;
3624         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3625             subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3626                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3627                                 aeadctx->enckey_len << 3);
3628         }
3629         key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3630
3631         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3632                                                 0, key_ctx_len >> 4);
3633         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3634         memzero_explicit(&keys, sizeof(keys));
3635         return 0;
3636 out:
3637         aeadctx->enckey_len = 0;
3638         memzero_explicit(&keys, sizeof(keys));
3639         return -EINVAL;
3640 }
3641
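/*
 * Common AEAD submission path: account the pending work request,
 * back-pressure the caller when the crypto queue is full (-ENOSPC
 * without CRYPTO_TFM_REQ_MAY_BACKLOG, -EBUSY with it), build the WR via
 * create_wr_fn and hand it to the LLD.  Completion is delivered
 * asynchronously, hence -EINPROGRESS on the fast path.
 */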
3642 static int chcr_aead_op(struct aead_request *req,
3643                         int size,
3644                         create_wr_t create_wr_fn)
3645 {
3646         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3647         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3648         struct uld_ctx *u_ctx;
3649         struct sk_buff *skb;
3650         int isfull = 0;
3651         struct chcr_dev *cdev;
3652
3653         cdev = a_ctx(tfm)->dev;
3654         if (!cdev) {
3655                 pr_err("chcr : %s : No crypto device.\n", __func__);
3656                 return -ENXIO;
3657         }
3658
3659         if (chcr_inc_wrcount(cdev)) {
3660                 /* Device is detaching: lldi or padap is already freed, so
3661                  * the fallback counter cannot be incremented here.
3662                  */
3663                 return chcr_aead_fallback(req, reqctx->op);
3664         }
3665
3666         u_ctx = ULD_CTX(a_ctx(tfm));
3667         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3668                                    a_ctx(tfm)->tx_qidx)) {
3669                 isfull = 1;
3670                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3671                         chcr_dec_wrcount(cdev);
3672                         return -ENOSPC;
3673                 }
3674         }
3675
3676         /* Form a WR from req */
3677         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3678
3679         if (IS_ERR(skb) || !skb) {
3680                 chcr_dec_wrcount(cdev);
3681                 return PTR_ERR(skb);
3682         }
3683
3684         skb->dev = u_ctx->lldi.ports[0];
3685         set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3686         chcr_send_wr(skb);
3687         return isfull ? -EBUSY : -EINPROGRESS;
3688 }
3689
3690 static int chcr_aead_encrypt(struct aead_request *req)
3691 {
3692         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3693         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3694
3695         reqctx->verify = VERIFY_HW;
3696         reqctx->op = CHCR_ENCRYPT_OP;
3697
3698         switch (get_aead_subtype(tfm)) {
3699         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3700         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3701         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3702         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3703                 return chcr_aead_op(req, 0, create_authenc_wr);
3704         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3705         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3706                 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3707         default:
3708                 return chcr_aead_op(req, 0, create_gcm_wr);
3709         }
3710 }
3711
3712 static int chcr_aead_decrypt(struct aead_request *req)
3713 {
3714         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3715         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3716         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3717         int size;
3718
3719         if (aeadctx->mayverify == VERIFY_SW) {
3720                 size = crypto_aead_maxauthsize(tfm);
3721                 reqctx->verify = VERIFY_SW;
3722         } else {
3723                 size = 0;
3724                 reqctx->verify = VERIFY_HW;
3725         }
3726         reqctx->op = CHCR_DECRYPT_OP;
3727         switch (get_aead_subtype(tfm)) {
3728         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3729         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3730         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3731         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3732                 return chcr_aead_op(req, size, create_authenc_wr);
3733         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3734         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3735                 return chcr_aead_op(req, size, create_aead_ccm_wr);
3736         default:
3737                 return chcr_aead_op(req, size, create_gcm_wr);
3738         }
3739 }
3740
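/*
 * Algorithm templates registered with the crypto API: AES block cipher
 * modes (cbc, xts, ctr, rfc3686), SHA-1/SHA-2 ahash and HMAC entries,
 * and the AEAD modes (gcm/rfc4106, ccm/rfc4309 and the authenc
 * combinations handled by the request paths above).
 */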
3741 static struct chcr_alg_template driver_algs[] = {
3742         /* AES-CBC */
3743         {
3744                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3745                 .is_registered = 0,
3746                 .alg.crypto = {
3747                         .cra_name               = "cbc(aes)",
3748                         .cra_driver_name        = "cbc-aes-chcr",
3749                         .cra_blocksize          = AES_BLOCK_SIZE,
3750                         .cra_init               = chcr_cra_init,
3751                         .cra_exit               = chcr_cra_exit,
3752                         .cra_u.ablkcipher       = {
3753                                 .min_keysize    = AES_MIN_KEY_SIZE,
3754                                 .max_keysize    = AES_MAX_KEY_SIZE,
3755                                 .ivsize         = AES_BLOCK_SIZE,
3756                                 .setkey                 = chcr_aes_cbc_setkey,
3757                                 .encrypt                = chcr_aes_encrypt,
3758                                 .decrypt                = chcr_aes_decrypt,
3759                         }
3760                 }
3761         },
3762         {
3763                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3764                 .is_registered = 0,
3765                 .alg.crypto =   {
3766                         .cra_name               = "xts(aes)",
3767                         .cra_driver_name        = "xts-aes-chcr",
3768                         .cra_blocksize          = AES_BLOCK_SIZE,
3769                         .cra_init               = chcr_cra_init,
3770                         .cra_exit               = NULL,
3771                         .cra_u.ablkcipher = {
3772                                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
3773                                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
3774                                         .ivsize         = AES_BLOCK_SIZE,
3775                                         .setkey         = chcr_aes_xts_setkey,
3776                                         .encrypt        = chcr_aes_encrypt,
3777                                         .decrypt        = chcr_aes_decrypt,
3778                                 }
3779                         }
3780         },
3781         {
3782                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3783                 .is_registered = 0,
3784                 .alg.crypto = {
3785                         .cra_name               = "ctr(aes)",
3786                         .cra_driver_name        = "ctr-aes-chcr",
3787                         .cra_blocksize          = 1,
3788                         .cra_init               = chcr_cra_init,
3789                         .cra_exit               = chcr_cra_exit,
3790                         .cra_u.ablkcipher       = {
3791                                 .min_keysize    = AES_MIN_KEY_SIZE,
3792                                 .max_keysize    = AES_MAX_KEY_SIZE,
3793                                 .ivsize         = AES_BLOCK_SIZE,
3794                                 .setkey         = chcr_aes_ctr_setkey,
3795                                 .encrypt        = chcr_aes_encrypt,
3796                                 .decrypt        = chcr_aes_decrypt,
3797                         }
3798                 }
3799         },
3800         {
3801                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3802                         CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3803                 .is_registered = 0,
3804                 .alg.crypto = {
3805                         .cra_name               = "rfc3686(ctr(aes))",
3806                         .cra_driver_name        = "rfc3686-ctr-aes-chcr",
3807                         .cra_blocksize          = 1,
3808                         .cra_init               = chcr_rfc3686_init,
3809                         .cra_exit               = chcr_cra_exit,
3810                         .cra_u.ablkcipher       = {
3811                                 .min_keysize    = AES_MIN_KEY_SIZE +
3812                                         CTR_RFC3686_NONCE_SIZE,
3813                                 .max_keysize    = AES_MAX_KEY_SIZE +
3814                                         CTR_RFC3686_NONCE_SIZE,
3815                                 .ivsize         = CTR_RFC3686_IV_SIZE,
3816                                 .setkey         = chcr_aes_rfc3686_setkey,
3817                                 .encrypt        = chcr_aes_encrypt,
3818                                 .decrypt        = chcr_aes_decrypt,
3819                         }
3820                 }
3821         },
3822         /* SHA */
3823         {
3824                 .type = CRYPTO_ALG_TYPE_AHASH,
3825                 .is_registered = 0,
3826                 .alg.hash = {
3827                         .halg.digestsize = SHA1_DIGEST_SIZE,
3828                         .halg.base = {
3829                                 .cra_name = "sha1",
3830                                 .cra_driver_name = "sha1-chcr",
3831                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3832                         }
3833                 }
3834         },
3835         {
3836                 .type = CRYPTO_ALG_TYPE_AHASH,
3837                 .is_registered = 0,
3838                 .alg.hash = {
3839                         .halg.digestsize = SHA256_DIGEST_SIZE,
3840                         .halg.base = {
3841                                 .cra_name = "sha256",
3842                                 .cra_driver_name = "sha256-chcr",
3843                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3844                         }
3845                 }
3846         },
3847         {
3848                 .type = CRYPTO_ALG_TYPE_AHASH,
3849                 .is_registered = 0,
3850                 .alg.hash = {
3851                         .halg.digestsize = SHA224_DIGEST_SIZE,
3852                         .halg.base = {
3853                                 .cra_name = "sha224",
3854                                 .cra_driver_name = "sha224-chcr",
3855                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3856                         }
3857                 }
3858         },
3859         {
3860                 .type = CRYPTO_ALG_TYPE_AHASH,
3861                 .is_registered = 0,
3862                 .alg.hash = {
3863                         .halg.digestsize = SHA384_DIGEST_SIZE,
3864                         .halg.base = {
3865                                 .cra_name = "sha384",
3866                                 .cra_driver_name = "sha384-chcr",
3867                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3868                         }
3869                 }
3870         },
3871         {
3872                 .type = CRYPTO_ALG_TYPE_AHASH,
3873                 .is_registered = 0,
3874                 .alg.hash = {
3875                         .halg.digestsize = SHA512_DIGEST_SIZE,
3876                         .halg.base = {
3877                                 .cra_name = "sha512",
3878                                 .cra_driver_name = "sha512-chcr",
3879                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3880                         }
3881                 }
3882         },
3883         /* HMAC */
3884         {
3885                 .type = CRYPTO_ALG_TYPE_HMAC,
3886                 .is_registered = 0,
3887                 .alg.hash = {
3888                         .halg.digestsize = SHA1_DIGEST_SIZE,
3889                         .halg.base = {
3890                                 .cra_name = "hmac(sha1)",
3891                                 .cra_driver_name = "hmac-sha1-chcr",
3892                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3893                         }
3894                 }
3895         },
3896         {
3897                 .type = CRYPTO_ALG_TYPE_HMAC,
3898                 .is_registered = 0,
3899                 .alg.hash = {
3900                         .halg.digestsize = SHA224_DIGEST_SIZE,
3901                         .halg.base = {
3902                                 .cra_name = "hmac(sha224)",
3903                                 .cra_driver_name = "hmac-sha224-chcr",
3904                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3905                         }
3906                 }
3907         },
3908         {
3909                 .type = CRYPTO_ALG_TYPE_HMAC,
3910                 .is_registered = 0,
3911                 .alg.hash = {
3912                         .halg.digestsize = SHA256_DIGEST_SIZE,
3913                         .halg.base = {
3914                                 .cra_name = "hmac(sha256)",
3915                                 .cra_driver_name = "hmac-sha256-chcr",
3916                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3917                         }
3918                 }
3919         },
3920         {
3921                 .type = CRYPTO_ALG_TYPE_HMAC,
3922                 .is_registered = 0,
3923                 .alg.hash = {
3924                         .halg.digestsize = SHA384_DIGEST_SIZE,
3925                         .halg.base = {
3926                                 .cra_name = "hmac(sha384)",
3927                                 .cra_driver_name = "hmac-sha384-chcr",
3928                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3929                         }
3930                 }
3931         },
3932         {
3933                 .type = CRYPTO_ALG_TYPE_HMAC,
3934                 .is_registered = 0,
3935                 .alg.hash = {
3936                         .halg.digestsize = SHA512_DIGEST_SIZE,
3937                         .halg.base = {
3938                                 .cra_name = "hmac(sha512)",
3939                                 .cra_driver_name = "hmac-sha512-chcr",
3940                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3941                         }
3942                 }
3943         },
3944         /* AEAD algorithms */
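             /*
              * Note on cra_blocksize below: the GCM, CCM and rfc3686(ctr) based
              * entries behave as stream ciphers and advertise a block size of 1;
              * only the cbc(aes) based authenc entries use AES_BLOCK_SIZE.
              */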
3945         {
3946                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3947                 .is_registered = 0,
3948                 .alg.aead = {
3949                         .base = {
3950                                 .cra_name = "gcm(aes)",
3951                                 .cra_driver_name = "gcm-aes-chcr",
3952                                 .cra_blocksize  = 1,
3953                                 .cra_priority = CHCR_AEAD_PRIORITY,
3954                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3955                                                 sizeof(struct chcr_aead_ctx) +
3956                                                 sizeof(struct chcr_gcm_ctx),
3957                         },
3958                         .ivsize = GCM_AES_IV_SIZE,
3959                         .maxauthsize = GHASH_DIGEST_SIZE,
3960                         .setkey = chcr_gcm_setkey,
3961                         .setauthsize = chcr_gcm_setauthsize,
3962                 }
3963         },
3964         {
3965                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3966                 .is_registered = 0,
3967                 .alg.aead = {
3968                         .base = {
3969                                 .cra_name = "rfc4106(gcm(aes))",
3970                                 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3971                                 .cra_blocksize   = 1,
3972                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3973                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3974                                                 sizeof(struct chcr_aead_ctx) +
3975                                                 sizeof(struct chcr_gcm_ctx),
3976
3977                         },
3978                         .ivsize = GCM_RFC4106_IV_SIZE,
3979                         .maxauthsize    = GHASH_DIGEST_SIZE,
3980                         .setkey = chcr_gcm_setkey,
3981                         .setauthsize    = chcr_4106_4309_setauthsize,
3982                 }
3983         },
3984         {
3985                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3986                 .is_registered = 0,
3987                 .alg.aead = {
3988                         .base = {
3989                                 .cra_name = "ccm(aes)",
3990                                 .cra_driver_name = "ccm-aes-chcr",
3991                                 .cra_blocksize   = 1,
3992                                 .cra_priority = CHCR_AEAD_PRIORITY,
3993                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3994                                                 sizeof(struct chcr_aead_ctx),
3995
3996                         },
3997                         .ivsize = AES_BLOCK_SIZE,
3998                         .maxauthsize    = GHASH_DIGEST_SIZE,
3999                         .setkey = chcr_aead_ccm_setkey,
4000                         .setauthsize    = chcr_ccm_setauthsize,
4001                 }
4002         },
4003         {
4004                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4005                 .is_registered = 0,
4006                 .alg.aead = {
4007                         .base = {
4008                                 .cra_name = "rfc4309(ccm(aes))",
4009                                 .cra_driver_name = "rfc4309-ccm-aes-chcr",
4010                                 .cra_blocksize   = 1,
4011                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4012                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4013                                                 sizeof(struct chcr_aead_ctx),
4014
4015                         },
4016                         .ivsize = 8,
4017                         .maxauthsize    = GHASH_DIGEST_SIZE,
4018                         .setkey = chcr_aead_rfc4309_setkey,
4019                         .setauthsize = chcr_4106_4309_setauthsize,
4020                 }
4021         },
4022         {
4023                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4024                 .is_registered = 0,
4025                 .alg.aead = {
4026                         .base = {
4027                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4028                                 .cra_driver_name =
4029                                         "authenc-hmac-sha1-cbc-aes-chcr",
4030                                 .cra_blocksize   = AES_BLOCK_SIZE,
4031                                 .cra_priority = CHCR_AEAD_PRIORITY,
4032                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4033                                                 sizeof(struct chcr_aead_ctx) +
4034                                                 sizeof(struct chcr_authenc_ctx),
4035
4036                         },
4037                         .ivsize = AES_BLOCK_SIZE,
4038                         .maxauthsize = SHA1_DIGEST_SIZE,
4039                         .setkey = chcr_authenc_setkey,
4040                         .setauthsize = chcr_authenc_setauthsize,
4041                 }
4042         },
4043         {
4044                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4045                 .is_registered = 0,
4046                 .alg.aead = {
4047                         .base = {
4048
4049                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4050                                 .cra_driver_name =
4051                                         "authenc-hmac-sha256-cbc-aes-chcr",
4052                                 .cra_blocksize   = AES_BLOCK_SIZE,
4053                                 .cra_priority = CHCR_AEAD_PRIORITY,
4054                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4055                                                 sizeof(struct chcr_aead_ctx) +
4056                                                 sizeof(struct chcr_authenc_ctx),
4057
4058                         },
4059                         .ivsize = AES_BLOCK_SIZE,
4060                         .maxauthsize    = SHA256_DIGEST_SIZE,
4061                         .setkey = chcr_authenc_setkey,
4062                         .setauthsize = chcr_authenc_setauthsize,
4063                 }
4064         },
4065         {
4066                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4067                 .is_registered = 0,
4068                 .alg.aead = {
4069                         .base = {
4070                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4071                                 .cra_driver_name =
4072                                         "authenc-hmac-sha224-cbc-aes-chcr",
4073                                 .cra_blocksize   = AES_BLOCK_SIZE,
4074                                 .cra_priority = CHCR_AEAD_PRIORITY,
4075                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4076                                                 sizeof(struct chcr_aead_ctx) +
4077                                                 sizeof(struct chcr_authenc_ctx),
4078                         },
4079                         .ivsize = AES_BLOCK_SIZE,
4080                         .maxauthsize = SHA224_DIGEST_SIZE,
4081                         .setkey = chcr_authenc_setkey,
4082                         .setauthsize = chcr_authenc_setauthsize,
4083                 }
4084         },
4085         {
4086                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4087                 .is_registered = 0,
4088                 .alg.aead = {
4089                         .base = {
4090                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4091                                 .cra_driver_name =
4092                                         "authenc-hmac-sha384-cbc-aes-chcr",
4093                                 .cra_blocksize   = AES_BLOCK_SIZE,
4094                                 .cra_priority = CHCR_AEAD_PRIORITY,
4095                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4096                                                 sizeof(struct chcr_aead_ctx) +
4097                                                 sizeof(struct chcr_authenc_ctx),
4098
4099                         },
4100                         .ivsize = AES_BLOCK_SIZE,
4101                         .maxauthsize = SHA384_DIGEST_SIZE,
4102                         .setkey = chcr_authenc_setkey,
4103                         .setauthsize = chcr_authenc_setauthsize,
4104                 }
4105         },
4106         {
4107                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4108                 .is_registered = 0,
4109                 .alg.aead = {
4110                         .base = {
4111                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4112                                 .cra_driver_name =
4113                                         "authenc-hmac-sha512-cbc-aes-chcr",
4114                                 .cra_blocksize   = AES_BLOCK_SIZE,
4115                                 .cra_priority = CHCR_AEAD_PRIORITY,
4116                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4117                                                 sizeof(struct chcr_aead_ctx) +
4118                                                 sizeof(struct chcr_authenc_ctx),
4119
4120                         },
4121                         .ivsize = AES_BLOCK_SIZE,
4122                         .maxauthsize = SHA512_DIGEST_SIZE,
4123                         .setkey = chcr_authenc_setkey,
4124                         .setauthsize = chcr_authenc_setauthsize,
4125                 }
4126         },
4127         {
4128                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4129                 .is_registered = 0,
4130                 .alg.aead = {
4131                         .base = {
4132                                 .cra_name = "authenc(digest_null,cbc(aes))",
4133                                 .cra_driver_name =
4134                                         "authenc-digest_null-cbc-aes-chcr",
4135                                 .cra_blocksize   = AES_BLOCK_SIZE,
4136                                 .cra_priority = CHCR_AEAD_PRIORITY,
4137                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4138                                                 sizeof(struct chcr_aead_ctx) +
4139                                                 sizeof(struct chcr_authenc_ctx),
4140
4141                         },
4142                         .ivsize  = AES_BLOCK_SIZE,
4143                         .maxauthsize = 0,
4144                         .setkey  = chcr_aead_digest_null_setkey,
4145                         .setauthsize = chcr_authenc_null_setauthsize,
4146                 }
4147         },
4148         {
4149                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4150                 .is_registered = 0,
4151                 .alg.aead = {
4152                         .base = {
4153                                 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4154                                 .cra_driver_name =
4155                                 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4156                                 .cra_blocksize   = 1,
4157                                 .cra_priority = CHCR_AEAD_PRIORITY,
4158                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4159                                                 sizeof(struct chcr_aead_ctx) +
4160                                                 sizeof(struct chcr_authenc_ctx),
4161
4162                         },
4163                         .ivsize = CTR_RFC3686_IV_SIZE,
4164                         .maxauthsize = SHA1_DIGEST_SIZE,
4165                         .setkey = chcr_authenc_setkey,
4166                         .setauthsize = chcr_authenc_setauthsize,
4167                 }
4168         },
4169         {
4170                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4171                 .is_registered = 0,
4172                 .alg.aead = {
4173                         .base = {
4174
4175                                 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4176                                 .cra_driver_name =
4177                                 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4178                                 .cra_blocksize   = 1,
4179                                 .cra_priority = CHCR_AEAD_PRIORITY,
4180                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4181                                                 sizeof(struct chcr_aead_ctx) +
4182                                                 sizeof(struct chcr_authenc_ctx),
4183
4184                         },
4185                         .ivsize = CTR_RFC3686_IV_SIZE,
4186                         .maxauthsize    = SHA256_DIGEST_SIZE,
4187                         .setkey = chcr_authenc_setkey,
4188                         .setauthsize = chcr_authenc_setauthsize,
4189                 }
4190         },
4191         {
4192                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4193                 .is_registered = 0,
4194                 .alg.aead = {
4195                         .base = {
4196                                 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4197                                 .cra_driver_name =
4198                                 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4199                                 .cra_blocksize   = 1,
4200                                 .cra_priority = CHCR_AEAD_PRIORITY,
4201                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4202                                                 sizeof(struct chcr_aead_ctx) +
4203                                                 sizeof(struct chcr_authenc_ctx),
4204                         },
4205                         .ivsize = CTR_RFC3686_IV_SIZE,
4206                         .maxauthsize = SHA224_DIGEST_SIZE,
4207                         .setkey = chcr_authenc_setkey,
4208                         .setauthsize = chcr_authenc_setauthsize,
4209                 }
4210         },
4211         {
4212                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4213                 .is_registered = 0,
4214                 .alg.aead = {
4215                         .base = {
4216                                 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4217                                 .cra_driver_name =
4218                                 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4219                                 .cra_blocksize   = 1,
4220                                 .cra_priority = CHCR_AEAD_PRIORITY,
4221                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4222                                                 sizeof(struct chcr_aead_ctx) +
4223                                                 sizeof(struct chcr_authenc_ctx),
4224
4225                         },
4226                         .ivsize = CTR_RFC3686_IV_SIZE,
4227                         .maxauthsize = SHA384_DIGEST_SIZE,
4228                         .setkey = chcr_authenc_setkey,
4229                         .setauthsize = chcr_authenc_setauthsize,
4230                 }
4231         },
4232         {
4233                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4234                 .is_registered = 0,
4235                 .alg.aead = {
4236                         .base = {
4237                                 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4238                                 .cra_driver_name =
4239                                 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4240                                 .cra_blocksize   = 1,
4241                                 .cra_priority = CHCR_AEAD_PRIORITY,
4242                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4243                                                 sizeof(struct chcr_aead_ctx) +
4244                                                 sizeof(struct chcr_authenc_ctx),
4245
4246                         },
4247                         .ivsize = CTR_RFC3686_IV_SIZE,
4248                         .maxauthsize = SHA512_DIGEST_SIZE,
4249                         .setkey = chcr_authenc_setkey,
4250                         .setauthsize = chcr_authenc_setauthsize,
4251                 }
4252         },
4253         {
4254                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4255                 .is_registered = 0,
4256                 .alg.aead = {
4257                         .base = {
4258                                 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4259                                 .cra_driver_name =
4260                                 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4261                                 .cra_blocksize   = 1,
4262                                 .cra_priority = CHCR_AEAD_PRIORITY,
4263                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4264                                                 sizeof(struct chcr_aead_ctx) +
4265                                                 sizeof(struct chcr_authenc_ctx),
4266
4267                         },
4268                         .ivsize  = CTR_RFC3686_IV_SIZE,
4269                         .maxauthsize = 0,
4270                         .setkey  = chcr_aead_digest_null_setkey,
4271                         .setauthsize = chcr_authenc_null_setauthsize,
4272                 }
4273         },
4274 };
4275
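     /*
      * The entries above are templates: chcr_register_alg() below fills in
      * the common fields (cra_module, cra_flags and, for the hash and AEAD
      * entries, the operation callbacks) before registering each one with
      * the crypto API.  Once registered, kernel users reach these
      * implementations through the usual crypto API calls; an illustrative
      * sketch only, not part of this driver:
      *
      *      struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
      *
      *      if (!IS_ERR(tfm))
      *              crypto_free_aead(tfm);
      */
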
4276 /*
4277  *      chcr_unregister_alg - Deregister crypto algorithms from the
4278  *      kernel framework.
4279  */
4280 static int chcr_unregister_alg(void)
4281 {
4282         int i;
4283
4284         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4285                 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4286                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4287                         if (driver_algs[i].is_registered)
4288                                 crypto_unregister_alg(
4289                                                 &driver_algs[i].alg.crypto);
4290                         break;
4291                 case CRYPTO_ALG_TYPE_AEAD:
4292                         if (driver_algs[i].is_registered)
4293                                 crypto_unregister_aead(
4294                                                 &driver_algs[i].alg.aead);
4295                         break;
4296                 case CRYPTO_ALG_TYPE_AHASH:
4297                         if (driver_algs[i].is_registered)
4298                                 crypto_unregister_ahash(
4299                                                 &driver_algs[i].alg.hash);
4300                         break;
4301                 }
4302                 driver_algs[i].is_registered = 0;
4303         }
4304         return 0;
4305 }
4306
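     /*
      * Context sizes used when completing the hash templates at
      * registration time: plain SHA transforms carry just the chcr_context,
      * HMAC transforms additionally embed the hmac_ctx holding the derived
      * key state, and the per-request context also serves as the
      * export/import state size (halg.statesize).
      */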
4307 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4308 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4309 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4310
4311 /*
4312  *      chcr_register_alg - Register crypto algorithms with the kernel framework.
4313  */
4314 static int chcr_register_alg(void)
4315 {
4316         struct crypto_alg ai;
4317         struct ahash_alg *a_hash;
4318         int err = 0, i;
4319         char *name = NULL;
4320
4321         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4322                 if (driver_algs[i].is_registered)
4323                         continue;
4324                 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4325                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4326                         driver_algs[i].alg.crypto.cra_priority =
4327                                 CHCR_CRA_PRIORITY;
4328                         driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4329                         driver_algs[i].alg.crypto.cra_flags =
4330                                 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4331                                 CRYPTO_ALG_NEED_FALLBACK;
4332                         driver_algs[i].alg.crypto.cra_ctxsize =
4333                                 sizeof(struct chcr_context) +
4334                                 sizeof(struct ablk_ctx);
4335                         driver_algs[i].alg.crypto.cra_alignmask = 0;
4336                         driver_algs[i].alg.crypto.cra_type =
4337                                 &crypto_ablkcipher_type;
4338                         err = crypto_register_alg(&driver_algs[i].alg.crypto);
4339                         name = driver_algs[i].alg.crypto.cra_driver_name;
4340                         break;
4341                 case CRYPTO_ALG_TYPE_AEAD:
4342                         driver_algs[i].alg.aead.base.cra_flags =
4343                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4344                         driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4345                         driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4346                         driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4347                         driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4348                         driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4349                         err = crypto_register_aead(&driver_algs[i].alg.aead);
4350                         name = driver_algs[i].alg.aead.base.cra_driver_name;
4351                         break;
4352                 case CRYPTO_ALG_TYPE_AHASH:
4353                         a_hash = &driver_algs[i].alg.hash;
4354                         a_hash->update = chcr_ahash_update;
4355                         a_hash->final = chcr_ahash_final;
4356                         a_hash->finup = chcr_ahash_finup;
4357                         a_hash->digest = chcr_ahash_digest;
4358                         a_hash->export = chcr_ahash_export;
4359                         a_hash->import = chcr_ahash_import;
4360                         a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4361                         a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4362                         a_hash->halg.base.cra_module = THIS_MODULE;
4363                         a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4364                         a_hash->halg.base.cra_alignmask = 0;
4365                         a_hash->halg.base.cra_exit = NULL;
4366
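                              /*
                               * Keyed (HMAC) transforms need a setkey handler
                               * and the larger context (SZ_AHASH_H_CTX) that
                               * makes room for the HMAC key state; plain SHA
                               * transforms do not.
                               */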
4367                         if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4368                                 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4369                                 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4370                                 a_hash->init = chcr_hmac_init;
4371                                 a_hash->setkey = chcr_ahash_setkey;
4372                                 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4373                         } else {
4374                                 a_hash->init = chcr_sha_init;
4375                                 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4376                                 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4377                         }
4378                         err = crypto_register_ahash(&driver_algs[i].alg.hash);
4379                         ai = driver_algs[i].alg.hash.halg.base;
4380                         name = ai.cra_driver_name;
4381                         break;
4382                 }
4383                 if (err) {
4384                         pr_err("%s : Algorithm registration failed\n",
4385                                name);
4386                         goto register_err;
4387                 } else {
4388                         driver_algs[i].is_registered = 1;
4389                 }
4390         }
4391         return 0;
4392
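     /*
      * Unwind on failure: chcr_unregister_alg() drops every algorithm that
      * was registered before the failing entry and skips templates whose
      * is_registered flag was never set.
      */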
4393 register_err:
4394         chcr_unregister_alg();
4395         return err;
4396 }
4397
4398 /*
4399  *      start_crypto - Register the crypto algorithms.
4400  *      This should be called once when the first device comes up. After this,
4401  *      the kernel will start calling the driver APIs for crypto operations.
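      *      Calling it again is harmless: chcr_register_alg() skips entries
      *      that are already registered.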
4402  */
4403 int start_crypto(void)
4404 {
4405         return chcr_register_alg();
4406 }
4407
4408 /*
4409  *      stop_crypto - Deregister all the crypto algorithms from the kernel.
4410  *      This should be called once when the last device goes down. After this,
4411  *      the kernel will not call the driver APIs for crypto operations.
4412  */
4413 int stop_crypto(void)
4414 {
4415         chcr_unregister_alg();
4416         return 0;
4417 }