/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
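/*
 * Byte cost of a ULPTX source SGL (sgl_ent_len) and of a PHYS_DSGL
 * destination list (dsgl_ent_len) holding n scatter entries.  These
 * look-up tables appear to serve as worst-case space bounds when deciding
 * how many payload bytes still fit into a single work request (see
 * chcr_sg_ent_in_wr() and chcr_hash_ent_in_wr() below).
 */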
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};
static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};
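/*
 * AES key-schedule round constants (Rcon, FIPS-197), left-aligned in the
 * 32-bit word; consumed by get_aes_decrypt_key() during key expansion.
 */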
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
96 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
97 unsigned char *input, int err);
99 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
101 return ctx->crypto_ctx->aeadctx;
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
106 return ctx->crypto_ctx->ablkctx;
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
111 return ctx->crypto_ctx->hmacctx;
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
116 return gctx->ctx->gcm;
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
121 return gctx->ctx->authenc;
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
126 return container_of(ctx->dev, struct uld_ctx, dev);
129 static inline int is_ofld_imm(const struct sk_buff *skb)
131 return (skb->len <= SGE_MAX_WR_LEN);
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
136 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
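/*
 * sg_nents_xlen() counts how many hardware SG entries are needed to cover
 * reqlen bytes of a DMA-mapped scatterlist: the first `skip` bytes are
 * stepped over, and any element longer than `entlen` is split into
 * multiple entries.
 */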
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
145 unsigned int skip_len = 0;
148 if (sg_dma_len(sg) <= skip) {
149 skip -= sg_dma_len(sg);
158 while (sg && reqlen) {
159 less = min(reqlen, sg_dma_len(sg) - skip_len);
160 nents += DIV_ROUND_UP(less, entlen);
168 static inline int get_aead_subtype(struct crypto_aead *aead)
170 struct aead_alg *alg = crypto_aead_alg(aead);
171 struct chcr_alg_template *chcr_crypto_alg =
172 container_of(alg, struct chcr_alg_template, alg.aead);
173 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
178 u8 temp[SHA512_DIGEST_SIZE];
179 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 int authsize = crypto_aead_authsize(tfm);
181 struct cpl_fw6_pld *fw6_pld;
184 fw6_pld = (struct cpl_fw6_pld *)input;
185 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	int err = 0;

	spin_lock_bh(&dev->lock_chcr_dev);
	if (dev->state == CHCR_DETACH)
		err = 1;
	else
		atomic_inc(&dev->inflight);
	spin_unlock_bh(&dev->lock_chcr_dev);

	return err;
}
216 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
218 atomic_dec(&dev->inflight);
221 static inline int chcr_handle_aead_resp(struct aead_request *req,
222 unsigned char *input,
225 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
226 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
227 struct chcr_dev *dev = a_ctx(tfm)->dev;
229 chcr_aead_common_exit(req);
230 if (reqctx->verify == VERIFY_SW) {
231 chcr_verify_tag(req, input, &err);
232 reqctx->verify = VERIFY_HW;
234 chcr_dec_wrcount(dev);
235 req->base.complete(&req->base, err);
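/*
 * Expand the AES key schedule in software and write the last nk round-key
 * words out in reverse order; this "reverse round key" is what the
 * hardware consumes for AES-CBC decryption (see generate_copy_rrkey()).
 */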
240 static void get_aes_decrypt_key(unsigned char *dec_key,
241 const unsigned char *key,
242 unsigned int keylength)
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
265 for (i = 0; i < nk; i++)
266 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) followed by SubWord and Rcon */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
292 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
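/*
 * Hash exactly one block of ipad/opad material and export the raw shash
 * state; the exported words form the HMAC partial hash that the hardware
 * resumes from.
 */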
317 static int chcr_compute_partial_hash(struct shash_desc *desc,
318 char *iopad, char *result_hash,
321 struct sha1_state sha1_st;
322 struct sha256_state sha256_st;
323 struct sha512_state sha512_st;
326 if (digest_size == SHA1_DIGEST_SIZE) {
327 error = crypto_shash_init(desc) ?:
328 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
329 crypto_shash_export(desc, (void *)&sha1_st);
330 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
331 } else if (digest_size == SHA224_DIGEST_SIZE) {
332 error = crypto_shash_init(desc) ?:
333 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
334 crypto_shash_export(desc, (void *)&sha256_st);
335 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
337 } else if (digest_size == SHA256_DIGEST_SIZE) {
338 error = crypto_shash_init(desc) ?:
339 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
340 crypto_shash_export(desc, (void *)&sha256_st);
341 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
343 } else if (digest_size == SHA384_DIGEST_SIZE) {
344 error = crypto_shash_init(desc) ?:
345 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
346 crypto_shash_export(desc, (void *)&sha512_st);
347 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
349 } else if (digest_size == SHA512_DIGEST_SIZE) {
350 error = crypto_shash_init(desc) ?:
351 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
352 crypto_shash_export(desc, (void *)&sha512_st);
353 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		pr_err("Unknown digest size %d\n", digest_size);
		return -EINVAL;
	}
	return error;
}
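/*
 * The shash state exported above is host-endian, while the firmware
 * expects the partial-hash words big-endian; chcr_change_order() swaps
 * them in place.
 */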
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
376 static inline int is_hmac(struct crypto_tfm *tfm)
378 struct crypto_alg *alg = tfm->__crt_alg;
379 struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
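/*
 * The dsgl_walk helpers below build the CPL_RX_PHYS_DSGL destination
 * gather list one entry at a time; addresses and lengths are packed
 * eight to a phys_sge_pairs block, hence the "% 8" indexing.
 */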
387 static inline void dsgl_walk_init(struct dsgl_walk *walk,
388 struct cpl_rx_phys_dsgl *dsgl)
392 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
395 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
398 struct cpl_rx_phys_dsgl *phys_cpl;
400 phys_cpl = walk->dsgl;
402 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
403 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
404 phys_cpl->pcirlxorder_to_noofsgentr =
405 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
406 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
407 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
408 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
409 CPL_RX_PHYS_DSGL_DCAID_V(0) |
410 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
411 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
412 phys_cpl->rss_hdr_int.qid = htons(qid);
413 phys_cpl->rss_hdr_int.hash_val = 0;
414 phys_cpl->rss_hdr_int.channel = pci_chan_id;
417 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
426 walk->to->len[j % 8] = htons(size);
427 walk->to->addr[j % 8] = cpu_to_be64(addr);
434 static void dsgl_walk_add_sg(struct dsgl_walk *walk,
435 struct scatterlist *sg,
440 unsigned int left_size = slen, len = 0;
441 unsigned int j = walk->nents;
447 if (sg_dma_len(sg) <= skip) {
448 skip -= sg_dma_len(sg);
457 while (left_size && sg) {
458 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
461 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
462 walk->to->len[j % 8] = htons(ent_len);
463 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
472 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
473 skip_len) + skip_len;
474 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
481 static inline void ulptx_walk_init(struct ulptx_walk *walk,
482 struct ulptx_sgl *ulp)
487 walk->pair = ulp->sge;
488 walk->last_sg = NULL;
489 walk->last_sg_len = 0;
492 static inline void ulptx_walk_end(struct ulptx_walk *walk)
494 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
495 ULPTX_NSGE_V(walk->nents));
499 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
506 if (walk->nents == 0) {
507 walk->sgl->len0 = cpu_to_be32(size);
508 walk->sgl->addr0 = cpu_to_be64(addr);
510 walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
511 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
512 walk->pair_idx = !walk->pair_idx;
519 static void ulptx_walk_add_sg(struct ulptx_walk *walk,
520 struct scatterlist *sg,
531 if (sg_dma_len(sg) <= skip) {
532 skip -= sg_dma_len(sg);
540 WARN(!sg, "SG should not be null here\n");
541 if (sg && (walk->nents == 0)) {
542 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
543 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
544 walk->sgl->len0 = cpu_to_be32(sgmin);
545 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
549 walk->last_sg_len = sgmin + skip_len;
551 if (sg_dma_len(sg) == skip_len) {
558 small = min(sg_dma_len(sg) - skip_len, len);
559 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
560 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
561 walk->pair->addr[walk->pair_idx] =
562 cpu_to_be64(sg_dma_address(sg) + skip_len);
563 walk->pair_idx = !walk->pair_idx;
570 walk->last_sg_len = skip_len;
571 if (sg_dma_len(sg) == skip_len) {
578 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
580 struct crypto_alg *alg = tfm->__crt_alg;
581 struct chcr_alg_template *chcr_crypto_alg =
582 container_of(alg, struct chcr_alg_template, alg.crypto);
584 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
587 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
589 struct adapter *adap = netdev2adap(dev);
590 struct sge_uld_txq_info *txq_info =
591 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
592 struct sge_uld_txq *txq;
596 txq = &txq_info->uldtxq[idx];
597 spin_lock(&txq->sendq.lock);
600 spin_unlock(&txq->sendq.lock);
605 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
606 struct _key_ctx *key_ctx)
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
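/*
 * chcr_hash_ent_in_wr() and chcr_sg_ent_in_wr() walk the source (and, for
 * ciphers, the destination) scatterlists and return how many payload
 * bytes fit into the WR space still available, charging each prospective
 * entry with the sgl_ent_len/dsgl_ent_len cost tables above.
 */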
620 static int chcr_hash_ent_in_wr(struct scatterlist *src,
623 unsigned int srcskip)
627 int soffset = 0, sless;
629 if (sg_dma_len(src) == srcskip) {
633 while (src && space > (sgl_ent_len[srcsg + 1])) {
634 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
639 if (sg_dma_len(src) == (soffset + srcskip)) {
648 static int chcr_sg_ent_in_wr(struct scatterlist *src,
649 struct scatterlist *dst,
652 unsigned int srcskip,
653 unsigned int dstskip)
655 int srclen = 0, dstlen = 0;
656 int srcsg = minsg, dstsg = minsg;
657 int offset = 0, soffset = 0, less, sless = 0;
659 if (sg_dma_len(src) == srcskip) {
663 if (sg_dma_len(dst) == dstskip) {
669 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
670 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
675 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
676 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
677 if (srclen <= dstlen)
679 less = min_t(unsigned int, sg_dma_len(dst) - offset -
680 dstskip, CHCR_DST_SG_SIZE);
683 if ((offset + dstskip) == sg_dma_len(dst)) {
691 if ((soffset + srcskip) == sg_dma_len(src)) {
698 return min(srclen, dstlen);
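/*
 * Software fallback: run the request synchronously through the sw_cipher
 * skcipher allocated at init time.  Callers take this path when a chunk
 * split would leave nothing for the hardware (bytes == 0).
 */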
701 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
703 struct scatterlist *src,
704 struct scatterlist *dst,
707 unsigned short op_type)
711 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
713 skcipher_request_set_sync_tfm(subreq, cipher);
714 skcipher_request_set_callback(subreq, flags, NULL, NULL);
715 skcipher_request_set_crypt(subreq, src, dst,
718 err = op_type ? crypto_skcipher_decrypt(subreq) :
719 crypto_skcipher_encrypt(subreq);
720 skcipher_request_zero(subreq);
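/*
 * create_wreq() fills the fields common to every crypto WR: the
 * FW_CRYPTO_LOOKASIDE_WR header, the ULPTX command/destination and the
 * immediate-vs-SGL framing that precedes the CPLs built by the callers.
 */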
725 static inline void create_wreq(struct chcr_context *ctx,
726 struct chcr_wr *chcr_req,
727 struct crypto_async_request *req,
734 struct uld_ctx *u_ctx = ULD_CTX(ctx);
735 int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
738 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
739 chcr_req->wreq.pld_size_hash_size =
740 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
741 chcr_req->wreq.len16_pkd =
742 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
743 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
744 chcr_req->wreq.rx_chid_to_rx_q_id =
745 FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
746 !!lcb, ctx->tx_qidx);
748 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
750 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
751 ((sizeof(chcr_req->wreq)) >> 4)));
753 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
754 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
755 sizeof(chcr_req->key_ctx) + sc_len);
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: bundles the cipher request, the crypto driver context,
 *		  the ingress qid where the response of this WR should be
 *		  received, and the number of bytes to process.
 */
765 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
767 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
768 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
769 struct sk_buff *skb = NULL;
770 struct chcr_wr *chcr_req;
771 struct cpl_rx_phys_dsgl *phys_cpl;
772 struct ulptx_sgl *ulptx;
773 struct chcr_blkcipher_req_ctx *reqctx =
774 ablkcipher_request_ctx(wrparam->req);
775 unsigned int temp = 0, transhdr_len, dst_size;
778 unsigned int kctx_len;
779 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
780 GFP_KERNEL : GFP_ATOMIC;
781 struct adapter *adap = padap(c_ctx(tfm)->dev);
783 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
785 dst_size = get_space_for_phys_dsgl(nents);
786 kctx_len = roundup(ablkctx->enckey_len, 16);
787 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
788 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
789 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
790 temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
791 (sgl_len(nents) * 8);
792 transhdr_len += temp;
793 transhdr_len = roundup(transhdr_len, 16);
794 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
799 chcr_req = __skb_put_zero(skb, transhdr_len);
800 chcr_req->sec_cpl.op_ivinsrtofst =
801 FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
803 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
804 chcr_req->sec_cpl.aadstart_cipherstop_hi =
805 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
807 chcr_req->sec_cpl.cipherstop_lo_authinsert =
808 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
809 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
812 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
815 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
816 if ((reqctx->op == CHCR_DECRYPT_OP) &&
817 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
818 CRYPTO_ALG_SUB_TYPE_CTR)) &&
819 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
820 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
837 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
838 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
839 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
840 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
842 atomic_inc(&adap->chcr_stats.cipher_rqst);
843 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
844 + (reqctx->imm ? (wrparam->bytes) : 0);
845 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
847 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
850 if (reqctx->op && (ablkctx->ciph_mode ==
851 CHCR_SCMD_CIPHER_MODE_AES_CBC))
852 sg_pcopy_to_buffer(wrparam->req->src,
853 sg_nents(wrparam->req->src), wrparam->req->info, 16,
854 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
858 return ERR_PTR(error);
861 static inline int chcr_keyctx_ck_size(unsigned int keylen)
865 if (keylen == AES_KEYSIZE_128)
866 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
867 else if (keylen == AES_KEYSIZE_192)
868 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
869 else if (keylen == AES_KEYSIZE_256)
870 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
876 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
880 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
881 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
884 crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
885 CRYPTO_TFM_REQ_MASK);
886 crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
887 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
888 err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
889 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
891 crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
896 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
900 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
901 unsigned int ck_size, context_size;
905 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
909 ck_size = chcr_keyctx_ck_size(keylen);
910 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
911 memcpy(ablkctx->key, key, keylen);
912 ablkctx->enckey_len = keylen;
913 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
914 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
915 keylen + alignment) >> 4;
917 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
919 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
922 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
923 ablkctx->enckey_len = 0;
928 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
932 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
933 unsigned int ck_size, context_size;
937 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
940 ck_size = chcr_keyctx_ck_size(keylen);
941 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
942 memcpy(ablkctx->key, key, keylen);
943 ablkctx->enckey_len = keylen;
944 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
945 keylen + alignment) >> 4;
947 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
949 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
953 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
954 ablkctx->enckey_len = 0;
959 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
963 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
964 unsigned int ck_size, context_size;
968 if (keylen < CTR_RFC3686_NONCE_SIZE)
970 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
971 CTR_RFC3686_NONCE_SIZE);
973 keylen -= CTR_RFC3686_NONCE_SIZE;
974 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
978 ck_size = chcr_keyctx_ck_size(keylen);
979 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
980 memcpy(ablkctx->key, key, keylen);
981 ablkctx->enckey_len = keylen;
982 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
983 keylen + alignment) >> 4;
985 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
987 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
991 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
992 ablkctx->enckey_len = 0;
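/*
 * Add `add` to the 128-bit big-endian counter in srciv, propagating the
 * carry toward the most significant word (e.g. ...FFFFFFFF + 1 carries
 * one into the next 32-bit word).
 */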
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
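/*
 * Clamp `bytes` so the low 32-bit word of the big-endian counter cannot
 * wrap within a single WR; any remainder is handled by a follow-on WR
 * sent from chcr_handle_cipher_resp() with an updated IV.
 */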
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
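/*
 * Recompute the XTS tweak for the next chunk in software: encrypt the IV
 * with the second key half, then apply one GF(2^128) x-multiplication per
 * block already processed (gf128mul_x8_ble() batches eight of them).
 */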
1027 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1030 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1031 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1032 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1033 struct crypto_cipher *cipher;
1036 unsigned int keylen;
1037 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1038 int round8 = round / 8;
1040 cipher = ablkctx->aes_generic;
1041 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1043 keylen = ablkctx->enckey_len / 2;
1044 key = ablkctx->key + keylen;
1045 ret = crypto_cipher_setkey(cipher, key, keylen);
1048 crypto_cipher_encrypt_one(cipher, iv, iv);
1049 for (i = 0; i < round8; i++)
1050 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1052 for (i = 0; i < (round % 8); i++)
1053 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1056 crypto_cipher_decrypt_one(cipher, iv, iv);
1061 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1062 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1064 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1065 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1066 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1069 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1070 ctr_add_iv(iv, req->info, (reqctx->processed /
1072 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1073 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1074 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1075 AES_BLOCK_SIZE) + 1);
1076 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1077 ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* updated before sending the last WR */
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * stays constant across subsequent update requests.
 */
1095 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1096 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1098 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1099 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1100 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1103 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1104 ctr_add_iv(iv, req->info, (reqctx->processed /
1106 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1107 ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
1118 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1119 unsigned char *input, int err)
1121 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1122 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1123 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1124 struct sk_buff *skb;
1125 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1126 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1127 struct cipher_wr_param wrparam;
1128 struct chcr_dev *dev = c_ctx(tfm)->dev;
1133 if (req->nbytes == reqctx->processed) {
1134 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1136 err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1141 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1142 CIP_SPACE_LEFT(ablkctx->enckey_len),
1143 reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->nbytes - reqctx->processed;
	}
1152 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1156 if (unlikely(bytes == 0)) {
1157 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1159 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1169 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1170 CRYPTO_ALG_SUB_TYPE_CTR)
1171 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1172 wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1174 wrparam.bytes = bytes;
1175 skb = create_cipher_wr(&wrparam);
		pr_err("%s: failed to form WR, no memory\n", __func__);
1181 skb->dev = u_ctx->lldi.ports[0];
1182 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1184 reqctx->last_req_len = bytes;
1185 reqctx->processed += bytes;
1188 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1190 chcr_dec_wrcount(dev);
1191 req->base.complete(&req->base, err);
1195 static int process_cipher(struct ablkcipher_request *req,
1197 struct sk_buff **skb,
1198 unsigned short op_type)
1200 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1201 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1202 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1203 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1204 struct cipher_wr_param wrparam;
1205 int bytes, err = -EINVAL;
1207 reqctx->processed = 0;
1210 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1211 (req->nbytes == 0) ||
1212 (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1213 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1214 ablkctx->enckey_len, req->nbytes, ivsize);
1218 err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1221 if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1223 sizeof(struct cpl_rx_phys_dsgl) +
		/* can be sent as immediate data */
1227 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1229 dnents = sg_nents_xlen(req->dst, req->nbytes,
1230 CHCR_DST_SG_SIZE, 0);
1231 phys_dsgl = get_space_for_phys_dsgl(dnents);
1232 kctx_len = roundup(ablkctx->enckey_len, 16);
1233 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1234 reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1236 bytes = IV + req->nbytes;
1243 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1244 CIP_SPACE_LEFT(ablkctx->enckey_len),
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
1253 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1254 CRYPTO_ALG_SUB_TYPE_CTR) {
1255 bytes = adjust_ctr_overflow(req->info, bytes);
1257 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1258 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1259 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1260 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1261 CTR_RFC3686_IV_SIZE);
1263 /* initialize counter portion of counter block */
1264 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1265 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1269 memcpy(reqctx->iv, req->info, IV);
1271 if (unlikely(bytes == 0)) {
1272 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1274 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1283 reqctx->op = op_type;
1284 reqctx->srcsg = req->src;
1285 reqctx->dstsg = req->dst;
1286 reqctx->src_ofst = 0;
1287 reqctx->dst_ofst = 0;
1290 wrparam.bytes = bytes;
1291 *skb = create_cipher_wr(&wrparam);
1293 err = PTR_ERR(*skb);
1296 reqctx->processed = bytes;
1297 reqctx->last_req_len = bytes;
1301 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1306 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1308 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1309 struct chcr_dev *dev = c_ctx(tfm)->dev;
1310 struct sk_buff *skb = NULL;
1311 int err, isfull = 0;
1312 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1314 err = chcr_inc_wrcount(dev);
1317 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1318 c_ctx(tfm)->tx_qidx))) {
1320 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1326 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1327 &skb, CHCR_ENCRYPT_OP);
1330 skb->dev = u_ctx->lldi.ports[0];
1331 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1333 return isfull ? -EBUSY : -EINPROGRESS;
1335 chcr_dec_wrcount(dev);
1339 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1341 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1342 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1343 struct chcr_dev *dev = c_ctx(tfm)->dev;
1344 struct sk_buff *skb = NULL;
1345 int err, isfull = 0;
1347 err = chcr_inc_wrcount(dev);
1351 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1352 c_ctx(tfm)->tx_qidx))) {
1354 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1358 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1359 &skb, CHCR_DECRYPT_OP);
1362 skb->dev = u_ctx->lldi.ports[0];
1363 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1365 return isfull ? -EBUSY : -EINPROGRESS;
1368 static int chcr_device_init(struct chcr_context *ctx)
1370 struct uld_ctx *u_ctx = NULL;
1371 struct adapter *adap;
1373 int txq_perchan, txq_idx, ntxq;
1374 int err = 0, rxq_perchan, rxq_idx;
1376 id = smp_processor_id();
1378 u_ctx = assign_chcr_device();
		pr_err("chcr device assignment failed\n");
1384 ctx->dev = &u_ctx->dev;
1385 adap = padap(ctx->dev);
1386 ntxq = u_ctx->lldi.ntxq;
1387 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1388 txq_perchan = ntxq / u_ctx->lldi.nchan;
1389 spin_lock(&ctx->dev->lock_chcr_dev);
1390 ctx->tx_chan_id = ctx->dev->tx_channel_id;
1391 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1392 spin_unlock(&ctx->dev->lock_chcr_dev);
1393 rxq_idx = ctx->tx_chan_id * rxq_perchan;
1394 rxq_idx += id % rxq_perchan;
1395 txq_idx = ctx->tx_chan_id * txq_perchan;
1396 txq_idx += id % txq_perchan;
1397 ctx->rx_qidx = rxq_idx;
1398 ctx->tx_qidx = txq_idx;
	/* Channel ID used by the SGE to forward a packet to the host.
	 * The same value should be used by firmware in the RSS_CH field of
	 * cpl_fw6_pld. The driver programs the PCI channel ID to be used
	 * by firmware at queue-allocation time, with the value
	 * "pi->tx_chan".
	 */
1404 ctx->pci_chan_id = txq_idx / txq_perchan;
1410 static int chcr_cra_init(struct crypto_tfm *tfm)
1412 struct crypto_alg *alg = tfm->__crt_alg;
1413 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1414 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1416 ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1417 CRYPTO_ALG_NEED_FALLBACK);
1418 if (IS_ERR(ablkctx->sw_cipher)) {
1419 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1420 return PTR_ERR(ablkctx->sw_cipher);
1423 if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* needed to recompute the XTS tweak */
1425 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1426 if (IS_ERR(ablkctx->aes_generic)) {
1427 pr_err("failed to allocate aes cipher for tweak\n");
1428 return PTR_ERR(ablkctx->aes_generic);
1431 ablkctx->aes_generic = NULL;
1433 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1434 return chcr_device_init(crypto_tfm_ctx(tfm));
1437 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1439 struct crypto_alg *alg = tfm->__crt_alg;
1440 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1441 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp().
	 */
1446 ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1447 CRYPTO_ALG_NEED_FALLBACK);
1448 if (IS_ERR(ablkctx->sw_cipher)) {
1449 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1450 return PTR_ERR(ablkctx->sw_cipher);
1452 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1453 return chcr_device_init(crypto_tfm_ctx(tfm));
1457 static void chcr_cra_exit(struct crypto_tfm *tfm)
1459 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1460 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1462 crypto_free_sync_skcipher(ablkctx->sw_cipher);
1463 if (ablkctx->aes_generic)
1464 crypto_free_cipher(ablkctx->aes_generic);
1467 static int get_alg_config(struct algo_param *params,
1468 unsigned int auth_size)
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
1503 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1505 crypto_free_shash(base_hash);
/**
 *	create_hash_wr - create a hash work request
 *	@req: hash request base
 *	@param: hash WR parameters
 */
1512 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1513 struct hash_wr_param *param)
1515 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1516 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1517 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1518 struct sk_buff *skb = NULL;
1519 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1520 struct chcr_wr *chcr_req;
1521 struct ulptx_sgl *ulptx;
1522 unsigned int nents = 0, transhdr_len;
1523 unsigned int temp = 0;
1524 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1526 struct adapter *adap = padap(h_ctx(tfm)->dev);
1529 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1530 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1531 param->sg_len) <= SGE_MAX_WR_LEN;
1532 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1533 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1534 nents += param->bfr_len ? 1 : 0;
1535 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1536 param->sg_len, 16) : (sgl_len(nents) * 8);
1537 transhdr_len = roundup(transhdr_len, 16);
1539 skb = alloc_skb(transhdr_len, flags);
1541 return ERR_PTR(-ENOMEM);
1542 chcr_req = __skb_put_zero(skb, transhdr_len);
1544 chcr_req->sec_cpl.op_ivinsrtofst =
1545 FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1546 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1548 chcr_req->sec_cpl.aadstart_cipherstop_hi =
1549 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1550 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1551 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1552 chcr_req->sec_cpl.seqno_numivs =
1553 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1554 param->opad_needed, 0);
1556 chcr_req->sec_cpl.ivgen_hdrlen =
1557 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1559 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1560 param->alg_prm.result_size);
1562 if (param->opad_needed)
1563 memcpy(chcr_req->key_ctx.key +
1564 ((param->alg_prm.result_size <= 32) ? 32 :
1565 CHCR_HASH_MAX_DIGEST_SIZE),
1566 hmacctx->opad, param->alg_prm.result_size);
1568 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1569 param->alg_prm.mk_size, 0,
1572 sizeof(chcr_req->key_ctx)) >> 4));
1573 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1574 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1576 if (param->bfr_len != 0) {
1577 req_ctx->hctx_wr.dma_addr =
1578 dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1579 param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1585 req_ctx->hctx_wr.dma_len = param->bfr_len;
1587 req_ctx->hctx_wr.dma_addr = 0;
1589 chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
1591 temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1592 (param->sg_len + param->bfr_len) : 0);
1593 atomic_inc(&adap->chcr_stats.digest_rqst);
1594 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1595 param->hash_size, transhdr_len,
1597 req_ctx->hctx_wr.skb = skb;
1601 return ERR_PTR(error);
1604 static int chcr_ahash_update(struct ahash_request *req)
1606 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1607 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1608 struct uld_ctx *u_ctx = NULL;
1609 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1610 struct sk_buff *skb;
1611 u8 remainder = 0, bs;
1612 unsigned int nbytes = req->nbytes;
1613 struct hash_wr_param params;
1614 int error, isfull = 0;
1616 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1617 u_ctx = ULD_CTX(h_ctx(rtfm));
1619 if (nbytes + req_ctx->reqlen >= bs) {
1620 remainder = (nbytes + req_ctx->reqlen) % bs;
1621 nbytes = nbytes + req_ctx->reqlen - remainder;
1623 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1624 + req_ctx->reqlen, nbytes, 0);
1625 req_ctx->reqlen += nbytes;
1628 error = chcr_inc_wrcount(dev);
	/* CHCR detach state means lldi or padap has been freed; holding an
	 * inflight count on the dev guarantees that lldi and padap remain
	 * valid.
	 */
1634 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1635 h_ctx(rtfm)->tx_qidx))) {
1637 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1643 chcr_init_hctx_per_wr(req_ctx);
1644 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1649 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
1650 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1651 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1652 HASH_SPACE_LEFT(params.kctx_len), 0);
1653 if (params.sg_len > req->nbytes)
1654 params.sg_len = req->nbytes;
1655 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1657 params.opad_needed = 0;
1660 params.bfr_len = req_ctx->reqlen;
1662 req_ctx->hctx_wr.srcsg = req->src;
1664 params.hash_size = params.alg_prm.result_size;
1665 req_ctx->data_len += params.sg_len + params.bfr_len;
1666 skb = create_hash_wr(req, ¶ms);
1668 error = PTR_ERR(skb);
1672 req_ctx->hctx_wr.processed += params.sg_len;
1675 swap(req_ctx->reqbfr, req_ctx->skbfr);
1676 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1677 req_ctx->reqbfr, remainder, req->nbytes -
1680 req_ctx->reqlen = remainder;
1681 skb->dev = u_ctx->lldi.ports[0];
1682 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1685 return isfull ? -EBUSY : -EINPROGRESS;
1687 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1689 chcr_dec_wrcount(dev);
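/*
 * Build the final padding block by hand when there is no residual data: a
 * 0x80 terminator followed by the message length in bits, per the
 * SHA-1/SHA-2 padding rules (length at offset 56 for 64-byte blocks, 120
 * for 128-byte blocks).
 */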
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
1703 static int chcr_ahash_final(struct ahash_request *req)
1705 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1706 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1707 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1708 struct hash_wr_param params;
1709 struct sk_buff *skb;
1710 struct uld_ctx *u_ctx = NULL;
1711 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1712 int error = -EINVAL;
1714 error = chcr_inc_wrcount(dev);
1718 chcr_init_hctx_per_wr(req_ctx);
1719 u_ctx = ULD_CTX(h_ctx(rtfm));
1720 if (is_hmac(crypto_ahash_tfm(rtfm)))
1721 params.opad_needed = 1;
1723 params.opad_needed = 0;
1725 req_ctx->hctx_wr.isfinal = 1;
1726 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
1727 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1728 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1729 params.opad_needed = 1;
1730 params.kctx_len *= 2;
1732 params.opad_needed = 0;
1735 req_ctx->hctx_wr.result = 1;
1736 params.bfr_len = req_ctx->reqlen;
1737 req_ctx->data_len += params.bfr_len + params.sg_len;
1738 req_ctx->hctx_wr.srcsg = req->src;
1739 if (req_ctx->reqlen == 0) {
1740 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1744 params.bfr_len = bs;
1747 params.scmd1 = req_ctx->data_len;
1751 params.hash_size = crypto_ahash_digestsize(rtfm);
1752 skb = create_hash_wr(req, ¶ms);
1754 error = PTR_ERR(skb);
1757 req_ctx->reqlen = 0;
1758 skb->dev = u_ctx->lldi.ports[0];
1759 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1761 return -EINPROGRESS;
1763 chcr_dec_wrcount(dev);
1767 static int chcr_ahash_finup(struct ahash_request *req)
1769 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1770 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1771 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1772 struct uld_ctx *u_ctx = NULL;
1773 struct sk_buff *skb;
1774 struct hash_wr_param params;
1776 int error, isfull = 0;
1778 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1779 u_ctx = ULD_CTX(h_ctx(rtfm));
1780 error = chcr_inc_wrcount(dev);
1784 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1785 h_ctx(rtfm)->tx_qidx))) {
1787 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1792 chcr_init_hctx_per_wr(req_ctx);
1793 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1799 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
1800 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1801 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1802 params.kctx_len *= 2;
1803 params.opad_needed = 1;
1805 params.opad_needed = 0;
1808 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1809 HASH_SPACE_LEFT(params.kctx_len), 0);
1810 if (params.sg_len < req->nbytes) {
1811 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1812 params.kctx_len /= 2;
1813 params.opad_needed = 0;
1817 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1819 params.hash_size = params.alg_prm.result_size;
1824 params.sg_len = req->nbytes;
1825 params.hash_size = crypto_ahash_digestsize(rtfm);
1826 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1829 params.bfr_len = req_ctx->reqlen;
1830 req_ctx->data_len += params.bfr_len + params.sg_len;
1831 req_ctx->hctx_wr.result = 1;
1832 req_ctx->hctx_wr.srcsg = req->src;
1833 if ((req_ctx->reqlen + req->nbytes) == 0) {
1834 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1838 params.bfr_len = bs;
1840 skb = create_hash_wr(req, ¶ms);
1842 error = PTR_ERR(skb);
1845 req_ctx->reqlen = 0;
1846 req_ctx->hctx_wr.processed += params.sg_len;
1847 skb->dev = u_ctx->lldi.ports[0];
1848 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1851 return isfull ? -EBUSY : -EINPROGRESS;
1853 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1855 chcr_dec_wrcount(dev);
1859 static int chcr_ahash_digest(struct ahash_request *req)
1861 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1862 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1863 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1864 struct uld_ctx *u_ctx = NULL;
1865 struct sk_buff *skb;
1866 struct hash_wr_param params;
1868 int error, isfull = 0;
1871 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1872 error = chcr_inc_wrcount(dev);
1876 u_ctx = ULD_CTX(h_ctx(rtfm));
1877 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1878 h_ctx(rtfm)->tx_qidx))) {
1880 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1886 chcr_init_hctx_per_wr(req_ctx);
1887 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1893 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
1894 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1895 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1896 params.kctx_len *= 2;
1897 params.opad_needed = 1;
1899 params.opad_needed = 0;
1901 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1902 HASH_SPACE_LEFT(params.kctx_len), 0);
1903 if (params.sg_len < req->nbytes) {
1904 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1905 params.kctx_len /= 2;
1906 params.opad_needed = 0;
1911 params.sg_len = rounddown(params.sg_len, bs);
1912 params.hash_size = params.alg_prm.result_size;
1914 params.sg_len = req->nbytes;
1915 params.hash_size = crypto_ahash_digestsize(rtfm);
1918 params.scmd1 = req->nbytes + req_ctx->data_len;
1922 req_ctx->hctx_wr.result = 1;
1923 req_ctx->hctx_wr.srcsg = req->src;
1924 req_ctx->data_len += params.bfr_len + params.sg_len;
1926 if (req->nbytes == 0) {
1927 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1929 params.bfr_len = bs;
1932 skb = create_hash_wr(req, ¶ms);
1934 error = PTR_ERR(skb);
1937 req_ctx->hctx_wr.processed += params.sg_len;
1938 skb->dev = u_ctx->lldi.ports[0];
1939 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1941 return isfull ? -EBUSY : -EINPROGRESS;
1943 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1945 chcr_dec_wrcount(dev);
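/*
 * chcr_ahash_continue() runs from the response path when a hash request
 * spans several WRs: it resumes at hctx_wr->processed and issues the next
 * WR from the saved partial hash.
 */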
1949 static int chcr_ahash_continue(struct ahash_request *req)
1951 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1952 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1953 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1954 struct uld_ctx *u_ctx = NULL;
1955 struct sk_buff *skb;
1956 struct hash_wr_param params;
1960 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1961 u_ctx = ULD_CTX(h_ctx(rtfm));
1962 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
1963 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1964 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1965 params.kctx_len *= 2;
1966 params.opad_needed = 1;
1968 params.opad_needed = 0;
1970 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1971 HASH_SPACE_LEFT(params.kctx_len),
1973 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1974 params.sg_len = req->nbytes - hctx_wr->processed;
1975 if (!hctx_wr->result ||
1976 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1977 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1978 params.kctx_len /= 2;
1979 params.opad_needed = 0;
1983 params.sg_len = rounddown(params.sg_len, bs);
1984 params.hash_size = params.alg_prm.result_size;
1989 params.hash_size = crypto_ahash_digestsize(rtfm);
1990 params.scmd1 = reqctx->data_len + params.sg_len;
1993 reqctx->data_len += params.sg_len;
1994 skb = create_hash_wr(req, ¶ms);
1996 error = PTR_ERR(skb);
1999 hctx_wr->processed += params.sg_len;
2000 skb->dev = u_ctx->lldi.ports[0];
2001 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
2008 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2009 unsigned char *input,
2012 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2013 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2014 int digestsize, updated_digestsize;
2015 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2016 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2017 struct chcr_dev *dev = h_ctx(tfm)->dev;
2021 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2022 updated_digestsize = digestsize;
2023 if (digestsize == SHA224_DIGEST_SIZE)
2024 updated_digestsize = SHA256_DIGEST_SIZE;
2025 else if (digestsize == SHA384_DIGEST_SIZE)
2026 updated_digestsize = SHA512_DIGEST_SIZE;
2028 if (hctx_wr->dma_addr) {
2029 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2030 hctx_wr->dma_len, DMA_TO_DEVICE);
2031 hctx_wr->dma_addr = 0;
2033 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2035 if (hctx_wr->result == 1) {
2036 hctx_wr->result = 0;
2037 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2040 memcpy(reqctx->partial_hash,
2041 input + sizeof(struct cpl_fw6_pld),
2042 updated_digestsize);
2047 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2048 updated_digestsize);
2050 err = chcr_ahash_continue(req);
2055 if (hctx_wr->is_sg_map)
2056 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2060 chcr_dec_wrcount(dev);
2061 req->base.complete(&req->base, err);
2065 * chcr_handle_resp - Unmap the DMA buffers associated with the request
2066 * @req: crypto request
2068 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2071 struct crypto_tfm *tfm = req->tfm;
2072 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2073 struct adapter *adap = padap(ctx->dev);
2075 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2076 case CRYPTO_ALG_TYPE_AEAD:
2077 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2080 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2081 chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2084 case CRYPTO_ALG_TYPE_AHASH:
2085 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2087 atomic_inc(&adap->chcr_stats.complete);
2090 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2092 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2093 struct chcr_ahash_req_ctx *state = out;
2095 state->reqlen = req_ctx->reqlen;
2096 state->data_len = req_ctx->data_len;
2097 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2098 memcpy(state->partial_hash, req_ctx->partial_hash,
2099 CHCR_HASH_MAX_DIGEST_SIZE);
2100 chcr_init_hctx_per_wr(state);
2104 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2106 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2107 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2109 req_ctx->reqlen = state->reqlen;
2110 req_ctx->data_len = state->data_len;
2111 req_ctx->reqbfr = req_ctx->bfr1;
2112 req_ctx->skbfr = req_ctx->bfr2;
2113 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2114 memcpy(req_ctx->partial_hash, state->partial_hash,
2115 CHCR_HASH_MAX_DIGEST_SIZE);
2116 chcr_init_hctx_per_wr(req_ctx);
2120 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2121 unsigned int keylen)
2123 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2124 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2125 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2126 unsigned int i, err = 0, updated_digestsize;
2128 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
	/* Use the key to calculate the ipad and opad. ipad will be sent
	 * with the first request's data; opad will be sent with the final
	 * hash result. ipad is kept in hmacctx->ipad and opad in
	 * hmacctx->opad.
	 */
2134 shash->tfm = hmacctx->base_hash;
2135 shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}

	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2148 for (i = 0; i < bs / sizeof(int); i++) {
2149 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2150 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2153 updated_digestsize = digestsize;
2154 if (digestsize == SHA224_DIGEST_SIZE)
2155 updated_digestsize = SHA256_DIGEST_SIZE;
2156 else if (digestsize == SHA384_DIGEST_SIZE)
2157 updated_digestsize = SHA512_DIGEST_SIZE;
2158 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2159 hmacctx->ipad, digestsize);
2162 chcr_change_order(hmacctx->ipad, updated_digestsize);
2164 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2165 hmacctx->opad, digestsize);
2168 chcr_change_order(hmacctx->opad, updated_digestsize);
2173 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2174 unsigned int key_len)
2176 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2177 unsigned short context_size = 0;
2180 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2184 memcpy(ablkctx->key, key, key_len);
2185 ablkctx->enckey_len = key_len;
2186 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2187 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2188 ablkctx->key_ctx_hdr =
2189 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2190 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2191 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2192 CHCR_KEYCTX_NO_KEY, 1,
2194 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2197 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2198 ablkctx->enckey_len = 0;
2203 static int chcr_sha_init(struct ahash_request *areq)
2205 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2206 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2207 int digestsize = crypto_ahash_digestsize(tfm);
2209 req_ctx->data_len = 0;
2210 req_ctx->reqlen = 0;
2211 req_ctx->reqbfr = req_ctx->bfr1;
2212 req_ctx->skbfr = req_ctx->bfr2;
2213 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2218 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2220 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2221 sizeof(struct chcr_ahash_req_ctx));
2222 return chcr_device_init(crypto_tfm_ctx(tfm));
2225 static int chcr_hmac_init(struct ahash_request *areq)
2227 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2228 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2229 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2230 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2231 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2233 chcr_sha_init(areq);
2234 req_ctx->data_len = bs;
2235 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2236 if (digestsize == SHA224_DIGEST_SIZE)
2237 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2238 SHA256_DIGEST_SIZE);
2239 else if (digestsize == SHA384_DIGEST_SIZE)
2240 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2241 SHA512_DIGEST_SIZE);
2243 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2249 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2251 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2252 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2253 unsigned int digestsize =
2254 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2256 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2257 sizeof(struct chcr_ahash_req_ctx));
2258 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2259 if (IS_ERR(hmacctx->base_hash))
2260 return PTR_ERR(hmacctx->base_hash);
2261 return chcr_device_init(crypto_tfm_ctx(tfm));
2264 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2266 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2267 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2269 if (hmacctx->base_hash) {
2270 chcr_free_shash(hmacctx->base_hash);
2271 hmacctx->base_hash = NULL;
2275 inline void chcr_aead_common_exit(struct aead_request *req)
2277 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2278 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2279 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2281 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2284 static int chcr_aead_common_init(struct aead_request *req)
2286 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2287 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2288 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2289 unsigned int authsize = crypto_aead_authsize(tfm);
2290 int error = -EINVAL;
2292 /* validate key size */
2293 if (aeadctx->enckey_len == 0)
2295 if (reqctx->op && req->cryptlen < authsize)
2296 goto err;
2297 if (reqctx->b0_len)
2298 reqctx->scratch_pad = reqctx->iv + IV;
2299 else
2300 reqctx->scratch_pad = NULL;
2302 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2314 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2315 int aadmax, int wrlen,
2316 unsigned short op_type)
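/* Fall back to the software AEAD when the request cannot be expressed
 * as a single hardware work request: an empty payload, more destination
 * SG entries than one DSGL can hold, AAD beyond what the unit accepts,
 * or a work request larger than the SGE allows.
 */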
2318 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2320 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2321 dst_nents > MAX_DSGL_ENT ||
2322 (req->assoclen > aadmax) ||
2323 (wrlen > SGE_MAX_WR_LEN))
2324 return 1;
2325 return 0;
2328 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2330 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2331 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2332 struct aead_request *subreq = aead_request_ctx(req);
2334 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2335 aead_request_set_callback(subreq, req->base.flags,
2336 req->base.complete, req->base.data);
2337 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2339 aead_request_set_ad(subreq, req->assoclen);
2340 return op_type ? crypto_aead_decrypt(subreq) :
2341 crypto_aead_encrypt(subreq);
2344 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2348 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2349 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2350 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2351 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2352 struct sk_buff *skb = NULL;
2353 struct chcr_wr *chcr_req;
2354 struct cpl_rx_phys_dsgl *phys_cpl;
2355 struct ulptx_sgl *ulptx;
2356 unsigned int transhdr_len;
2357 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2358 unsigned int kctx_len = 0, dnents, snents;
2359 unsigned int authsize = crypto_aead_authsize(tfm);
2360 int error = -EINVAL;
2363 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2365 struct adapter *adap = padap(a_ctx(tfm)->dev);
2367 if (req->cryptlen == 0)
2371 error = chcr_aead_common_init(req);
2373 return ERR_PTR(error);
2375 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2376 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2379 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2380 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2381 dnents += MIN_AUTH_SG; // For IV
2382 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2383 CHCR_SRC_SG_SIZE, 0);
2384 dst_size = get_space_for_phys_dsgl(dnents);
2385 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2386 - sizeof(chcr_req->key_ctx);
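/* The CTX_LEN field of the key-context header counts 16-byte units, so
 * shifting left by 4 recovers the total context size in bytes;
 * subtracting the fixed header leaves just the key material length.
 */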
2387 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2388 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2390 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2391 : (sgl_len(snents) * 8);
2392 transhdr_len += temp;
2393 transhdr_len = roundup(transhdr_len, 16);
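/* "Immediate" mode: if the header plus payload fits in one work
 * request, the data is copied inline (rounded up to 16 bytes);
 * otherwise the WR only carries an ULPTX scatter/gather list (8 bytes
 * per entry) and the payload is DMAed from the source SGL.
 */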
2395 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2396 transhdr_len, reqctx->op)) {
2397 atomic_inc(&adap->chcr_stats.fallback);
2398 chcr_aead_common_exit(req);
2399 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2401 skb = alloc_skb(transhdr_len, flags);
2407 chcr_req = __skb_put_zero(skb, transhdr_len);
2409 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2412 * Input order is AAD, IV and Payload, where the IV should be included
2413 * as part of the authdata. All other fields should be filled according
2414 * to the hardware spec.
2416 chcr_req->sec_cpl.op_ivinsrtofst =
2417 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2418 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2419 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2421 null ? 0 : IV + req->assoclen,
2422 req->assoclen + IV + 1,
2423 (temp & 0x1F0) >> 4);
2424 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2426 null ? 0 : req->assoclen + IV + 1,
2428 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2429 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2430 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2432 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2433 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2434 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2436 actx->auth_mode, aeadctx->hmac_ctrl,
2438 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2441 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2442 if (reqctx->op == CHCR_ENCRYPT_OP ||
2443 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2444 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2445 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2446 aeadctx->enckey_len);
2448 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2449 aeadctx->enckey_len);
2451 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2452 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2453 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2454 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2455 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2456 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2457 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2458 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2459 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2460 CTR_RFC3686_IV_SIZE);
2461 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2462 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2464 memcpy(ivptr, req->iv, IV);
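/* The 16-byte AES-CTR counter block for RFC 3686 is laid out as
 * NONCE (4) | per-request IV (8) | big-endian block counter starting
 * at 1; the plain CBC/CTR subtypes use the caller's IV verbatim.
 */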
2466 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2467 chcr_add_aead_src_ent(req, ulptx);
2468 atomic_inc(&adap->chcr_stats.cipher_rqst);
2469 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2470 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2471 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2472 transhdr_len, temp, 0);
2477 chcr_aead_common_exit(req);
2479 return ERR_PTR(error);
2482 int chcr_aead_dma_map(struct device *dev,
2483 struct aead_request *req,
2484 unsigned short op_type)
2487 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2488 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2489 unsigned int authsize = crypto_aead_authsize(tfm);
2492 dst_size = req->assoclen + req->cryptlen + (op_type ?
2493 -authsize : authsize);
2494 if (!req->cryptlen || !dst_size)
2496 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2498 if (dma_mapping_error(dev, reqctx->iv_dma))
2501 reqctx->b0_dma = reqctx->iv_dma + IV;
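/* The IV and the CCM B0 block live contiguously in the request
 * context, so a single mapping covers both and b0_dma simply points
 * IV bytes past iv_dma. b0_len is only non-zero for CCM, which needs
 * the B0 block.
 */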
2504 if (req->src == req->dst) {
2505 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2510 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2514 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2517 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2525 dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len,
2526 DMA_BIDIRECTIONAL);
2529 void chcr_aead_dma_unmap(struct device *dev,
2530 struct aead_request *req,
2531 unsigned short op_type)
2533 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2534 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2535 unsigned int authsize = crypto_aead_authsize(tfm);
2538 dst_size = req->assoclen + req->cryptlen + (op_type ?
2539 -authsize : authsize);
2540 if (!req->cryptlen || !dst_size)
2543 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2545 if (req->src == req->dst) {
2546 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2549 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2551 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2556 void chcr_add_aead_src_ent(struct aead_request *req,
2557 struct ulptx_sgl *ulptx)
2559 struct ulptx_walk ulp_walk;
2560 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2563 u8 *buf = (u8 *)ulptx;
2565 if (reqctx->b0_len) {
2566 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2567 buf += reqctx->b0_len;
2569 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2570 buf, req->cryptlen + req->assoclen, 0);
2572 ulptx_walk_init(&ulp_walk, ulptx);
2574 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2576 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2578 ulptx_walk_end(&ulp_walk);
2582 void chcr_add_aead_dst_ent(struct aead_request *req,
2583 struct cpl_rx_phys_dsgl *phys_cpl,
2586 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2587 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2588 struct dsgl_walk dsgl_walk;
2589 unsigned int authsize = crypto_aead_authsize(tfm);
2590 struct chcr_context *ctx = a_ctx(tfm);
2593 dsgl_walk_init(&dsgl_walk, phys_cpl);
2594 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2595 temp = req->assoclen + req->cryptlen +
2596 (reqctx->op ? -authsize : authsize);
2597 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2598 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2601 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2603 struct cipher_wr_param *wrparam)
2605 struct ulptx_walk ulp_walk;
2606 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2609 memcpy(buf, reqctx->iv, IV);
2612 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2613 buf, wrparam->bytes, reqctx->processed);
2615 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2616 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2618 reqctx->srcsg = ulp_walk.last_sg;
2619 reqctx->src_ofst = ulp_walk.last_sg_len;
2620 ulptx_walk_end(&ulp_walk);
2624 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2625 struct cpl_rx_phys_dsgl *phys_cpl,
2626 struct cipher_wr_param *wrparam,
2629 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2630 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2631 struct chcr_context *ctx = c_ctx(tfm);
2632 struct dsgl_walk dsgl_walk;
2634 dsgl_walk_init(&dsgl_walk, phys_cpl);
2635 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2637 reqctx->dstsg = dsgl_walk.last_sg;
2638 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2640 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2643 void chcr_add_hash_src_ent(struct ahash_request *req,
2644 struct ulptx_sgl *ulptx,
2645 struct hash_wr_param *param)
2647 struct ulptx_walk ulp_walk;
2648 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2650 if (reqctx->hctx_wr.imm) {
2651 u8 *buf = (u8 *)ulptx;
2653 if (param->bfr_len) {
2654 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2655 buf += param->bfr_len;
2658 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2659 sg_nents(reqctx->hctx_wr.srcsg), buf,
2662 ulptx_walk_init(&ulp_walk, ulptx);
2664 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2665 reqctx->hctx_wr.dma_addr);
2666 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2667 param->sg_len, reqctx->hctx_wr.src_ofst);
2668 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2669 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2670 ulptx_walk_end(&ulp_walk);
2674 int chcr_hash_dma_map(struct device *dev,
2675 struct ahash_request *req)
2677 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2682 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2686 req_ctx->hctx_wr.is_sg_map = 1;
2690 void chcr_hash_dma_unmap(struct device *dev,
2691 struct ahash_request *req)
2693 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2698 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2700 req_ctx->hctx_wr.is_sg_map = 0;
2704 int chcr_cipher_dma_map(struct device *dev,
2705 struct ablkcipher_request *req)
2709 if (req->src == req->dst) {
2710 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2715 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2719 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2722 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2733 void chcr_cipher_dma_unmap(struct device *dev,
2734 struct ablkcipher_request *req)
2736 if (req->src == req->dst) {
2737 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2740 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2742 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2747 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2751 memset(block, 0, csize);
2756 else if (msglen > (unsigned int)(1 << (8 * csize)))
2759 data = cpu_to_be32(msglen);
2760 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
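/* The CCM length field is the message length as a big-endian integer
 * occupying the last csize bytes of the block: only the low csize
 * bytes of the 32-bit value are copied, after checking that the
 * length actually fits in that many bytes.
 */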
2765 static void generate_b0(struct aead_request *req, u8 *ivptr,
2766 unsigned short op_type)
2768 unsigned int l, lp, m;
2770 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2771 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2772 u8 *b0 = reqctx->scratch_pad;
2774 m = crypto_aead_authsize(aead);
2776 memcpy(b0, ivptr, 16);
2781 /* set m, bits 3-5 */
2782 *b0 |= (8 * ((m - 2) / 2));
2784 /* set adata, bit 6, if associated data is used */
2787 rc = set_msg_len(b0 + 16 - l,
2788 (op_type == CHCR_DECRYPT_OP) ?
2789 req->cryptlen - m : req->cryptlen, l);
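/* B0 layout per RFC 3610: the flags byte carries L' in bits 0-2
 * (inherited from the IV), (M - 2) / 2 in bits 3-5 and the Adata flag
 * in bit 6; it is followed by the nonce and, in the last L bytes, the
 * message length (cryptlen, minus the tag when decrypting).
 */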
2792 static inline int crypto_ccm_check_iv(const u8 *iv)
2794 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2795 if (iv[0] < 1 || iv[0] > 7)
2801 static int ccm_format_packet(struct aead_request *req,
2803 unsigned int sub_type,
2804 unsigned short op_type,
2805 unsigned int assoclen)
2807 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2808 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2809 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2812 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2814 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2815 memcpy(ivptr + 4, req->iv, 8);
2816 memset(ivptr + 12, 0, 4);
2818 memcpy(ivptr, req->iv, 16);
2821 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2822 htons(assoclen);
2824 generate_b0(req, ivptr, op_type);
2825 /* zero the ctr value */
2826 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
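/* A0, the first counter block, must start at zero: clear the trailing
 * L = L' + 1 bytes of the IV (the counter field) so CTR mode begins at
 * block 0, whose keystream encrypts the CBC-MAC tag.
 */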
2830 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2831 unsigned int dst_size,
2832 struct aead_request *req,
2833 unsigned short op_type)
2835 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2836 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2837 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2838 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2839 unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2840 unsigned int ccm_xtra;
2841 unsigned char tag_offset = 0, auth_offset = 0;
2842 unsigned int assoclen;
2844 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2845 assoclen = req->assoclen - 8;
2847 assoclen = req->assoclen;
2848 ccm_xtra = CCM_B0_SIZE +
2849 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2851 auth_offset = req->cryptlen ?
2852 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2853 if (op_type == CHCR_DECRYPT_OP) {
2854 if (crypto_aead_authsize(tfm) != req->cryptlen)
2855 tag_offset = crypto_aead_authsize(tfm);
2861 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2864 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2865 /* For CCM there will always be a B0 block, so AAD start will always be 1 */
2866 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2867 1 + IV, IV + assoclen + ccm_xtra,
2868 req->assoclen + IV + 1 + ccm_xtra, 0);
2870 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2871 auth_offset, tag_offset,
2872 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2873 crypto_aead_authsize(tfm));
2874 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2875 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2876 cipher_mode, mac_mode,
2877 aeadctx->hmac_ctrl, IV >> 1);
2879 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2883 static int aead_ccm_validate_input(unsigned short op_type,
2884 struct aead_request *req,
2885 struct chcr_aead_ctx *aeadctx,
2886 unsigned int sub_type)
2888 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2889 if (crypto_ccm_check_iv(req->iv)) {
2890 pr_err("CCM: IV check fails\n");
2894 if (req->assoclen != 16 && req->assoclen != 20) {
2895 pr_err("RFC4309: Invalid AAD length %d\n",
2903 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2907 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2908 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2909 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2910 struct sk_buff *skb = NULL;
2911 struct chcr_wr *chcr_req;
2912 struct cpl_rx_phys_dsgl *phys_cpl;
2913 struct ulptx_sgl *ulptx;
2914 unsigned int transhdr_len;
2915 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2916 unsigned int sub_type, assoclen = req->assoclen;
2917 unsigned int authsize = crypto_aead_authsize(tfm);
2918 int error = -EINVAL;
2920 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2922 struct adapter *adap = padap(a_ctx(tfm)->dev);
2924 sub_type = get_aead_subtype(tfm);
2925 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2927 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2928 error = chcr_aead_common_init(req);
2930 return ERR_PTR(error);
2932 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2935 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2936 + (reqctx->op ? -authsize : authsize),
2937 CHCR_DST_SG_SIZE, 0);
2938 dnents += MIN_CCM_SG; // For IV and B0
2939 dst_size = get_space_for_phys_dsgl(dnents);
2940 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2941 CHCR_SRC_SG_SIZE, 0);
2942 snents += MIN_CCM_SG; // For B0
2943 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2944 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2945 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2946 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2947 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2948 reqctx->b0_len, 16) :
2949 (sgl_len(snents) * 8);
2950 transhdr_len += temp;
2951 transhdr_len = roundup(transhdr_len, 16);
2953 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2954 reqctx->b0_len, transhdr_len, reqctx->op)) {
2955 atomic_inc(&adap->chcr_stats.fallback);
2956 chcr_aead_common_exit(req);
2957 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2959 skb = alloc_skb(transhdr_len, flags);
2966 chcr_req = __skb_put_zero(skb, transhdr_len);
2968 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2970 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2971 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2972 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2973 aeadctx->key, aeadctx->enckey_len);
2975 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2976 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2977 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2978 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2981 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2982 chcr_add_aead_src_ent(req, ulptx);
2984 atomic_inc(&adap->chcr_stats.aead_rqst);
2985 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2986 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2987 reqctx->b0_len) : 0);
2988 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2989 transhdr_len, temp, 0);
2996 chcr_aead_common_exit(req);
2997 return ERR_PTR(error);
3000 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3004 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3005 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3006 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3007 struct sk_buff *skb = NULL;
3008 struct chcr_wr *chcr_req;
3009 struct cpl_rx_phys_dsgl *phys_cpl;
3010 struct ulptx_sgl *ulptx;
3011 unsigned int transhdr_len, dnents = 0, snents;
3012 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3013 unsigned int authsize = crypto_aead_authsize(tfm);
3014 int error = -EINVAL;
3016 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3018 struct adapter *adap = padap(a_ctx(tfm)->dev);
3020 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3021 assoclen = req->assoclen - 8;
3024 error = chcr_aead_common_init(req);
3026 return ERR_PTR(error);
3027 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3028 (reqctx->op ? -authsize : authsize),
3029 CHCR_DST_SG_SIZE, 0);
3030 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3031 CHCR_SRC_SG_SIZE, 0);
3032 dnents += MIN_GCM_SG; // For IV
3033 dst_size = get_space_for_phys_dsgl(dnents);
3034 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3035 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3036 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3038 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3039 (sgl_len(snents) * 8);
3040 transhdr_len += temp;
3041 transhdr_len = roundup(transhdr_len, 16);
3042 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3043 transhdr_len, reqctx->op)) {
3045 atomic_inc(&adap->chcr_stats.fallback);
3046 chcr_aead_common_exit(req);
3047 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3049 skb = alloc_skb(transhdr_len, flags);
3055 chcr_req = __skb_put_zero(skb, transhdr_len);
3057 // Offset of the tag from the end
3058 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3059 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3060 a_ctx(tfm)->tx_chan_id, 2, 1);
3061 chcr_req->sec_cpl.pldlen =
3062 htonl(req->assoclen + IV + req->cryptlen);
3063 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3064 assoclen ? 1 + IV : 0,
3065 assoclen ? IV + assoclen : 0,
3066 req->assoclen + IV + 1, 0);
3067 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3068 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3070 chcr_req->sec_cpl.seqno_numivs =
3071 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3072 CHCR_ENCRYPT_OP) ? 1 : 0,
3073 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3074 CHCR_SCMD_AUTH_MODE_GHASH,
3075 aeadctx->hmac_ctrl, IV >> 1);
3076 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3078 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3079 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3080 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3081 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3083 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3084 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3085 /* Prepare a 16-byte IV: SALT | IV | 0x00000001 */
3087 if (get_aead_subtype(tfm) ==
3088 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3089 memcpy(ivptr, aeadctx->salt, 4);
3090 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3092 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3094 *((unsigned int *)(ivptr + 12)) = htonl(0x01);
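/* With a 96-bit IV, GCM defines the initial counter block as
 * J0 = IV || 0^31 || 1, hence the final word is set to big-endian 1;
 * the counter blocks for the payload then start from inc32(J0).
 */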
3096 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3098 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3099 chcr_add_aead_src_ent(req, ulptx);
3100 atomic_inc(&adap->chcr_stats.aead_rqst);
3101 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3102 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3103 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3104 transhdr_len, temp, reqctx->verify);
3109 chcr_aead_common_exit(req);
3110 return ERR_PTR(error);
3115 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3117 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3118 struct aead_alg *alg = crypto_aead_alg(tfm);
3120 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3121 CRYPTO_ALG_NEED_FALLBACK |
3123 if (IS_ERR(aeadctx->sw_cipher))
3124 return PTR_ERR(aeadctx->sw_cipher);
3125 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3126 sizeof(struct aead_request) +
3127 crypto_aead_reqsize(aeadctx->sw_cipher)));
3128 return chcr_device_init(a_ctx(tfm));
3131 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3133 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3135 crypto_free_aead(aeadctx->sw_cipher);
3138 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3139 unsigned int authsize)
3141 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3143 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3144 aeadctx->mayverify = VERIFY_HW;
3145 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3147 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3148 unsigned int authsize)
3150 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3151 u32 maxauth = crypto_aead_maxauthsize(tfm);
3153 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
3154 * not hold for SHA1. The authsize == 12 check must therefore come before
3155 * the authsize == (maxauth >> 1) check.
3157 if (authsize == ICV_4) {
3158 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3159 aeadctx->mayverify = VERIFY_HW;
3160 } else if (authsize == ICV_6) {
3161 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3162 aeadctx->mayverify = VERIFY_HW;
3163 } else if (authsize == ICV_10) {
3164 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3165 aeadctx->mayverify = VERIFY_HW;
3166 } else if (authsize == ICV_12) {
3167 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3168 aeadctx->mayverify = VERIFY_HW;
3169 } else if (authsize == ICV_14) {
3170 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3171 aeadctx->mayverify = VERIFY_HW;
3172 } else if (authsize == (maxauth >> 1)) {
3173 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3174 aeadctx->mayverify = VERIFY_HW;
3175 } else if (authsize == maxauth) {
3176 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3177 aeadctx->mayverify = VERIFY_HW;
3179 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3180 aeadctx->mayverify = VERIFY_SW;
3182 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3186 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3188 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3192 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3193 aeadctx->mayverify = VERIFY_HW;
3196 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3197 aeadctx->mayverify = VERIFY_HW;
3200 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3201 aeadctx->mayverify = VERIFY_HW;
3204 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3205 aeadctx->mayverify = VERIFY_HW;
3208 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3209 aeadctx->mayverify = VERIFY_HW;
3213 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3214 aeadctx->mayverify = VERIFY_SW;
3218 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3219 CRYPTO_TFM_RES_BAD_KEY_LEN);
3222 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3225 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3226 unsigned int authsize)
3228 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3232 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3233 aeadctx->mayverify = VERIFY_HW;
3236 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3237 aeadctx->mayverify = VERIFY_HW;
3240 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3241 aeadctx->mayverify = VERIFY_HW;
3244 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3245 CRYPTO_TFM_RES_BAD_KEY_LEN);
3248 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3251 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3252 unsigned int authsize)
3254 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3258 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3259 aeadctx->mayverify = VERIFY_HW;
3262 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3263 aeadctx->mayverify = VERIFY_HW;
3266 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3267 aeadctx->mayverify = VERIFY_HW;
3270 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3271 aeadctx->mayverify = VERIFY_HW;
3274 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3275 aeadctx->mayverify = VERIFY_HW;
3278 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3279 aeadctx->mayverify = VERIFY_HW;
3282 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3283 aeadctx->mayverify = VERIFY_HW;
3286 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3287 CRYPTO_TFM_RES_BAD_KEY_LEN);
3290 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3293 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3295 unsigned int keylen)
3297 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3298 unsigned char ck_size, mk_size;
3299 int key_ctx_size = 0;
3301 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3302 if (keylen == AES_KEYSIZE_128) {
3303 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3304 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3305 } else if (keylen == AES_KEYSIZE_192) {
3306 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3307 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3308 } else if (keylen == AES_KEYSIZE_256) {
3309 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3310 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3312 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3313 CRYPTO_TFM_RES_BAD_KEY_LEN);
3314 aeadctx->enckey_len = 0;
3317 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3319 memcpy(aeadctx->key, key, keylen);
3320 aeadctx->enckey_len = keylen;
3325 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3327 unsigned int keylen)
3329 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3332 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3333 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3334 CRYPTO_TFM_REQ_MASK);
3335 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3336 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3337 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3338 CRYPTO_TFM_RES_MASK);
3341 return chcr_ccm_common_setkey(aead, key, keylen);
3344 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3345 unsigned int keylen)
3347 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3351 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3352 CRYPTO_TFM_RES_BAD_KEY_LEN);
3353 aeadctx->enckey_len = 0;
3356 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3357 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3358 CRYPTO_TFM_REQ_MASK);
3359 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3360 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3361 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3362 CRYPTO_TFM_RES_MASK);
3366 memcpy(aeadctx->salt, key + keylen, 3);
3367 return chcr_ccm_common_setkey(aead, key, keylen);
3370 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3371 unsigned int keylen)
3373 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3374 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3375 struct crypto_cipher *cipher;
3376 unsigned int ck_size;
3377 int ret = 0, key_ctx_size = 0;
3379 aeadctx->enckey_len = 0;
3380 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3381 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3382 & CRYPTO_TFM_REQ_MASK);
3383 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3384 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3385 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3386 CRYPTO_TFM_RES_MASK);
3390 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3392 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3393 memcpy(aeadctx->salt, key + keylen, 4);
3395 if (keylen == AES_KEYSIZE_128) {
3396 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3397 } else if (keylen == AES_KEYSIZE_192) {
3398 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3399 } else if (keylen == AES_KEYSIZE_256) {
3400 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3402 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3403 CRYPTO_TFM_RES_BAD_KEY_LEN);
3404 pr_err("GCM: Invalid key length %d\n", keylen);
3409 memcpy(aeadctx->key, key, keylen);
3410 aeadctx->enckey_len = keylen;
3411 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3413 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3414 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3417 /* Calculate H = CIPH(K, 0 repeated 16 times), i.e. AES of the all-zero
3418 * block. It goes into the key context as the GHASH hash key.
3420 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3421 if (IS_ERR(cipher)) {
3422 aeadctx->enckey_len = 0;
3427 ret = crypto_cipher_setkey(cipher, key, keylen);
3429 aeadctx->enckey_len = 0;
3432 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3433 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3436 crypto_free_cipher(cipher);
3441 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3442 unsigned int keylen)
3444 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3445 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3446 /* contains both the auth and the cipher key */
3447 struct crypto_authenc_keys keys;
3448 unsigned int bs, subtype;
3449 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3450 int err = 0, i, key_ctx_len = 0;
3451 unsigned char ck_size = 0;
3452 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3453 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3454 struct algo_param param;
3458 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3459 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3460 & CRYPTO_TFM_REQ_MASK);
3461 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3462 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3463 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3464 & CRYPTO_TFM_RES_MASK);
3468 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3469 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3473 if (get_alg_config(¶m, max_authsize)) {
3474 pr_err("chcr : Unsupported digest size\n");
3477 subtype = get_aead_subtype(authenc);
3478 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3479 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3480 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3482 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3483 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3484 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3486 if (keys.enckeylen == AES_KEYSIZE_128) {
3487 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3488 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3489 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3490 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3491 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3493 pr_err("chcr : Unsupported cipher key\n");
3497 /* Copy only the encryption key. We use the authkey to generate h(ipad)
3498 * and h(opad), so the authkey is not needed again. authkeylen has the
3499 * size of the hash digest.
3501 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3502 aeadctx->enckey_len = keys.enckeylen;
3503 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3504 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3506 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3507 aeadctx->enckey_len << 3);
3509 base_hash = chcr_alloc_shash(max_authsize);
3510 if (IS_ERR(base_hash)) {
3511 pr_err("chcr : Base driver cannot be loaded\n");
3512 aeadctx->enckey_len = 0;
3513 memzero_explicit(&keys, sizeof(keys));
3517 SHASH_DESC_ON_STACK(shash, base_hash);
3519 shash->tfm = base_hash;
3520 shash->flags = crypto_shash_get_flags(base_hash);
3521 bs = crypto_shash_blocksize(base_hash);
3522 align = KEYCTX_ALIGN_PAD(max_authsize);
3523 o_ptr = actx->h_iopad + param.result_size + align;
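/* h_iopad holds the ipad digest followed by the opad digest, each
 * padded out to the key-context alignment; o_ptr marks where the opad
 * state begins.
 */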
3525 if (keys.authkeylen > bs) {
3526 err = crypto_shash_digest(shash, keys.authkey,
3530 pr_err("chcr : Base driver cannot be loaded\n");
3533 keys.authkeylen = max_authsize;
3535 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3537 /* Compute the ipad-digest*/
3538 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3539 memcpy(pad, o_ptr, keys.authkeylen);
3540 for (i = 0; i < bs >> 2; i++)
3541 *((unsigned int *)pad + i) ^= IPAD_DATA;
3543 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3546 /* Compute the opad-digest */
3547 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3548 memcpy(pad, o_ptr, keys.authkeylen);
3549 for (i = 0; i < bs >> 2; i++)
3550 *((unsigned int *)pad + i) ^= OPAD_DATA;
3552 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3555 /* convert the ipad and opad digest to network order */
3556 chcr_change_order(actx->h_iopad, param.result_size);
3557 chcr_change_order(o_ptr, param.result_size);
3558 key_ctx_len = sizeof(struct _key_ctx) +
3559 roundup(keys.enckeylen, 16) +
3560 (param.result_size + align) * 2;
3561 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3562 0, 1, key_ctx_len >> 4);
3563 actx->auth_mode = param.auth_mode;
3564 chcr_free_shash(base_hash);
3566 memzero_explicit(&keys, sizeof(keys));
3570 aeadctx->enckey_len = 0;
3571 memzero_explicit(&keys, sizeof(keys));
3572 if (!IS_ERR(base_hash))
3573 chcr_free_shash(base_hash);
3577 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3578 const u8 *key, unsigned int keylen)
3580 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3581 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3582 struct crypto_authenc_keys keys;
3584 /* contains both the auth and the cipher key */
3585 unsigned int subtype;
3586 int key_ctx_len = 0;
3587 unsigned char ck_size = 0;
3589 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3590 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3591 & CRYPTO_TFM_REQ_MASK);
3592 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3593 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3594 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3595 & CRYPTO_TFM_RES_MASK);
3599 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3600 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3603 subtype = get_aead_subtype(authenc);
3604 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3605 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3606 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3608 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3609 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3610 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3612 if (keys.enckeylen == AES_KEYSIZE_128) {
3613 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3614 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3615 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3616 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3617 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3619 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3622 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3623 aeadctx->enckey_len = keys.enckeylen;
3624 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3625 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3626 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3627 aeadctx->enckey_len << 3);
3629 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3631 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3632 0, key_ctx_len >> 4);
3633 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3634 memzero_explicit(&keys, sizeof(keys));
3637 aeadctx->enckey_len = 0;
3638 memzero_explicit(&keys, sizeof(keys));
3642 static int chcr_aead_op(struct aead_request *req,
3644 create_wr_t create_wr_fn)
3646 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3647 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3648 struct uld_ctx *u_ctx;
3649 struct sk_buff *skb;
3651 struct chcr_dev *cdev;
3653 cdev = a_ctx(tfm)->dev;
3655 pr_err("chcr : %s : No crypto device.\n", __func__);
3659 if (chcr_inc_wrcount(cdev)) {
3660 /* In the CHCR detached state lldi or padap has been freed,
3661 * so we cannot increment the fallback counter here.
3663 return chcr_aead_fallback(req, reqctx->op);
3666 u_ctx = ULD_CTX(a_ctx(tfm));
3667 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3668 a_ctx(tfm)->tx_qidx)) {
3670 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3671 chcr_dec_wrcount(cdev);
3676 /* Form a WR from req */
3677 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3679 if (IS_ERR(skb) || !skb) {
3680 chcr_dec_wrcount(cdev);
3681 return PTR_ERR(skb);
3684 skb->dev = u_ctx->lldi.ports[0];
3685 set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3687 return isfull ? -EBUSY : -EINPROGRESS;
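/* Standard async crypto convention: -EINPROGRESS means the request was
 * queued to hardware and completes via the callback, while -EBUSY tells
 * a MAY_BACKLOG caller that the queue is congested even though the
 * request was still submitted.
 */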
3690 static int chcr_aead_encrypt(struct aead_request *req)
3692 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3693 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3695 reqctx->verify = VERIFY_HW;
3696 reqctx->op = CHCR_ENCRYPT_OP;
3698 switch (get_aead_subtype(tfm)) {
3699 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3700 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3701 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3702 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3703 return chcr_aead_op(req, 0, create_authenc_wr);
3704 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3705 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3706 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3708 return chcr_aead_op(req, 0, create_gcm_wr);
3712 static int chcr_aead_decrypt(struct aead_request *req)
3714 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3715 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3716 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3719 if (aeadctx->mayverify == VERIFY_SW) {
3720 size = crypto_aead_maxauthsize(tfm);
3721 reqctx->verify = VERIFY_SW;
3724 reqctx->verify = VERIFY_HW;
3726 reqctx->op = CHCR_DECRYPT_OP;
3727 switch (get_aead_subtype(tfm)) {
3728 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3729 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3730 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3731 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3732 return chcr_aead_op(req, size, create_authenc_wr);
3733 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3734 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3735 return chcr_aead_op(req, size, create_aead_ccm_wr);
3737 return chcr_aead_op(req, size, create_gcm_wr);
3741 static struct chcr_alg_template driver_algs[] = {
3744 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3747 .cra_name = "cbc(aes)",
3748 .cra_driver_name = "cbc-aes-chcr",
3749 .cra_blocksize = AES_BLOCK_SIZE,
3750 .cra_init = chcr_cra_init,
3751 .cra_exit = chcr_cra_exit,
3752 .cra_u.ablkcipher = {
3753 .min_keysize = AES_MIN_KEY_SIZE,
3754 .max_keysize = AES_MAX_KEY_SIZE,
3755 .ivsize = AES_BLOCK_SIZE,
3756 .setkey = chcr_aes_cbc_setkey,
3757 .encrypt = chcr_aes_encrypt,
3758 .decrypt = chcr_aes_decrypt,
3763 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3766 .cra_name = "xts(aes)",
3767 .cra_driver_name = "xts-aes-chcr",
3768 .cra_blocksize = AES_BLOCK_SIZE,
3769 .cra_init = chcr_cra_init,
3771 .cra_u .ablkcipher = {
3772 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3773 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3774 .ivsize = AES_BLOCK_SIZE,
3775 .setkey = chcr_aes_xts_setkey,
3776 .encrypt = chcr_aes_encrypt,
3777 .decrypt = chcr_aes_decrypt,
3782 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3785 .cra_name = "ctr(aes)",
3786 .cra_driver_name = "ctr-aes-chcr",
3788 .cra_init = chcr_cra_init,
3789 .cra_exit = chcr_cra_exit,
3790 .cra_u.ablkcipher = {
3791 .min_keysize = AES_MIN_KEY_SIZE,
3792 .max_keysize = AES_MAX_KEY_SIZE,
3793 .ivsize = AES_BLOCK_SIZE,
3794 .setkey = chcr_aes_ctr_setkey,
3795 .encrypt = chcr_aes_encrypt,
3796 .decrypt = chcr_aes_decrypt,
3801 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3802 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3805 .cra_name = "rfc3686(ctr(aes))",
3806 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3808 .cra_init = chcr_rfc3686_init,
3809 .cra_exit = chcr_cra_exit,
3810 .cra_u.ablkcipher = {
3811 .min_keysize = AES_MIN_KEY_SIZE +
3812 CTR_RFC3686_NONCE_SIZE,
3813 .max_keysize = AES_MAX_KEY_SIZE +
3814 CTR_RFC3686_NONCE_SIZE,
3815 .ivsize = CTR_RFC3686_IV_SIZE,
3816 .setkey = chcr_aes_rfc3686_setkey,
3817 .encrypt = chcr_aes_encrypt,
3818 .decrypt = chcr_aes_decrypt,
3824 .type = CRYPTO_ALG_TYPE_AHASH,
3827 .halg.digestsize = SHA1_DIGEST_SIZE,
3830 .cra_driver_name = "sha1-chcr",
3831 .cra_blocksize = SHA1_BLOCK_SIZE,
3836 .type = CRYPTO_ALG_TYPE_AHASH,
3839 .halg.digestsize = SHA256_DIGEST_SIZE,
3841 .cra_name = "sha256",
3842 .cra_driver_name = "sha256-chcr",
3843 .cra_blocksize = SHA256_BLOCK_SIZE,
3848 .type = CRYPTO_ALG_TYPE_AHASH,
3851 .halg.digestsize = SHA224_DIGEST_SIZE,
3853 .cra_name = "sha224",
3854 .cra_driver_name = "sha224-chcr",
3855 .cra_blocksize = SHA224_BLOCK_SIZE,
3860 .type = CRYPTO_ALG_TYPE_AHASH,
3863 .halg.digestsize = SHA384_DIGEST_SIZE,
3865 .cra_name = "sha384",
3866 .cra_driver_name = "sha384-chcr",
3867 .cra_blocksize = SHA384_BLOCK_SIZE,
3872 .type = CRYPTO_ALG_TYPE_AHASH,
3875 .halg.digestsize = SHA512_DIGEST_SIZE,
3877 .cra_name = "sha512",
3878 .cra_driver_name = "sha512-chcr",
3879 .cra_blocksize = SHA512_BLOCK_SIZE,
3885 .type = CRYPTO_ALG_TYPE_HMAC,
3888 .halg.digestsize = SHA1_DIGEST_SIZE,
3890 .cra_name = "hmac(sha1)",
3891 .cra_driver_name = "hmac-sha1-chcr",
3892 .cra_blocksize = SHA1_BLOCK_SIZE,
3897 .type = CRYPTO_ALG_TYPE_HMAC,
3900 .halg.digestsize = SHA224_DIGEST_SIZE,
3902 .cra_name = "hmac(sha224)",
3903 .cra_driver_name = "hmac-sha224-chcr",
3904 .cra_blocksize = SHA224_BLOCK_SIZE,
3909 .type = CRYPTO_ALG_TYPE_HMAC,
3912 .halg.digestsize = SHA256_DIGEST_SIZE,
3914 .cra_name = "hmac(sha256)",
3915 .cra_driver_name = "hmac-sha256-chcr",
3916 .cra_blocksize = SHA256_BLOCK_SIZE,
3921 .type = CRYPTO_ALG_TYPE_HMAC,
3924 .halg.digestsize = SHA384_DIGEST_SIZE,
3926 .cra_name = "hmac(sha384)",
3927 .cra_driver_name = "hmac-sha384-chcr",
3928 .cra_blocksize = SHA384_BLOCK_SIZE,
3933 .type = CRYPTO_ALG_TYPE_HMAC,
3936 .halg.digestsize = SHA512_DIGEST_SIZE,
3938 .cra_name = "hmac(sha512)",
3939 .cra_driver_name = "hmac-sha512-chcr",
3940 .cra_blocksize = SHA512_BLOCK_SIZE,
3944 /* Add AEAD Algorithms */
3946 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3950 .cra_name = "gcm(aes)",
3951 .cra_driver_name = "gcm-aes-chcr",
3953 .cra_priority = CHCR_AEAD_PRIORITY,
3954 .cra_ctxsize = sizeof(struct chcr_context) +
3955 sizeof(struct chcr_aead_ctx) +
3956 sizeof(struct chcr_gcm_ctx),
3958 .ivsize = GCM_AES_IV_SIZE,
3959 .maxauthsize = GHASH_DIGEST_SIZE,
3960 .setkey = chcr_gcm_setkey,
3961 .setauthsize = chcr_gcm_setauthsize,
3965 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3969 .cra_name = "rfc4106(gcm(aes))",
3970 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3972 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3973 .cra_ctxsize = sizeof(struct chcr_context) +
3974 sizeof(struct chcr_aead_ctx) +
3975 sizeof(struct chcr_gcm_ctx),
3978 .ivsize = GCM_RFC4106_IV_SIZE,
3979 .maxauthsize = GHASH_DIGEST_SIZE,
3980 .setkey = chcr_gcm_setkey,
3981 .setauthsize = chcr_4106_4309_setauthsize,
3985 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3989 .cra_name = "ccm(aes)",
3990 .cra_driver_name = "ccm-aes-chcr",
3992 .cra_priority = CHCR_AEAD_PRIORITY,
3993 .cra_ctxsize = sizeof(struct chcr_context) +
3994 sizeof(struct chcr_aead_ctx),
3997 .ivsize = AES_BLOCK_SIZE,
3998 .maxauthsize = GHASH_DIGEST_SIZE,
3999 .setkey = chcr_aead_ccm_setkey,
4000 .setauthsize = chcr_ccm_setauthsize,
4004 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4008 .cra_name = "rfc4309(ccm(aes))",
4009 .cra_driver_name = "rfc4309-ccm-aes-chcr",
4011 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4012 .cra_ctxsize = sizeof(struct chcr_context) +
4013 sizeof(struct chcr_aead_ctx),
4017 .maxauthsize = GHASH_DIGEST_SIZE,
4018 .setkey = chcr_aead_rfc4309_setkey,
4019 .setauthsize = chcr_4106_4309_setauthsize,
4023 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4027 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4029 "authenc-hmac-sha1-cbc-aes-chcr",
4030 .cra_blocksize = AES_BLOCK_SIZE,
4031 .cra_priority = CHCR_AEAD_PRIORITY,
4032 .cra_ctxsize = sizeof(struct chcr_context) +
4033 sizeof(struct chcr_aead_ctx) +
4034 sizeof(struct chcr_authenc_ctx),
4037 .ivsize = AES_BLOCK_SIZE,
4038 .maxauthsize = SHA1_DIGEST_SIZE,
4039 .setkey = chcr_authenc_setkey,
4040 .setauthsize = chcr_authenc_setauthsize,
4044 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4049 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4051 "authenc-hmac-sha256-cbc-aes-chcr",
4052 .cra_blocksize = AES_BLOCK_SIZE,
4053 .cra_priority = CHCR_AEAD_PRIORITY,
4054 .cra_ctxsize = sizeof(struct chcr_context) +
4055 sizeof(struct chcr_aead_ctx) +
4056 sizeof(struct chcr_authenc_ctx),
4059 .ivsize = AES_BLOCK_SIZE,
4060 .maxauthsize = SHA256_DIGEST_SIZE,
4061 .setkey = chcr_authenc_setkey,
4062 .setauthsize = chcr_authenc_setauthsize,
4066 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4070 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4072 "authenc-hmac-sha224-cbc-aes-chcr",
4073 .cra_blocksize = AES_BLOCK_SIZE,
4074 .cra_priority = CHCR_AEAD_PRIORITY,
4075 .cra_ctxsize = sizeof(struct chcr_context) +
4076 sizeof(struct chcr_aead_ctx) +
4077 sizeof(struct chcr_authenc_ctx),
4079 .ivsize = AES_BLOCK_SIZE,
4080 .maxauthsize = SHA224_DIGEST_SIZE,
4081 .setkey = chcr_authenc_setkey,
4082 .setauthsize = chcr_authenc_setauthsize,
4086 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4090 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4092 "authenc-hmac-sha384-cbc-aes-chcr",
4093 .cra_blocksize = AES_BLOCK_SIZE,
4094 .cra_priority = CHCR_AEAD_PRIORITY,
4095 .cra_ctxsize = sizeof(struct chcr_context) +
4096 sizeof(struct chcr_aead_ctx) +
4097 sizeof(struct chcr_authenc_ctx),
4100 .ivsize = AES_BLOCK_SIZE,
4101 .maxauthsize = SHA384_DIGEST_SIZE,
4102 .setkey = chcr_authenc_setkey,
4103 .setauthsize = chcr_authenc_setauthsize,
4107 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4111 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4113 "authenc-hmac-sha512-cbc-aes-chcr",
4114 .cra_blocksize = AES_BLOCK_SIZE,
4115 .cra_priority = CHCR_AEAD_PRIORITY,
4116 .cra_ctxsize = sizeof(struct chcr_context) +
4117 sizeof(struct chcr_aead_ctx) +
4118 sizeof(struct chcr_authenc_ctx),
4121 .ivsize = AES_BLOCK_SIZE,
4122 .maxauthsize = SHA512_DIGEST_SIZE,
4123 .setkey = chcr_authenc_setkey,
4124 .setauthsize = chcr_authenc_setauthsize,
4128 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4132 .cra_name = "authenc(digest_null,cbc(aes))",
4134 "authenc-digest_null-cbc-aes-chcr",
4135 .cra_blocksize = AES_BLOCK_SIZE,
4136 .cra_priority = CHCR_AEAD_PRIORITY,
4137 .cra_ctxsize = sizeof(struct chcr_context) +
4138 sizeof(struct chcr_aead_ctx) +
4139 sizeof(struct chcr_authenc_ctx),
4142 .ivsize = AES_BLOCK_SIZE,
4144 .setkey = chcr_aead_digest_null_setkey,
4145 .setauthsize = chcr_authenc_null_setauthsize,
4149 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4153 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4155 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4157 .cra_priority = CHCR_AEAD_PRIORITY,
4158 .cra_ctxsize = sizeof(struct chcr_context) +
4159 sizeof(struct chcr_aead_ctx) +
4160 sizeof(struct chcr_authenc_ctx),
4163 .ivsize = CTR_RFC3686_IV_SIZE,
4164 .maxauthsize = SHA1_DIGEST_SIZE,
4165 .setkey = chcr_authenc_setkey,
4166 .setauthsize = chcr_authenc_setauthsize,
4170 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4175 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4177 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4179 .cra_priority = CHCR_AEAD_PRIORITY,
4180 .cra_ctxsize = sizeof(struct chcr_context) +
4181 sizeof(struct chcr_aead_ctx) +
4182 sizeof(struct chcr_authenc_ctx),
4185 .ivsize = CTR_RFC3686_IV_SIZE,
4186 .maxauthsize = SHA256_DIGEST_SIZE,
4187 .setkey = chcr_authenc_setkey,
4188 .setauthsize = chcr_authenc_setauthsize,
4192 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4196 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4198 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4200 .cra_priority = CHCR_AEAD_PRIORITY,
4201 .cra_ctxsize = sizeof(struct chcr_context) +
4202 sizeof(struct chcr_aead_ctx) +
4203 sizeof(struct chcr_authenc_ctx),
4205 .ivsize = CTR_RFC3686_IV_SIZE,
4206 .maxauthsize = SHA224_DIGEST_SIZE,
4207 .setkey = chcr_authenc_setkey,
4208 .setauthsize = chcr_authenc_setauthsize,
4212 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4216 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4218 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4220 .cra_priority = CHCR_AEAD_PRIORITY,
4221 .cra_ctxsize = sizeof(struct chcr_context) +
4222 sizeof(struct chcr_aead_ctx) +
4223 sizeof(struct chcr_authenc_ctx),
4226 .ivsize = CTR_RFC3686_IV_SIZE,
4227 .maxauthsize = SHA384_DIGEST_SIZE,
4228 .setkey = chcr_authenc_setkey,
4229 .setauthsize = chcr_authenc_setauthsize,
4233 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4237 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4239 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4241 .cra_priority = CHCR_AEAD_PRIORITY,
4242 .cra_ctxsize = sizeof(struct chcr_context) +
4243 sizeof(struct chcr_aead_ctx) +
4244 sizeof(struct chcr_authenc_ctx),
4247 .ivsize = CTR_RFC3686_IV_SIZE,
4248 .maxauthsize = SHA512_DIGEST_SIZE,
4249 .setkey = chcr_authenc_setkey,
4250 .setauthsize = chcr_authenc_setauthsize,
4254 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4258 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4260 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4262 .cra_priority = CHCR_AEAD_PRIORITY,
4263 .cra_ctxsize = sizeof(struct chcr_context) +
4264 sizeof(struct chcr_aead_ctx) +
4265 sizeof(struct chcr_authenc_ctx),
4268 .ivsize = CTR_RFC3686_IV_SIZE,
4270 .setkey = chcr_aead_digest_null_setkey,
4271 .setauthsize = chcr_authenc_null_setauthsize,
4277 * chcr_unregister_alg - Deregister crypto algorithms with kernel framework.
4280 static int chcr_unregister_alg(void)
4284 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4285 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4286 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4287 if (driver_algs[i].is_registered)
4288 crypto_unregister_alg(
4289 &driver_algs[i].alg.crypto);
4291 case CRYPTO_ALG_TYPE_AEAD:
4292 if (driver_algs[i].is_registered)
4293 crypto_unregister_aead(
4294 &driver_algs[i].alg.aead);
4296 case CRYPTO_ALG_TYPE_AHASH:
4297 if (driver_algs[i].is_registered)
4298 crypto_unregister_ahash(
4299 &driver_algs[i].alg.hash);
4302 driver_algs[i].is_registered = 0;
4307 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4308 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4309 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4312 * chcr_register_alg - Register crypto algorithms with kernel framework.
4314 static int chcr_register_alg(void)
4316 struct crypto_alg ai;
4317 struct ahash_alg *a_hash;
4321 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4322 if (driver_algs[i].is_registered)
4324 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4325 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4326 driver_algs[i].alg.crypto.cra_priority =
4328 driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4329 driver_algs[i].alg.crypto.cra_flags =
4330 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4331 CRYPTO_ALG_NEED_FALLBACK;
4332 driver_algs[i].alg.crypto.cra_ctxsize =
4333 sizeof(struct chcr_context) +
4334 sizeof(struct ablk_ctx);
4335 driver_algs[i].alg.crypto.cra_alignmask = 0;
4336 driver_algs[i].alg.crypto.cra_type =
4337 &crypto_ablkcipher_type;
4338 err = crypto_register_alg(&driver_algs[i].alg.crypto);
4339 name = driver_algs[i].alg.crypto.cra_driver_name;
4341 case CRYPTO_ALG_TYPE_AEAD:
4342 driver_algs[i].alg.aead.base.cra_flags =
4343 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4344 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4345 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4346 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4347 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4348 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4349 err = crypto_register_aead(&driver_algs[i].alg.aead);
4350 name = driver_algs[i].alg.aead.base.cra_driver_name;
4352 case CRYPTO_ALG_TYPE_AHASH:
4353 a_hash = &driver_algs[i].alg.hash;
4354 a_hash->update = chcr_ahash_update;
4355 a_hash->final = chcr_ahash_final;
4356 a_hash->finup = chcr_ahash_finup;
4357 a_hash->digest = chcr_ahash_digest;
4358 a_hash->export = chcr_ahash_export;
4359 a_hash->import = chcr_ahash_import;
4360 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4361 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4362 a_hash->halg.base.cra_module = THIS_MODULE;
4363 a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4364 a_hash->halg.base.cra_alignmask = 0;
4365 a_hash->halg.base.cra_exit = NULL;
4367 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4368 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4369 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4370 a_hash->init = chcr_hmac_init;
4371 a_hash->setkey = chcr_ahash_setkey;
4372 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4374 a_hash->init = chcr_sha_init;
4375 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4376 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4378 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4379 ai = driver_algs[i].alg.hash.halg.base;
4380 name = ai.cra_driver_name;
4384 pr_err("chcr : %s : Algorithm registration failed\n",
4388 driver_algs[i].is_registered = 1;
4394 chcr_unregister_alg();
4399 * start_crypto - Register the crypto algorithms.
4400 * This should be called once when the first device comes up. After this
4401 * the kernel will start calling driver APIs for crypto operations.
4403 int start_crypto(void)
4405 return chcr_register_alg();
4409 * stop_crypto - Deregister all the crypto algorithms with kernel.
4410 * This should be called once when the last device goes down. After this
4411 * the kernel will not call the driver APIs for crypto operations.
4413 int stop_crypto(void)
4415 chcr_unregister_alg();