/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};
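/*
 * Editor's note (illustrative, not driver logic): round_constant[] is the
 * AES key-schedule Rcon table with each constant placed in the most
 * significant byte, i.e. round_constant[i] == Rcon[i + 1] << 24 in
 * FIPS-197 notation. Successive entries double in GF(2^8), which is why
 * 0x80 is followed by 0x1B (x^8 reduced modulo the AES polynomial 0x11B).
 */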
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
        unsigned int skip_len = 0;

        if (sg_dma_len(sg) <= skip) {
                skip -= sg_dma_len(sg);

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);

                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                   authsize, req->assoclen +
                                   req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);

static inline void chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;

        req->base.complete(&req->base, err);
static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;

        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];

        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
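/*
 * Editor's note (illustrative): the loop above runs the standard forward
 * AES key expansion in w_ring[], keeping only the most recent nk words.
 * For AES-128 (nk = 4, nr = 10) the schedule has (nr + 1) * 4 = 44 words,
 * so the final copy-out emits w[40..43] in big-endian order: the last
 * round key, which is exactly where hardware starts when running the key
 * schedule in the decrypt direction.
 */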
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

                pr_err("Unknown digest size %d\n", digest_size);
static void chcr_change_order(char *buf, int ds)

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
static inline int is_hmac(struct crypto_tfm *tfm)
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(*addr);

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;

        if (sg_dma_len(sg) <= skip) {
                skip -= sg_dma_len(sg);

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);

                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +

                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
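/*
 * Editor's note (illustrative): a cpl_rx_phys_dsgl is followed by
 * phys_sge_pairs blocks that each carry eight {len, addr} slots, which is
 * why both destination walkers above index walk->to->len[j % 8] and
 * walk->to->addr[j % 8]; walk->to advances to the next pair block once
 * eight slots fill (in lines elided from this excerpt).
 */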
static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(*addr);
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
        if (sg_dma_len(sg) <= skip) {
                skip -= sg_dma_len(sg);

        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);

                walk->last_sg_len = sgmin + skip_len;

                if (sg_dma_len(sg) == skip_len) {

                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;

                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;

        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);

        spin_unlock(&txq->sendq.lock);
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int srcskip)
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {

        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,

                if (sg_dma_len(src) == (soffset + srcskip)) {
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int srcskip,
                             unsigned int dstskip)
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {

        if (sg_dma_len(dst) == dstskip) {

               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,

        while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
               space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                if (srclen <= dstlen)
                less = min_t(unsigned int, sg_dma_len(dst) - offset -
                             dstskip, CHCR_DST_SG_SIZE);

                if ((offset + dstskip) == sg_dma_len(dst)) {

                if ((soffset + srcskip) == sg_dma_len(src)) {

        return min(srclen, dstlen);
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned short op_type)

        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

        skcipher_request_set_sync_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                        crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);
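/*
 * Editor's note (illustrative): this is the usual on-stack sync-skcipher
 * fallback pattern. A non-zero op_type (CHCR_DECRYPT_OP) selects decrypt,
 * zero selects encrypt, and skcipher_request_zero() scrubs the stack
 * request so no IV or callback state leaks once the software path is done.
 */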
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
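/*
 * Editor's note (illustrative): despite its name, the len16 argument is a
 * byte count here; the DIV_ROUND_UP(len16, 16) conversions above produce
 * the 16-byte units that the FW_CRYPTO_LOOKASIDE_WR length field and the
 * ULPTX header expect. For example, a 250-byte work request is advertised
 * as DIV_ROUND_UP(250, 16) = 16 units.
 */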
/**
 * create_cipher_wr - form the WR for cipher operations
 * @ctx: crypto driver context of the request.
 * @qid: ingress qid where response of this WR should be received.
 * @op_type: encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                             (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);

        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);

        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->info, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return ERR_PTR(error);
static inline int chcr_keyctx_ck_size(unsigned int keylen)

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

        crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
                                         CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
                                       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;

        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);

        u32 temp = be32_to_cpu(*--b);

        c = (u64)temp + 1; // Number of blocks that can be processed without overflow
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
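/*
 * Editor's note (worked example; assumes the line elided just above
 * complements the low counter word, since the clamp only makes sense on a
 * remaining-block count): if the last 32-bit word of the counter block is
 * 0xfffffffe, c works out to 2, so a request covering ten blocks is
 * clamped here to 2 * AES_BLOCK_SIZE and the remainder is carried into
 * the next work request after the counter is reset.
 */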
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_cipher *cipher;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        cipher = ablkctx->aes_generic;
        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);

        crypto_cipher_encrypt_one(cipher, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        crypto_cipher_decrypt_one(cipher, iv, iv);
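/*
 * Editor's note (illustrative): XTS derives the tweak for block n by
 * encrypting the IV with the second key half and then multiplying by x in
 * GF(2^128) once per 16-byte block already processed. gf128mul_x8_ble()
 * performs eight doublings at once, so the round doublings are applied as
 * round8 byte-shifts plus (round % 8) single shifts; the result is
 * decrypted back so the hardware again receives a plain IV.
 */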
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                 struct cpl_fw6_pld *fw6_pld, u8 *iv)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                  CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                      AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                        /* Updated before sending last WR */
                        memcpy(iv, req->info, AES_BLOCK_SIZE);
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);

/* We need a separate function for the final IV because for RFC3686 the
 * initial counter starts from 1 and the IV buffer, which is only 8 bytes,
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                struct cpl_fw6_pld *fw6_pld, u8 *iv)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for Decrypt */
                memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct cipher_wr_param wrparam;

        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);

        bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                  CIP_SPACE_LEFT(ablkctx->enckey_len),
                                  reqctx->src_ofst, reqctx->dst_ofst);
        if ((bytes + reqctx->processed) >= req->nbytes)
                bytes = req->nbytes - reqctx->processed;
                bytes = rounddown(bytes, 16);
                /* CTR mode counter overflow */
                bytes = req->nbytes - reqctx->processed;

        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                err = chcr_cipher_fallback(ablkctx->sw_cipher,

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
                pr_err("%s: failed to form WR, no memory\n", __func__);
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        req->base.complete(&req->base, err);
static int process_cipher(struct ablkcipher_request *req,
                          struct sk_buff **skb,
                          unsigned short op_type)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;

        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);

        chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                             sizeof(struct cpl_rx_phys_dsgl) +
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                bytes = IV + req->nbytes;

                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                        bytes = rounddown(bytes, 16);
                bytes = req->nbytes;

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->info, bytes);

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                       CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

                memcpy(reqctx->iv, req->info, IV);

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                err = chcr_cipher_fallback(ablkctx->sw_cipher,

        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;

        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
                err = PTR_ERR(*skb);

        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;

        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
static int chcr_aes_encrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct sk_buff *skb = NULL;
        int err, isfull = 0;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_ENCRYPT_OP);
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        return isfull ? -EBUSY : -EINPROGRESS;

static int chcr_aes_decrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        int err, isfull = 0;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_DECRYPT_OP);
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        return isfull ? -EBUSY : -EINPROGRESS;
static int chcr_device_init(struct chcr_context *ctx)
        struct uld_ctx *u_ctx = NULL;
        struct adapter *adap;
        int txq_perchan, txq_idx, ntxq;
        int err = 0, rxq_perchan, rxq_idx;

        id = smp_processor_id();
        u_ctx = assign_chcr_device();
                pr_err("device assignment failed\n");
        ctx->dev = u_ctx->dev;
        adap = padap(ctx->dev);
        ntxq = u_ctx->lldi.ntxq;
        rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
        txq_perchan = ntxq / u_ctx->lldi.nchan;
        spin_lock(&ctx->dev->lock_chcr_dev);
        ctx->tx_chan_id = ctx->dev->tx_channel_id;
        ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
        ctx->dev->rx_channel_id = 0;
        spin_unlock(&ctx->dev->lock_chcr_dev);
        rxq_idx = ctx->tx_chan_id * rxq_perchan;
        rxq_idx += id % rxq_perchan;
        txq_idx = ctx->tx_chan_id * txq_perchan;
        txq_idx += id % txq_perchan;
        ctx->rx_qidx = rxq_idx;
        ctx->tx_qidx = txq_idx;
        /* Channel ID used by SGE to forward the packet to the host.
         * The same value should be used in the cpl_fw6_pld RSS_CH field
         * by FW. The driver programs the PCI channel ID to be used in FW
         * at the time of queue allocation with the value "pi->tx_chan".
         */
        ctx->pci_chan_id = txq_idx / txq_perchan;
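/*
 * Editor's note (worked example): with nchan = 2 and eight rx/tx queues,
 * rxq_perchan = txq_perchan = 4. A tfm handed tx channel 1 on CPU 6 gets
 * rxq_idx = 1 * 4 + (6 % 4) = 6 and txq_idx = 6, and the division
 * pci_chan_id = txq_idx / txq_perchan = 6 / 4 recovers channel 1.
 */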
static int chcr_cra_init(struct crypto_tfm *tfm)
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
                                                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);

        if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
                /* To update tweak */
                ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
                if (IS_ERR(ablkctx->aes_generic)) {
                        pr_err("failed to allocate aes cipher for tweak\n");
                        return PTR_ERR(ablkctx->aes_generic);
                ablkctx->aes_generic = NULL;

        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
         * cannot be used as a fallback in chcr_handle_cipher_response.
         */
        ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
                                                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));

static void chcr_cra_exit(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        crypto_free_sync_skcipher(ablkctx->sw_cipher);
        if (ablkctx->aes_generic)
                crypto_free_cipher(ablkctx->aes_generic);
}
static int get_alg_config(struct algo_param *params,
                          unsigned int auth_size)
        switch (auth_size) {
        case SHA1_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
                params->result_size = SHA1_DIGEST_SIZE;
        case SHA224_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
                params->result_size = SHA256_DIGEST_SIZE;
        case SHA256_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
                params->result_size = SHA256_DIGEST_SIZE;
        case SHA384_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
                params->result_size = SHA512_DIGEST_SIZE;
        case SHA512_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
                params->result_size = SHA512_DIGEST_SIZE;
                pr_err("ERROR, unsupported digest size\n");

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
        crypto_free_shash(base_hash);
}
/**
 * create_hash_wr - Create hash work request
 * @req - hash request base
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
                                      struct hash_wr_param *param)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
        struct chcr_wr *chcr_req;
        struct ulptx_sgl *ulptx;
        unsigned int nents = 0, transhdr_len;
        unsigned int temp = 0;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
        struct adapter *adap = padap(h_ctx(tfm)->dev);

        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
                                param->sg_len) <= SGE_MAX_WR_LEN;
        nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
                              CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
        nents += param->bfr_len ? 1 : 0;
        transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
                                param->sg_len, 16) : (sgl_len(nents) * 8);
        transhdr_len = roundup(transhdr_len, 16);

        skb = alloc_skb(transhdr_len, flags);
                return ERR_PTR(-ENOMEM);
        chcr_req = __skb_put_zero(skb, transhdr_len);

        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
        chcr_req->sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
                                         param->opad_needed, 0);

        chcr_req->sec_cpl.ivgen_hdrlen =
                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

        memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
               param->alg_prm.result_size);

        if (param->opad_needed)
                memcpy(chcr_req->key_ctx.key +
                       ((param->alg_prm.result_size <= 32) ? 32 :
                        CHCR_HASH_MAX_DIGEST_SIZE),
                       hmacctx->opad, param->alg_prm.result_size);

        chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
                                                     param->alg_prm.mk_size, 0,
                                        sizeof(chcr_req->key_ctx)) >> 4));
        chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
        ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
        if (param->bfr_len != 0) {
                req_ctx->hctx_wr.dma_addr =
                        dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
                                       param->bfr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
                                      req_ctx->hctx_wr.dma_addr)) {

                req_ctx->hctx_wr.dma_len = param->bfr_len;
                req_ctx->hctx_wr.dma_addr = 0;

        chcr_add_hash_src_ent(req, ulptx, param);
        /* Request up to the maximum WR size */
        temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
                        (param->sg_len + param->bfr_len) : 0);
        atomic_inc(&adap->chcr_stats.digest_rqst);
        create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
                    param->hash_size, transhdr_len,
        req_ctx->hctx_wr.skb = skb;

        return ERR_PTR(error);
static int chcr_ahash_update(struct ahash_request *req)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        u8 remainder = 0, bs;
        unsigned int nbytes = req->nbytes;
        struct hash_wr_param params;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))

        if (nbytes + req_ctx->reqlen >= bs) {
                remainder = (nbytes + req_ctx->reqlen) % bs;
                nbytes = nbytes + req_ctx->reqlen - remainder;
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
                                   + req_ctx->reqlen, nbytes, 0);
                req_ctx->reqlen += nbytes;

        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len > req->nbytes)
                params.sg_len = req->nbytes;
        params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
        params.opad_needed = 0;

        params.bfr_len = req_ctx->reqlen;

        req_ctx->hctx_wr.srcsg = req->src;

        params.hash_size = params.alg_prm.result_size;
        req_ctx->data_len += params.sg_len + params.bfr_len;
        skb = create_hash_wr(req, &params);
                error = PTR_ERR(skb);

        req_ctx->hctx_wr.processed += params.sg_len;

                swap(req_ctx->reqbfr, req_ctx->skbfr);
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   req_ctx->reqbfr, remainder, req->nbytes -

        req_ctx->reqlen = remainder;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;

        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
        memset(bfr_ptr, 0, bs);

                *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);

                *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
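/*
 * Editor's note (illustrative): this builds the final Merkle-Damgard
 * padding block by hand. The message length in *bits* (scmd1 << 3) is
 * stored big-endian in the last 8 bytes of the block: offset 56 for the
 * 64-byte block sizes (SHA-1/224/256) and offset 120 for the 128-byte
 * ones (SHA-384/512); the branch elided here selects between them by bs.
 */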
static int chcr_ahash_final(struct ahash_request *req)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct hash_wr_param params;
        struct sk_buff *skb;
        struct uld_ctx *u_ctx = NULL;
        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        chcr_init_hctx_per_wr(req_ctx);
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (is_hmac(crypto_ahash_tfm(rtfm)))
                params.opad_needed = 1;
                params.opad_needed = 0;

        req_ctx->hctx_wr.isfinal = 1;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.opad_needed = 1;
                params.kctx_len *= 2;
                params.opad_needed = 0;

        req_ctx->hctx_wr.result = 1;
        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.srcsg = req->src;
        if (req_ctx->reqlen == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.bfr_len = bs;

        params.scmd1 = req_ctx->data_len;

        params.hash_size = crypto_ahash_digestsize(rtfm);
        skb = create_hash_wr(req, &params);
                return PTR_ERR(skb);
        req_ctx->reqlen = 0;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return -EINPROGRESS;
static int chcr_ahash_finup(struct ahash_request *req)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))

        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
                params.opad_needed = 0;

        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len < req->nbytes) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;

                params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
                params.hash_size = params.alg_prm.result_size;

                params.sg_len = req->nbytes;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.scmd1 = req_ctx->data_len + req_ctx->reqlen +

        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.result = 1;
        req_ctx->hctx_wr.srcsg = req->src;
        if ((req_ctx->reqlen + req->nbytes) == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.bfr_len = bs;

        skb = create_hash_wr(req, &params);
                error = PTR_ERR(skb);

        req_ctx->reqlen = 0;
        req_ctx->hctx_wr.processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;

        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
static int chcr_ahash_digest(struct ahash_request *req)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))

        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
                params.opad_needed = 0;

        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len < req->nbytes) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;

                params.sg_len = rounddown(params.sg_len, bs);
                params.hash_size = params.alg_prm.result_size;
                params.sg_len = req->nbytes;
                params.hash_size = crypto_ahash_digestsize(rtfm);

                params.scmd1 = req->nbytes + req_ctx->data_len;

        req_ctx->hctx_wr.result = 1;
        req_ctx->hctx_wr.srcsg = req->src;
        req_ctx->data_len += params.bfr_len + params.sg_len;

        if (req->nbytes == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, 0);
                params.bfr_len = bs;

        skb = create_hash_wr(req, &params);
                error = PTR_ERR(skb);

        req_ctx->hctx_wr.processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        return isfull ? -EBUSY : -EINPROGRESS;

        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
static int chcr_ahash_continue(struct ahash_request *req)
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
                params.opad_needed = 0;

        params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
                                            HASH_SPACE_LEFT(params.kctx_len),
        if ((params.sg_len + hctx_wr->processed) > req->nbytes)
                params.sg_len = req->nbytes - hctx_wr->processed;
        if (!hctx_wr->result ||
            ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;

                params.sg_len = rounddown(params.sg_len, bs);
                params.hash_size = params.alg_prm.result_size;

                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.scmd1 = reqctx->data_len + params.sg_len;

        reqctx->data_len += params.sg_len;
        skb = create_hash_wr(req, &params);
                error = PTR_ERR(skb);

        hctx_wr->processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
                                          unsigned char *input,
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
        int digestsize, updated_digestsize;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;

        if (hctx_wr->dma_addr) {
                dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
                                 hctx_wr->dma_len, DMA_TO_DEVICE);
                hctx_wr->dma_addr = 0;
        if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
                if (hctx_wr->result == 1) {
                        hctx_wr->result = 0;
                        memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
                        memcpy(reqctx->partial_hash,
                               input + sizeof(struct cpl_fw6_pld),
                               updated_digestsize);

        memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
               updated_digestsize);

        err = chcr_ahash_continue(req);

        if (hctx_wr->is_sg_map)
                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

        req->base.complete(&req->base, err);
/**
 * chcr_handle_resp - Unmap the DMA buffers associated with the request
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
        struct crypto_tfm *tfm = req->tfm;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                chcr_handle_aead_resp(aead_request_cast(req), input, err);

        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),

        case CRYPTO_ALG_TYPE_AHASH:
                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);

        atomic_inc(&adap->chcr_stats.complete);
static int chcr_ahash_export(struct ahash_request *areq, void *out)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = out;

        state->reqlen = req_ctx->reqlen;
        state->data_len = req_ctx->data_len;
        memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
        memcpy(state->partial_hash, req_ctx->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(state);

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

        req_ctx->reqlen = state->reqlen;
        req_ctx->data_len = state->data_len;
        req_ctx->reqbfr = req_ctx->bfr1;
        req_ctx->skbfr = req_ctx->bfr2;
        memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
        memcpy(req_ctx->partial_hash, state->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(req_ctx);
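/*
 * Editor's note (illustrative): export/import implement the generic
 * crypto_ahash partial-state API, so a caller can checkpoint a long hash
 * and resume it later, roughly:
 *
 *      char state[sizeof(struct chcr_ahash_req_ctx)];
 *      crypto_ahash_export(req, state);        // after some updates
 *      crypto_ahash_import(req2, state);       // resume elsewhere
 *
 * Only reqlen/data_len, the pending buffer and the partial digest travel;
 * the per-WR bookkeeping is reset via chcr_init_hctx_per_wr().
 */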
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                             unsigned int keylen)
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        unsigned int digestsize = crypto_ahash_digestsize(tfm);
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int i, err = 0, updated_digestsize;

        SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

        /* Use the key to calculate the ipad and opad. ipad will be sent with
         * the first request's data, and opad will be sent with the final hash
         * result; ipad lives in hmacctx->ipad and opad in hmacctx->opad.
         */
        shash->tfm = hmacctx->base_hash;
        shash->flags = crypto_shash_get_flags(hmacctx->base_hash);

                err = crypto_shash_digest(shash, key, keylen,

                keylen = digestsize;

                memcpy(hmacctx->ipad, key, keylen);

        memset(hmacctx->ipad + keylen, 0, bs - keylen);
        memcpy(hmacctx->opad, hmacctx->ipad, bs);

        for (i = 0; i < bs / sizeof(int); i++) {
                *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
                *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;

        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;
        err = chcr_compute_partial_hash(shash, hmacctx->ipad,
                                        hmacctx->ipad, digestsize);

        chcr_change_order(hmacctx->ipad, updated_digestsize);

        err = chcr_compute_partial_hash(shash, hmacctx->opad,
                                        hmacctx->opad, digestsize);

        chcr_change_order(hmacctx->opad, updated_digestsize);
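/*
 * Editor's note (illustrative, assuming IPAD_DATA/OPAD_DATA are the usual
 * RFC 2104 constants 0x36363636/0x5c5c5c5c): for HMAC-SHA256 with a short
 * key K, the code above zero-pads K to the 64-byte block, XORs every word
 * with 0x36 (ipad) and 0x5c (opad), then caches the one-block partial
 * states of H(K ^ ipad) and H(K ^ opad) so each request only pays for the
 * inner/outer finalization, never for the key mixing.
 */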
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                               unsigned int key_len)
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned short context_size = 0;

        err = chcr_cipher_fallback_setkey(cipher, key, key_len);

        memcpy(ablkctx->key, key, key_len);
        ablkctx->enckey_len = key_len;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
        ablkctx->key_ctx_hdr =
                FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
                                 CHCR_KEYCTX_NO_KEY, 1,
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;

        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
static int chcr_sha_init(struct ahash_request *areq)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        int digestsize = crypto_ahash_digestsize(tfm);

        req_ctx->data_len = 0;
        req_ctx->reqlen = 0;
        req_ctx->reqbfr = req_ctx->bfr1;
        req_ctx->skbfr = req_ctx->bfr2;
        copy_hash_init_values(req_ctx->partial_hash, digestsize);

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
        unsigned int digestsize = crypto_ahash_digestsize(rtfm);
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        chcr_sha_init(areq);
        req_ctx->data_len = bs;
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                if (digestsize == SHA224_DIGEST_SIZE)
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               SHA256_DIGEST_SIZE);
                else if (digestsize == SHA384_DIGEST_SIZE)
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               SHA512_DIGEST_SIZE);
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
        unsigned int digestsize =
                crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
        hmacctx->base_hash = chcr_alloc_shash(digestsize);
        if (IS_ERR(hmacctx->base_hash))
                return PTR_ERR(hmacctx->base_hash);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

        if (hmacctx->base_hash) {
                chcr_free_shash(hmacctx->base_hash);
                hmacctx->base_hash = NULL;
        }
}
2185 inline void chcr_aead_common_exit(struct aead_request *req)
2187 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2188 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2189 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2191 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2194 static int chcr_aead_common_init(struct aead_request *req)
2196 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2197 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2198 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2199 unsigned int authsize = crypto_aead_authsize(tfm);
2200 int error = -EINVAL;
2202 /* validate key size */
2203 if (aeadctx->enckey_len == 0)
2204 goto err;
2205 if (reqctx->op && req->cryptlen < authsize)
2206 goto err;
2207 if (reqctx->b0_len)
2208 reqctx->scratch_pad = reqctx->iv + IV;
2209 else
2210 reqctx->scratch_pad = NULL;
2212 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2213 reqctx->op);
2214 if (error) {
2215 error = -ENOMEM;
2216 goto err;
2217 }
2218 reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2219 CHCR_SRC_SG_SIZE, 0);
2220 reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2221 CHCR_SRC_SG_SIZE, req->assoclen);
2223 return 0;
2224 err:
2225 return error;
2226 }
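/*
 * chcr_aead_need_fallback() below punts a request to the software AEAD
 * when it cannot fit a single hardware work request: an empty payload,
 * more destination SG entries than MAX_DSGL_ENT, AAD longer than
 * aadmax, or a WR exceeding SGE_MAX_WR_LEN.
 */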
2227 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2228 int aadmax, int wrlen,
2229 unsigned short op_type)
2231 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2233 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2234 dst_nents > MAX_DSGL_ENT ||
2235 (req->assoclen > aadmax) ||
2236 (wrlen > SGE_MAX_WR_LEN))
2241 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2243 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2244 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2245 struct aead_request *subreq = aead_request_ctx(req);
2247 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2248 aead_request_set_callback(subreq, req->base.flags,
2249 req->base.complete, req->base.data);
2250 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2252 aead_request_set_ad(subreq, req->assoclen);
2253 return op_type ? crypto_aead_decrypt(subreq) :
2254 crypto_aead_encrypt(subreq);
2257 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2261 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2262 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2263 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2264 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2265 struct sk_buff *skb = NULL;
2266 struct chcr_wr *chcr_req;
2267 struct cpl_rx_phys_dsgl *phys_cpl;
2268 struct ulptx_sgl *ulptx;
2269 unsigned int transhdr_len;
2270 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2271 unsigned int kctx_len = 0, dnents;
2272 unsigned int assoclen = req->assoclen;
2273 unsigned int authsize = crypto_aead_authsize(tfm);
2274 int error = -EINVAL;
2276 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2278 struct adapter *adap = padap(a_ctx(tfm)->dev);
2280 if (req->cryptlen == 0)
2284 error = chcr_aead_common_init(req);
2286 return ERR_PTR(error);
2288 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2289 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2292 reqctx->aad_nents = 0;
2294 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2295 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2296 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
2298 dnents += MIN_AUTH_SG; // For IV
2300 dst_size = get_space_for_phys_dsgl(dnents);
2301 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2302 - sizeof(chcr_req->key_ctx);
2303 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2304 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2306 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2307 : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2309 transhdr_len += temp;
2310 transhdr_len = roundup(transhdr_len, 16);
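/*
 * reqctx->imm (set above) chooses between copying the whole payload
 * inline into the WR (rounded up to 16 bytes) and referencing it via a
 * ULPTX SGL; sgl_len() counts 8-byte flits, hence the * 8.
 */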
2312 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2313 transhdr_len, reqctx->op)) {
2314 atomic_inc(&adap->chcr_stats.fallback);
2315 chcr_aead_common_exit(req);
2316 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2318 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2324 chcr_req = __skb_put_zero(skb, transhdr_len);
2326 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2329 * Input order is AAD, IV and Payload, where the IV should be included
2330 * as part of the authdata. All other fields should be filled according
2331 * to the hardware spec.
2332 */
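/*
 * Roughly (a sketch, not the authoritative wire layout):
 *
 *   | AAD (assoclen) | IV (16) | payload (cryptlen) |
 *   |<-------------- authenticated --------------->|
 *                             |<---- ciphered ---->|
 */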
2333 chcr_req->sec_cpl.op_ivinsrtofst =
2334 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2336 chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2337 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2338 assoclen ? 1 : 0, assoclen,
2340 (temp & 0x1F0) >> 4);
2341 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2343 null ? 0 : assoclen + IV + 1,
2345 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2346 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2347 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2349 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2350 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2351 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2353 actx->auth_mode, aeadctx->hmac_ctrl,
2355 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2358 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2359 if (reqctx->op == CHCR_ENCRYPT_OP ||
2360 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2361 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2362 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2363 aeadctx->enckey_len);
2365 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2366 aeadctx->enckey_len);
2368 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2369 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2370 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2371 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2372 memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2373 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2374 CTR_RFC3686_IV_SIZE);
2375 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2376 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2378 memcpy(reqctx->iv, req->iv, IV);
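/*
 * For the RFC 3686 branch above, the assembled 16-byte counter block is
 * (illustratively) nonce[4] | iv[8] | 00 00 00 01, the initial block
 * counter of 1 that RFC 3686 prescribes.
 */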
2380 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2381 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2382 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2383 chcr_add_aead_src_ent(req, ulptx, assoclen);
2384 atomic_inc(&adap->chcr_stats.cipher_rqst);
2385 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2386 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2387 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2388 transhdr_len, temp, 0);
2393 chcr_aead_common_exit(req);
2395 return ERR_PTR(error);
2398 int chcr_aead_dma_map(struct device *dev,
2399 struct aead_request *req,
2400 unsigned short op_type)
2403 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2404 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2405 unsigned int authsize = crypto_aead_authsize(tfm);
2408 dst_size = req->assoclen + req->cryptlen + (op_type ?
2409 -authsize : authsize);
2410 if (!req->cryptlen || !dst_size)
2412 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2414 if (dma_mapping_error(dev, reqctx->iv_dma))
2417 reqctx->b0_dma = reqctx->iv_dma + IV;
2420 if (req->src == req->dst) {
2421 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2426 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2430 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2433 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2441 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2445 void chcr_aead_dma_unmap(struct device *dev,
2446 struct aead_request *req,
2447 unsigned short op_type)
2449 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2450 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2451 unsigned int authsize = crypto_aead_authsize(tfm);
2454 dst_size = req->assoclen + req->cryptlen + (op_type ?
2455 -authsize : authsize);
2456 if (!req->cryptlen || !dst_size)
2459 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2461 if (req->src == req->dst) {
2462 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2465 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2467 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2472 void chcr_add_aead_src_ent(struct aead_request *req,
2473 struct ulptx_sgl *ulptx,
2474 unsigned int assoclen)
2476 struct ulptx_walk ulp_walk;
2477 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2480 u8 *buf = (u8 *)ulptx;
2482 if (reqctx->b0_len) {
2483 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2484 buf += reqctx->b0_len;
2486 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2489 memcpy(buf, reqctx->iv, IV);
2491 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2492 buf, req->cryptlen, req->assoclen);
2494 ulptx_walk_init(&ulp_walk, ulptx);
2496 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2498 ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2499 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2500 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2502 ulptx_walk_end(&ulp_walk);
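/*
 * The destination gather list built below mirrors the source layout,
 * [B0] | AAD | IV | payload, with the payload length adjusted by the
 * tag: shorter by authsize on decrypt, longer by authsize on encrypt.
 */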
2506 void chcr_add_aead_dst_ent(struct aead_request *req,
2507 struct cpl_rx_phys_dsgl *phys_cpl,
2508 unsigned int assoclen,
2511 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2512 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2513 struct dsgl_walk dsgl_walk;
2514 unsigned int authsize = crypto_aead_authsize(tfm);
2515 struct chcr_context *ctx = a_ctx(tfm);
2518 dsgl_walk_init(&dsgl_walk, phys_cpl);
2520 dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2521 dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2522 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2523 temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
2524 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2525 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2528 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2530 struct cipher_wr_param *wrparam)
2532 struct ulptx_walk ulp_walk;
2533 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2536 memcpy(buf, reqctx->iv, IV);
2539 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2540 buf, wrparam->bytes, reqctx->processed);
2542 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2543 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2545 reqctx->srcsg = ulp_walk.last_sg;
2546 reqctx->src_ofst = ulp_walk.last_sg_len;
2547 ulptx_walk_end(&ulp_walk);
2551 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2552 struct cpl_rx_phys_dsgl *phys_cpl,
2553 struct cipher_wr_param *wrparam,
2556 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2557 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2558 struct chcr_context *ctx = c_ctx(tfm);
2559 struct dsgl_walk dsgl_walk;
2561 dsgl_walk_init(&dsgl_walk, phys_cpl);
2562 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2564 reqctx->dstsg = dsgl_walk.last_sg;
2565 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2567 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2570 void chcr_add_hash_src_ent(struct ahash_request *req,
2571 struct ulptx_sgl *ulptx,
2572 struct hash_wr_param *param)
2574 struct ulptx_walk ulp_walk;
2575 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2577 if (reqctx->hctx_wr.imm) {
2578 u8 *buf = (u8 *)ulptx;
2580 if (param->bfr_len) {
2581 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2582 buf += param->bfr_len;
2585 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2586 sg_nents(reqctx->hctx_wr.srcsg), buf,
2589 ulptx_walk_init(&ulp_walk, ulptx);
2591 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2592 &reqctx->hctx_wr.dma_addr);
2593 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2594 param->sg_len, reqctx->hctx_wr.src_ofst);
2595 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2596 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2597 ulptx_walk_end(&ulp_walk);
2601 int chcr_hash_dma_map(struct device *dev,
2602 struct ahash_request *req)
2604 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2609 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2613 req_ctx->hctx_wr.is_sg_map = 1;
2617 void chcr_hash_dma_unmap(struct device *dev,
2618 struct ahash_request *req)
2620 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2625 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2627 req_ctx->hctx_wr.is_sg_map = 0;
2631 int chcr_cipher_dma_map(struct device *dev,
2632 struct ablkcipher_request *req)
2636 if (req->src == req->dst) {
2637 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2642 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2646 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2649 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2660 void chcr_cipher_dma_unmap(struct device *dev,
2661 struct ablkcipher_request *req)
2663 if (req->src == req->dst) {
2664 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2667 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2669 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2674 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2678 memset(block, 0, csize);
2679 block += csize;
2681 if (csize >= 4)
2682 csize = 4;
2683 else if (msglen > (unsigned int)(1 << (8 * csize)))
2684 return -EOVERFLOW;
2686 data = cpu_to_be32(msglen);
2687 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2689 return 0;
2690 }
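/*
 * Worked example (illustrative): set_msg_len(b0 + 12, 0x1234, 4) zeroes
 * b0[12..15] and then stores the big-endian length there, leaving
 * b0[12..15] = 00 00 12 34, the trailing length field of CCM's B0 block.
 */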
2692 static void generate_b0(struct aead_request *req,
2693 struct chcr_aead_ctx *aeadctx,
2694 unsigned short op_type)
2696 unsigned int l, lp, m;
2697 int rc;
2698 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2699 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2700 u8 *b0 = reqctx->scratch_pad;
2702 m = crypto_aead_authsize(aead);
2704 memcpy(b0, reqctx->iv, 16);
2706 lp = b0[0];
2707 l = lp + 1;
2709 /* set m, bits 3-5 */
2710 *b0 |= (8 * ((m - 2) / 2));
2712 /* set adata, bit 6, if associated data is used */
2713 if (req->assoclen)
2714 *b0 |= 64;
2715 rc = set_msg_len(b0 + 16 - l,
2716 (op_type == CHCR_DECRYPT_OP) ?
2717 req->cryptlen - m : req->cryptlen, l);
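/*
 * Illustrative finished B0: for authsize m = 16, AAD present and
 * iv[0] = L' = 3, the flags byte is 0x40 | (((16 - 2) / 2) << 3) | 3 =
 * 0x7b, and the last l = 4 bytes carry the big-endian message length
 * written by set_msg_len().
 */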
2720 static inline int crypto_ccm_check_iv(const u8 *iv)
2722 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2723 if (iv[0] < 1 || iv[0] > 7)
2724 return -EINVAL;
2726 return 0;
2727 }
2729 static int ccm_format_packet(struct aead_request *req,
2730 struct chcr_aead_ctx *aeadctx,
2731 unsigned int sub_type,
2732 unsigned short op_type,
2733 unsigned int assoclen)
2735 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2738 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2739 reqctx->iv[0] = 3;
2740 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2741 memcpy(reqctx->iv + 4, req->iv, 8);
2742 memset(reqctx->iv + 12, 0, 4);
2743 } else {
2744 memcpy(reqctx->iv, req->iv, 16);
2745 }
2746 if (assoclen)
2747 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2748 htons(assoclen);
2750 generate_b0(req, aeadctx, op_type);
2751 /* zero the ctr value */
2752 memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
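/* e.g. iv[0] = L' = 3 clears iv[12..15], the (L' + 1)-byte counter */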
2756 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2757 unsigned int dst_size,
2758 struct aead_request *req,
2759 unsigned short op_type)
2761 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2762 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2763 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2764 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2765 unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2766 unsigned int ccm_xtra;
2767 unsigned char tag_offset = 0, auth_offset = 0;
2768 unsigned int assoclen;
2770 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2771 assoclen = req->assoclen - 8;
2773 assoclen = req->assoclen;
2774 ccm_xtra = CCM_B0_SIZE +
2775 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2777 auth_offset = req->cryptlen ?
2778 (assoclen + IV + 1 + ccm_xtra) : 0;
2779 if (op_type == CHCR_DECRYPT_OP) {
2780 if (crypto_aead_authsize(tfm) != req->cryptlen)
2781 tag_offset = crypto_aead_authsize(tfm);
2787 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2788 2, assoclen + 1 + ccm_xtra);
2790 htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2791 /* For CCM there will be b0 always. So AAD start will be 1 always */
2792 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2793 1, assoclen + ccm_xtra, assoclen
2794 + IV + 1 + ccm_xtra, 0);
2796 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2797 auth_offset, tag_offset,
2798 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2799 crypto_aead_authsize(tfm));
2800 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2801 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2802 cipher_mode, mac_mode,
2803 aeadctx->hmac_ctrl, IV >> 1);
2805 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2809 static int aead_ccm_validate_input(unsigned short op_type,
2810 struct aead_request *req,
2811 struct chcr_aead_ctx *aeadctx,
2812 unsigned int sub_type)
2814 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2815 if (crypto_ccm_check_iv(req->iv)) {
2816 pr_err("CCM: IV check fails\n");
2817 return -EINVAL;
2818 }
2819 } else {
2820 if (req->assoclen != 16 && req->assoclen != 20) {
2821 pr_err("RFC4309: Invalid AAD length %d\n",
2822 req->assoclen);
2823 return -EINVAL;
2824 }
2825 }
2826 return 0;
2827 }
2829 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2833 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2834 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2835 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2836 struct sk_buff *skb = NULL;
2837 struct chcr_wr *chcr_req;
2838 struct cpl_rx_phys_dsgl *phys_cpl;
2839 struct ulptx_sgl *ulptx;
2840 unsigned int transhdr_len;
2841 unsigned int dst_size = 0, kctx_len, dnents, temp;
2842 unsigned int sub_type, assoclen = req->assoclen;
2843 unsigned int authsize = crypto_aead_authsize(tfm);
2844 int error = -EINVAL;
2845 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2847 struct adapter *adap = padap(a_ctx(tfm)->dev);
2849 sub_type = get_aead_subtype(tfm);
2850 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2852 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2853 error = chcr_aead_common_init(req);
2855 return ERR_PTR(error);
2857 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2860 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2861 dnents += sg_nents_xlen(req->dst, req->cryptlen
2862 + (reqctx->op ? -authsize : authsize),
2863 CHCR_DST_SG_SIZE, req->assoclen);
2864 dnents += MIN_CCM_SG; // For IV and B0
2865 dst_size = get_space_for_phys_dsgl(dnents);
2866 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2867 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2868 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2869 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2870 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2871 reqctx->b0_len, 16) :
2872 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2874 transhdr_len += temp;
2875 transhdr_len = roundup(transhdr_len, 16);
2877 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2878 reqctx->b0_len, transhdr_len, reqctx->op)) {
2879 atomic_inc(&adap->chcr_stats.fallback);
2880 chcr_aead_common_exit(req);
2881 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2883 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2890 chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
2892 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2894 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2895 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2896 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2897 aeadctx->key, aeadctx->enckey_len);
2899 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2900 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2901 error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
2904 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2905 chcr_add_aead_src_ent(req, ulptx, assoclen);
2907 atomic_inc(&adap->chcr_stats.aead_rqst);
2908 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2909 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2910 reqctx->b0_len) : 0);
2911 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2912 transhdr_len, temp, 0);
2919 chcr_aead_common_exit(req);
2920 return ERR_PTR(error);
2923 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2927 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2928 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2929 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2930 struct sk_buff *skb = NULL;
2931 struct chcr_wr *chcr_req;
2932 struct cpl_rx_phys_dsgl *phys_cpl;
2933 struct ulptx_sgl *ulptx;
2934 unsigned int transhdr_len, dnents = 0;
2935 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2936 unsigned int authsize = crypto_aead_authsize(tfm);
2937 int error = -EINVAL;
2938 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2940 struct adapter *adap = padap(a_ctx(tfm)->dev);
2942 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2943 assoclen = req->assoclen - 8;
2946 error = chcr_aead_common_init(req);
2948 return ERR_PTR(error);
2949 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2950 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2951 (reqctx->op ? -authsize : authsize),
2952 CHCR_DST_SG_SIZE, req->assoclen);
2953 dnents += MIN_GCM_SG; // For IV
2954 dst_size = get_space_for_phys_dsgl(dnents);
2955 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2956 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2957 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2959 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2960 (sgl_len(reqctx->src_nents +
2961 reqctx->aad_nents + MIN_GCM_SG) * 8);
2962 transhdr_len += temp;
2963 transhdr_len = roundup(transhdr_len, 16);
2964 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2965 transhdr_len, reqctx->op)) {
2967 atomic_inc(&adap->chcr_stats.fallback);
2968 chcr_aead_common_exit(req);
2969 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2971 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2977 chcr_req = __skb_put_zero(skb, transhdr_len);
2979 // Offset of tag from end
2980 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2981 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2982 a_ctx(tfm)->dev->rx_channel_id, 2,
2984 chcr_req->sec_cpl.pldlen =
2985 htonl(assoclen + IV + req->cryptlen);
2986 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2987 assoclen ? 1 : 0, assoclen,
2988 assoclen + IV + 1, 0);
2989 chcr_req->sec_cpl.cipherstop_lo_authinsert =
2990 FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
2992 chcr_req->sec_cpl.seqno_numivs =
2993 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
2994 CHCR_ENCRYPT_OP) ? 1 : 0,
2995 CHCR_SCMD_CIPHER_MODE_AES_GCM,
2996 CHCR_SCMD_AUTH_MODE_GHASH,
2997 aeadctx->hmac_ctrl, IV >> 1);
2998 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3000 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3001 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3002 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3003 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3005 /* prepare a 16-byte iv: SALT | IV | 0x00000001 */
3007 if (get_aead_subtype(tfm) ==
3008 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3009 memcpy(reqctx->iv, aeadctx->salt, 4);
3010 memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
3012 memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
3014 *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
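/*
 * The result is GCM's pre-counter block J0 (illustrative):
 * salt[4] | iv[8] | 00 00 00 01 for rfc4106, or the 12-byte IV followed
 * by 00 00 00 01 for plain gcm(aes).
 */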
3016 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3017 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
3019 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
3020 chcr_add_aead_src_ent(req, ulptx, assoclen);
3021 atomic_inc(&adap->chcr_stats.aead_rqst);
3022 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3023 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3024 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3025 transhdr_len, temp, reqctx->verify);
3030 chcr_aead_common_exit(req);
3031 return ERR_PTR(error);
3036 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3038 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3039 struct aead_alg *alg = crypto_aead_alg(tfm);
3041 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3042 CRYPTO_ALG_NEED_FALLBACK |
3044 if (IS_ERR(aeadctx->sw_cipher))
3045 return PTR_ERR(aeadctx->sw_cipher);
3046 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3047 sizeof(struct aead_request) +
3048 crypto_aead_reqsize(aeadctx->sw_cipher)));
3049 return chcr_device_init(a_ctx(tfm));
3052 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3054 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3056 crypto_free_aead(aeadctx->sw_cipher);
3059 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3060 unsigned int authsize)
3062 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3064 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3065 aeadctx->mayverify = VERIFY_HW;
3066 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3068 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3069 unsigned int authsize)
3071 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3072 u32 maxauth = crypto_aead_maxauthsize(tfm);
3074 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3075 * does not hold for SHA1, so the authsize == 12 check must come
3076 * before the authsize == (maxauth >> 1) check.
3077 */
3078 if (authsize == ICV_4) {
3079 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3080 aeadctx->mayverify = VERIFY_HW;
3081 } else if (authsize == ICV_6) {
3082 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3083 aeadctx->mayverify = VERIFY_HW;
3084 } else if (authsize == ICV_10) {
3085 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3086 aeadctx->mayverify = VERIFY_HW;
3087 } else if (authsize == ICV_12) {
3088 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3089 aeadctx->mayverify = VERIFY_HW;
3090 } else if (authsize == ICV_14) {
3091 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3092 aeadctx->mayverify = VERIFY_HW;
3093 } else if (authsize == (maxauth >> 1)) {
3094 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3095 aeadctx->mayverify = VERIFY_HW;
3096 } else if (authsize == maxauth) {
3097 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3098 aeadctx->mayverify = VERIFY_HW;
3099 } else {
3100 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3101 aeadctx->mayverify = VERIFY_SW;
3102 }
3103 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3107 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3109 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3111 switch (authsize) {
3112 case ICV_4:
3113 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3114 aeadctx->mayverify = VERIFY_HW;
3115 break;
3116 case ICV_8:
3117 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3118 aeadctx->mayverify = VERIFY_HW;
3119 break;
3120 case ICV_12:
3121 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3122 aeadctx->mayverify = VERIFY_HW;
3123 break;
3124 case ICV_14:
3125 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3126 aeadctx->mayverify = VERIFY_HW;
3127 break;
3128 case ICV_16:
3129 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3130 aeadctx->mayverify = VERIFY_HW;
3131 break;
3132 case ICV_13:
3133 case ICV_15:
3134 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3135 aeadctx->mayverify = VERIFY_SW;
3136 break;
3137 default:
3139 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3140 CRYPTO_TFM_RES_BAD_KEY_LEN);
3141 return -EINVAL;
3142 }
3143 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3146 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3147 unsigned int authsize)
3149 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3151 switch (authsize) {
3152 case ICV_8:
3153 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3154 aeadctx->mayverify = VERIFY_HW;
3155 break;
3156 case ICV_12:
3157 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3158 aeadctx->mayverify = VERIFY_HW;
3159 break;
3160 case ICV_16:
3161 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3162 aeadctx->mayverify = VERIFY_HW;
3163 break;
3164 default:
3165 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3166 CRYPTO_TFM_RES_BAD_KEY_LEN);
3167 return -EINVAL;
3168 }
3169 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3172 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3173 unsigned int authsize)
3175 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3177 switch (authsize) {
3178 case ICV_4:
3179 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3180 aeadctx->mayverify = VERIFY_HW;
3181 break;
3182 case ICV_6:
3183 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3184 aeadctx->mayverify = VERIFY_HW;
3185 break;
3186 case ICV_8:
3187 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3188 aeadctx->mayverify = VERIFY_HW;
3189 break;
3190 case ICV_10:
3191 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3192 aeadctx->mayverify = VERIFY_HW;
3193 break;
3194 case ICV_12:
3195 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3196 aeadctx->mayverify = VERIFY_HW;
3197 break;
3198 case ICV_14:
3199 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3200 aeadctx->mayverify = VERIFY_HW;
3201 break;
3202 case ICV_16:
3203 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3204 aeadctx->mayverify = VERIFY_HW;
3205 break;
3206 default:
3207 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3208 CRYPTO_TFM_RES_BAD_KEY_LEN);
3209 return -EINVAL;
3210 }
3211 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3214 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3216 unsigned int keylen)
3218 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3219 unsigned char ck_size, mk_size;
3220 int key_ctx_size = 0;
3222 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3223 if (keylen == AES_KEYSIZE_128) {
3224 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3225 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3226 } else if (keylen == AES_KEYSIZE_192) {
3227 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3228 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3229 } else if (keylen == AES_KEYSIZE_256) {
3230 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3231 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3233 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3234 CRYPTO_TFM_RES_BAD_KEY_LEN);
3235 aeadctx->enckey_len = 0;
3238 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3240 memcpy(aeadctx->key, key, keylen);
3241 aeadctx->enckey_len = keylen;
3246 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3248 unsigned int keylen)
3250 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3253 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3254 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3255 CRYPTO_TFM_REQ_MASK);
3256 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3257 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3258 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3259 CRYPTO_TFM_RES_MASK);
3262 return chcr_ccm_common_setkey(aead, key, keylen);
3265 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3266 unsigned int keylen)
3268 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3272 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3273 CRYPTO_TFM_RES_BAD_KEY_LEN);
3274 aeadctx->enckey_len = 0;
3277 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3278 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3279 CRYPTO_TFM_REQ_MASK);
3280 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3281 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3282 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3283 CRYPTO_TFM_RES_MASK);
3287 memcpy(aeadctx->salt, key + keylen, 3);
3288 return chcr_ccm_common_setkey(aead, key, keylen);
3291 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3292 unsigned int keylen)
3294 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3295 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3296 struct crypto_cipher *cipher;
3297 unsigned int ck_size;
3298 int ret = 0, key_ctx_size = 0;
3300 aeadctx->enckey_len = 0;
3301 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3302 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3303 & CRYPTO_TFM_REQ_MASK);
3304 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3305 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3306 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3307 CRYPTO_TFM_RES_MASK);
3311 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3313 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3314 memcpy(aeadctx->salt, key + keylen, 4);
3316 if (keylen == AES_KEYSIZE_128) {
3317 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3318 } else if (keylen == AES_KEYSIZE_192) {
3319 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3320 } else if (keylen == AES_KEYSIZE_256) {
3321 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3323 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3324 CRYPTO_TFM_RES_BAD_KEY_LEN);
3325 pr_err("GCM: Invalid key length %d\n", keylen);
3330 memcpy(aeadctx->key, key, keylen);
3331 aeadctx->enckey_len = keylen;
3332 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3334 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3335 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3338 /* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
3339 * key context.
3340 */
3341 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3342 if (IS_ERR(cipher)) {
3343 aeadctx->enckey_len = 0;
3348 ret = crypto_cipher_setkey(cipher, key, keylen);
3350 aeadctx->enckey_len = 0;
3353 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3354 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
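/* ghash_h now holds H = AES_K(0^128), GCM's hash subkey */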
3357 crypto_free_cipher(cipher);
3362 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3363 unsigned int keylen)
3365 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3366 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3367 /* keys holds both the auth and the cipher key */
3368 struct crypto_authenc_keys keys;
3369 unsigned int bs, subtype;
3370 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3371 int err = 0, i, key_ctx_len = 0;
3372 unsigned char ck_size = 0;
3373 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3374 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3375 struct algo_param param;
3379 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3380 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3381 & CRYPTO_TFM_REQ_MASK);
3382 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3383 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3384 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3385 & CRYPTO_TFM_RES_MASK);
3389 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3390 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3394 if (get_alg_config(¶m, max_authsize)) {
3395 pr_err("chcr : Unsupported digest size\n");
3398 subtype = get_aead_subtype(authenc);
3399 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3400 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3401 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3403 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3404 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3405 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3407 if (keys.enckeylen == AES_KEYSIZE_128) {
3408 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3409 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3410 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3411 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3412 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3414 pr_err("chcr : Unsupported cipher key\n");
3418 /* Copy only the encryption key. The auth key is used here to generate
3419 * h(ipad) and h(opad), so it is not needed again. authkeylen is capped
3420 * at the hash digest size.
3421 */
3422 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3423 aeadctx->enckey_len = keys.enckeylen;
3424 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3425 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3427 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3428 aeadctx->enckey_len << 3);
3430 base_hash = chcr_alloc_shash(max_authsize);
3431 if (IS_ERR(base_hash)) {
3432 pr_err("chcr : Base driver cannot be loaded\n");
3433 aeadctx->enckey_len = 0;
3434 memzero_explicit(&keys, sizeof(keys));
3438 SHASH_DESC_ON_STACK(shash, base_hash);
3440 shash->tfm = base_hash;
3441 shash->flags = crypto_shash_get_flags(base_hash);
3442 bs = crypto_shash_blocksize(base_hash);
3443 align = KEYCTX_ALIGN_PAD(max_authsize);
3444 o_ptr = actx->h_iopad + param.result_size + align;
3446 if (keys.authkeylen > bs) {
3447 err = crypto_shash_digest(shash, keys.authkey,
3451 pr_err("chcr : Base driver cannot be loaded\n");
3454 keys.authkeylen = max_authsize;
3456 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3458 /* Compute the ipad digest */
3459 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3460 memcpy(pad, o_ptr, keys.authkeylen);
3461 for (i = 0; i < bs >> 2; i++)
3462 *((unsigned int *)pad + i) ^= IPAD_DATA;
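/* IPAD_DATA is HMAC's 0x36 ipad byte replicated across a 32-bit word */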
3464 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3467 /* Compute the opad-digest */
3468 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3469 memcpy(pad, o_ptr, keys.authkeylen);
3470 for (i = 0; i < bs >> 2; i++)
3471 *((unsigned int *)pad + i) ^= OPAD_DATA;
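/* OPAD_DATA likewise replicates HMAC's 0x5c opad byte */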
3473 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3476 /* convert the ipad and opad digest to network order */
3477 chcr_change_order(actx->h_iopad, param.result_size);
3478 chcr_change_order(o_ptr, param.result_size);
3479 key_ctx_len = sizeof(struct _key_ctx) +
3480 roundup(keys.enckeylen, 16) +
3481 (param.result_size + align) * 2;
3482 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3483 0, 1, key_ctx_len >> 4);
3484 actx->auth_mode = param.auth_mode;
3485 chcr_free_shash(base_hash);
3487 memzero_explicit(&keys, sizeof(keys));
3491 aeadctx->enckey_len = 0;
3492 memzero_explicit(&keys, sizeof(keys));
3493 if (!IS_ERR(base_hash))
3494 chcr_free_shash(base_hash);
3498 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3499 const u8 *key, unsigned int keylen)
3501 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3502 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3503 struct crypto_authenc_keys keys;
3505 /* keys holds both the auth and the cipher key */
3506 unsigned int subtype;
3507 int key_ctx_len = 0;
3508 unsigned char ck_size = 0;
3510 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3511 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3512 & CRYPTO_TFM_REQ_MASK);
3513 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3514 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3515 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3516 & CRYPTO_TFM_RES_MASK);
3520 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3521 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3524 subtype = get_aead_subtype(authenc);
3525 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3526 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3527 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3529 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3530 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3531 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3533 if (keys.enckeylen == AES_KEYSIZE_128) {
3534 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3535 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3536 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3537 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3538 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3540 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3543 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3544 aeadctx->enckey_len = keys.enckeylen;
3545 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3546 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3547 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3548 aeadctx->enckey_len << 3);
3550 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3552 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3553 0, key_ctx_len >> 4);
3554 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3555 memzero_explicit(&keys, sizeof(keys));
3558 aeadctx->enckey_len = 0;
3559 memzero_explicit(&keys, sizeof(keys));
3563 static int chcr_aead_op(struct aead_request *req,
3565 create_wr_t create_wr_fn)
3567 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3568 struct uld_ctx *u_ctx;
3569 struct sk_buff *skb;
3572 if (!a_ctx(tfm)->dev) {
3573 pr_err("chcr : %s : No crypto device.\n", __func__);
3576 u_ctx = ULD_CTX(a_ctx(tfm));
3577 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3578 a_ctx(tfm)->tx_qidx)) {
3580 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3584 /* Form a WR from req */
3585 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3587 if (IS_ERR(skb) || !skb)
3588 return PTR_ERR(skb);
3590 skb->dev = u_ctx->lldi.ports[0];
3591 set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3593 return isfull ? -EBUSY : -EINPROGRESS;
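/*
 * -EINPROGRESS: the WR was queued and completion arrives asynchronously;
 * -EBUSY: it was queued while the queue was full, which callers using
 * CRYPTO_TFM_REQ_MAY_BACKLOG treat as a throttling signal.
 */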
3596 static int chcr_aead_encrypt(struct aead_request *req)
3598 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3599 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3601 reqctx->verify = VERIFY_HW;
3602 reqctx->op = CHCR_ENCRYPT_OP;
3604 switch (get_aead_subtype(tfm)) {
3605 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3606 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3607 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3608 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3609 return chcr_aead_op(req, 0, create_authenc_wr);
3610 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3611 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3612 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3614 return chcr_aead_op(req, 0, create_gcm_wr);
3618 static int chcr_aead_decrypt(struct aead_request *req)
3620 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3621 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3622 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3625 if (aeadctx->mayverify == VERIFY_SW) {
3626 size = crypto_aead_maxauthsize(tfm);
3627 reqctx->verify = VERIFY_SW;
3630 reqctx->verify = VERIFY_HW;
3632 reqctx->op = CHCR_DECRYPT_OP;
3633 switch (get_aead_subtype(tfm)) {
3634 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3635 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3636 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3637 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3638 return chcr_aead_op(req, size, create_authenc_wr);
3639 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3640 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3641 return chcr_aead_op(req, size, create_aead_ccm_wr);
3643 return chcr_aead_op(req, size, create_gcm_wr);
3647 static struct chcr_alg_template driver_algs[] = {
3650 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3653 .cra_name = "cbc(aes)",
3654 .cra_driver_name = "cbc-aes-chcr",
3655 .cra_blocksize = AES_BLOCK_SIZE,
3656 .cra_init = chcr_cra_init,
3657 .cra_exit = chcr_cra_exit,
3658 .cra_u.ablkcipher = {
3659 .min_keysize = AES_MIN_KEY_SIZE,
3660 .max_keysize = AES_MAX_KEY_SIZE,
3661 .ivsize = AES_BLOCK_SIZE,
3662 .setkey = chcr_aes_cbc_setkey,
3663 .encrypt = chcr_aes_encrypt,
3664 .decrypt = chcr_aes_decrypt,
3669 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3672 .cra_name = "xts(aes)",
3673 .cra_driver_name = "xts-aes-chcr",
3674 .cra_blocksize = AES_BLOCK_SIZE,
3675 .cra_init = chcr_cra_init,
3677 .cra_u .ablkcipher = {
3678 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3679 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3680 .ivsize = AES_BLOCK_SIZE,
3681 .setkey = chcr_aes_xts_setkey,
3682 .encrypt = chcr_aes_encrypt,
3683 .decrypt = chcr_aes_decrypt,
3688 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3691 .cra_name = "ctr(aes)",
3692 .cra_driver_name = "ctr-aes-chcr",
3694 .cra_init = chcr_cra_init,
3695 .cra_exit = chcr_cra_exit,
3696 .cra_u.ablkcipher = {
3697 .min_keysize = AES_MIN_KEY_SIZE,
3698 .max_keysize = AES_MAX_KEY_SIZE,
3699 .ivsize = AES_BLOCK_SIZE,
3700 .setkey = chcr_aes_ctr_setkey,
3701 .encrypt = chcr_aes_encrypt,
3702 .decrypt = chcr_aes_decrypt,
3707 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3708 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3711 .cra_name = "rfc3686(ctr(aes))",
3712 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3714 .cra_init = chcr_rfc3686_init,
3715 .cra_exit = chcr_cra_exit,
3716 .cra_u.ablkcipher = {
3717 .min_keysize = AES_MIN_KEY_SIZE +
3718 CTR_RFC3686_NONCE_SIZE,
3719 .max_keysize = AES_MAX_KEY_SIZE +
3720 CTR_RFC3686_NONCE_SIZE,
3721 .ivsize = CTR_RFC3686_IV_SIZE,
3722 .setkey = chcr_aes_rfc3686_setkey,
3723 .encrypt = chcr_aes_encrypt,
3724 .decrypt = chcr_aes_decrypt,
3731 .type = CRYPTO_ALG_TYPE_AHASH,
3734 .halg.digestsize = SHA1_DIGEST_SIZE,
3737 .cra_driver_name = "sha1-chcr",
3738 .cra_blocksize = SHA1_BLOCK_SIZE,
3743 .type = CRYPTO_ALG_TYPE_AHASH,
3746 .halg.digestsize = SHA256_DIGEST_SIZE,
3748 .cra_name = "sha256",
3749 .cra_driver_name = "sha256-chcr",
3750 .cra_blocksize = SHA256_BLOCK_SIZE,
3755 .type = CRYPTO_ALG_TYPE_AHASH,
3758 .halg.digestsize = SHA224_DIGEST_SIZE,
3760 .cra_name = "sha224",
3761 .cra_driver_name = "sha224-chcr",
3762 .cra_blocksize = SHA224_BLOCK_SIZE,
3767 .type = CRYPTO_ALG_TYPE_AHASH,
3770 .halg.digestsize = SHA384_DIGEST_SIZE,
3772 .cra_name = "sha384",
3773 .cra_driver_name = "sha384-chcr",
3774 .cra_blocksize = SHA384_BLOCK_SIZE,
3779 .type = CRYPTO_ALG_TYPE_AHASH,
3782 .halg.digestsize = SHA512_DIGEST_SIZE,
3784 .cra_name = "sha512",
3785 .cra_driver_name = "sha512-chcr",
3786 .cra_blocksize = SHA512_BLOCK_SIZE,
3792 .type = CRYPTO_ALG_TYPE_HMAC,
3795 .halg.digestsize = SHA1_DIGEST_SIZE,
3797 .cra_name = "hmac(sha1)",
3798 .cra_driver_name = "hmac-sha1-chcr",
3799 .cra_blocksize = SHA1_BLOCK_SIZE,
3804 .type = CRYPTO_ALG_TYPE_HMAC,
3807 .halg.digestsize = SHA224_DIGEST_SIZE,
3809 .cra_name = "hmac(sha224)",
3810 .cra_driver_name = "hmac-sha224-chcr",
3811 .cra_blocksize = SHA224_BLOCK_SIZE,
3816 .type = CRYPTO_ALG_TYPE_HMAC,
3819 .halg.digestsize = SHA256_DIGEST_SIZE,
3821 .cra_name = "hmac(sha256)",
3822 .cra_driver_name = "hmac-sha256-chcr",
3823 .cra_blocksize = SHA256_BLOCK_SIZE,
3828 .type = CRYPTO_ALG_TYPE_HMAC,
3831 .halg.digestsize = SHA384_DIGEST_SIZE,
3833 .cra_name = "hmac(sha384)",
3834 .cra_driver_name = "hmac-sha384-chcr",
3835 .cra_blocksize = SHA384_BLOCK_SIZE,
3840 .type = CRYPTO_ALG_TYPE_HMAC,
3843 .halg.digestsize = SHA512_DIGEST_SIZE,
3845 .cra_name = "hmac(sha512)",
3846 .cra_driver_name = "hmac-sha512-chcr",
3847 .cra_blocksize = SHA512_BLOCK_SIZE,
3851 /* Add AEAD Algorithms */
3853 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3857 .cra_name = "gcm(aes)",
3858 .cra_driver_name = "gcm-aes-chcr",
3860 .cra_priority = CHCR_AEAD_PRIORITY,
3861 .cra_ctxsize = sizeof(struct chcr_context) +
3862 sizeof(struct chcr_aead_ctx) +
3863 sizeof(struct chcr_gcm_ctx),
3865 .ivsize = GCM_AES_IV_SIZE,
3866 .maxauthsize = GHASH_DIGEST_SIZE,
3867 .setkey = chcr_gcm_setkey,
3868 .setauthsize = chcr_gcm_setauthsize,
3872 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3876 .cra_name = "rfc4106(gcm(aes))",
3877 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3879 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3880 .cra_ctxsize = sizeof(struct chcr_context) +
3881 sizeof(struct chcr_aead_ctx) +
3882 sizeof(struct chcr_gcm_ctx),
3885 .ivsize = GCM_RFC4106_IV_SIZE,
3886 .maxauthsize = GHASH_DIGEST_SIZE,
3887 .setkey = chcr_gcm_setkey,
3888 .setauthsize = chcr_4106_4309_setauthsize,
3892 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3896 .cra_name = "ccm(aes)",
3897 .cra_driver_name = "ccm-aes-chcr",
3899 .cra_priority = CHCR_AEAD_PRIORITY,
3900 .cra_ctxsize = sizeof(struct chcr_context) +
3901 sizeof(struct chcr_aead_ctx),
3904 .ivsize = AES_BLOCK_SIZE,
3905 .maxauthsize = GHASH_DIGEST_SIZE,
3906 .setkey = chcr_aead_ccm_setkey,
3907 .setauthsize = chcr_ccm_setauthsize,
3911 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3915 .cra_name = "rfc4309(ccm(aes))",
3916 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3918 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3919 .cra_ctxsize = sizeof(struct chcr_context) +
3920 sizeof(struct chcr_aead_ctx),
3924 .maxauthsize = GHASH_DIGEST_SIZE,
3925 .setkey = chcr_aead_rfc4309_setkey,
3926 .setauthsize = chcr_4106_4309_setauthsize,
3930 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3934 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3936 "authenc-hmac-sha1-cbc-aes-chcr",
3937 .cra_blocksize = AES_BLOCK_SIZE,
3938 .cra_priority = CHCR_AEAD_PRIORITY,
3939 .cra_ctxsize = sizeof(struct chcr_context) +
3940 sizeof(struct chcr_aead_ctx) +
3941 sizeof(struct chcr_authenc_ctx),
3944 .ivsize = AES_BLOCK_SIZE,
3945 .maxauthsize = SHA1_DIGEST_SIZE,
3946 .setkey = chcr_authenc_setkey,
3947 .setauthsize = chcr_authenc_setauthsize,
3951 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3956 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3958 "authenc-hmac-sha256-cbc-aes-chcr",
3959 .cra_blocksize = AES_BLOCK_SIZE,
3960 .cra_priority = CHCR_AEAD_PRIORITY,
3961 .cra_ctxsize = sizeof(struct chcr_context) +
3962 sizeof(struct chcr_aead_ctx) +
3963 sizeof(struct chcr_authenc_ctx),
3966 .ivsize = AES_BLOCK_SIZE,
3967 .maxauthsize = SHA256_DIGEST_SIZE,
3968 .setkey = chcr_authenc_setkey,
3969 .setauthsize = chcr_authenc_setauthsize,
3973 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3977 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3979 "authenc-hmac-sha224-cbc-aes-chcr",
3980 .cra_blocksize = AES_BLOCK_SIZE,
3981 .cra_priority = CHCR_AEAD_PRIORITY,
3982 .cra_ctxsize = sizeof(struct chcr_context) +
3983 sizeof(struct chcr_aead_ctx) +
3984 sizeof(struct chcr_authenc_ctx),
3986 .ivsize = AES_BLOCK_SIZE,
3987 .maxauthsize = SHA224_DIGEST_SIZE,
3988 .setkey = chcr_authenc_setkey,
3989 .setauthsize = chcr_authenc_setauthsize,
3993 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3997 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3999 "authenc-hmac-sha384-cbc-aes-chcr",
4000 .cra_blocksize = AES_BLOCK_SIZE,
4001 .cra_priority = CHCR_AEAD_PRIORITY,
4002 .cra_ctxsize = sizeof(struct chcr_context) +
4003 sizeof(struct chcr_aead_ctx) +
4004 sizeof(struct chcr_authenc_ctx),
4007 .ivsize = AES_BLOCK_SIZE,
4008 .maxauthsize = SHA384_DIGEST_SIZE,
4009 .setkey = chcr_authenc_setkey,
4010 .setauthsize = chcr_authenc_setauthsize,
4014 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4018 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4020 "authenc-hmac-sha512-cbc-aes-chcr",
4021 .cra_blocksize = AES_BLOCK_SIZE,
4022 .cra_priority = CHCR_AEAD_PRIORITY,
4023 .cra_ctxsize = sizeof(struct chcr_context) +
4024 sizeof(struct chcr_aead_ctx) +
4025 sizeof(struct chcr_authenc_ctx),
4028 .ivsize = AES_BLOCK_SIZE,
4029 .maxauthsize = SHA512_DIGEST_SIZE,
4030 .setkey = chcr_authenc_setkey,
4031 .setauthsize = chcr_authenc_setauthsize,
4035 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4039 .cra_name = "authenc(digest_null,cbc(aes))",
4041 "authenc-digest_null-cbc-aes-chcr",
4042 .cra_blocksize = AES_BLOCK_SIZE,
4043 .cra_priority = CHCR_AEAD_PRIORITY,
4044 .cra_ctxsize = sizeof(struct chcr_context) +
4045 sizeof(struct chcr_aead_ctx) +
4046 sizeof(struct chcr_authenc_ctx),
4049 .ivsize = AES_BLOCK_SIZE,
4051 .setkey = chcr_aead_digest_null_setkey,
4052 .setauthsize = chcr_authenc_null_setauthsize,
4056 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4060 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4062 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4064 .cra_priority = CHCR_AEAD_PRIORITY,
4065 .cra_ctxsize = sizeof(struct chcr_context) +
4066 sizeof(struct chcr_aead_ctx) +
4067 sizeof(struct chcr_authenc_ctx),
4070 .ivsize = CTR_RFC3686_IV_SIZE,
4071 .maxauthsize = SHA1_DIGEST_SIZE,
4072 .setkey = chcr_authenc_setkey,
4073 .setauthsize = chcr_authenc_setauthsize,
4077 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4082 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4084 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4086 .cra_priority = CHCR_AEAD_PRIORITY,
4087 .cra_ctxsize = sizeof(struct chcr_context) +
4088 sizeof(struct chcr_aead_ctx) +
4089 sizeof(struct chcr_authenc_ctx),
4092 .ivsize = CTR_RFC3686_IV_SIZE,
4093 .maxauthsize = SHA256_DIGEST_SIZE,
4094 .setkey = chcr_authenc_setkey,
4095 .setauthsize = chcr_authenc_setauthsize,
4099 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4103 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4105 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4107 .cra_priority = CHCR_AEAD_PRIORITY,
4108 .cra_ctxsize = sizeof(struct chcr_context) +
4109 sizeof(struct chcr_aead_ctx) +
4110 sizeof(struct chcr_authenc_ctx),
4112 .ivsize = CTR_RFC3686_IV_SIZE,
4113 .maxauthsize = SHA224_DIGEST_SIZE,
4114 .setkey = chcr_authenc_setkey,
4115 .setauthsize = chcr_authenc_setauthsize,
4119 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4123 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4125 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4127 .cra_priority = CHCR_AEAD_PRIORITY,
4128 .cra_ctxsize = sizeof(struct chcr_context) +
4129 sizeof(struct chcr_aead_ctx) +
4130 sizeof(struct chcr_authenc_ctx),
4133 .ivsize = CTR_RFC3686_IV_SIZE,
4134 .maxauthsize = SHA384_DIGEST_SIZE,
4135 .setkey = chcr_authenc_setkey,
4136 .setauthsize = chcr_authenc_setauthsize,
4140 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4144 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4146 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4148 .cra_priority = CHCR_AEAD_PRIORITY,
4149 .cra_ctxsize = sizeof(struct chcr_context) +
4150 sizeof(struct chcr_aead_ctx) +
4151 sizeof(struct chcr_authenc_ctx),
4154 .ivsize = CTR_RFC3686_IV_SIZE,
4155 .maxauthsize = SHA512_DIGEST_SIZE,
4156 .setkey = chcr_authenc_setkey,
4157 .setauthsize = chcr_authenc_setauthsize,
4161 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4165 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4167 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4169 .cra_priority = CHCR_AEAD_PRIORITY,
4170 .cra_ctxsize = sizeof(struct chcr_context) +
4171 sizeof(struct chcr_aead_ctx) +
4172 sizeof(struct chcr_authenc_ctx),
4175 .ivsize = CTR_RFC3686_IV_SIZE,
4177 .setkey = chcr_aead_digest_null_setkey,
4178 .setauthsize = chcr_authenc_null_setauthsize,
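/*
 * A minimal sketch of how a kernel client would reach one of the
 * algorithms registered above (assuming the chcr priority wins the
 * lookup):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		// drive it with an aead_request and crypto_aead_encrypt()
 *		crypto_free_aead(tfm);
 *	}
 */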
4185 * chcr_unregister_alg - Deregister crypto algorithms with
4186 * kernel framework.
4187 */
4188 static int chcr_unregister_alg(void)
4192 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4193 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4194 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4195 if (driver_algs[i].is_registered)
4196 crypto_unregister_alg(
4197 &driver_algs[i].alg.crypto);
4199 case CRYPTO_ALG_TYPE_AEAD:
4200 if (driver_algs[i].is_registered)
4201 crypto_unregister_aead(
4202 &driver_algs[i].alg.aead);
4204 case CRYPTO_ALG_TYPE_AHASH:
4205 if (driver_algs[i].is_registered)
4206 crypto_unregister_ahash(
4207 &driver_algs[i].alg.hash);
4210 driver_algs[i].is_registered = 0;
4215 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4216 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4217 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4220 * chcr_register_alg - Register crypto algorithms with kernel framework.
4222 static int chcr_register_alg(void)
4224 struct crypto_alg ai;
4225 struct ahash_alg *a_hash;
4229 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4230 if (driver_algs[i].is_registered)
4232 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4233 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4234 driver_algs[i].alg.crypto.cra_priority =
4236 driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4237 driver_algs[i].alg.crypto.cra_flags =
4238 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4239 CRYPTO_ALG_NEED_FALLBACK;
4240 driver_algs[i].alg.crypto.cra_ctxsize =
4241 sizeof(struct chcr_context) +
4242 sizeof(struct ablk_ctx);
4243 driver_algs[i].alg.crypto.cra_alignmask = 0;
4244 driver_algs[i].alg.crypto.cra_type =
4245 &crypto_ablkcipher_type;
4246 err = crypto_register_alg(&driver_algs[i].alg.crypto);
4247 name = driver_algs[i].alg.crypto.cra_driver_name;
4249 case CRYPTO_ALG_TYPE_AEAD:
4250 driver_algs[i].alg.aead.base.cra_flags =
4251 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4252 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4253 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4254 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4255 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4256 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4257 err = crypto_register_aead(&driver_algs[i].alg.aead);
4258 name = driver_algs[i].alg.aead.base.cra_driver_name;
4260 case CRYPTO_ALG_TYPE_AHASH:
4261 a_hash = &driver_algs[i].alg.hash;
4262 a_hash->update = chcr_ahash_update;
4263 a_hash->final = chcr_ahash_final;
4264 a_hash->finup = chcr_ahash_finup;
4265 a_hash->digest = chcr_ahash_digest;
4266 a_hash->export = chcr_ahash_export;
4267 a_hash->import = chcr_ahash_import;
4268 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4269 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4270 a_hash->halg.base.cra_module = THIS_MODULE;
4271 a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4272 a_hash->halg.base.cra_alignmask = 0;
4273 a_hash->halg.base.cra_exit = NULL;
4275 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4276 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4277 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4278 a_hash->init = chcr_hmac_init;
4279 a_hash->setkey = chcr_ahash_setkey;
4280 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4282 a_hash->init = chcr_sha_init;
4283 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4284 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4286 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4287 ai = driver_algs[i].alg.hash.halg.base;
4288 name = ai.cra_driver_name;
4292 pr_err("chcr : %s : Algorithm registration failed\n",
4296 driver_algs[i].is_registered = 1;
4302 chcr_unregister_alg();
4307 * start_crypto - Register the crypto algorithms.
4308 * This should be called once when the first device comes up. After this,
4309 * the kernel will start calling driver APIs for crypto operations.
4311 int start_crypto(void)
4313 return chcr_register_alg();
4317 * stop_crypto - Deregister all the crypto algorithms with the kernel.
4318 * This should be called once when the last device goes down. After this,
4319 * the kernel will not call the driver APIs for crypto operations.
4321 int stop_crypto(void)
4323 chcr_unregister_alg();