diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index db203f8be4290de3784fe7f5ee7f4175e5426101..bcef76508dfaeeafaedca586b2be01cb7cb9598e 100644
@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 
 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 {
-       return ctx->dev->u_ctx;
+       return container_of(ctx->dev, struct uld_ctx, dev);
 }
 
 static inline int is_ofld_imm(const struct sk_buff *skb)
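
The hunk above drops the stored u_ctx back pointer: struct chcr_dev is now embedded inside struct uld_ctx, and the outer structure is recovered from the member's address with container_of(). A minimal userspace sketch of that pattern (stand-in struct layouts, not the driver's real ones):

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct chcr_dev { int state; };                     /* the member ... */
	struct uld_ctx  { int lldi; struct chcr_dev dev; }; /* ... embedded   */

	int main(void)
	{
		struct uld_ctx u = { .lldi = 42 };
		struct chcr_dev *d = &u.dev;  /* all a chcr_context keeps */
		struct uld_ctx *back = container_of(d, struct uld_ctx, dev);

		printf("%d\n", back->lldi);   /* prints 42: outer ctx recovered */
		return 0;
	}
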
@@ -198,18 +198,43 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
                *err = 0;
 }
 
-static inline void chcr_handle_aead_resp(struct aead_request *req,
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+       int err = 0;
+
+       spin_lock_bh(&dev->lock_chcr_dev);
+       if (dev->state == CHCR_DETACH)
+               err = 1;
+       else
+               atomic_inc(&dev->inflight);
+
+       spin_unlock_bh(&dev->lock_chcr_dev);
+
+       return err;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+       atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
 {
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_dev *dev = a_ctx(tfm)->dev;
 
        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
+       chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
+
+       return err;
 }
 
 static void get_aes_decrypt_key(unsigned char *dec_key,
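
chcr_inc_wrcount()/chcr_dec_wrcount() above form the submit/complete handshake used throughout the rest of this patch: a request takes an inflight reference only while the device is attached, and every completion or error path drops it. Teardown can then flip the state under the lock and wait for inflight to drain before freeing lldi/padap. A compilable userspace sketch of the same idea (pthread mutex and C11 atomics standing in for the kernel primitives):

	#include <pthread.h>
	#include <stdatomic.h>

	enum dev_state { DEV_ATTACHED, DEV_DETACHED };

	struct dev {
		pthread_mutex_t lock;     /* stands in for lock_chcr_dev */
		enum dev_state state;
		atomic_int inflight;      /* work requests in flight     */
	};

	/* Nonzero means the device is detaching and no reference was taken. */
	static int inc_wrcount(struct dev *d)
	{
		int busy = 0;

		pthread_mutex_lock(&d->lock);
		if (d->state == DEV_DETACHED)
			busy = 1;                  /* reject new work */
		else
			atomic_fetch_add(&d->inflight, 1);
		pthread_mutex_unlock(&d->lock);
		return busy;
	}

	static void dec_wrcount(struct dev *d)
	{
		atomic_fetch_sub(&d->inflight, 1); /* completion/error path */
	}
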
@@ -391,7 +416,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                        size_t size,
-                                       dma_addr_t *addr)
+                                       dma_addr_t addr)
 {
        int j;
 
@@ -399,7 +424,7 @@ static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
-       walk->to->addr[j % 8] = cpu_to_be64(*addr);
+       walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
@@ -473,16 +498,16 @@ static inline void ulptx_walk_end(struct ulptx_walk *walk)
 
 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                        size_t size,
-                                       dma_addr_t *addr)
+                                       dma_addr_t addr)
 {
        if (!size)
                return;
 
        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
-               walk->sgl->addr0 = cpu_to_be64(*addr);
+               walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
-               walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+               walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
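
Both walker helpers above switch their address parameter from dma_addr_t * to dma_addr_t. dma_addr_t is a plain scalar handle, so passing it by value removes a needless dereference and lets callers pass computed addresses (as chcr_add_aead_dst_ent does later in this patch with reqctx->iv_dma) without needing an lvalue. Illustrative before/after, assuming 64-bit DMA addresses:

	#include <stdint.h>

	typedef uint64_t dma_addr_t;  /* assumption for this sketch */

	/* before: the pointer forces the caller to have an lvalue */
	static uint64_t pack_addr_old(const dma_addr_t *addr) { return *addr; }

	/* after: a scalar in, a scalar out */
	static uint64_t pack_addr_new(dma_addr_t addr)        { return addr;  }
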
@@ -717,7 +742,7 @@ static inline void create_wreq(struct chcr_context *ctx,
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
-               FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
+               FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
                                !!lcb, ctx->tx_qidx);
 
        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
@@ -773,7 +798,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
-               FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+               FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
 
        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1100,6 +1125,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct  cipher_wr_param wrparam;
+       struct chcr_dev *dev = c_ctx(tfm)->dev;
        int bytes;
 
        if (err)
@@ -1161,6 +1187,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 complete:
+       chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
 }
@@ -1187,7 +1214,10 @@ static int process_cipher(struct ablkcipher_request *req,
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }
-       chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+
+       err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+       if (err)
+               goto error;
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                            AES_MIN_KEY_SIZE +
                                            sizeof(struct cpl_rx_phys_dsgl) +
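
The hunk above fixes a swallowed error: chcr_cipher_dma_map() can fail, and its return value was previously ignored. The fixes in this patch follow the usual acquire/check/unwind convention, sketched below with hypothetical helper names:

	struct req { int dummy; };

	/* Hypothetical stand-ins for chcr_cipher_dma_map() and friends. */
	static int  map_dma(struct req *r)   { (void)r; return 0; }
	static int  build_wr(struct req *r)  { (void)r; return 0; }
	static void unmap_dma(struct req *r) { (void)r; }

	static int submit(struct req *r)
	{
		int err;

		err = map_dma(r);        /* previously: return value dropped */
		if (err)
			goto error;
		err = build_wr(r);
		if (err)
			goto unmap;
		return 0;
	unmap:
		unmap_dma(r);            /* release in reverse order */
	error:
		return err;
	}
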
@@ -1276,15 +1306,21 @@ error:
 static int chcr_aes_encrypt(struct ablkcipher_request *req)
 {
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+       struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct sk_buff *skb = NULL;
        int err, isfull = 0;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 
+       err = chcr_inc_wrcount(dev);
+       if (err)
+               return -ENXIO;
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
-               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                       return -ENOSPC;
+               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+                       err = -ENOSPC;
+                       goto error;
+               }
        }
 
        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1295,15 +1331,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return isfull ? -EBUSY : -EINPROGRESS;
+error:
+       chcr_dec_wrcount(dev);
+       return err;
 }
 
 static int chcr_aes_decrypt(struct ablkcipher_request *req)
 {
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+       struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct sk_buff *skb = NULL;
        int err, isfull = 0;
 
+       err = chcr_inc_wrcount(dev);
+       if (err)
+               return -ENXIO;
+
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
@@ -1311,8 +1355,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
                        return -ENOSPC;
        }
 
-        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
-                             &skb, CHCR_DECRYPT_OP);
+       err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+                            &skb, CHCR_DECRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
@@ -1333,10 +1377,11 @@ static int chcr_device_init(struct chcr_context *ctx)
        if (!ctx->dev) {
                u_ctx = assign_chcr_device();
                if (!u_ctx) {
+                       err = -ENXIO;
                        pr_err("chcr device assignment fails\n");
                        goto out;
                }
-               ctx->dev = u_ctx->dev;
+               ctx->dev = &u_ctx->dev;
                adap = padap(ctx->dev);
                ntxq = u_ctx->lldi.ntxq;
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
@@ -1344,7 +1389,6 @@ static int chcr_device_init(struct chcr_context *ctx)
                spin_lock(&ctx->dev->lock_chcr_dev);
                ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
-               ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
                rxq_idx = ctx->tx_chan_id * rxq_perchan;
                rxq_idx += id % rxq_perchan;
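
With rx_channel_id gone, the context keeps only tx_chan_id, handed out by the two-line round robin above: each new context takes the device's current channel and flips the shared toggle under lock_chcr_dev, so contexts alternate between the adapter's two channels. The toggle in isolation (illustrative; the driver holds the lock around it):

	static int tx_channel_id;     /* shared per-device state */

	static int assign_tx_chan(void)
	{
		int chan = tx_channel_id;

		tx_channel_id = !tx_channel_id;  /* 0, 1, 0, 1, ... */
		return chan;
	}
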
@@ -1498,7 +1542,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
        chcr_req = __skb_put_zero(skb, transhdr_len);
 
        chcr_req->sec_cpl.op_ivinsrtofst =
-               FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+               FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
 
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1562,6 +1606,7 @@ static int chcr_ahash_update(struct ahash_request *req)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
+       struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct sk_buff *skb;
        u8 remainder = 0, bs;
        unsigned int nbytes = req->nbytes;
@@ -1570,12 +1615,6 @@ static int chcr_ahash_update(struct ahash_request *req)
 
        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
-       if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-                                           h_ctx(rtfm)->tx_qidx))) {
-               isfull = 1;
-               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                       return -ENOSPC;
-       }
 
        if (nbytes + req_ctx->reqlen >= bs) {
                remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1586,10 +1625,27 @@ static int chcr_ahash_update(struct ahash_request *req)
                req_ctx->reqlen += nbytes;
                return 0;
        }
+       error = chcr_inc_wrcount(dev);
+       if (error)
+               return -ENXIO;
+       /* Detach state for CHCR means lldi or padap is freed. Increasing
+        * the inflight count for the dev guarantees that lldi and padap
+        * remain valid.
+        */
+       if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+                                           h_ctx(rtfm)->tx_qidx))) {
+               isfull = 1;
+               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+                       error = -ENOSPC;
+                       goto err;
+               }
+       }
+
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-       if (error)
-               return -ENOMEM;
+       if (error) {
+               error = -ENOMEM;
+               goto err;
+       }
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1629,6 +1685,8 @@ static int chcr_ahash_update(struct ahash_request *req)
        return isfull ? -EBUSY : -EINPROGRESS;
 unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+       chcr_dec_wrcount(dev);
        return error;
 }
 
@@ -1646,10 +1704,16 @@ static int chcr_ahash_final(struct ahash_request *req)
 {
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+       struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct hash_wr_param params;
        struct sk_buff *skb;
        struct uld_ctx *u_ctx = NULL;
        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+       int error = -EINVAL;
+
+       error = chcr_inc_wrcount(dev);
+       if (error)
+               return -ENXIO;
 
        chcr_init_hctx_per_wr(req_ctx);
        u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1686,19 +1750,25 @@ static int chcr_ahash_final(struct ahash_request *req)
        }
        params.hash_size = crypto_ahash_digestsize(rtfm);
        skb = create_hash_wr(req, &params);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       if (IS_ERR(skb)) {
+               error = PTR_ERR(skb);
+               goto err;
+       }
        req_ctx->reqlen = 0;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
+err:
+       chcr_dec_wrcount(dev);
+       return error;
 }
 
 static int chcr_ahash_finup(struct ahash_request *req)
 {
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+       struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
@@ -1707,17 +1777,24 @@ static int chcr_ahash_finup(struct ahash_request *req)
 
        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
+       error = chcr_inc_wrcount(dev);
+       if (error)
+               return -ENXIO;
 
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                isfull = 1;
-               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                       return -ENOSPC;
+               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+                       error = -ENOSPC;
+                       goto err;
+               }
        }
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-       if (error)
-               return -ENOMEM;
+       if (error) {
+               error = -ENOMEM;
+               goto err;
+       }
 
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1774,6 +1851,8 @@ static int chcr_ahash_finup(struct ahash_request *req)
        return isfull ? -EBUSY : -EINPROGRESS;
 unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+       chcr_dec_wrcount(dev);
        return error;
 }
 
@@ -1781,6 +1860,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
 {
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+       struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
@@ -1789,19 +1869,26 @@ static int chcr_ahash_digest(struct ahash_request *req)
 
        rtfm->init(req);
        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+       error = chcr_inc_wrcount(dev);
+       if (error)
+               return -ENXIO;
 
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                isfull = 1;
-               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                       return -ENOSPC;
+               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+                       error = -ENOSPC;
+                       goto err;
+               }
        }
 
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-       if (error)
-               return -ENOMEM;
+       if (error) {
+               error = -ENOMEM;
+               goto err;
+       }
 
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1854,6 +1941,8 @@ static int chcr_ahash_digest(struct ahash_request *req)
        return isfull ? -EBUSY : -EINPROGRESS;
 unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+       chcr_dec_wrcount(dev);
        return error;
 }
 
@@ -1925,6 +2014,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
        int digestsize, updated_digestsize;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+       struct chcr_dev *dev = h_ctx(tfm)->dev;
 
        if (input == NULL)
                goto out;
@@ -1967,6 +2057,7 @@ unmap:
 
 
 out:
+       chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
 }
 
@@ -1983,14 +2074,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 
        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
-               chcr_handle_aead_resp(aead_request_cast(req), input, err);
+               err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
                break;
 
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
-                err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+                chcr_handle_cipher_resp(ablkcipher_request_cast(req),
                                               input, err);
                break;
-
        case CRYPTO_ALG_TYPE_AHASH:
                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
                }
@@ -2008,7 +2098,7 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
        memcpy(state->partial_hash, req_ctx->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(state);
-               return 0;
+       return 0;
 }
 
 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -2215,10 +2305,7 @@ static int chcr_aead_common_init(struct aead_request *req)
                error = -ENOMEM;
                goto err;
        }
-       reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
-                                         CHCR_SRC_SG_SIZE, 0);
-       reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
-                                         CHCR_SRC_SG_SIZE, req->assoclen);
+
        return 0;
 err:
        return error;
@@ -2249,7 +2336,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
                                  req->base.complete, req->base.data);
        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                 req->iv);
-        aead_request_set_ad(subreq, req->assoclen);
+       aead_request_set_ad(subreq, req->assoclen);
        return op_type ? crypto_aead_decrypt(subreq) :
                crypto_aead_encrypt(subreq);
 }
@@ -2268,10 +2355,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        struct ulptx_sgl *ulptx;
        unsigned int transhdr_len;
        unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
-       unsigned int   kctx_len = 0, dnents;
-       unsigned int  assoclen = req->assoclen;
+       unsigned int   kctx_len = 0, dnents, snents;
        unsigned int  authsize = crypto_aead_authsize(tfm);
        int error = -EINVAL;
+       u8 *ivptr;
        int null = 0;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
@@ -2288,24 +2375,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
                subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
                null = 1;
-               assoclen = 0;
-               reqctx->aad_nents = 0;
        }
-       dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
-       dnents += sg_nents_xlen(req->dst, req->cryptlen +
-               (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
-               req->assoclen);
+       dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+               (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
        dnents += MIN_AUTH_SG; // For IV
-
+       snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+                              CHCR_SRC_SG_SIZE, 0);
        dst_size = get_space_for_phys_dsgl(dnents);
        kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
                - sizeof(chcr_req->key_ctx);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
-       reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+       reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
                        SGE_MAX_WR_LEN;
-       temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
-                       : (sgl_len(reqctx->src_nents + reqctx->aad_nents
-                       + MIN_GCM_SG) * 8);
+       temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
+                       : (sgl_len(snents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
 
@@ -2315,7 +2398,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
                chcr_aead_common_exit(req);
                return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
        }
-       skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+       skb = alloc_skb(transhdr_len, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
@@ -2331,16 +2414,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
         * to the hardware spec
         */
        chcr_req->sec_cpl.op_ivinsrtofst =
-               FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
-                                      assoclen + 1);
-       chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
+               FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+       chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
-                                       assoclen ? 1 : 0, assoclen,
-                                       assoclen + IV + 1,
+                                       null ? 0 : 1 + IV,
+                                       null ? 0 : IV + req->assoclen,
+                                       req->assoclen + IV + 1,
                                        (temp & 0x1F0) >> 4);
        chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
                                        temp & 0xF,
-                                       null ? 0 : assoclen + IV + 1,
+                                       null ? 0 : req->assoclen + IV + 1,
                                        temp, temp);
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
@@ -2367,23 +2450,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
               actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
+       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+       ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+       ulptx = (struct ulptx_sgl *)(ivptr + IV);
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
-               memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
-               memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+               memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+               memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
                                CTR_RFC3686_IV_SIZE);
-               *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+               *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
-               memcpy(reqctx->iv, req->iv, IV);
+               memcpy(ivptr, req->iv, IV);
        }
-       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-       ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
-       chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
-       chcr_add_aead_src_ent(req, ulptx, assoclen);
+       chcr_add_aead_dst_ent(req, phys_cpl, qid);
+       chcr_add_aead_src_ent(req, ulptx);
        atomic_inc(&adap->chcr_stats.cipher_rqst);
-       temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
-               kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+       temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+               kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
                   transhdr_len, temp, 0);
        reqctx->skb = skb;
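
The pointer arithmetic above is the heart of this patch's AEAD rework: the IV now lives inside the work request itself, between the destination DSGL and the source ULPTX SGL, instead of in reqctx->iv with its own DMA mapping. A standalone sketch of the offset math (struct sizes are made up; the real ones come from the firmware headers):

	#include <stdint.h>
	#include <stdio.h>

	#define IV 16

	/* Dummy sizes, for illustration only. */
	struct chcr_wr          { uint8_t b[64]; };
	struct cpl_rx_phys_dsgl { uint8_t b[16]; };

	int main(void)
	{
		uint8_t wr[512];
		unsigned int kctx_len = 32, dst_size = 40;

		struct chcr_wr *chcr_req = (struct chcr_wr *)wr;
		struct cpl_rx_phys_dsgl *phys_cpl = (struct cpl_rx_phys_dsgl *)
			((uint8_t *)(chcr_req + 1) + kctx_len);
		uint8_t *ivptr = (uint8_t *)(phys_cpl + 1) + dst_size;
		uint8_t *ulptx = ivptr + IV; /* src SGL starts after the IV */

		printf("dsgl at %td, iv at %td, src sgl at %td\n",
		       (uint8_t *)phys_cpl - wr, ivptr - wr, ulptx - wr);
		return 0;
	}
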
@@ -2470,8 +2554,7 @@ void chcr_aead_dma_unmap(struct device *dev,
 }
 
 void chcr_add_aead_src_ent(struct aead_request *req,
-                          struct ulptx_sgl *ulptx,
-                          unsigned int assoclen)
+                          struct ulptx_sgl *ulptx)
 {
        struct ulptx_walk ulp_walk;
        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
@@ -2484,28 +2567,20 @@ void chcr_add_aead_src_ent(struct aead_request *req,
                        buf += reqctx->b0_len;
                }
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-                                  buf, assoclen, 0);
-               buf += assoclen;
-               memcpy(buf, reqctx->iv, IV);
-               buf += IV;
-               sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-                                  buf, req->cryptlen, req->assoclen);
+                                  buf, req->cryptlen + req->assoclen, 0);
        } else {
                ulptx_walk_init(&ulp_walk, ulptx);
                if (reqctx->b0_len)
                        ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
-                                           &reqctx->b0_dma);
-               ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
-               ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
-               ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
-                                 req->assoclen);
+                                           reqctx->b0_dma);
+               ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+                                 req->assoclen,  0);
                ulptx_walk_end(&ulp_walk);
        }
 }
 
 void chcr_add_aead_dst_ent(struct aead_request *req,
                           struct cpl_rx_phys_dsgl *phys_cpl,
-                          unsigned int assoclen,
                           unsigned short qid)
 {
        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
@@ -2516,12 +2591,10 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        u32 temp;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
-       if (reqctx->b0_len)
-               dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
-       dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
-       dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
-       temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
-       dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+       dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+       temp = req->assoclen + req->cryptlen +
+               (reqctx->op ? -authsize : authsize);
+       dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
        dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
@@ -2589,7 +2662,7 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
                ulptx_walk_init(&ulp_walk, ulptx);
                if (param->bfr_len)
                        ulptx_walk_add_page(&ulp_walk, param->bfr_len,
-                                           &reqctx->hctx_wr.dma_addr);
+                                           reqctx->hctx_wr.dma_addr);
                ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
                                  param->sg_len, reqctx->hctx_wr.src_ofst);
                reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
@@ -2689,8 +2762,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
        return 0;
 }
 
-static void generate_b0(struct aead_request *req,
-                       struct chcr_aead_ctx *aeadctx,
+static void generate_b0(struct aead_request *req, u8 *ivptr,
                        unsigned short op_type)
 {
        unsigned int l, lp, m;
@@ -2701,7 +2773,7 @@ static void generate_b0(struct aead_request *req,
 
        m = crypto_aead_authsize(aead);
 
-       memcpy(b0, reqctx->iv, 16);
+       memcpy(b0, ivptr, 16);
 
        lp = b0[0];
        l = lp + 1;
@@ -2727,29 +2799,31 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
 }
 
 static int ccm_format_packet(struct aead_request *req,
-                            struct chcr_aead_ctx *aeadctx,
+                            u8 *ivptr,
                             unsigned int sub_type,
                             unsigned short op_type,
                             unsigned int assoclen)
 {
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
        int rc = 0;
 
        if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
-               reqctx->iv[0] = 3;
-               memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
-               memcpy(reqctx->iv + 4, req->iv, 8);
-               memset(reqctx->iv + 12, 0, 4);
+               ivptr[0] = 3;
+               memcpy(ivptr + 1, &aeadctx->salt[0], 3);
+               memcpy(ivptr + 4, req->iv, 8);
+               memset(ivptr + 12, 0, 4);
        } else {
-               memcpy(reqctx->iv, req->iv, 16);
+               memcpy(ivptr, req->iv, 16);
        }
        if (assoclen)
                *((unsigned short *)(reqctx->scratch_pad + 16)) =
                                htons(assoclen);
 
-       generate_b0(req, aeadctx, op_type);
+       generate_b0(req, ivptr, op_type);
        /* zero the ctr value */
-       memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+       memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
        return rc;
 }
 
@@ -2762,7 +2836,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
        unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
        unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
-       unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
+       unsigned int c_id = a_ctx(tfm)->tx_chan_id;
        unsigned int ccm_xtra;
        unsigned char tag_offset = 0, auth_offset = 0;
        unsigned int assoclen;
@@ -2775,7 +2849,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
                ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
 
        auth_offset = req->cryptlen ?
-               (assoclen + IV + 1 + ccm_xtra) : 0;
+               (req->assoclen + IV + 1 + ccm_xtra) : 0;
        if (op_type == CHCR_DECRYPT_OP) {
                if (crypto_aead_authsize(tfm) != req->cryptlen)
                        tag_offset = crypto_aead_authsize(tfm);
@@ -2785,13 +2859,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 
 
        sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
-                                        2, assoclen + 1 + ccm_xtra);
+                                        2, 1);
        sec_cpl->pldlen =
-               htonl(assoclen + IV + req->cryptlen + ccm_xtra);
+               htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
        /* For CCM there will be b0 always. So AAD start will be 1 always */
        sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
-                                       1, assoclen + ccm_xtra, assoclen
-                                       + IV + 1 + ccm_xtra, 0);
+                               1 + IV, IV + assoclen + ccm_xtra,
+                               req->assoclen + IV + 1 + ccm_xtra, 0);
 
        sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
                                        auth_offset, tag_offset,
@@ -2838,10 +2912,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        unsigned int transhdr_len;
-       unsigned int dst_size = 0, kctx_len, dnents, temp;
+       unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
        unsigned int sub_type, assoclen = req->assoclen;
        unsigned int authsize = crypto_aead_authsize(tfm);
        int error = -EINVAL;
+       u8 *ivptr;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
        struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2857,37 +2932,38 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
        if (error)
                goto err;
-       dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
-       dnents += sg_nents_xlen(req->dst, req->cryptlen
+       dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
                        + (reqctx->op ? -authsize : authsize),
-                       CHCR_DST_SG_SIZE, req->assoclen);
+                       CHCR_DST_SG_SIZE, 0);
        dnents += MIN_CCM_SG; // For IV and B0
        dst_size = get_space_for_phys_dsgl(dnents);
+       snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+                              CHCR_SRC_SG_SIZE, 0);
+       snents += MIN_CCM_SG; // For B0
        kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
-       reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+       reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
                       reqctx->b0_len) <= SGE_MAX_WR_LEN;
-       temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+       temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
                                     reqctx->b0_len, 16) :
-               (sgl_len(reqctx->src_nents + reqctx->aad_nents +
-                                   MIN_CCM_SG) *  8);
+               (sgl_len(snents) *  8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
 
        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
-                                   reqctx->b0_len, transhdr_len, reqctx->op)) {
+                               reqctx->b0_len, transhdr_len, reqctx->op)) {
                atomic_inc(&adap->chcr_stats.fallback);
                chcr_aead_common_exit(req);
                return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
        }
-       skb = alloc_skb(SGE_MAX_WR_LEN,  flags);
+       skb = alloc_skb(transhdr_len, flags);
 
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
 
-       chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
+       chcr_req = __skb_put_zero(skb, transhdr_len);
 
        fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
 
@@ -2897,16 +2973,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
                        aeadctx->key, aeadctx->enckey_len);
 
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-       ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
-       error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
+       ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+       ulptx = (struct ulptx_sgl *)(ivptr + IV);
+       error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
        if (error)
                goto dstmap_fail;
-       chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
-       chcr_add_aead_src_ent(req, ulptx, assoclen);
+       chcr_add_aead_dst_ent(req, phys_cpl, qid);
+       chcr_add_aead_src_ent(req, ulptx);
 
        atomic_inc(&adap->chcr_stats.aead_rqst);
-       temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
-               kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+       temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+               kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
                reqctx->b0_len) : 0);
        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
                    transhdr_len, temp, 0);
@@ -2931,10 +3008,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
-       unsigned int transhdr_len, dnents = 0;
+       unsigned int transhdr_len, dnents = 0, snents;
        unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
        unsigned int authsize = crypto_aead_authsize(tfm);
        int error = -EINVAL;
+       u8 *ivptr;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
        struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2946,19 +3024,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        error = chcr_aead_common_init(req);
        if (error)
                return ERR_PTR(error);
-       dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
-       dnents += sg_nents_xlen(req->dst, req->cryptlen +
+       dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
                                (reqctx->op ? -authsize : authsize),
-                               CHCR_DST_SG_SIZE, req->assoclen);
+                               CHCR_DST_SG_SIZE, 0);
+       snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+                              CHCR_SRC_SG_SIZE, 0);
        dnents += MIN_GCM_SG; // For IV
        dst_size = get_space_for_phys_dsgl(dnents);
        kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
-       reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+       reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
                        SGE_MAX_WR_LEN;
-       temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
-               (sgl_len(reqctx->src_nents +
-               reqctx->aad_nents + MIN_GCM_SG) * 8);
+       temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
+               (sgl_len(snents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
@@ -2968,7 +3046,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                chcr_aead_common_exit(req);
                return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
        }
-       skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+       skb = alloc_skb(transhdr_len, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
@@ -2979,15 +3057,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        //Offset of tag from end
        temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
        chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
-                                       a_ctx(tfm)->dev->rx_channel_id, 2,
-                                       (assoclen + 1));
+                                       a_ctx(tfm)->tx_chan_id, 2, 1);
        chcr_req->sec_cpl.pldlen =
-               htonl(assoclen + IV + req->cryptlen);
+               htonl(req->assoclen + IV + req->cryptlen);
        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
-                                       assoclen ? 1 : 0, assoclen,
-                                       assoclen + IV + 1, 0);
+                                       assoclen ? 1 + IV : 0,
+                                       assoclen ? IV + assoclen : 0,
+                                       req->assoclen + IV + 1, 0);
        chcr_req->sec_cpl.cipherstop_lo_authinsert =
-                       FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+                       FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
                                                temp, temp);
        chcr_req->sec_cpl.seqno_numivs =
                        FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
@@ -3002,25 +3080,26 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
               GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
 
+       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+       ivptr = (u8 *)(phys_cpl + 1) + dst_size;
        /* prepare a 16 byte iv */
        /* S   A   L  T |  IV | 0x00000001 */
        if (get_aead_subtype(tfm) ==
            CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
-               memcpy(reqctx->iv, aeadctx->salt, 4);
-               memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
+               memcpy(ivptr, aeadctx->salt, 4);
+               memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
        } else {
-               memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
+               memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
        }
-       *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+       *((unsigned int *)(ivptr + 12)) = htonl(0x01);
 
-       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-       ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+       ulptx = (struct ulptx_sgl *)(ivptr + 16);
 
-       chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
-       chcr_add_aead_src_ent(req, ulptx, assoclen);
+       chcr_add_aead_dst_ent(req, phys_cpl, qid);
+       chcr_add_aead_src_ent(req, ulptx);
        atomic_inc(&adap->chcr_stats.aead_rqst);
-       temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
-               kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+       temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+               kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
                    transhdr_len, temp, reqctx->verify);
        reqctx->skb = skb;
@@ -3118,12 +3197,12 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
                aeadctx->mayverify = VERIFY_HW;
                break;
        case ICV_12:
-                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
-                aeadctx->mayverify = VERIFY_HW;
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+               aeadctx->mayverify = VERIFY_HW;
                break;
        case ICV_14:
-                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
-                aeadctx->mayverify = VERIFY_HW;
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+               aeadctx->mayverify = VERIFY_HW;
                break;
        case ICV_16:
                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
@@ -3565,27 +3644,42 @@ static int chcr_aead_op(struct aead_request *req,
                        create_wr_t create_wr_fn)
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
        struct uld_ctx *u_ctx;
        struct sk_buff *skb;
        int isfull = 0;
+       struct chcr_dev *cdev;
 
-       if (!a_ctx(tfm)->dev) {
+       cdev = a_ctx(tfm)->dev;
+       if (!cdev) {
                pr_err("chcr : %s : No crypto device.\n", __func__);
                return -ENXIO;
        }
+
+       if (chcr_inc_wrcount(cdev)) {
+               /* Detach state for CHCR means lldi or padap is freed.
+                * We cannot increment the fallback counter here.
+                */
+               return chcr_aead_fallback(req, reqctx->op);
+       }
+
        u_ctx = ULD_CTX(a_ctx(tfm));
        if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                   a_ctx(tfm)->tx_qidx)) {
                isfull = 1;
-               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+                       chcr_dec_wrcount(cdev);
                        return -ENOSPC;
+               }
        }
 
        /* Form a WR from req */
        skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
 
-       if (IS_ERR(skb) || !skb)
+       if (IS_ERR(skb) || !skb) {
+               chcr_dec_wrcount(cdev);
                return PTR_ERR(skb);
+       }
 
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
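
Note the policy choice above: when chcr_inc_wrcount() reports a detaching device, the AEAD path does not fail the request with -ENXIO (as the cipher and hash paths do) but hands it to the software fallback tfm the driver keeps for its AEAD algorithms. Schematically, with hypothetical names:

	struct aead_req { int dummy; };
	struct cdev     { int detaching; };

	static int inc_wrcount(struct cdev *d)     { return d->detaching; }
	static int sw_fallback(struct aead_req *r) { (void)r; return 0; }
	static int hw_submit(struct aead_req *r)   { (void)r; return 0; }

	static int aead_op(struct aead_req *r, struct cdev *d)
	{
		if (inc_wrcount(d))       /* device going away: no new HW work */
			return sw_fallback(r);
		return hw_submit(r);      /* dec_wrcount() runs on completion */
	}
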
@@ -3722,7 +3816,6 @@ static struct chcr_alg_template driver_algs[] = {
                                .setkey         = chcr_aes_rfc3686_setkey,
                                .encrypt        = chcr_aes_encrypt,
                                .decrypt        = chcr_aes_decrypt,
-                               .geniv          = "seqiv",
                        }
                }
        },
@@ -4178,7 +4271,6 @@ static struct chcr_alg_template driver_algs[] = {
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
-
 };
 
 /*