crypto: chelsio - Fix memory corruption in DMA Mapped buffers.
author    Harsh Jain <harsh@chelsio.com>
          Wed, 19 Sep 2018 17:12:16 +0000 (22:42 +0530)
committer Herbert Xu <herbert@gondor.apana.org.au>
          Fri, 28 Sep 2018 04:44:34 +0000 (12:44 +0800)
Update the PCI channel ID in the "cpl_rx_phys_dsgl" header. If pci_chan_id and
tx_chan_id are not derived from the same queue, the hardware can send the
request-completion indication before the DMA transfer has finished.
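
As a rough illustration of the invariant the change enforces (the names
chan_snapshot and pick_channels below are hypothetical sketch helpers, not
driver symbols): both per-context IDs are captured together at device-init
time, and the same pci_chan_id is later written into the cpl_rx_phys_dsgl
RSS header, so the completion is routed on the channel that actually carries
the DMA.

	/* Hypothetical sketch, not driver code: both IDs come from one
	 * snapshot taken under the device lock, so the ULPTX destination
	 * (FILL_ULPTX_CMD_DEST) and rss_hdr_int.channel can no longer
	 * diverge when dev->tx_channel_id flips between contexts.
	 */
	struct chan_snapshot {
		unsigned char tx_chan_id;	/* feeds FILL_ULPTX_CMD_DEST() */
		unsigned char pci_chan_id;	/* feeds rss_hdr_int.channel   */
	};

	static void pick_channels(struct chan_snapshot *s,
				  unsigned char dev_tx_channel_id,
				  unsigned int txq_idx,
				  unsigned int txq_perchan)
	{
		s->tx_chan_id = dev_tx_channel_id;
		/* PCI channel the SGE forwards completions on; it must match
		 * the channel the driver programmed into FW ("pi->tx_chan")
		 * at queue-allocation time.
		 */
		s->pci_chan_id = txq_idx / txq_perchan;
	}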

Herbert, it would be good if this fix could be merged to the stable tree.
For the 4.14 kernel, it requires some updates to avoid merge conflicts.

Cc: <stable@vger.kernel.org>
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_crypto.h

index 5c539af8ed6048c9687dae5f99653d57a17bc4c7..010bbf607797f26acaecd1dc542ffa2fb049bf7a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+                                int pci_chan_id)
 {
        struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
+       phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);
 
-       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
                                    adap->vres.ncrypto_fc);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
-               rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-               rxq_idx += id % rxq_perchan;
-               txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-               txq_idx += id % txq_perchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
-               ctx->rx_qidx = rxq_idx;
-               ctx->tx_qidx = txq_idx;
+               ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
+               rxq_idx = ctx->tx_chan_id * rxq_perchan;
+               rxq_idx += id % rxq_perchan;
+               txq_idx = ctx->tx_chan_id * txq_perchan;
+               txq_idx += id % txq_perchan;
+               ctx->rx_qidx = rxq_idx;
+               ctx->tx_qidx = txq_idx;
+               /* Channel Id used by SGE to forward packet to Host.
+                * Same value should be used in cpl_fw6_pld RSS_CH field
+                * by FW. Driver programs PCI channel ID to be used in fw
+                * at the time of queue allocation with value "pi->tx_chan"
+                */
+               ctx->pci_chan_id = txq_idx / txq_perchan;
        }
 out:
        return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct dsgl_walk dsgl_walk;
        unsigned int authsize = crypto_aead_authsize(tfm);
+       struct chcr_context *ctx = a_ctx(tfm);
        u32 temp;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
        temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
                             unsigned short qid)
 {
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+       struct chcr_context *ctx = c_ctx(tfm);
        struct dsgl_walk dsgl_walk;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
        reqctx->dstsg = dsgl_walk.last_sg;
        reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
index 54835cb109e561ee4ce68f673eaceda38a39e35e..0d2c70c344f39bdf34751a1d64edb2efea426e3b 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
        struct chcr_dev *dev;
        unsigned char tx_qidx;
        unsigned char rx_qidx;
+       unsigned char tx_chan_id;
+       unsigned char pci_chan_id;
        struct __crypto_ctx crypto_ctx[0];
 };