nvmet-tcp: fix receive data digest calculation for multiple h2cdata PDUs
author    Sagi Grimberg <sagi@grimberg.me>
Wed, 3 Feb 2021 23:00:01 +0000 (15:00 -0800)
committer Christoph Hellwig <hch@lst.de>
Wed, 10 Feb 2021 15:38:03 +0000 (16:38 +0100)
When a host sends multiple h2cdata PDUs for a single command, we
should verify the data digest calculation per PDU and not
per command.

Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
Reported-by: Narayan Ayalasomayajula <Narayan.Ayalasomayajula@wdc.com>
Tested-by: Narayan Ayalasomayajula <Narayan.Ayalasomayajula@wdc.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/tcp.c

index dc1f0f6471896e3dfd3f38dee4084184bc5d588c..c3da50f776fac255df3cb5c48ffdc456e452837d 100644
@@ -378,7 +378,7 @@ err:
        return NVME_SC_INTERNAL;
 }
 
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
                struct nvmet_tcp_cmd *cmd)
 {
        ahash_request_set_crypt(hash, cmd->req.sg,
@@ -386,6 +386,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
        crypto_ahash_digest(hash);
 }
 
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+               struct nvmet_tcp_cmd *cmd)
+{
+       struct scatterlist sg;
+       struct kvec *iov;
+       int i;
+
+       crypto_ahash_init(hash);
+       for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+               sg_init_one(&sg, iov->iov_base, iov->iov_len);
+               ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+               crypto_ahash_update(hash);
+       }
+       ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+       crypto_ahash_final(hash);
+}
+
 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
        struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -410,7 +427,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 
        if (queue->data_digest) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
-               nvmet_tcp_ddgst(queue->snd_hash, cmd);
+               nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
        }
 
        if (cmd->queue->hdr_digest) {
@@ -1059,7 +1076,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
 
-       nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+       nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
        queue->offset = 0;
        queue->left = NVME_TCP_DIGEST_LENGTH;
        queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1080,14 +1097,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
                cmd->rbytes_done += ret;
        }
 
+       if (queue->data_digest) {
+               nvmet_tcp_prep_recv_ddgst(cmd);
+               return 0;
+       }
        nvmet_tcp_unmap_pdu_iovec(cmd);
 
        if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
            cmd->rbytes_done == cmd->req.transfer_len) {
-               if (queue->data_digest) {
-                       nvmet_tcp_prep_recv_ddgst(cmd);
-                       return 0;
-               }
                cmd->req.execute(&cmd->req);
        }