drivers/crypto/nx: Fixes for multiple races and issues
author     Kent Yoder <key@linux.vnet.ibm.com>
           Fri, 12 Apr 2013 17:13:59 +0000 (17:13 +0000)
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>
           Fri, 24 May 2013 08:11:10 +0000 (18:11 +1000)
Fixes a race on driver init with registering algorithms: the driver
status flag wasn't being set before the self tests started running,
as sketched below.
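
  Roughly what goes wrong (a simplified sketch, not the full nx.c
code): crypto_register_alg() runs the algorithm self tests
synchronously, and allocating a tfm for the test re-enters the
driver's context-init path, which bails out while the status flag is
still unset:

    /* sketch of the ctx-init check, simplified from nx.c */
    static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc,
                                  u32 mode)
    {
            if (nx_driver.of.status != NX_OKAY) {
                    pr_err("NX device not available\n");
                    return -ENODEV; /* the self test sees this error if
                                     * the flag is only set after
                                     * registration */
            }
            /* ... normal coprocessor setup continues ... */
            return 0;
    }

  Hence the flag has to be set to NX_OKAY before the first
crypto_register_alg() call rather than after the last one.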

  Added the cra_alignmask field for CBC and ECB modes.
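
  For reference (my reading of the crypto API, not something this
patch changes): cra_alignmask asks the API to hand the driver buffers
aligned to (mask + 1) bytes, bouncing misaligned ones as needed, so

    .cra_alignmask   = 0xf,  /* 0xf + 1 == 16 == AES_BLOCK_SIZE */

requests the 16-byte alignment the NX coprocessor wants.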

  Fixed a bug in GCM decryption where the AES block size was being
used in place of the authentication tag size (authsize); see below.
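
  On decrypt the incoming length includes the appended auth tag, and
the tag is however long the user set it via crypto_aead_setauthsize()
(4 to 16 bytes for GCM); it only happens to equal AES_BLOCK_SIZE in
the default case.  So the payload length has to be computed as:

    /* strip the real tag length, not a hardcoded block size */
    nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));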

  Removed use of the blkcipher_walk routines for scatterlist
processing.  Corner cases in the code prevent us from processing an
entire scatterlist at a time, and walking the buffers in block-sized
chunks turns out to be unnecessary anyway; a sketch of the
replacement follows.
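
  The replacement (nx_walk_and_build() in nx.c) walks the scatterlist
directly with the crypto/scatterwalk.h helpers and emits one NX s/g
entry per contiguous run.  A simplified sketch of the idea, under a
hypothetical name and minus the start-offset handling the real helper
also does:

    static struct nx_sg *sg_list_from_sl(struct nx_sg *nx_sg,
                                         unsigned int sglen,
                                         struct scatterlist *sg,
                                         unsigned int len)
    {
            struct scatter_walk walk;
            unsigned int n;
            char *addr;

            scatterwalk_start(&walk, sg);
            while (len && nx_sg) {
                    /* bytes available in the current entry/page */
                    n = scatterwalk_clamp(&walk, len);
                    if (!n) {       /* entry exhausted, hop to the next */
                            scatterwalk_start(&walk, sg_next(walk.sg));
                            n = scatterwalk_clamp(&walk, len);
                    }
                    addr = scatterwalk_map(&walk);
                    nx_sg = nx_build_sg_list(nx_sg, addr, n, sglen);
                    scatterwalk_unmap(addr);
                    scatterwalk_advance(&walk, n);
                    len -= n;
                    scatterwalk_done(&walk, 0, len); /* 0: input side */
            }
            return nx_sg;
    }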

  Fixed an off-by-one error when saving off extra data in the SHA
code; the boundary case is shown below.
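
  The boundary case, as I read it, with illustrative numbers:

    /* with sctx->count == 0 and len == SHA256_BLOCK_SIZE (64):
     *   old:  0 + 64 <= 64 -> the block is only buffered, leaving
     *         sctx->count equal to a full block;
     *   new:  0 + 64 <  64 -> false, so the block goes to the
     *         coprocessor now and sctx->count stays strictly below
     *         the block size, as the leftover handling expects. */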

  Fixed an accounting error in the number of bytes processed in the
SHA code.
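
  The stats counters are byte counts while the CPB field holds a bit
count, hence the divide by 8 in both final routines:

    atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
                 &(nx_ctx->stats->sha256_bytes));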

Signed-off-by: Kent Yoder <key@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
drivers/crypto/nx/nx-aes-cbc.c
drivers/crypto/nx/nx-aes-ecb.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/nx/nx.c

diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index a76d4c4f29f50798ed3ecafaa871d98a7d02c7e3..35d483f8db66b2a3ecb204181ffd0d43b0aaaffe 100644
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
        .cra_blocksize   = AES_BLOCK_SIZE,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_blkcipher_type,
+       .cra_alignmask   = 0xf,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_cbc_init,
        .cra_exit        = nx_crypto_ctx_exit,
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index ba5f1611336fe9fa5165cfa680e7acc2b7e38637..7bbc9a81da219e5c27e021c9ef6c880d194c90d3 100644
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize   = AES_BLOCK_SIZE,
+       .cra_alignmask   = 0xf,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_blkcipher_type,
        .cra_module      = THIS_MODULE,
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index c8109edc5cfb02063c1bd6c5914985846bccac2a..6cca6c392b00f34fa65ad4663b1725bd7b3842fe 100644
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
-               nbytes -= AES_BLOCK_SIZE;
+               nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 
        csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 9767315f8c0bd069a451f1a2c74bbf2f2bff628d..67024f2f0b78746bdbfcb8c5ffde595754a0b155 100644
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
         *  1: <= SHA256_BLOCK_SIZE: copy into state, return 0
         *  2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+       if (len + sctx->count < SHA256_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count, data, len);
                sctx->count += len;
                goto out;
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        atomic_inc(&(nx_ctx->stats->sha256_ops));
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buf, data + len - leftover, leftover);
+       if (leftover)
+               memcpy(sctx->buf, data + len - leftover, leftover);
        sctx->count = leftover;
 
        csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        struct nx_sg *in_sg, *out_sg;
        int rc;
 
+
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
        atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-       atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+       atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
                     &(nx_ctx->stats->sha256_bytes));
        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 3177b8c3d5f1e3073ef559d902fe3cd488e8360b..08eee11223490c7a1a249a2f559f960e72e7af2c 100644
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
         *  1: <= SHA512_BLOCK_SIZE: copy into state, return 0
         *  2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+       if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count[0], data, len);
                sctx->count[0] += len;
                goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        atomic_inc(&(nx_ctx->stats->sha512_ops));
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buf, data + len - leftover, leftover);
+       if (leftover)
+               memcpy(sctx->buf, data + len - leftover, leftover);
        sctx->count[0] = leftover;
 
        spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
                goto out;
 
        atomic_inc(&(nx_ctx->stats->sha512_ops));
-       atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+       atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
                     &(nx_ctx->stats->sha512_bytes));
 
        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index c767f232e6933bc876adedd56a1f7e655f93c815..bbdab6e5ccf08f75fcc23481dbb9d29a1df6cd69 100644
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
 {
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
-       struct blkcipher_walk walk;
-       int rc;
-
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-       if (rc)
-               goto out;
 
        if (iv)
-               memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+               memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-       while (walk.nbytes) {
-               nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-                                          walk.nbytes, nx_ctx->ap->sglen);
-               nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-                                           walk.nbytes, nx_ctx->ap->sglen);
-
-               rc = blkcipher_walk_done(desc, &walk, 0);
-               if (rc)
-                       break;
-       }
-
-       if (walk.nbytes) {
-               nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-                                          walk.nbytes, nx_ctx->ap->sglen);
-               nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-                                           walk.nbytes, nx_ctx->ap->sglen);
-
-               rc = 0;
-       }
+       nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+       nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
 
        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
        nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
        if (rc)
                goto out;
 
+       nx_driver.of.status = NX_OKAY;
+
        rc = crypto_register_alg(&nx_ecb_aes_alg);
        if (rc)
                goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
        if (rc)
                goto out_unreg_s512;
 
-       nx_driver.of.status = NX_OKAY;
-
        goto out;
 
 out_unreg_s512: