crypto: arm64/sha256 - clean up backwards function names
author: Eric Biggers <ebiggers@google.com>
Tue, 10 Oct 2023 06:41:26 +0000 (23:41 -0700)
committer: Herbert Xu <herbert@gondor.apana.org.au>
Fri, 20 Oct 2023 05:39:26 +0000 (13:39 +0800)
In the Linux kernel, a function whose name has two leading underscores
is conventionally called by the same-named function without leading
underscores -- not the other way around.  __sha256_block_data_order()
and __sha256_block_neon() got this backwards.  Fix this, albeit without
changing the names in the perlasm since that is OpenSSL code.  No change
in behavior.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm64/crypto/sha256-glue.c

index 9b5c86e07a9af3db024c34bedeedf88a7c94b447..35356987cc1e0b4d9fdb5cfe93dbd4b334e2538c 100644 (file)
@@ -27,8 +27,8 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                        unsigned int num_blks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
-static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
-                                     int blocks)
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
+                                  int blocks)
 {
        sha256_block_data_order(sst->state, src, blocks);
 }
@@ -36,8 +36,8 @@ static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                  unsigned int num_blks);
 
-static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
-                               int blocks)
+static void sha256_neon_transform(struct sha256_state *sst, u8 const *src,
+                                 int blocks)
 {
        sha256_block_neon(sst->state, src, blocks);
 }
@@ -45,17 +45,15 @@ static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
                                      unsigned int len)
 {
-       return sha256_base_do_update(desc, data, len,
-                                    __sha256_block_data_order);
+       return sha256_base_do_update(desc, data, len, sha256_arm64_transform);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
                                     unsigned int len, u8 *out)
 {
        if (len)
-               sha256_base_do_update(desc, data, len,
-                                     __sha256_block_data_order);
-       sha256_base_do_finalize(desc, __sha256_block_data_order);
+               sha256_base_do_update(desc, data, len, sha256_arm64_transform);
+       sha256_base_do_finalize(desc, sha256_arm64_transform);
 
        return sha256_base_finish(desc, out);
 }
@@ -98,7 +96,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
        if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
-                               __sha256_block_data_order);
+                               sha256_arm64_transform);
 
        while (len > 0) {
                unsigned int chunk = len;
@@ -114,7 +112,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
                                sctx->count % SHA256_BLOCK_SIZE;
 
                kernel_neon_begin();
-               sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
+               sha256_base_do_update(desc, data, chunk, sha256_neon_transform);
                kernel_neon_end();
                data += chunk;
                len -= chunk;
@@ -128,13 +126,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
        if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
-                               __sha256_block_data_order);
-               sha256_base_do_finalize(desc, __sha256_block_data_order);
+                               sha256_arm64_transform);
+               sha256_base_do_finalize(desc, sha256_arm64_transform);
        } else {
                if (len)
                        sha256_update_neon(desc, data, len);
                kernel_neon_begin();
-               sha256_base_do_finalize(desc, __sha256_block_neon);
+               sha256_base_do_finalize(desc, sha256_neon_transform);
                kernel_neon_end();
        }
        return sha256_base_finish(desc, out);