git.baikalelectronics.ru Git - kernel.git/commitdiff
crypto: arm64/chacha - depend on generic chacha library instead of crypto driver
authorArd Biesheuvel <ardb@kernel.org>
Fri, 8 Nov 2019 12:22:11 +0000 (13:22 +0100)
committerHerbert Xu <herbert@gondor.apana.org.au>
Sun, 17 Nov 2019 01:02:39 +0000 (09:02 +0800)
Depend on the generic ChaCha library routines instead of pulling in the
generic ChaCha skcipher driver, which is more than we need, and makes
managing the dependencies between the generic library, generic driver,
accelerated library and driver more complicated.

While at it, drop the logic to prefer the scalar code on short inputs.
Turning the NEON on and off is cheap these days, and one major use case
for ChaCha20 is ChaCha20-Poly1305, which is guaranteed to hit the scalar
path upon every invocation (when doing the Poly1305 nonce generation).

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm64/crypto/Kconfig
arch/arm64/crypto/chacha-neon-glue.c

index 286e3514d34c5d4774334bd8f1939511b1714197..22c6642ae4648f58fc1f2bbee12afeb3b2fdb4cc 100644 (file)
@@ -103,7 +103,7 @@ config CRYPTO_CHACHA20_NEON
        tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
        depends on KERNEL_MODE_NEON
        select CRYPTO_SKCIPHER
-       select CRYPTO_CHACHA20
+       select CRYPTO_LIB_CHACHA_GENERIC
 
 config CRYPTO_NHPOLY1305_NEON
        tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
index d4cc61bfe79dfdfe46f9525c5cccc2b922a1a464..cae2cb92eca86dab50082206f4e7c58e5fff228e 100644 (file)
@@ -68,7 +68,7 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
 
        err = skcipher_walk_virt(&walk, req, false);
 
-       crypto_chacha_init(state, ctx, iv);
+       chacha_init_generic(state, ctx->key, iv);
 
        while (walk.nbytes > 0) {
                unsigned int nbytes = walk.nbytes;
@@ -76,10 +76,16 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
                if (nbytes < walk.total)
                        nbytes = rounddown(nbytes, walk.stride);
 
-               kernel_neon_begin();
-               chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes, ctx->nrounds);
-               kernel_neon_end();
+               if (!crypto_simd_usable()) {
+                       chacha_crypt_generic(state, walk.dst.virt.addr,
+                                            walk.src.virt.addr, nbytes,
+                                            ctx->nrounds);
+               } else {
+                       kernel_neon_begin();
+                       chacha_doneon(state, walk.dst.virt.addr,
+                                     walk.src.virt.addr, nbytes, ctx->nrounds);
+                       kernel_neon_end();
+               }
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }
 
@@ -91,9 +97,6 @@ static int chacha_neon(struct skcipher_request *req)
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
-               return crypto_chacha_crypt(req);
-
        return chacha_neon_stream_xor(req, ctx, req->iv);
 }
 
@@ -105,14 +108,15 @@ static int xchacha_neon(struct skcipher_request *req)
        u32 state[16];
        u8 real_iv[16];
 
-       if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
-               return crypto_xchacha_crypt(req);
+       chacha_init_generic(state, ctx->key, req->iv);
 
-       crypto_chacha_init(state, ctx, req->iv);
-
-       kernel_neon_begin();
-       hchacha_block_neon(state, subctx.key, ctx->nrounds);
-       kernel_neon_end();
+       if (crypto_simd_usable()) {
+               kernel_neon_begin();
+               hchacha_block_neon(state, subctx.key, ctx->nrounds);
+               kernel_neon_end();
+       } else {
+               hchacha_block_generic(state, subctx.key, ctx->nrounds);
+       }
        subctx.nrounds = ctx->nrounds;
 
        memcpy(&real_iv[0], req->iv + 24, 8);
@@ -134,7 +138,7 @@ static struct skcipher_alg algs[] = {
                .ivsize                 = CHACHA_IV_SIZE,
                .chunksize              = CHACHA_BLOCK_SIZE,
                .walksize               = 5 * CHACHA_BLOCK_SIZE,
-               .setkey                 = crypto_chacha20_setkey,
+               .setkey                 = chacha20_setkey,
                .encrypt                = chacha_neon,
                .decrypt                = chacha_neon,
        }, {
@@ -150,7 +154,7 @@ static struct skcipher_alg algs[] = {
                .ivsize                 = XCHACHA_IV_SIZE,
                .chunksize              = CHACHA_BLOCK_SIZE,
                .walksize               = 5 * CHACHA_BLOCK_SIZE,
-               .setkey                 = crypto_chacha20_setkey,
+               .setkey                 = chacha20_setkey,
                .encrypt                = xchacha_neon,
                .decrypt                = xchacha_neon,
        }, {
@@ -166,7 +170,7 @@ static struct skcipher_alg algs[] = {
                .ivsize                 = XCHACHA_IV_SIZE,
                .chunksize              = CHACHA_BLOCK_SIZE,
                .walksize               = 5 * CHACHA_BLOCK_SIZE,
-               .setkey                 = crypto_chacha12_setkey,
+               .setkey                 = chacha12_setkey,
                .encrypt                = xchacha_neon,
                .decrypt                = xchacha_neon,
        }