Mirror of https://github.com/openwrt/openwrt.git, synced 2024-12-20 22:23:27 +00:00

Commit d540725871
Without this patch, the chacha block counter is not incremented on NEON
rounds, resulting in incorrect calculations and corrupt packets.

This also switches to generating the patches with `--no-numbered
--zero-commit` so that future diffs are smaller.

Reported-by: Hans Geiblinger <cybrnook2002@yahoo.com>
Reviewed-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com>
Cc: David Bauer <mail@david-bauer.net>
Cc: Petr Štetiar <ynezz@true.cz>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
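For context: ChaCha keeps its 32-bit block counter in word 12 of the
16-word state, and every 64-byte keystream block must advance it. The
broken NEON path skipped that increment, so consecutive blocks reused
the same keystream. Below is a minimal portable C sketch of the
invariant, illustrative only — chacha20_block and chacha20_xor are our
reference names, not the kernel's:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CHACHA_BLOCK_SIZE 64

static uint32_t rotl32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }

#define QR(a, b, c, d) do {                \
	a += b; d ^= a; d = rotl32(d, 16); \
	c += d; b ^= c; b = rotl32(b, 12); \
	a += b; d ^= a; d = rotl32(d, 8);  \
	c += d; b ^= c; b = rotl32(b, 7);  \
} while (0)

/* Plain C reference: one 64-byte keystream block from the 16-word state. */
static void chacha20_block(const uint32_t state[16], uint8_t out[CHACHA_BLOCK_SIZE])
{
	uint32_t x[16];
	int i;

	memcpy(x, state, sizeof(x));
	for (i = 0; i < 20; i += 2) {
		/* column round */
		QR(x[0], x[4], x[8],  x[12]);
		QR(x[1], x[5], x[9],  x[13]);
		QR(x[2], x[6], x[10], x[14]);
		QR(x[3], x[7], x[11], x[15]);
		/* diagonal round */
		QR(x[0], x[5], x[10], x[15]);
		QR(x[1], x[6], x[11], x[12]);
		QR(x[2], x[7], x[8],  x[13]);
		QR(x[3], x[4], x[9],  x[14]);
	}
	for (i = 0; i < 16; i++) {
		uint32_t v = x[i] + state[i];

		/* serialize little-endian regardless of host byte order */
		out[4 * i + 0] = (uint8_t)v;
		out[4 * i + 1] = (uint8_t)(v >> 8);
		out[4 * i + 2] = (uint8_t)(v >> 16);
		out[4 * i + 3] = (uint8_t)(v >> 24);
	}
}

void chacha20_xor(uint32_t state[16], uint8_t *dst, const uint8_t *src, size_t bytes)
{
	uint8_t stream[CHACHA_BLOCK_SIZE];
	size_t i, n;

	while (bytes) {
		n = bytes < CHACHA_BLOCK_SIZE ? bytes : CHACHA_BLOCK_SIZE;
		chacha20_block(state, stream);
		/* The increment the buggy NEON path skipped: without it,
		 * every block reuses the same keystream and the output is
		 * corrupt. */
		state[12]++;
		for (i = 0; i < n; i++)
			dst[i] = src[i] ^ stream[i];
		dst += n;
		src += n;
		bytes -= n;
	}
}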
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Fri, 8 Nov 2019 13:22:12 +0100
Subject: [PATCH] crypto: arm64/chacha - expose arm64 ChaCha routine as library
 function

commit b3aad5bad26a01a4bd8c49a5c5f52aec665f3b7c upstream.

Expose the accelerated NEON ChaCha routine directly as a symbol
export so that users of the ChaCha library API can use it directly.

Given that calls into the library API will always go through the
routines in this module if it is enabled, switch to static keys
to select the optimal implementation available (which may be none
at all, in which case we defer to the generic implementation for
all invocations).

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
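[Editor's note on how the new export is consumed — a sketch under
assumptions, modeled on the upstream include/crypto/chacha.h wrapper
and not part of this patch: once CRYPTO_ARCH_HAVE_LIB_CHACHA is
selected via the Kconfig hunk below, the library entry point resolves
to chacha_crypt_arch(), which in turn uses the static key to fall back
to the generic code when NEON is absent or unusable.]

/* Sketch of the library-side dispatch (assumed, after upstream
 * include/crypto/chacha.h); shown for illustration only. */
static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
				unsigned int bytes, int nrounds)
{
	if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
		chacha_crypt_arch(state, dst, src, bytes, nrounds);
	else
		chacha_crypt_generic(state, dst, src, bytes, nrounds);
}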
 arch/arm64/crypto/Kconfig            |  1 +
 arch/arm64/crypto/chacha-neon-glue.c | 53 ++++++++++++++++++++++------
 2 files changed, 43 insertions(+), 11 deletions(-)

--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -104,6 +104,7 @@ config CRYPTO_CHACHA20_NEON
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_LIB_CHACHA_GENERIC
+	select CRYPTO_ARCH_HAVE_LIB_CHACHA
 
 config CRYPTO_NHPOLY1305_NEON
 	tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -23,6 +23,7 @@
 #include <crypto/internal/chacha.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
+#include <linux/jump_label.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
@@ -36,6 +37,8 @@ asmlinkage void chacha_4block_xor_neon(u
 				       int nrounds, int bytes);
 asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
 
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
 static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
 			  int bytes, int nrounds)
 {
@@ -59,6 +62,37 @@ static void chacha_doneon(u32 *state, u8
 	}
 }
 
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+	if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
+		hchacha_block_generic(state, stream, nrounds);
+	} else {
+		kernel_neon_begin();
+		hchacha_block_neon(state, stream, nrounds);
+		kernel_neon_end();
+	}
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+	chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+		       int nrounds)
+{
+	if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
+	    !crypto_simd_usable())
+		return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+	kernel_neon_begin();
+	chacha_doneon(state, dst, src, bytes, nrounds);
+	kernel_neon_end();
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
 static int chacha_neon_stream_xor(struct skcipher_request *req,
 				  const struct chacha_ctx *ctx, const u8 *iv)
 {
@@ -76,7 +110,8 @@ static int chacha_neon_stream_xor(struct
 		if (nbytes < walk.total)
 			nbytes = rounddown(nbytes, walk.stride);
 
-		if (!crypto_simd_usable()) {
+		if (!static_branch_likely(&have_neon) ||
+		    !crypto_simd_usable()) {
 			chacha_crypt_generic(state, walk.dst.virt.addr,
 					     walk.src.virt.addr, nbytes,
 					     ctx->nrounds);
@@ -109,14 +144,7 @@ static int xchacha_neon(struct skcipher_
 	u8 real_iv[16];
 
 	chacha_init_generic(state, ctx->key, req->iv);
-
-	if (crypto_simd_usable()) {
-		kernel_neon_begin();
-		hchacha_block_neon(state, subctx.key, ctx->nrounds);
-		kernel_neon_end();
-	} else {
-		hchacha_block_generic(state, subctx.key, ctx->nrounds);
-	}
+	hchacha_block_arch(state, subctx.key, ctx->nrounds);
 	subctx.nrounds = ctx->nrounds;
 
 	memcpy(&real_iv[0], req->iv + 24, 8);
@@ -179,14 +207,17 @@ static struct skcipher_alg algs[] = {
 static int __init chacha_simd_mod_init(void)
 {
 	if (!cpu_have_named_feature(ASIMD))
-		return -ENODEV;
+		return 0;
+
+	static_branch_enable(&have_neon);
 
 	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
 }
 
 static void __exit chacha_simd_mod_fini(void)
 {
-	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+	if (cpu_have_named_feature(ASIMD))
+		crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
 }
 
 module_init(chacha_simd_mod_init);