commit c0cb86e1d5
Rather than using the clunky, old, slower wireguard-linux-compat
out-of-tree module, this commit does a patch-by-patch backport of
upstream's wireguard to 5.4. This specific backport is in widespread
use, being part of SUSE's enterprise kernel, Oracle's enterprise
kernel, Google's Android kernel, Gentoo's distro kernel, and probably
more I've forgotten about. It's definitely the "more proper" way of
adding wireguard to a kernel than the ugly compat.h hell of the
wireguard-linux-compat repo. And most importantly for OpenWrt, it
allows using the same module configuration code for 5.10 as for 5.4,
with no need for bifurcation.

These patches are from the backport tree, which is maintained in the
open here: https://git.zx2c4.com/wireguard-linux/log/?h=backport-5.4.y

I'll be sending PRs to update this as needed.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
(cherry picked from commit 3888fa7880)
(cherry picked from commit d540725871)
(cherry picked from commit 196f3d586f)
(cherry picked from commit 3500fd7938)
(cherry picked from commit 23b801d3ba)
(cherry picked from commit 0c0cb97da7)
(cherry picked from commit 2a27f6f90a)
Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com>
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Fri, 8 Nov 2019 13:22:09 +0100
Subject: [PATCH] crypto: x86/chacha - depend on generic chacha library instead
 of crypto driver

commit 28e8d89b1ce8d2e7badfb5f69971dd635acb8863 upstream.

In preparation of extending the x86 ChaCha driver to also expose the ChaCha
library interface, drop the dependency on the chacha_generic crypto driver
as a non-SIMD fallback, and depend on the generic ChaCha library directly.
This way, we only pull in the code we actually need, without registering
a set of ChaCha skciphers that we will never use.
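
For reference, the library interface this switches to can be driven
without touching the crypto API at all. A minimal sketch, assuming only
chacha_init_generic() and chacha_crypt_generic() from <crypto/chacha.h>
(both visible in the hunks below); the wrapper function and its name are
our own illustration:

#include <crypto/chacha.h>

/* Hypothetical helper, not part of this patch: one-shot, in-place
 * ChaCha20 XOR using only the generic library. */
static void chacha20_xor_sketch(const u32 key[CHACHA_KEY_SIZE / 4],
                                const u8 iv[CHACHA_IV_SIZE],
                                u8 *buf, unsigned int len)
{
        u32 state[16];

        chacha_init_generic(state, key, iv);            /* iv = counter || nonce */
        chacha_crypt_generic(state, buf, buf, len, 20); /* 20 rounds */
}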

Since turning the FPU on and off is cheap these days, simplify the SIMD
routine by dropping the per-page yield, which makes for a cleaner switch
to the library API as well. This also allows us to invoke the skcipher
walk routines in non-atomic mode.
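
Condensed from the first hunk below (no code beyond what the patch
itself adds), this is the per-step pattern that results. Because the
FPU section now covers only a single walk step, nothing FPU-related is
held across skcipher_walk_done(), which is what makes the non-atomic
(may-sleep) walk mode safe:

        while (walk.nbytes > 0) {
                unsigned int nbytes = walk.nbytes;

                if (nbytes < walk.total)
                        nbytes = round_down(nbytes, walk.stride);

                if (!crypto_simd_usable()) {
                        /* e.g. in hard-irq context: generic library fallback */
                        chacha_crypt_generic(state, walk.dst.virt.addr,
                                             walk.src.virt.addr, nbytes,
                                             ctx->nrounds);
                } else {
                        kernel_fpu_begin();     /* claim FPU for this step only */
                        chacha_dosimd(state, walk.dst.virt.addr,
                                      walk.src.virt.addr, nbytes,
                                      ctx->nrounds);
                        kernel_fpu_end();       /* release before walk_done() */
                }
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }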

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 arch/x86/crypto/chacha_glue.c | 90 ++++++++++++++---------------------
 crypto/Kconfig                |  2 +-
 2 files changed, 36 insertions(+), 56 deletions(-)

--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -123,37 +123,38 @@ static void chacha_dosimd(u32 *state, u8
 	}
 }
 
-static int chacha_simd_stream_xor(struct skcipher_walk *walk,
+static int chacha_simd_stream_xor(struct skcipher_request *req,
 				  const struct chacha_ctx *ctx, const u8 *iv)
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
-	int next_yield = 4096; /* bytes until next FPU yield */
-	int err = 0;
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 
-	crypto_chacha_init(state, ctx, iv);
+	chacha_init_generic(state, ctx->key, iv);
 
-	while (walk->nbytes > 0) {
-		unsigned int nbytes = walk->nbytes;
+	while (walk.nbytes > 0) {
+		unsigned int nbytes = walk.nbytes;
 
-		if (nbytes < walk->total) {
-			nbytes = round_down(nbytes, walk->stride);
-			next_yield -= nbytes;
-		}
-
-		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
-			      nbytes, ctx->nrounds);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		if (next_yield <= 0) {
-			/* temporarily allow preemption */
-			kernel_fpu_end();
+		if (!crypto_simd_usable()) {
+			chacha_crypt_generic(state, walk.dst.virt.addr,
+					     walk.src.virt.addr, nbytes,
+					     ctx->nrounds);
+		} else {
 			kernel_fpu_begin();
-			next_yield = 4096;
+			chacha_dosimd(state, walk.dst.virt.addr,
+				      walk.src.virt.addr, nbytes,
+				      ctx->nrounds);
+			kernel_fpu_end();
 		}
-
-		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
@@ -163,55 +164,34 @@ static int chacha_simd(struct skcipher_r
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	int err;
-
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
-		return crypto_chacha_crypt(req);
 
-	err = skcipher_walk_virt(&walk, req, true);
-	if (err)
-		return err;
-
-	kernel_fpu_begin();
-	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
-	kernel_fpu_end();
-	return err;
+	return chacha_simd_stream_xor(req, ctx, req->iv);
 }
 
 static int xchacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	struct chacha_ctx subctx;
 	u32 *state, state_buf[16 + 2] __aligned(8);
+	struct chacha_ctx subctx;
 	u8 real_iv[16];
-	int err;
-
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
-		return crypto_xchacha_crypt(req);
-
-	err = skcipher_walk_virt(&walk, req, true);
-	if (err)
-		return err;
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-	crypto_chacha_init(state, ctx, req->iv);
+	chacha_init_generic(state, ctx->key, req->iv);
 
-	kernel_fpu_begin();
-
-	hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+	if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
+		kernel_fpu_begin();
+		hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+		kernel_fpu_end();
+	} else {
+		hchacha_block_generic(state, subctx.key, ctx->nrounds);
+	}
 	subctx.nrounds = ctx->nrounds;
 
 	memcpy(&real_iv[0], req->iv + 24, 8);
 	memcpy(&real_iv[8], req->iv + 16, 8);
-	err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
-
-	kernel_fpu_end();
-
-	return err;
+	return chacha_simd_stream_xor(req, &subctx, real_iv);
 }
 
 static struct skcipher_alg algs[] = {
@@ -227,7 +207,7 @@ static struct skcipher_alg algs[] = {
 		.max_keysize		= CHACHA_KEY_SIZE,
 		.ivsize			= CHACHA_IV_SIZE,
 		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= crypto_chacha20_setkey,
+		.setkey			= chacha20_setkey,
 		.encrypt		= chacha_simd,
 		.decrypt		= chacha_simd,
 	}, {
@@ -242,7 +222,7 @@ static struct skcipher_alg algs[] = {
 		.max_keysize		= CHACHA_KEY_SIZE,
 		.ivsize			= XCHACHA_IV_SIZE,
 		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= crypto_chacha20_setkey,
+		.setkey			= chacha20_setkey,
 		.encrypt		= xchacha_simd,
 		.decrypt		= xchacha_simd,
 	}, {
@@ -257,7 +237,7 @@ static struct skcipher_alg algs[] = {
 		.max_keysize		= CHACHA_KEY_SIZE,
 		.ivsize			= XCHACHA_IV_SIZE,
 		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= crypto_chacha12_setkey,
+		.setkey			= chacha12_setkey,
 		.encrypt		= xchacha_simd,
 		.decrypt		= xchacha_simd,
 	},
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1417,7 +1417,7 @@ config CRYPTO_CHACHA20_X86_64
 	tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)"
 	depends on X86 && 64BIT
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_CHACHA20
+	select CRYPTO_LIB_CHACHA_GENERIC
 	help
 	  SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
 	  XChaCha20, and XChaCha12 stream ciphers.
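
The setkey renames in the skcipher tables above are internal to the
driver; the algorithm names it registers are unchanged, so crypto API
consumers are unaffected. A small usage sketch (our own example;
"xchacha12" is assumed from the chacha12_setkey entry above):

#include <linux/err.h>
#include <crypto/skcipher.h>

/* Hypothetical probe, not from this patch: resolving the algorithm by
 * name works exactly as it did before the rename. */
static int xchacha12_available(void)
{
        struct crypto_skcipher *tfm = crypto_alloc_skcipher("xchacha12", 0, 0);

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);    /* e.g. -ENOENT: no provider */
        crypto_free_skcipher(tfm);
        return 0;
}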