Refactor code to use diesel-async.

This still depends on unreleased diesel and diesel-async code. As soon
as new diesel and diesel-async versions have been released, we can remove
the [patch.crates-io] section from Cargo.toml.
Orne Brocaar 2023-11-28 13:12:18 +00:00
parent 3f57609981
commit 8e2eda3d5b
30 changed files with 3384 additions and 4122 deletions
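In practice, the refactor replaces blocking diesel calls (queries run inside task::spawn_blocking on a synchronous PgConnection pooled via r2d2) with diesel-async queries awaited directly on a pooled AsyncPgConnection. Below is a minimal sketch of the new pattern, assuming only the diesel, diesel-async and anyhow dependencies from this diff; it mirrors the health-check change further down, where the connection comes from the new get_async_db_conn() helper.

use anyhow::Result;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

// Before: diesel::RunQueryDsl with a blocking .execute(&mut c) inside task::spawn_blocking.
// After: the same query builder, executed asynchronously and awaited.
async fn check_db(c: &mut AsyncPgConnection) -> Result<()> {
    diesel::sql_query("select 1").execute(c).await?;
    Ok(())
}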

Cargo.lock (generated)

@ -759,9 +759,11 @@ dependencies = [
"chrono", "chrono",
"clap", "clap",
"diesel", "diesel",
"diesel-async",
"diesel_migrations", "diesel_migrations",
"dotenv", "dotenv",
"futures", "futures",
"futures-util",
"gcp_auth", "gcp_auth",
"geohash", "geohash",
"handlebars", "handlebars",
@ -796,6 +798,9 @@ dependencies = [
"reqwest", "reqwest",
"rquickjs", "rquickjs",
"rust-embed", "rust-embed",
"rustls",
"rustls-native-certs",
"rustls-pemfile",
"serde", "serde",
"serde_json", "serde_json",
"serde_urlencoded", "serde_urlencoded",
@ -804,6 +809,8 @@ dependencies = [
"thiserror", "thiserror",
"tokio", "tokio",
"tokio-executor-trait", "tokio-executor-trait",
"tokio-postgres",
"tokio-postgres-rustls",
"tokio-reactor-trait", "tokio-reactor-trait",
"tokio-stream", "tokio-stream",
"toml", "toml",
@ -1172,6 +1179,24 @@ dependencies = [
"generic-array", "generic-array",
] ]
[[package]]
name = "deadpool"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb84100978c1c7b37f09ed3ce3e5f843af02c2a2c431bae5b19230dad2c1b490"
dependencies = [
"async-trait",
"deadpool-runtime",
"num_cpus",
"tokio",
]
[[package]]
name = "deadpool-runtime"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49"
[[package]] [[package]]
name = "der" name = "der"
version = "0.7.8" version = "0.7.8"
@ -1204,9 +1229,8 @@ dependencies = [
[[package]] [[package]]
name = "diesel" name = "diesel"
version = "2.1.4" version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/diesel-rs/diesel.git?rev=566dcccc6df6adb6ceddef8df5e1806e2a065c40#566dcccc6df6adb6ceddef8df5e1806e2a065c40"
checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8"
dependencies = [ dependencies = [
"bigdecimal", "bigdecimal",
"bitflags 2.4.1", "bitflags 2.4.1",
@ -1217,19 +1241,31 @@ dependencies = [
"num-bigint", "num-bigint",
"num-integer", "num-integer",
"num-traits", "num-traits",
"pq-sys",
"r2d2",
"serde_json", "serde_json",
"uuid", "uuid",
] ]
[[package]]
name = "diesel-async"
version = "0.4.1"
source = "git+https://github.com/weiznich/diesel_async.git?rev=017ebe2fb7a2709ab5db92148dea5ce812a35e09#017ebe2fb7a2709ab5db92148dea5ce812a35e09"
dependencies = [
"async-trait",
"deadpool",
"diesel",
"futures-util",
"scoped-futures",
"tokio",
"tokio-postgres",
]
[[package]] [[package]]
name = "diesel_derives" name = "diesel_derives"
version = "2.1.2" version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/diesel-rs/diesel.git?rev=566dcccc6df6adb6ceddef8df5e1806e2a065c40#566dcccc6df6adb6ceddef8df5e1806e2a065c40"
checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44"
dependencies = [ dependencies = [
"diesel_table_macro_syntax", "diesel_table_macro_syntax",
"dsl_auto_type",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn 2.0.39", "syn 2.0.39",
@ -1249,8 +1285,7 @@ dependencies = [
[[package]] [[package]]
name = "diesel_table_macro_syntax" name = "diesel_table_macro_syntax"
version = "0.1.0" version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/diesel-rs/diesel.git?rev=566dcccc6df6adb6ceddef8df5e1806e2a065c40#566dcccc6df6adb6ceddef8df5e1806e2a065c40"
checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5"
dependencies = [ dependencies = [
"syn 2.0.39", "syn 2.0.39",
] ]
@ -1306,6 +1341,19 @@ version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
[[package]]
name = "dsl_auto_type"
version = "0.1.0"
source = "git+https://github.com/diesel-rs/diesel.git?rev=566dcccc6df6adb6ceddef8df5e1806e2a065c40#566dcccc6df6adb6ceddef8df5e1806e2a065c40"
dependencies = [
"darling",
"either",
"heck",
"proc-macro2",
"quote",
"syn 2.0.39",
]
[[package]] [[package]]
name = "dtoa" name = "dtoa"
version = "1.0.9" version = "1.0.9"
@ -1453,6 +1501,12 @@ dependencies = [
"async-trait", "async-trait",
] ]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]] [[package]]
name = "fastrand" name = "fastrand"
version = "1.9.0" version = "1.9.0"
@ -1484,6 +1538,12 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7"
[[package]]
name = "finl_unicode"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6"
[[package]] [[package]]
name = "fixedbitset" name = "fixedbitset"
version = "0.4.2" version = "0.4.2"
@ -2410,6 +2470,16 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
[[package]]
name = "md-5"
version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
dependencies = [
"cfg-if",
"digest",
]
[[package]] [[package]]
name = "memchr" name = "memchr"
version = "2.6.4" version = "2.6.4"
@ -2968,6 +3038,15 @@ dependencies = [
"indexmap 2.1.0", "indexmap 2.1.0",
] ]
[[package]]
name = "phf"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
dependencies = [
"phf_shared 0.11.2",
]
[[package]] [[package]]
name = "phf_shared" name = "phf_shared"
version = "0.10.0" version = "0.10.0"
@ -2977,6 +3056,15 @@ dependencies = [
"siphasher", "siphasher",
] ]
[[package]]
name = "phf_shared"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b"
dependencies = [
"siphasher",
]
[[package]] [[package]]
name = "pin-project" name = "pin-project"
version = "1.1.3" version = "1.1.3"
@ -3095,6 +3183,35 @@ dependencies = [
"windows-sys", "windows-sys",
] ]
[[package]]
name = "postgres-protocol"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520"
dependencies = [
"base64 0.21.5",
"byteorder",
"bytes",
"fallible-iterator",
"hmac",
"md-5",
"memchr",
"rand",
"sha2",
"stringprep",
]
[[package]]
name = "postgres-types"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c"
dependencies = [
"bytes",
"fallible-iterator",
"postgres-protocol",
]
[[package]] [[package]]
name = "powerfmt" name = "powerfmt"
version = "0.2.0" version = "0.2.0"
@ -3107,15 +3224,6 @@ version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "pq-sys"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd"
dependencies = [
"vcpkg",
]
[[package]] [[package]]
name = "precomputed-hash" name = "precomputed-hash"
version = "0.1.1" version = "0.1.1"
@ -3733,6 +3841,16 @@ dependencies = [
"parking_lot", "parking_lot",
] ]
[[package]]
name = "scoped-futures"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1473e24c637950c9bd38763220bea91ec3e095a89f672bbd7a10d03e77ba467"
dependencies = [
"cfg-if",
"pin-utils",
]
[[package]] [[package]]
name = "scoped-tls" name = "scoped-tls"
version = "1.0.1" version = "1.0.1"
@ -4110,10 +4228,21 @@ dependencies = [
"new_debug_unreachable", "new_debug_unreachable",
"once_cell", "once_cell",
"parking_lot", "parking_lot",
"phf_shared", "phf_shared 0.10.0",
"precomputed-hash", "precomputed-hash",
] ]
[[package]]
name = "stringprep"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6"
dependencies = [
"finl_unicode",
"unicode-bidi",
"unicode-normalization",
]
[[package]] [[package]]
name = "strsim" name = "strsim"
version = "0.10.0" version = "0.10.0"
@ -4344,6 +4473,46 @@ dependencies = [
"syn 2.0.39", "syn 2.0.39",
] ]
[[package]]
name = "tokio-postgres"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8"
dependencies = [
"async-trait",
"byteorder",
"bytes",
"fallible-iterator",
"futures-channel",
"futures-util",
"log",
"parking_lot",
"percent-encoding",
"phf",
"pin-project-lite",
"postgres-protocol",
"postgres-types",
"rand",
"socket2 0.5.5",
"tokio",
"tokio-util",
"whoami",
]
[[package]]
name = "tokio-postgres-rustls"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd5831152cb0d3f79ef5523b357319ba154795d64c7078b2daa95a803b54057f"
dependencies = [
"futures",
"ring 0.16.20",
"rustls",
"tokio",
"tokio-postgres",
"tokio-rustls",
]
[[package]] [[package]]
name = "tokio-reactor-trait" name = "tokio-reactor-trait"
version = "1.1.0" version = "1.1.0"
@ -4934,6 +5103,16 @@ dependencies = [
"rustix 0.38.25", "rustix 0.38.25",
] ]
[[package]]
name = "whoami"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]] [[package]]
name = "winapi" name = "winapi"
version = "0.3.9" version = "0.3.9"


@ -13,3 +13,7 @@ members = [
opt-level = 'z' opt-level = 'z'
lto = true lto = true
codegen-units = 1 codegen-units = 1
[patch.crates-io]
diesel = { git = "https://github.com/diesel-rs/diesel.git", rev = "566dcccc6df6adb6ceddef8df5e1806e2a065c40" }
diesel-async = { git = "https://github.com/weiznich/diesel_async.git", rev = "017ebe2fb7a2709ab5db92148dea5ce812a35e09" }


@ -26,14 +26,16 @@ handlebars = "4.4"
validator = "0.16" validator = "0.16"
diesel = { version = "2.1", features = [ diesel = { version = "2.1", features = [
"chrono", "chrono",
"postgres",
"r2d2",
"uuid", "uuid",
"serde_json", "serde_json",
"numeric", "numeric",
"64-column-tables", "64-column-tables",
"postgres_backend",
] } ] }
diesel_migrations = { version = "2.1" } diesel_migrations = { version = "2.1" }
diesel-async = { version = "0.4", features = ["deadpool", "postgres", "async-connection-wrapper"] }
tokio-postgres = "0.7"
tokio-postgres-rustls = "0.10.0"
r2d2 = "0.8" r2d2 = "0.8"
bigdecimal = "0.4" bigdecimal = "0.4"
redis = { version = "0.23", features = ["r2d2", "cluster", "tls-rustls"] } redis = { version = "0.23", features = ["r2d2", "cluster", "tls-rustls"] }
@ -84,6 +86,7 @@ warp = { version = "0.3", features = ["tls"], default-features = false }
hyper = "0.14" hyper = "0.14"
tower = "0.4" tower = "0.4"
futures = "0.3" futures = "0.3"
futures-util = "0.3"
http = "0.2" http = "0.2"
http-body = "0.4" http-body = "0.4"
rust-embed = "8.0" rust-embed = "8.0"
@ -98,6 +101,9 @@ anyhow = "1.0"
pbkdf2 = { version = "0.12", features = ["simple"] } pbkdf2 = { version = "0.12", features = ["simple"] }
rand_core = { version = "0.6", features = ["std"] } rand_core = { version = "0.6", features = ["std"] }
jsonwebtoken = "8.3" jsonwebtoken = "8.3"
rustls = "0.21"
rustls-native-certs = "0.6"
rustls-pemfile = "1.0"
openssl = { version = "0.10" } openssl = { version = "0.10" }
openidconnect = { version = "3.3", features = ["accept-rfc3339-timestamps"] } openidconnect = { version = "3.3", features = ["accept-rfc3339-timestamps"] }

File diff suppressed because it is too large.


@ -2,14 +2,14 @@ use std::convert::Infallible;
use std::net::SocketAddr; use std::net::SocketAddr;
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use diesel::RunQueryDsl; use diesel_async::RunQueryDsl;
use tokio::task; use tokio::task;
use tracing::info; use tracing::info;
use warp::{http::Response, http::StatusCode, Filter}; use warp::{http::Response, http::StatusCode, Filter};
use crate::config; use crate::config;
use crate::monitoring::prometheus; use crate::monitoring::prometheus;
use crate::storage::{get_db_conn, get_redis_conn}; use crate::storage::{get_async_db_conn, get_redis_conn};
pub async fn setup() { pub async fn setup() {
let conf = config::get(); let conf = config::get();
@ -50,17 +50,17 @@ async fn health_handler() -> Result<impl warp::Reply, Infallible> {
} }
async fn _health_handler() -> Result<()> { async fn _health_handler() -> Result<()> {
let mut c = get_async_db_conn().await?;
diesel::sql_query("select 1")
.execute(&mut c)
.await
.context("PostgreSQL connection error")?;
task::spawn_blocking(move || -> Result<()> { task::spawn_blocking(move || -> Result<()> {
let mut r = get_redis_conn()?; let mut r = get_redis_conn()?;
if !r.check_connection() { if !r.check_connection() {
return Err(anyhow!("Redis connection error")); return Err(anyhow!("Redis connection error"));
} }
let mut c = get_db_conn()?;
diesel::sql_query("select 1")
.execute(&mut c)
.context("PostgreSQL connection error")?;
Ok(()) Ok(())
}) })
.await? .await?


@ -41,11 +41,12 @@ pub fn run() {
# PostgreSQL connection pool. # PostgreSQL connection pool.
max_open_connections={{ postgresql.max_open_connections }} max_open_connections={{ postgresql.max_open_connections }}
# Min idle connections. # CA certificate (optional).
# #
# This sets the min. number of idle connections in the PostgreSQL connection # Set this to the path of the CA certificate in case you are using TLS and
# pool (0 = equal to max_open_connections). # the server-certificate is not signed by a CA in the platform certificate
min_idle_connections={{ postgresql.min_idle_connections }} # store.
ca_cert="{{ postgresql.ca_cert }}"
# Redis configuration. # Redis configuration.
@ -459,11 +460,12 @@ pub fn run() {
# PostgreSQL connection pool. # PostgreSQL connection pool.
max_open_connections={{ integration.postgresql.max_open_connections }} max_open_connections={{ integration.postgresql.max_open_connections }}
# Min idle connections. # CA certificate (optional).
# #
# This sets the min. number of idle connections in the PostgreSQL connection # Set this to the path of the CA certificate in case you are using TLS and
# pool (0 = equal to max_open_connections). # the server-certificate is not signed by a CA in the platform certificate
min_idle_connections={{ integration.postgresql.min_idle_connections }} # store.
ca_cert="{{ integration.postgresql.ca_cert }}"
# AMQP / RabbitMQ integration configuration. # AMQP / RabbitMQ integration configuration.
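For reference, a hedged sketch of how the new option could look in the generated configuration file. The [postgresql] section name follows the template context shown above; the dsn and certificate path are illustrative, while max_open_connections=10 and the empty ca_cert default come from this diff.

[postgresql]
dsn="postgresql://chirpstack:chirpstack@localhost/chirpstack?sslmode=require"
max_open_connections=10

# Only required when TLS is used and the server certificate is not signed by
# a CA in the platform certificate store.
ca_cert="/etc/chirpstack/certs/postgresql-ca.pem"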


@ -54,7 +54,7 @@ impl Default for Logging {
pub struct Postgresql { pub struct Postgresql {
pub dsn: String, pub dsn: String,
pub max_open_connections: u32, pub max_open_connections: u32,
pub min_idle_connections: u32, pub ca_cert: String,
} }
impl Default for Postgresql { impl Default for Postgresql {
@ -62,7 +62,7 @@ impl Default for Postgresql {
Postgresql { Postgresql {
dsn: "postgresql://chirpstack:chirpstack@localhost/chirpstack?sslmode=disable".into(), dsn: "postgresql://chirpstack:chirpstack@localhost/chirpstack?sslmode=disable".into(),
max_open_connections: 10, max_open_connections: 10,
min_idle_connections: 0, ca_cert: "".into(),
} }
} }
} }
@ -307,7 +307,7 @@ impl Default for MqttIntegrationClient {
pub struct PostgresqlIntegration { pub struct PostgresqlIntegration {
pub dsn: String, pub dsn: String,
pub max_open_connections: u32, pub max_open_connections: u32,
pub min_idle_connections: u32, pub ca_cert: String,
} }
impl Default for PostgresqlIntegration { impl Default for PostgresqlIntegration {
@ -315,7 +315,7 @@ impl Default for PostgresqlIntegration {
PostgresqlIntegration { PostgresqlIntegration {
dsn: "postgresql://chirpstack_integration:chirpstack_integration@localhost/chirpstack_integration?sslmode=disable".into(), dsn: "postgresql://chirpstack_integration:chirpstack_integration@localhost/chirpstack_integration?sslmode=disable".into(),
max_open_connections: 10, max_open_connections: 10,
min_idle_connections: 0, ca_cert: "".into(),
} }
} }
} }


@ -56,6 +56,7 @@ pub async fn setup() -> Result<()> {
} }
"postgresql" => integrations.push(Box::new( "postgresql" => integrations.push(Box::new(
postgresql::Integration::new(&conf.integration.postgresql) postgresql::Integration::new(&conf.integration.postgresql)
.await
.context("Setup PostgreSQL integration")?, .context("Setup PostgreSQL integration")?,
)), )),
"amqp" => integrations.push(Box::new( "amqp" => integrations.push(Box::new(


@ -1,19 +1,24 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::str::FromStr; use std::str::FromStr;
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use async_trait::async_trait; use async_trait::async_trait;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::pg::PgConnection; use diesel::{ConnectionError, ConnectionResult};
use diesel::prelude::*; use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
use diesel::r2d2::{ConnectionManager, Pool}; use diesel_async::pooled_connection::deadpool::{Object as DeadpoolObject, Pool as DeadpoolPool};
use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
use diesel_async::{AsyncPgConnection, RunQueryDsl};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use tokio::task; use futures_util::future::BoxFuture;
use tracing::info; use futures_util::FutureExt;
use tracing::{error, info};
use uuid::Uuid; use uuid::Uuid;
use super::Integration as IntegrationTrait; use super::Integration as IntegrationTrait;
use crate::config::PostgresqlIntegration as Config; use crate::config::{self, PostgresqlIntegration as Config};
use chirpstack_api::integration; use chirpstack_api::integration;
use schema::{ use schema::{
event_ack, event_integration, event_join, event_location, event_log, event_status, event_ack, event_integration, event_join, event_location, event_log, event_status,
@ -25,7 +30,8 @@ mod schema;
pub const MIGRATIONS: EmbeddedMigrations = pub const MIGRATIONS: EmbeddedMigrations =
embed_migrations!("./src/integration/postgresql/migrations"); embed_migrations!("./src/integration/postgresql/migrations");
type PgPool = Pool<ConnectionManager<PgConnection>>; pub type AsyncPgPool = DeadpoolPool<AsyncPgConnection>;
pub type AsyncPgPoolConnection = DeadpoolObject<AsyncPgConnection>;
#[derive(Insertable)] #[derive(Insertable)]
#[diesel(table_name = event_up)] #[diesel(table_name = event_up)]
@ -189,32 +195,87 @@ struct EventIntegration {
} }
pub struct Integration { pub struct Integration {
pg_pool: PgPool, pg_pool: AsyncPgPool,
} }
impl Integration { impl Integration {
pub fn new(conf: &Config) -> Result<Integration> { pub async fn new(conf: &Config) -> Result<Integration> {
info!("Initializing PostgreSQL integration"); info!("Initializing PostgreSQL integration");
let pg_pool = PgPool::builder() let mut config = ManagerConfig::default();
.max_size(conf.max_open_connections) config.custom_setup = Box::new(pg_establish_connection);
.min_idle(match conf.min_idle_connections {
0 => None, let mgr =
_ => Some(conf.min_idle_connections), AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(&conf.dsn, config);
}) let pg_pool = DeadpoolPool::builder(mgr)
.build(ConnectionManager::new(&conf.dsn)) .max_size(conf.max_open_connections as usize)
.context("Setup PostgreSQL connection pool error")?; .build()?;
let mut db_conn = pg_pool.get()?;
let c = pg_pool.get().await?;
let mut c_wrapped: AsyncConnectionWrapper<AsyncPgPoolConnection> =
AsyncConnectionWrapper::from(c);
info!("Applying schema migrations"); info!("Applying schema migrations");
db_conn tokio::task::spawn_blocking(move || -> Result<()> {
c_wrapped
.run_pending_migrations(MIGRATIONS) .run_pending_migrations(MIGRATIONS)
.map_err(|e| anyhow!("{}", e))?; .map_err(|e| anyhow!("{}", e))?;
Ok(())
})
.await??;
Ok(Integration { pg_pool }) Ok(Integration { pg_pool })
} }
} }
// Source:
// https://github.com/weiznich/diesel_async/blob/main/examples/postgres/pooled-with-rustls/src/main.rs
fn pg_establish_connection(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
let fut = async {
let root_certs =
pg_root_certs().map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
let rustls_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
let (client, conn) = tokio_postgres::connect(config, tls)
.await
.map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
tokio::spawn(async move {
if let Err(e) = conn.await {
error!(error = %e, "PostgreSQL connection error");
}
});
AsyncPgConnection::try_from(client).await
};
fut.boxed()
}
fn pg_root_certs() -> Result<rustls::RootCertStore> {
let conf = config::get();
let mut roots = rustls::RootCertStore::empty();
let certs = rustls_native_certs::load_native_certs()?;
let certs: Vec<_> = certs.into_iter().map(|cert| cert.0).collect();
roots.add_parsable_certificates(&certs);
if !conf.postgresql.ca_cert.is_empty() {
let f = File::open(&conf.integration.postgresql.ca_cert).context("Open ca certificate")?;
let mut reader = BufReader::new(f);
let certs = rustls_pemfile::certs(&mut reader)?;
for cert in certs
.into_iter()
.map(rustls::Certificate)
.collect::<Vec<_>>()
{
roots.add(&cert)?;
}
}
Ok(roots)
}
#[async_trait] #[async_trait]
impl IntegrationTrait for Integration { impl IntegrationTrait for Integration {
async fn uplink_event( async fn uplink_event(
@ -254,16 +315,12 @@ impl IntegrationTrait for Integration {
rx_info: serde_json::to_value(&pl.rx_info)?, rx_info: serde_json::to_value(&pl.rx_info)?,
tx_info: serde_json::to_value(&pl.tx_info)?, tx_info: serde_json::to_value(&pl.tx_info)?,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_up::table) diesel::insert_into(event_up::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -295,16 +352,12 @@ impl IntegrationTrait for Integration {
tags: serde_json::to_value(&di.tags)?, tags: serde_json::to_value(&di.tags)?,
dev_addr: pl.dev_addr.clone(), dev_addr: pl.dev_addr.clone(),
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_join::table) diesel::insert_into(event_join::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -338,16 +391,12 @@ impl IntegrationTrait for Integration {
acknowledged: pl.acknowledged, acknowledged: pl.acknowledged,
f_cnt_down: pl.f_cnt_down as i64, f_cnt_down: pl.f_cnt_down as i64,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_ack::table) diesel::insert_into(event_ack::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -382,16 +431,12 @@ impl IntegrationTrait for Integration {
gateway_id: pl.gateway_id.clone(), gateway_id: pl.gateway_id.clone(),
tx_info: serde_json::to_value(&pl.tx_info)?, tx_info: serde_json::to_value(&pl.tx_info)?,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_tx_ack::table) diesel::insert_into(event_tx_ack::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -425,16 +470,12 @@ impl IntegrationTrait for Integration {
description: pl.description.clone(), description: pl.description.clone(),
context: serde_json::to_value(&pl.context)?, context: serde_json::to_value(&pl.context)?,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_log::table) diesel::insert_into(event_log::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -469,15 +510,12 @@ impl IntegrationTrait for Integration {
battery_level_unavailable: pl.battery_level_unavailable, battery_level_unavailable: pl.battery_level_unavailable,
battery_level: pl.battery_level, battery_level: pl.battery_level,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_status::table) diesel::insert_into(event_status::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -514,16 +552,12 @@ impl IntegrationTrait for Integration {
source: loc.source.to_string(), source: loc.source.to_string(),
accuracy: loc.accuracy, accuracy: loc.accuracy,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_location::table) diesel::insert_into(event_location::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
@ -557,16 +591,12 @@ impl IntegrationTrait for Integration {
event_type: pl.event_type.clone(), event_type: pl.event_type.clone(),
object: serde_json::to_value(&pl.object)?, object: serde_json::to_value(&pl.object)?,
}; };
let mut c = self.pg_pool.get()?; let mut c = self.pg_pool.get().await?;
task::spawn_blocking(move || -> Result<()> {
diesel::insert_into(event_integration::table) diesel::insert_into(event_integration::table)
.values(&e) .values(&e)
.execute(&mut c)?; .execute(&mut c)
Ok(()) .await?;
})
.await??;
Ok(()) Ok(())
} }
} }


@ -2,13 +2,13 @@ use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::dsl; use diesel::dsl;
use diesel::prelude::*; use diesel::prelude::*;
use tokio::task; use diesel_async::RunQueryDsl;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
use super::error::Error; use super::error::Error;
use super::schema::api_key; use super::schema::api_key;
use super::{error, get_db_conn}; use super::{error, get_async_db_conn};
#[derive(Queryable, Insertable, PartialEq, Eq, Debug)] #[derive(Queryable, Insertable, PartialEq, Eq, Debug)]
#[diesel(table_name = api_key)] #[diesel(table_name = api_key)]
@ -51,41 +51,30 @@ pub struct Filters {
pub async fn create(ak: ApiKey) -> Result<ApiKey, Error> { pub async fn create(ak: ApiKey) -> Result<ApiKey, Error> {
ak.validate()?; ak.validate()?;
let ak = task::spawn_blocking(move || -> Result<ApiKey, Error> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?; let ak: ApiKey = diesel::insert_into(api_key::table)
diesel::insert_into(api_key::table)
.values(&ak) .values(&ak)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| error::Error::from_diesel(e, ak.id.to_string())) .await
}) .map_err(|e| error::Error::from_diesel(e, ak.id.to_string()))?;
.await??;
info!(id = %ak.id, "Api-key created"); info!(id = %ak.id, "Api-key created");
Ok(ak) Ok(ak)
} }
pub async fn delete(id: &Uuid) -> Result<(), Error> { pub async fn delete(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id; let ra = diesel::delete(api_key::dsl::api_key.find(&id))
.execute(&mut c)
move || -> Result<(), Error> { .await?;
let mut c = get_db_conn()?;
let ra = diesel::delete(api_key::dsl::api_key.find(&id)).execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(id.to_string())); return Err(Error::NotFound(id.to_string()));
} }
info!(id = %id, "Api-key deleted"); info!(id = %id, "Api-key deleted");
Ok(()) Ok(())
} }
})
.await?
}
pub async fn get_count(filters: &Filters) -> Result<i64, Error> { pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = api_key::dsl::api_key let mut q = api_key::dsl::api_key
.select(dsl::count_star()) .select(dsl::count_star())
@ -96,18 +85,11 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
q = q.filter(api_key::dsl::tenant_id.eq(tenant_id)); q = q.filter(api_key::dsl::tenant_id.eq(tenant_id));
} }
Ok(q.first(&mut c)?) Ok(q.first(&mut c).await?)
}
})
.await?
} }
pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<ApiKey>, Error> { pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<ApiKey>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<ApiKey>, Error> {
let mut c = get_db_conn()?;
let mut q = api_key::dsl::api_key let mut q = api_key::dsl::api_key
.filter(api_key::dsl::is_admin.eq(filters.is_admin)) .filter(api_key::dsl::is_admin.eq(filters.is_admin))
@ -121,12 +103,10 @@ pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<ApiK
.order_by(api_key::dsl::name) .order_by(api_key::dsl::name)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {
@ -143,19 +123,13 @@ pub mod test {
} }
pub async fn get(id: &Uuid) -> Result<ApiKey, Error> { pub async fn get(id: &Uuid) -> Result<ApiKey, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<ApiKey, Error> {
let mut c = get_db_conn()?;
api_key::dsl::api_key api_key::dsl::api_key
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| error::Error::from_diesel(e, id.to_string())) .map_err(|e| error::Error::from_diesel(e, id.to_string()))
} }
})
.await?
}
pub async fn create_api_key(is_admin: bool, is_tenant: bool) -> ApiKey { pub async fn create_api_key(is_admin: bool, is_tenant: bool) -> ApiKey {
let ak = ApiKey { let ak = ApiKey {


@ -4,20 +4,22 @@ use std::str::FromStr;
use anyhow::Result; use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::backend::Backend; use diesel::{
use diesel::dsl; backend::Backend,
use diesel::pg::Pg; deserialize, dsl,
use diesel::prelude::*; pg::Pg,
use diesel::sql_types::{Jsonb, Text}; prelude::*,
use diesel::{deserialize, serialize}; serialize,
sql_types::{Jsonb, Text},
};
use diesel_async::RunQueryDsl;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tokio::task;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
use super::error::Error; use super::error::Error;
use super::schema::{application, application_integration}; use super::schema::{application, application_integration};
use super::{fields, get_db_conn}; use super::{fields, get_async_db_conn};
#[derive(Clone, Queryable, Insertable, PartialEq, Eq, Debug)] #[derive(Clone, Queryable, Insertable, PartialEq, Eq, Debug)]
#[diesel(table_name = application)] #[diesel(table_name = application)]
@ -289,42 +291,32 @@ impl Default for Integration {
pub async fn create(a: Application) -> Result<Application, Error> { pub async fn create(a: Application) -> Result<Application, Error> {
a.validate()?; a.validate()?;
task::spawn_blocking({
move || -> Result<Application, Error> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?;
let a: Application = diesel::insert_into(application::table) let a: Application = diesel::insert_into(application::table)
.values(&a) .values(&a)
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, a.id.to_string()))?; .map_err(|e| Error::from_diesel(e, a.id.to_string()))?;
info!(id = %a.id, "Application created"); info!(id = %a.id, "Application created");
Ok(a) Ok(a)
} }
})
.await?
}
pub async fn get(id: &Uuid) -> Result<Application, Error> { pub async fn get(id: &Uuid) -> Result<Application, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<Application, Error> {
let mut c = get_db_conn()?;
let a = application::dsl::application let a = application::dsl::application
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(a) Ok(a)
} }
})
.await?
}
pub async fn update(a: Application) -> Result<Application, Error> { pub async fn update(a: Application) -> Result<Application, Error> {
a.validate()?; a.validate()?;
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Application, Error> {
let mut c = get_db_conn()?;
let a: Application = diesel::update(application::dsl::application.find(&a.id)) let a: Application = diesel::update(application::dsl::application.find(&a.id))
.set(( .set((
application::updated_at.eq(Utc::now()), application::updated_at.eq(Utc::now()),
@ -333,6 +325,7 @@ pub async fn update(a: Application) -> Result<Application, Error> {
application::tags.eq(&a.tags), application::tags.eq(&a.tags),
)) ))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, a.id.to_string()))?; .map_err(|e| Error::from_diesel(e, a.id.to_string()))?;
info!( info!(
@ -342,24 +335,14 @@ pub async fn update(a: Application) -> Result<Application, Error> {
Ok(a) Ok(a)
} }
})
.await?
}
pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result<Application, Error> { pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result<Application, Error> {
let app = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
let cert = cert.to_vec();
move || -> Result<Application, Error> {
let mut c = get_db_conn()?;
let app: Application = diesel::update(application::dsl::application.find(&id)) let app: Application = diesel::update(application::dsl::application.find(&id))
.set(application::mqtt_tls_cert.eq(cert)) .set(application::mqtt_tls_cert.eq(cert))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(app)
}
})
.await??;
info!( info!(
application_id = %id, application_id = %id,
@ -370,11 +353,10 @@ pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result<Application,
} }
pub async fn delete(id: &Uuid) -> Result<(), Error> { pub async fn delete(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id; let ra = diesel::delete(application::dsl::application.find(&id))
move || -> Result<(), Error> { .execute(&mut c)
let mut c = get_db_conn()?; .await?;
let ra = diesel::delete(application::dsl::application.find(&id)).execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(id.to_string())); return Err(Error::NotFound(id.to_string()));
} }
@ -386,15 +368,9 @@ pub async fn delete(id: &Uuid) -> Result<(), Error> {
Ok(()) Ok(())
} }
})
.await?
}
pub async fn get_count(filters: &Filters) -> Result<i64, Error> { pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = application::dsl::application let mut q = application::dsl::application
.select(dsl::count_star()) .select(dsl::count_star())
.into_boxed(); .into_boxed();
@ -407,10 +383,7 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
q = q.filter(application::dsl::name.ilike(format!("%{}%", search))); q = q.filter(application::dsl::name.ilike(format!("%{}%", search)));
} }
Ok(q.first(&mut c)?) Ok(q.first(&mut c).await?)
}
})
.await?
} }
pub async fn list( pub async fn list(
@ -418,10 +391,7 @@ pub async fn list(
offset: i64, offset: i64,
filters: &Filters, filters: &Filters,
) -> Result<Vec<ApplicationListItem>, Error> { ) -> Result<Vec<ApplicationListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<ApplicationListItem>, Error> {
let mut c = get_db_conn()?;
let mut q = application::dsl::application let mut q = application::dsl::application
.select(( .select((
application::id, application::id,
@ -444,37 +414,28 @@ pub async fn list(
.order_by(application::dsl::name) .order_by(application::dsl::name)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
pub async fn create_integration(i: Integration) -> Result<Integration, Error> { pub async fn create_integration(i: Integration) -> Result<Integration, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Integration, Error> {
let mut c = get_db_conn()?;
let i: Integration = diesel::insert_into(application_integration::table) let i: Integration = diesel::insert_into(application_integration::table)
.values(&i) .values(&i)
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, i.kind.to_string()))?; .map_err(|e| Error::from_diesel(e, i.kind.to_string()))?;
info!(application_id = %i.application_id, kind = %i.kind, "Integration created"); info!(application_id = %i.application_id, kind = %i.kind, "Integration created");
Ok(i) Ok(i)
} }
})
.await?
}
pub async fn get_integration( pub async fn get_integration(
application_id: &Uuid, application_id: &Uuid,
kind: IntegrationKind, kind: IntegrationKind,
) -> Result<Integration, Error> { ) -> Result<Integration, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let application_id = *application_id;
move || -> Result<Integration, Error> {
let mut c = get_db_conn()?;
let mut i: Integration = application_integration::dsl::application_integration let mut i: Integration = application_integration::dsl::application_integration
.filter( .filter(
application_integration::dsl::application_id application_integration::dsl::application_id
@ -482,6 +443,7 @@ pub async fn get_integration(
.and(application_integration::dsl::kind.eq(kind)), .and(application_integration::dsl::kind.eq(kind)),
) )
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, application_id.to_string()))?; .map_err(|e| Error::from_diesel(e, application_id.to_string()))?;
// For backwards compatibiliy // For backwards compatibiliy
@ -498,14 +460,9 @@ pub async fn get_integration(
Ok(i) Ok(i)
} }
})
.await?
}
pub async fn update_integration(i: Integration) -> Result<Integration, Error> { pub async fn update_integration(i: Integration) -> Result<Integration, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Integration, Error> {
let mut c = get_db_conn()?;
let i: Integration = diesel::update( let i: Integration = diesel::update(
application_integration::dsl::application_integration.filter( application_integration::dsl::application_integration.filter(
application_integration::dsl::application_id application_integration::dsl::application_id
@ -518,21 +475,16 @@ pub async fn update_integration(i: Integration) -> Result<Integration, Error> {
application_integration::configuration.eq(&i.configuration), application_integration::configuration.eq(&i.configuration),
)) ))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, i.application_id.to_string()))?; .map_err(|e| Error::from_diesel(e, i.application_id.to_string()))?;
info!(application_id = %i.application_id, kind = %i.kind, "Integration updated"); info!(application_id = %i.application_id, kind = %i.kind, "Integration updated");
Ok(i) Ok(i)
} }
})
.await?
}
pub async fn delete_integration(application_id: &Uuid, kind: IntegrationKind) -> Result<(), Error> { pub async fn delete_integration(application_id: &Uuid, kind: IntegrationKind) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let application_id = *application_id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete( let ra = diesel::delete(
application_integration::dsl::application_integration.filter( application_integration::dsl::application_integration.filter(
application_integration::dsl::application_id application_integration::dsl::application_id
@ -540,7 +492,8 @@ pub async fn delete_integration(application_id: &Uuid, kind: IntegrationKind) ->
.and(application_integration::dsl::kind.eq(&kind)), .and(application_integration::dsl::kind.eq(&kind)),
), ),
) )
.execute(&mut c)?; .execute(&mut c)
.await?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(application_id.to_string())); return Err(Error::NotFound(application_id.to_string()));
@ -549,26 +502,18 @@ pub async fn delete_integration(application_id: &Uuid, kind: IntegrationKind) ->
info!(application_id = %application_id, kind = %kind, "Integration deleted"); info!(application_id = %application_id, kind = %kind, "Integration deleted");
Ok(()) Ok(())
} }
})
.await?
}
pub async fn get_integrations_for_application( pub async fn get_integrations_for_application(
application_id: &Uuid, application_id: &Uuid,
) -> Result<Vec<Integration>, Error> { ) -> Result<Vec<Integration>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let application_id = *application_id;
move || -> Result<Vec<Integration>, Error> {
let mut c = get_db_conn()?;
let items: Vec<Integration> = application_integration::dsl::application_integration let items: Vec<Integration> = application_integration::dsl::application_integration
.filter(application_integration::dsl::application_id.eq(&application_id)) .filter(application_integration::dsl::application_id.eq(&application_id))
.order_by(application_integration::dsl::kind) .order_by(application_integration::dsl::kind)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>, Error> { pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>, Error> {
#[derive(QueryableByName)] #[derive(QueryableByName)]
@ -577,10 +522,7 @@ pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>,
pub key: String, pub key: String,
} }
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let application_id = *application_id;
move || -> Result<Vec<String>, Error> {
let mut c = get_db_conn()?;
let keys: Vec<Measurement> = diesel::sql_query( let keys: Vec<Measurement> = diesel::sql_query(
r#" r#"
select select
@ -597,12 +539,10 @@ pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>,
) )
.bind::<diesel::sql_types::Uuid, _>(application_id) .bind::<diesel::sql_types::Uuid, _>(application_id)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, application_id.to_string()))?; .map_err(|e| Error::from_diesel(e, application_id.to_string()))?;
Ok(keys.iter().map(|k| k.key.clone()).collect()) Ok(keys.iter().map(|k| k.key.clone()).collect())
} }
})
.await?
}
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {


@ -6,14 +6,14 @@ use anyhow::{Context, Result};
use bigdecimal::BigDecimal; use bigdecimal::BigDecimal;
use chrono::{DateTime, Duration, Utc}; use chrono::{DateTime, Duration, Utc};
use diesel::{backend::Backend, deserialize, dsl, prelude::*, serialize, sql_types::Text}; use diesel::{backend::Backend, deserialize, dsl, prelude::*, serialize, sql_types::Text};
use tokio::task; use diesel_async::RunQueryDsl;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
use lrwn::{DevAddr, EUI64}; use lrwn::{DevAddr, EUI64};
use super::schema::{application, device, device_profile, multicast_group_device, tenant}; use super::schema::{application, device, device_profile, multicast_group_device, tenant};
use super::{error::Error, fields, get_db_conn}; use super::{error::Error, fields, get_async_db_conn};
use crate::config; use crate::config;
#[derive(Debug, Clone, Copy, Eq, PartialEq, AsExpression, FromSqlRow)] #[derive(Debug, Clone, Copy, Eq, PartialEq, AsExpression, FromSqlRow)]
@ -177,11 +177,11 @@ pub struct DevicesDataRate {
} }
pub async fn create(d: Device) -> Result<Device, Error> { pub async fn create(d: Device) -> Result<Device, Error> {
d.validate()?; let mut c = get_async_db_conn().await?;
let d = task::spawn_blocking({ let d: Device = c
move || -> Result<Device, Error> { .build_transaction()
let mut c = get_db_conn()?; .run::<Device, Error, _>(|c| {
c.transaction::<Device, Error, _>(|c| { Box::pin(async move {
// use for update to lock the tenant // use for update to lock the tenant
let t: super::tenant::Tenant = tenant::dsl::tenant let t: super::tenant::Tenant = tenant::dsl::tenant
.select(( .select((
@ -200,13 +200,15 @@ pub async fn create(d: Device) -> Result<Device, Error> {
.inner_join(application::table) .inner_join(application::table)
.filter(application::dsl::id.eq(&d.application_id)) .filter(application::dsl::id.eq(&d.application_id))
.for_update() .for_update()
.first(c)?; .first(c)
.await?;
let dev_count: i64 = device::dsl::device let dev_count: i64 = device::dsl::device
.select(dsl::count_star()) .select(dsl::count_star())
.inner_join(application::table) .inner_join(application::table)
.filter(application::dsl::tenant_id.eq(&t.id)) .filter(application::dsl::tenant_id.eq(&t.id))
.first(c)?; .first(c)
.await?;
if t.max_device_count != 0 && dev_count as i32 >= t.max_device_count { if t.max_device_count != 0 && dev_count as i32 >= t.max_device_count {
return Err(Error::NotAllowed( return Err(Error::NotAllowed(
@ -217,36 +219,29 @@ pub async fn create(d: Device) -> Result<Device, Error> {
diesel::insert_into(device::table) diesel::insert_into(device::table)
.values(&d) .values(&d)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, d.dev_eui.to_string())) .map_err(|e| Error::from_diesel(e, d.dev_eui.to_string()))
}) })
}
}) })
.await??; .await?;
info!(dev_eui = %d.dev_eui, "Device created"); info!(dev_eui = %d.dev_eui, "Device created");
Ok(d) Ok(d)
} }
pub async fn get(dev_eui: &EUI64) -> Result<Device, Error> { pub async fn get(dev_eui: &EUI64) -> Result<Device, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui;
move || -> Result<Device, Error> {
let mut c = get_db_conn()?;
let d = device::dsl::device let d = device::dsl::device
.find(&dev_eui) .find(&dev_eui)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
Ok(d) Ok(d)
} }
})
.await?
}
pub async fn update(d: Device) -> Result<Device, Error> { pub async fn update(d: Device) -> Result<Device, Error> {
d.validate()?; d.validate()?;
let d = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Device, Error> { let d: Device = diesel::update(device::dsl::device.find(&d.dev_eui))
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&d.dev_eui))
.set(( .set((
device::updated_at.eq(Utc::now()), device::updated_at.eq(Utc::now()),
device::application_id.eq(&d.application_id), device::application_id.eq(&d.application_id),
@ -260,57 +255,41 @@ pub async fn update(d: Device) -> Result<Device, Error> {
device::join_eui.eq(&d.join_eui), device::join_eui.eq(&d.join_eui),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, d.dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, d.dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %d.dev_eui, "Device updated"); info!(dev_eui = %d.dev_eui, "Device updated");
Ok(d) Ok(d)
} }
pub async fn set_enabled_class(dev_eui: &EUI64, mode: DeviceClass) -> Result<Device, Error> { pub async fn set_enabled_class(dev_eui: &EUI64, mode: DeviceClass) -> Result<Device, Error> {
let d = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let d: Device = diesel::update(device::dsl::device.find(&dev_eui))
move || -> Result<Device, Error> {
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&dev_eui))
.set(device::enabled_class.eq(&mode)) .set(device::enabled_class.eq(&mode))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %dev_eui, enabled_class = %mode, "Enabled class updated"); info!(dev_eui = %dev_eui, enabled_class = %mode, "Enabled class updated");
Ok(d) Ok(d)
} }
pub async fn set_join_eui(dev_eui: EUI64, join_eui: EUI64) -> Result<Device, Error> { pub async fn set_join_eui(dev_eui: EUI64, join_eui: EUI64) -> Result<Device, Error> {
let d = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Device, Error> { let d: Device = diesel::update(device::dsl::device.find(&dev_eui))
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&dev_eui))
.set(device::join_eui.eq(&join_eui)) .set(device::join_eui.eq(&join_eui))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %dev_eui, join_eui = %join_eui, "Updated JoinEUI"); info!(dev_eui = %dev_eui, join_eui = %join_eui, "Updated JoinEUI");
Ok(d) Ok(d)
} }
pub async fn set_dev_addr(dev_eui: EUI64, dev_addr: DevAddr) -> Result<Device, Error> { pub async fn set_dev_addr(dev_eui: EUI64, dev_addr: DevAddr) -> Result<Device, Error> {
let d = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Device, Error> { let d: Device = diesel::update(device::dsl::device.find(&dev_eui))
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&dev_eui))
.set(device::dev_addr.eq(&dev_addr)) .set(device::dev_addr.eq(&dev_addr))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %dev_eui, dev_addr = %dev_addr, "Updated DevAddr"); info!(dev_eui = %dev_eui, dev_addr = %dev_addr, "Updated DevAddr");
Ok(d) Ok(d)
} }
@ -323,34 +302,24 @@ pub async fn set_scheduler_run_after(
dev_eui: &EUI64, dev_eui: &EUI64,
new_ts: Option<DateTime<Utc>>, new_ts: Option<DateTime<Utc>>,
) -> Result<Device, Error> { ) -> Result<Device, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui;
move || -> Result<Device, Error> {
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&dev_eui)) diesel::update(device::dsl::device.find(&dev_eui))
.set(device::scheduler_run_after.eq(&new_ts)) .set(device::scheduler_run_after.eq(&new_ts))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))
} }
})
.await?
}
pub async fn set_last_seen_dr(dev_eui: &EUI64, dr: u8) -> Result<Device, Error> { pub async fn set_last_seen_dr(dev_eui: &EUI64, dr: u8) -> Result<Device, Error> {
let d = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let d: Device = diesel::update(device::dsl::device.find(&dev_eui))
move || -> Result<Device, Error> {
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&dev_eui))
.set(( .set((
device::last_seen_at.eq(Utc::now()), device::last_seen_at.eq(Utc::now()),
device::dr.eq(dr as i16), device::dr.eq(dr as i16),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %dev_eui, dr = dr, "Data-rate updated"); info!(dev_eui = %dev_eui, dr = dr, "Data-rate updated");
Ok(d) Ok(d)
} }
@ -361,47 +330,34 @@ pub async fn set_status(
external_power_source: bool, external_power_source: bool,
battery_level: Option<BigDecimal>, battery_level: Option<BigDecimal>,
) -> Result<Device, Error> { ) -> Result<Device, Error> {
let d = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let d: Device = diesel::update(device::dsl::device.find(&dev_eui))
move || -> Result<Device, Error> {
let mut c = get_db_conn()?;
diesel::update(device::dsl::device.find(&dev_eui))
.set(( .set((
device::margin.eq(Some(margin)), device::margin.eq(Some(margin)),
device::external_power_source.eq(external_power_source), device::external_power_source.eq(external_power_source),
device::battery_level.eq(battery_level), device::battery_level.eq(battery_level),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %dev_eui, "Device status updated"); info!(dev_eui = %dev_eui, "Device status updated");
Ok(d) Ok(d)
} }
pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> { pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let ra = diesel::delete(device::dsl::device.find(&dev_eui))
move || -> Result<(), Error> { .execute(&mut c)
let mut c = get_db_conn()?; .await?;
let ra = diesel::delete(device::dsl::device.find(&dev_eui)).execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(dev_eui.to_string())); return Err(Error::NotFound(dev_eui.to_string()));
} }
Ok(())
}
})
.await??;
info!(dev_eui = %dev_eui, "Device deleted"); info!(dev_eui = %dev_eui, "Device deleted");
Ok(()) Ok(())
} }
pub async fn get_count(filters: &Filters) -> Result<i64, Error> { pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = device::dsl::device let mut q = device::dsl::device
.select(dsl::count_star()) .select(dsl::count_star())
.distinct() .distinct()
@ -417,14 +373,10 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
} }
if let Some(multicast_group_id) = &filters.multicast_group_id { if let Some(multicast_group_id) = &filters.multicast_group_id {
q = q q = q.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id));
.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id));
} }
Ok(q.first(&mut c)?) Ok(q.first(&mut c).await?)
}
})
.await?
} }
pub async fn list( pub async fn list(
@ -432,10 +384,7 @@ pub async fn list(
offset: i64, offset: i64,
filters: &Filters, filters: &Filters,
) -> Result<Vec<DeviceListItem>, Error> { ) -> Result<Vec<DeviceListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<DeviceListItem>, Error> {
let mut c = get_db_conn()?;
let mut q = device::dsl::device let mut q = device::dsl::device
.inner_join(device_profile::table) .inner_join(device_profile::table)
.left_join(multicast_group_device::table) .left_join(multicast_group_device::table)
@ -464,25 +413,19 @@ pub async fn list(
} }
if let Some(multicast_group_id) = &filters.multicast_group_id { if let Some(multicast_group_id) = &filters.multicast_group_id {
q = q q = q.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id));
.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id));
} }
q.order_by(device::dsl::name) q.order_by(device::dsl::name)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
} }
})
.await?
}
pub async fn get_active_inactive(tenant_id: &Option<Uuid>) -> Result<DevicesActiveInactive, Error> { pub async fn get_active_inactive(tenant_id: &Option<Uuid>) -> Result<DevicesActiveInactive, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
move || -> Result<DevicesActiveInactive, Error> {
let mut c = get_db_conn()?;
diesel::sql_query(r#" diesel::sql_query(r#"
with device_active_inactive as ( with device_active_inactive as (
select select
@ -503,18 +446,12 @@ pub async fn get_active_inactive(tenant_id: &Option<Uuid>) -> Result<DevicesActi
device_active_inactive device_active_inactive
"#) "#)
.bind::<diesel::sql_types::Nullable<diesel::sql_types::Uuid>, _>(tenant_id) .bind::<diesel::sql_types::Nullable<diesel::sql_types::Uuid>, _>(tenant_id)
.get_result(&mut c) .get_result(&mut c).await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
} }
})
.await?
}
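get_active_inactive keeps its raw SQL; only the execution becomes async. A hedged sketch of the same raw-query-with-bind pattern, using a simplified count query and result struct rather than the ChirpStack ones:

use diesel::prelude::*;
use diesel_async::{AsyncPgConnection, RunQueryDsl};
use uuid::Uuid;

// Illustrative result row for a raw query; the column alias must match the field name.
#[derive(QueryableByName, Debug)]
struct DeviceCount {
    #[diesel(sql_type = diesel::sql_types::BigInt)]
    count: i64,
}

// Raw SQL with a nullable bound parameter, awaited on the async connection.
async fn count_devices(
    c: &mut AsyncPgConnection,
    tenant_id: Option<Uuid>,
) -> Result<i64, diesel::result::Error> {
    let row: DeviceCount = diesel::sql_query(
        "select count(*) as count from device where $1 is null or tenant_id = $1",
    )
    .bind::<diesel::sql_types::Nullable<diesel::sql_types::Uuid>, _>(tenant_id)
    .get_result(c)
    .await?;
    Ok(row.count)
}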
pub async fn get_data_rates(tenant_id: &Option<Uuid>) -> Result<Vec<DevicesDataRate>, Error> { pub async fn get_data_rates(tenant_id: &Option<Uuid>) -> Result<Vec<DevicesDataRate>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
move || -> Result<Vec<DevicesDataRate>, Error> {
let mut c = get_db_conn()?;
let mut q = device::dsl::device let mut q = device::dsl::device
.inner_join(device_profile::table) .inner_join(device_profile::table)
//.select((device::dr, dsl::count_star())) //.select((device::dr, dsl::count_star()))
@ -530,16 +467,16 @@ pub async fn get_data_rates(tenant_id: &Option<Uuid>) -> Result<Vec<DevicesDataR
q = q.filter(device_profile::dsl::tenant_id.eq(id)); q = q.filter(device_profile::dsl::tenant_id.eq(id));
} }
q.load(&mut c).map_err(|e| Error::from_diesel(e, "".into())) q.load(&mut c)
} .await
}) .map_err(|e| Error::from_diesel(e, "".into()))
.await?
} }
pub async fn get_with_class_b_c_queue_items(limit: usize) -> Result<Vec<Device>> { pub async fn get_with_class_b_c_queue_items(limit: usize) -> Result<Vec<Device>> {
task::spawn_blocking(move || -> Result<Vec<Device>> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?; c.build_transaction()
c.transaction::<Vec<Device>, Error, _>(|c| { .run::<Vec<Device>, Error, _>(|c| {
Box::pin(async {
let conf = config::get(); let conf = config::get();
// This query will: // This query will:
@ -597,11 +534,12 @@ pub async fn get_with_class_b_c_queue_items(limit: usize) -> Result<Vec<Device>>
Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(), Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(),
) )
.load(c) .load(c)
.await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
}) })
.context("Get with Class B/C queue-items transaction")
}) })
.await? .await
.context("Get with Class B/C queue-items transaction")
} }
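Transactions change shape as well: instead of Connection::transaction inside spawn_blocking, the async connection's build_transaction().run(..) takes a closure returning a pinned future, mirroring the Box::pin(async { .. }) block above. A minimal sketch of that wrapper; the statements are illustrative and the exact run() signature depends on the diesel-async revision pinned in Cargo.toml:

use diesel_async::{AsyncPgConnection, RunQueryDsl};

// Two statements executed atomically on the same transaction connection.
async fn demo_tx(c: &mut AsyncPgConnection) -> Result<(), diesel::result::Error> {
    c.build_transaction()
        .run::<_, diesel::result::Error, _>(|c| {
            Box::pin(async move {
                diesel::sql_query("update thing set name = 'a' where id = 1")
                    .execute(c)
                    .await?;
                diesel::sql_query("update thing set name = 'b' where id = 2")
                    .execute(c)
                    .await?;
                Ok(())
            })
        })
        .await
}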
#[cfg(test)] #[cfg(test)]

View File

@ -1,13 +1,13 @@
use anyhow::Result; use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::prelude::*; use diesel::prelude::*;
use tokio::task; use diesel_async::RunQueryDsl;
use tracing::info; use tracing::info;
use lrwn::{AES128Key, EUI64}; use lrwn::{AES128Key, EUI64};
use super::error::Error; use super::error::Error;
use super::get_db_conn; use super::get_async_db_conn;
use super::schema::device_keys; use super::schema::device_keys;
#[derive(Queryable, Insertable, AsChangeset, PartialEq, Eq, Debug, Clone)] #[derive(Queryable, Insertable, AsChangeset, PartialEq, Eq, Debug, Clone)]
@ -45,16 +45,12 @@ impl Default for DeviceKeys {
} }
pub async fn create(dk: DeviceKeys) -> Result<DeviceKeys, Error> { pub async fn create(dk: DeviceKeys) -> Result<DeviceKeys, Error> {
let dk = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceKeys, Error> { let dk: DeviceKeys = diesel::insert_into(device_keys::table)
let mut c = get_db_conn()?;
diesel::insert_into(device_keys::table)
.values(&dk) .values(&dk)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dk.dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dk.dev_eui.to_string()))?;
})
.await??;
info!( info!(
dev_eui = %dk.dev_eui, dev_eui = %dk.dev_eui,
"Device-keys created" "Device-keys created"
@ -63,31 +59,22 @@ pub async fn create(dk: DeviceKeys) -> Result<DeviceKeys, Error> {
} }
pub async fn get(dev_eui: &EUI64) -> Result<DeviceKeys, Error> { pub async fn get(dev_eui: &EUI64) -> Result<DeviceKeys, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui;
move || -> Result<DeviceKeys, Error> {
let mut c = get_db_conn()?;
let dk = device_keys::dsl::device_keys let dk = device_keys::dsl::device_keys
.find(&dev_eui) .find(&dev_eui)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
Ok(dk) Ok(dk)
} }
})
.await?
}
pub async fn update(dk: DeviceKeys) -> Result<DeviceKeys, Error> { pub async fn update(dk: DeviceKeys) -> Result<DeviceKeys, Error> {
let dk = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceKeys, Error> { let dk: DeviceKeys = diesel::update(device_keys::dsl::device_keys.find(&dk.dev_eui))
let mut c = get_db_conn()?;
diesel::update(device_keys::dsl::device_keys.find(&dk.dev_eui))
.set(&dk) .set(&dk)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dk.dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dk.dev_eui.to_string()))?;
})
.await??;
info!( info!(
dev_eui = %dk.dev_eui, dev_eui = %dk.dev_eui,
"Device-keys updated" "Device-keys updated"
@ -96,19 +83,13 @@ pub async fn update(dk: DeviceKeys) -> Result<DeviceKeys, Error> {
} }
pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> { pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let ra = diesel::delete(device_keys::dsl::device_keys.find(&dev_eui))
move || -> Result<(), Error> { .execute(&mut c)
let mut c = get_db_conn()?; .await?;
let ra =
diesel::delete(device_keys::dsl::device_keys.find(&dev_eui)).execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(dev_eui.to_string())); return Err(Error::NotFound(dev_eui.to_string()));
} }
Ok(())
}
})
.await??;
info!( info!(
dev_eui = %dev_eui, dev_eui = %dev_eui,
"Device-keys deleted" "Device-keys deleted"
@ -117,18 +98,12 @@ pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> {
} }
pub async fn set_dev_nonces(dev_eui: &EUI64, nonces: &[i32]) -> Result<DeviceKeys, Error> { pub async fn set_dev_nonces(dev_eui: &EUI64, nonces: &[i32]) -> Result<DeviceKeys, Error> {
let dk = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let dk: DeviceKeys = diesel::update(device_keys::dsl::device_keys.find(dev_eui))
let nonces = nonces.to_vec(); .set(device_keys::dev_nonces.eq(nonces))
move || -> Result<DeviceKeys, Error> {
let mut c = get_db_conn()?;
diesel::update(device_keys::dsl::device_keys.find(&dev_eui))
.set(device_keys::dev_nonces.eq(&nonces))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!( info!(
dev_eui = %dev_eui, dev_eui = %dev_eui,
"Dev-nonces updated" "Dev-nonces updated"
@ -140,15 +115,16 @@ pub async fn validate_incr_join_and_store_dev_nonce(
dev_eui: &EUI64, dev_eui: &EUI64,
dev_nonce: i32, dev_nonce: i32,
) -> Result<DeviceKeys, Error> { ) -> Result<DeviceKeys, Error> {
let dk = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let dk: DeviceKeys = c
move || -> Result<DeviceKeys, Error> { .build_transaction()
let mut c = get_db_conn()?; .run::<DeviceKeys, Error, _>(|c| {
c.transaction::<DeviceKeys, Error, _>(|c| { Box::pin(async move {
let mut dk: DeviceKeys = device_keys::dsl::device_keys let mut dk: DeviceKeys = device_keys::dsl::device_keys
.find(&dev_eui) .find(&dev_eui)
.for_update() .for_update()
.first(c) .first(c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
if dk.dev_nonces.contains(&(Some(dev_nonce))) { if dk.dev_nonces.contains(&(Some(dev_nonce))) {
@ -165,11 +141,11 @@ pub async fn validate_incr_join_and_store_dev_nonce(
device_keys::join_nonce.eq(&dk.join_nonce), device_keys::join_nonce.eq(&dk.join_nonce),
)) ))
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))
}) })
}
}) })
.await??; .await?;
info!(dev_eui = %dev_eui, dev_nonce = dev_nonce, "Device-nonce validated, join-nonce incremented and stored"); info!(dev_eui = %dev_eui, dev_nonce = dev_nonce, "Device-nonce validated, join-nonce incremented and stored");
Ok(dk) Ok(dk)
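The nonce validation keeps its row lock: .for_update() still emits SELECT ... FOR UPDATE, it is simply awaited now and has to run on the transaction's own connection. A small sketch of that lock-then-write step, using an illustrative counter table instead of device_keys:

use diesel::prelude::*;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

// Illustrative table; the real code locks the device_keys row.
diesel::table! {
    counter (id) {
        id -> Int4,
        value -> Int4,
    }
}

// Intended to be called from inside build_transaction().run(..); the lock is
// released when the surrounding transaction commits or rolls back.
async fn lock_and_increment(
    c: &mut AsyncPgConnection,
    id: i32,
) -> Result<i32, diesel::result::Error> {
    let current: i32 = counter::dsl::counter
        .find(id)
        .select(counter::value)
        .for_update()
        .first(c)
        .await?;
    diesel::update(counter::dsl::counter.find(id))
        .set(counter::value.eq(current + 1))
        .execute(c)
        .await?;
    Ok(current + 1)
}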
@ -182,20 +158,16 @@ pub mod test {
use crate::test; use crate::test;
pub async fn reset_nonces(dev_eui: &EUI64) -> Result<DeviceKeys, Error> { pub async fn reset_nonces(dev_eui: &EUI64) -> Result<DeviceKeys, Error> {
let dk = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let dk: DeviceKeys = diesel::update(device_keys::dsl::device_keys.find(&dev_eui))
move || -> Result<DeviceKeys, Error> {
let mut c = get_db_conn()?;
diesel::update(device_keys::dsl::device_keys.find(&dev_eui))
.set(( .set((
device_keys::dev_nonces.eq::<Vec<i32>>(Vec::new()), device_keys::dev_nonces.eq::<Vec<i32>>(Vec::new()),
device_keys::join_nonce.eq(0), device_keys::join_nonce.eq(0),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!( info!(
dev_eui = %dev_eui, dev_eui = %dev_eui,
"Nonces reset" "Nonces reset"

View File

@ -2,9 +2,8 @@ use std::collections::HashMap;
use anyhow::Result; use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::dsl; use diesel::{dsl, prelude::*};
use diesel::prelude::*; use diesel_async::RunQueryDsl;
use tokio::task;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
@ -12,7 +11,7 @@ use lrwn::region::{CommonName, MacVersion, Revision};
use super::error::Error; use super::error::Error;
use super::schema::device_profile; use super::schema::device_profile;
use super::{error, fields, get_db_conn}; use super::{error, fields, get_async_db_conn};
use crate::api::helpers::ToProto; use crate::api::helpers::ToProto;
use crate::codec::Codec; use crate::codec::Codec;
use chirpstack_api::internal; use chirpstack_api::internal;
@ -199,42 +198,31 @@ pub struct Filters {
pub async fn create(dp: DeviceProfile) -> Result<DeviceProfile, Error> { pub async fn create(dp: DeviceProfile) -> Result<DeviceProfile, Error> {
dp.validate()?; dp.validate()?;
let dp = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceProfile, Error> { let dp: DeviceProfile = diesel::insert_into(device_profile::table)
let mut c = get_db_conn()?;
diesel::insert_into(device_profile::table)
.values(&dp) .values(&dp)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| error::Error::from_diesel(e, dp.id.to_string())) .await
} .map_err(|e| error::Error::from_diesel(e, dp.id.to_string()))?;
})
.await??;
info!(id = %dp.id, "Device-profile created"); info!(id = %dp.id, "Device-profile created");
Ok(dp) Ok(dp)
} }
pub async fn get(id: &Uuid) -> Result<DeviceProfile, Error> { pub async fn get(id: &Uuid) -> Result<DeviceProfile, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<DeviceProfile, Error> {
let mut c = get_db_conn()?;
let dp = device_profile::dsl::device_profile let dp = device_profile::dsl::device_profile
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| error::Error::from_diesel(e, id.to_string()))?; .map_err(|e| error::Error::from_diesel(e, id.to_string()))?;
Ok(dp) Ok(dp)
} }
})
.await?
}
pub async fn update(dp: DeviceProfile) -> Result<DeviceProfile, Error> { pub async fn update(dp: DeviceProfile) -> Result<DeviceProfile, Error> {
dp.validate()?; dp.validate()?;
let dp = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceProfile, Error> {
let mut c = get_db_conn()?;
diesel::update(device_profile::dsl::device_profile.find(&dp.id)) let dp: DeviceProfile = diesel::update(device_profile::dsl::device_profile.find(&dp.id))
.set(( .set((
device_profile::updated_at.eq(Utc::now()), device_profile::updated_at.eq(Utc::now()),
device_profile::name.eq(&dp.name), device_profile::name.eq(&dp.name),
@ -272,8 +260,7 @@ pub async fn update(dp: DeviceProfile) -> Result<DeviceProfile, Error> {
device_profile::relay_default_channel_index.eq(&dp.relay_default_channel_index), device_profile::relay_default_channel_index.eq(&dp.relay_default_channel_index),
device_profile::relay_second_channel_freq.eq(&dp.relay_second_channel_freq), device_profile::relay_second_channel_freq.eq(&dp.relay_second_channel_freq),
device_profile::relay_second_channel_dr.eq(&dp.relay_second_channel_dr), device_profile::relay_second_channel_dr.eq(&dp.relay_second_channel_dr),
device_profile::relay_second_channel_ack_offset device_profile::relay_second_channel_ack_offset.eq(&dp.relay_second_channel_ack_offset),
.eq(&dp.relay_second_channel_ack_offset),
device_profile::relay_ed_activation_mode.eq(&dp.relay_ed_activation_mode), device_profile::relay_ed_activation_mode.eq(&dp.relay_ed_activation_mode),
device_profile::relay_ed_smart_enable_level.eq(&dp.relay_ed_smart_enable_level), device_profile::relay_ed_smart_enable_level.eq(&dp.relay_ed_smart_enable_level),
device_profile::relay_ed_back_off.eq(&dp.relay_ed_back_off), device_profile::relay_ed_back_off.eq(&dp.relay_ed_back_off),
@ -283,70 +270,51 @@ pub async fn update(dp: DeviceProfile) -> Result<DeviceProfile, Error> {
.eq(&dp.relay_ed_uplink_limit_reload_rate), .eq(&dp.relay_ed_uplink_limit_reload_rate),
device_profile::relay_join_req_limit_reload_rate device_profile::relay_join_req_limit_reload_rate
.eq(&dp.relay_join_req_limit_reload_rate), .eq(&dp.relay_join_req_limit_reload_rate),
device_profile::relay_notify_limit_reload_rate device_profile::relay_notify_limit_reload_rate.eq(&dp.relay_notify_limit_reload_rate),
.eq(&dp.relay_notify_limit_reload_rate),
device_profile::relay_global_uplink_limit_reload_rate device_profile::relay_global_uplink_limit_reload_rate
.eq(&dp.relay_global_uplink_limit_reload_rate), .eq(&dp.relay_global_uplink_limit_reload_rate),
device_profile::relay_overall_limit_reload_rate device_profile::relay_overall_limit_reload_rate.eq(&dp.relay_overall_limit_reload_rate),
.eq(&dp.relay_overall_limit_reload_rate),
device_profile::relay_join_req_limit_bucket_size device_profile::relay_join_req_limit_bucket_size
.eq(&dp.relay_join_req_limit_bucket_size), .eq(&dp.relay_join_req_limit_bucket_size),
device_profile::relay_notify_limit_bucket_size device_profile::relay_notify_limit_bucket_size.eq(&dp.relay_notify_limit_bucket_size),
.eq(&dp.relay_notify_limit_bucket_size),
device_profile::relay_global_uplink_limit_bucket_size device_profile::relay_global_uplink_limit_bucket_size
.eq(&dp.relay_global_uplink_limit_bucket_size), .eq(&dp.relay_global_uplink_limit_bucket_size),
device_profile::relay_overall_limit_bucket_size device_profile::relay_overall_limit_bucket_size.eq(&dp.relay_overall_limit_bucket_size),
.eq(&dp.relay_overall_limit_bucket_size),
device_profile::allow_roaming.eq(&dp.allow_roaming), device_profile::allow_roaming.eq(&dp.allow_roaming),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| error::Error::from_diesel(e, dp.id.to_string())) .await
} .map_err(|e| error::Error::from_diesel(e, dp.id.to_string()))?;
})
.await??;
info!(id = %dp.id, "Device-profile updated"); info!(id = %dp.id, "Device-profile updated");
Ok(dp) Ok(dp)
} }
pub async fn set_measurements(id: Uuid, m: &fields::Measurements) -> Result<DeviceProfile, Error> { pub async fn set_measurements(id: Uuid, m: &fields::Measurements) -> Result<DeviceProfile, Error> {
let dp = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let m = m.clone(); let dp: DeviceProfile = diesel::update(device_profile::dsl::device_profile.find(&id))
move || -> Result<DeviceProfile, Error> {
let mut c = get_db_conn()?;
diesel::update(device_profile::dsl::device_profile.find(&id))
.set(device_profile::measurements.eq(m)) .set(device_profile::measurements.eq(m))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, id.to_string()))?;
})
.await??;
info!(id = %id, "Device-profile measurements updated"); info!(id = %id, "Device-profile measurements updated");
Ok(dp) Ok(dp)
} }
pub async fn delete(id: &Uuid) -> Result<(), Error> { pub async fn delete(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id; let ra = diesel::delete(device_profile::dsl::device_profile.find(&id))
move || -> Result<(), Error> { .execute(&mut c)
let mut c = get_db_conn()?; .await?;
let ra =
diesel::delete(device_profile::dsl::device_profile.find(&id)).execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(error::Error::NotFound(id.to_string())); return Err(error::Error::NotFound(id.to_string()));
} }
Ok(())
}
})
.await??;
info!(id = %id, "Device-profile deleted"); info!(id = %id, "Device-profile deleted");
Ok(()) Ok(())
} }
pub async fn get_count(filters: &Filters) -> Result<i64, Error> { pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = device_profile::dsl::device_profile let mut q = device_profile::dsl::device_profile
.select(dsl::count_star()) .select(dsl::count_star())
.into_boxed(); .into_boxed();
@ -359,10 +327,7 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search))); q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search)));
} }
Ok(q.first(&mut c)?) Ok(q.first(&mut c).await?)
}
})
.await?
} }
pub async fn list( pub async fn list(
@ -370,10 +335,7 @@ pub async fn list(
offset: i64, offset: i64,
filters: &Filters, filters: &Filters,
) -> Result<Vec<DeviceProfileListItem>, Error> { ) -> Result<Vec<DeviceProfileListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<DeviceProfileListItem>, Error> {
let mut c = get_db_conn()?;
let mut q = device_profile::dsl::device_profile let mut q = device_profile::dsl::device_profile
.select(( .select((
device_profile::id, device_profile::id,
@ -401,12 +363,10 @@ pub async fn list(
.order_by(device_profile::dsl::name) .order_by(device_profile::dsl::name)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {

View File

@ -2,17 +2,16 @@ use std::collections::HashMap;
use anyhow::Result; use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::dsl; use diesel::{dsl, prelude::*};
use diesel::prelude::*; use diesel_async::RunQueryDsl;
use regex::Regex; use regex::Regex;
use tokio::task;
use tracing::info; use tracing::info;
use lrwn::region::{CommonName, MacVersion, Revision}; use lrwn::region::{CommonName, MacVersion, Revision};
use super::error::Error; use super::error::Error;
use super::schema::device_profile_template; use super::schema::device_profile_template;
use super::{error, fields, get_db_conn}; use super::{error, fields, get_async_db_conn};
use crate::codec::Codec; use crate::codec::Codec;
#[derive(Clone, Queryable, Insertable, Debug, PartialEq, Eq)] #[derive(Clone, Queryable, Insertable, Debug, PartialEq, Eq)]
@ -134,26 +133,20 @@ pub struct DeviceProfileTemplateListItem {
pub async fn create(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate, Error> { pub async fn create(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate, Error> {
dp.validate()?; dp.validate()?;
let dp = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceProfileTemplate, Error> { let dp: DeviceProfileTemplate = diesel::insert_into(device_profile_template::table)
let mut c = get_db_conn()?;
diesel::insert_into(device_profile_template::table)
.values(&dp) .values(&dp)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| error::Error::from_diesel(e, dp.id.to_string())) .await
} .map_err(|e| error::Error::from_diesel(e, dp.id.to_string()))?;
})
.await??;
info!(id = %dp.id, "Device-profile template created"); info!(id = %dp.id, "Device-profile template created");
Ok(dp) Ok(dp)
} }
pub async fn upsert(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate, Error> { pub async fn upsert(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate, Error> {
dp.validate()?; dp.validate()?;
let dp = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceProfileTemplate, Error> { let dp: DeviceProfileTemplate = diesel::insert_into(device_profile_template::table)
let mut c = get_db_conn()?;
diesel::insert_into(device_profile_template::table)
.values(&dp) .values(&dp)
.on_conflict(device_profile_template::id) .on_conflict(device_profile_template::id)
.do_update() .do_update()
@ -170,10 +163,8 @@ pub async fn upsert(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate,
device_profile_template::payload_codec_runtime.eq(&dp.payload_codec_runtime), device_profile_template::payload_codec_runtime.eq(&dp.payload_codec_runtime),
device_profile_template::payload_codec_script.eq(&dp.payload_codec_script), device_profile_template::payload_codec_script.eq(&dp.payload_codec_script),
device_profile_template::uplink_interval.eq(&dp.uplink_interval), device_profile_template::uplink_interval.eq(&dp.uplink_interval),
device_profile_template::device_status_req_interval device_profile_template::device_status_req_interval.eq(&dp.device_status_req_interval),
.eq(&dp.device_status_req_interval), device_profile_template::flush_queue_on_activate.eq(&dp.flush_queue_on_activate),
device_profile_template::flush_queue_on_activate
.eq(&dp.flush_queue_on_activate),
device_profile_template::supports_otaa.eq(&dp.supports_otaa), device_profile_template::supports_otaa.eq(&dp.supports_otaa),
device_profile_template::supports_class_b.eq(&dp.supports_class_b), device_profile_template::supports_class_b.eq(&dp.supports_class_b),
device_profile_template::supports_class_c.eq(&dp.supports_class_c), device_profile_template::supports_class_c.eq(&dp.supports_class_c),
@ -188,39 +179,31 @@ pub async fn upsert(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate,
device_profile_template::abp_rx2_freq.eq(&dp.abp_rx2_freq), device_profile_template::abp_rx2_freq.eq(&dp.abp_rx2_freq),
device_profile_template::tags.eq(&dp.tags), device_profile_template::tags.eq(&dp.tags),
device_profile_template::measurements.eq(&dp.measurements), device_profile_template::measurements.eq(&dp.measurements),
device_profile_template::auto_detect_measurements device_profile_template::auto_detect_measurements.eq(&dp.auto_detect_measurements),
.eq(&dp.auto_detect_measurements),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| error::Error::from_diesel(e, dp.id.to_string())) .await
} .map_err(|e| error::Error::from_diesel(e, dp.id.to_string()))?;
})
.await??;
info!(id = %dp.id, "Device-profile template upserted"); info!(id = %dp.id, "Device-profile template upserted");
Ok(dp) Ok(dp)
} }
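The upsert keeps diesel's on_conflict / do_update builder; only the final call is awaited. A reduced sketch against an illustrative two-column table (kv is not a ChirpStack table):

use diesel::prelude::*;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

// Illustrative key/value table.
diesel::table! {
    kv (key) {
        key -> Text,
        value -> Text,
    }
}

// Insert the row, or update the existing one when the key already exists.
async fn upsert_kv(
    c: &mut AsyncPgConnection,
    key: &str,
    value: &str,
) -> Result<usize, diesel::result::Error> {
    diesel::insert_into(kv::table)
        .values((kv::key.eq(key), kv::value.eq(value)))
        .on_conflict(kv::key)
        .do_update()
        .set(kv::value.eq(value))
        .execute(c)
        .await
}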
pub async fn get(id: &str) -> Result<DeviceProfileTemplate, Error> { pub async fn get(id: &str) -> Result<DeviceProfileTemplate, Error> {
task::spawn_blocking({
let id = id.to_string(); let id = id.to_string();
move || -> Result<DeviceProfileTemplate, Error> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?;
let dp = device_profile_template::dsl::device_profile_template let dp = device_profile_template::dsl::device_profile_template
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| error::Error::from_diesel(e, id.clone()))?; .map_err(|e| error::Error::from_diesel(e, id.clone()))?;
Ok(dp) Ok(dp)
} }
})
.await?
}
pub async fn update(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate, Error> { pub async fn update(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate, Error> {
dp.validate()?; dp.validate()?;
let dp = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceProfileTemplate, Error> {
let mut c = get_db_conn()?;
let dp: DeviceProfileTemplate =
diesel::update(device_profile_template::dsl::device_profile_template.find(&dp.id)) diesel::update(device_profile_template::dsl::device_profile_template.find(&dp.id))
.set(( .set((
device_profile_template::updated_at.eq(Utc::now()), device_profile_template::updated_at.eq(Utc::now()),
@ -237,8 +220,7 @@ pub async fn update(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate,
device_profile_template::uplink_interval.eq(&dp.uplink_interval), device_profile_template::uplink_interval.eq(&dp.uplink_interval),
device_profile_template::device_status_req_interval device_profile_template::device_status_req_interval
.eq(&dp.device_status_req_interval), .eq(&dp.device_status_req_interval),
device_profile_template::flush_queue_on_activate device_profile_template::flush_queue_on_activate.eq(&dp.flush_queue_on_activate),
.eq(&dp.flush_queue_on_activate),
device_profile_template::supports_otaa.eq(&dp.supports_otaa), device_profile_template::supports_otaa.eq(&dp.supports_otaa),
device_profile_template::supports_class_b.eq(&dp.supports_class_b), device_profile_template::supports_class_b.eq(&dp.supports_class_b),
device_profile_template::supports_class_c.eq(&dp.supports_class_c), device_profile_template::supports_class_c.eq(&dp.supports_class_c),
@ -254,49 +236,35 @@ pub async fn update(dp: DeviceProfileTemplate) -> Result<DeviceProfileTemplate,
device_profile_template::tags.eq(&dp.tags), device_profile_template::tags.eq(&dp.tags),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| error::Error::from_diesel(e, dp.id.clone())) .await
} .map_err(|e| error::Error::from_diesel(e, dp.id.clone()))?;
})
.await??;
info!(id = %dp.id, "Device-profile template updated"); info!(id = %dp.id, "Device-profile template updated");
Ok(dp) Ok(dp)
} }
pub async fn delete(id: &str) -> Result<(), Error> { pub async fn delete(id: &str) -> Result<(), Error> {
task::spawn_blocking({
let id = id.to_string(); let id = id.to_string();
move || -> Result<(), Error> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?; let ra = diesel::delete(device_profile_template::dsl::device_profile_template.find(&id))
let ra = .execute(&mut c)
diesel::delete(device_profile_template::dsl::device_profile_template.find(&id)) .await?;
.execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(error::Error::NotFound(id)); return Err(error::Error::NotFound(id));
} }
Ok(())
}
})
.await??;
info!(id = %id, "Device-profile template deleted"); info!(id = %id, "Device-profile template deleted");
Ok(()) Ok(())
} }
pub async fn get_count() -> Result<i64, Error> { pub async fn get_count() -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
Ok(device_profile_template::dsl::device_profile_template Ok(device_profile_template::dsl::device_profile_template
.select(dsl::count_star()) .select(dsl::count_star())
.first(&mut c)?) .first(&mut c)
} .await?)
})
.await?
} }
pub async fn list(limit: i64, offset: i64) -> Result<Vec<DeviceProfileTemplateListItem>, Error> { pub async fn list(limit: i64, offset: i64) -> Result<Vec<DeviceProfileTemplateListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Vec<DeviceProfileTemplateListItem>, Error> {
let mut c = get_db_conn()?;
let items = device_profile_template::dsl::device_profile_template let items = device_profile_template::dsl::device_profile_template
.select(( .select((
device_profile_template::id, device_profile_template::id,
@ -320,12 +288,10 @@ pub async fn list(limit: i64, offset: i64) -> Result<Vec<DeviceProfileTemplateLi
)) ))
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {

View File

@ -1,12 +1,12 @@
use anyhow::Result; use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::{dsl, prelude::*}; use diesel::{dsl, prelude::*};
use tokio::task; use diesel_async::RunQueryDsl;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
use super::error::Error; use super::error::Error;
use super::get_db_conn; use super::get_async_db_conn;
use super::schema::device_queue_item; use super::schema::device_queue_item;
use lrwn::EUI64; use lrwn::EUI64;
@ -64,40 +64,29 @@ impl Default for DeviceQueueItem {
pub async fn enqueue_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error> { pub async fn enqueue_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error> {
qi.validate()?; qi.validate()?;
let mut c = get_async_db_conn().await?;
let qi = task::spawn_blocking({ let qi: DeviceQueueItem = diesel::insert_into(device_queue_item::table)
move || -> Result<DeviceQueueItem, Error> {
let mut c = get_db_conn()?;
diesel::insert_into(device_queue_item::table)
.values(&qi) .values(&qi)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, qi.id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, qi.id.to_string()))?;
})
.await??;
info!(id = %qi.id, dev_eui = %qi.dev_eui, "Device queue-item enqueued"); info!(id = %qi.id, dev_eui = %qi.dev_eui, "Device queue-item enqueued");
Ok(qi) Ok(qi)
} }
pub async fn get_item(id: &Uuid) -> Result<DeviceQueueItem, Error> { pub async fn get_item(id: &Uuid) -> Result<DeviceQueueItem, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<DeviceQueueItem, Error> {
let mut c = get_db_conn()?;
let qi = device_queue_item::dsl::device_queue_item let qi = device_queue_item::dsl::device_queue_item
.find(&id) .find(id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(qi) Ok(qi)
} }
})
.await?
}
pub async fn update_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error> { pub async fn update_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error> {
let qi = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<DeviceQueueItem, Error> { let qi: DeviceQueueItem =
let mut c = get_db_conn()?;
diesel::update(device_queue_item::dsl::device_queue_item.find(&qi.id)) diesel::update(device_queue_item::dsl::device_queue_item.find(&qi.id))
.set(( .set((
device_queue_item::is_pending.eq(&qi.is_pending), device_queue_item::is_pending.eq(&qi.is_pending),
@ -105,43 +94,33 @@ pub async fn update_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error>
device_queue_item::timeout_after.eq(&qi.timeout_after), device_queue_item::timeout_after.eq(&qi.timeout_after),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, qi.id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, qi.id.to_string()))?;
})
.await??;
info!(id = %qi.id, dev_eui = %qi.dev_eui, "Device queue-item updated"); info!(id = %qi.id, dev_eui = %qi.dev_eui, "Device queue-item updated");
Ok(qi) Ok(qi)
} }
pub async fn delete_item(id: &Uuid) -> Result<(), Error> { pub async fn delete_item(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete(device_queue_item::dsl::device_queue_item.find(&id)) let ra = diesel::delete(device_queue_item::dsl::device_queue_item.find(&id))
.execute(&mut c)?; .execute(&mut c)
.await?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(id.to_string())); return Err(Error::NotFound(id.to_string()));
} }
Ok(())
}
})
.await??;
info!(id = %id, "Device queue-item deleted"); info!(id = %id, "Device queue-item deleted");
Ok(()) Ok(())
} }
/// It returns the device queue-item and a bool indicating if there are more items in the queue. /// It returns the device queue-item and a bool indicating if there are more items in the queue.
pub async fn get_next_for_dev_eui(dev_eui: &EUI64) -> Result<(DeviceQueueItem, bool), Error> { pub async fn get_next_for_dev_eui(dev_eui: &EUI64) -> Result<(DeviceQueueItem, bool), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui;
move || -> Result<(DeviceQueueItem, bool), Error> {
let mut c = get_db_conn()?;
let items: Vec<DeviceQueueItem> = device_queue_item::dsl::device_queue_item let items: Vec<DeviceQueueItem> = device_queue_item::dsl::device_queue_item
.filter(device_queue_item::dev_eui.eq(&dev_eui)) .filter(device_queue_item::dev_eui.eq(&dev_eui))
.order_by(device_queue_item::created_at) .order_by(device_queue_item::created_at)
.limit(2) .limit(2)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
// Return NotFound on empty Vec. // Return NotFound on empty Vec.
@ -162,49 +141,32 @@ pub async fn get_next_for_dev_eui(dev_eui: &EUI64) -> Result<(DeviceQueueItem, b
// Return first item and bool indicating if there are more items in the queue. // Return first item and bool indicating if there are more items in the queue.
Ok((items[0].clone(), items.len() > 1)) Ok((items[0].clone(), items.len() > 1))
} }
})
.await?
}
pub async fn get_for_dev_eui(dev_eui: &EUI64) -> Result<Vec<DeviceQueueItem>, Error> { pub async fn get_for_dev_eui(dev_eui: &EUI64) -> Result<Vec<DeviceQueueItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui;
move || -> Result<Vec<DeviceQueueItem>, Error> {
let mut c = get_db_conn()?;
let items = device_queue_item::dsl::device_queue_item let items = device_queue_item::dsl::device_queue_item
.filter(device_queue_item::dev_eui.eq(&dev_eui)) .filter(device_queue_item::dev_eui.eq(&dev_eui))
.order_by(device_queue_item::created_at) .order_by(device_queue_item::created_at)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
Ok(items) Ok(items)
} }
})
.await?
}
pub async fn flush_for_dev_eui(dev_eui: &EUI64) -> Result<(), Error> { pub async fn flush_for_dev_eui(dev_eui: &EUI64) -> Result<(), Error> {
let count = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui; let count: usize = diesel::delete(
move || -> Result<usize, Error> { device_queue_item::dsl::device_queue_item.filter(device_queue_item::dev_eui.eq(&dev_eui)),
let mut c = get_db_conn()?;
diesel::delete(
device_queue_item::dsl::device_queue_item
.filter(device_queue_item::dev_eui.eq(&dev_eui)),
) )
.execute(&mut c) .execute(&mut c)
.map_err(|e| Error::from_diesel(e, dev_eui.to_string())) .await
} .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
})
.await??;
info!(dev_eui = %dev_eui, count = count, "Device queue flushed"); info!(dev_eui = %dev_eui, count = count, "Device queue flushed");
Ok(()) Ok(())
} }
pub async fn get_pending_for_dev_eui(dev_eui: &EUI64) -> Result<DeviceQueueItem, Error> { pub async fn get_pending_for_dev_eui(dev_eui: &EUI64) -> Result<DeviceQueueItem, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let dev_eui = *dev_eui;
move || -> Result<DeviceQueueItem, Error> {
let mut c = get_db_conn()?;
let qi = device_queue_item::dsl::device_queue_item let qi = device_queue_item::dsl::device_queue_item
.filter( .filter(
device_queue_item::dev_eui device_queue_item::dev_eui
@ -212,24 +174,18 @@ pub async fn get_pending_for_dev_eui(dev_eui: &EUI64) -> Result<DeviceQueueItem,
.and(device_queue_item::is_pending.eq(true)), .and(device_queue_item::is_pending.eq(true)),
) )
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
Ok(qi) Ok(qi)
} }
})
.await?
}
pub async fn get_max_f_cnt_down(dev_eui: EUI64) -> Result<Option<i64>, Error> { pub async fn get_max_f_cnt_down(dev_eui: EUI64) -> Result<Option<i64>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Option<i64>, Error> {
let mut c = get_db_conn()?;
Ok(device_queue_item::dsl::device_queue_item Ok(device_queue_item::dsl::device_queue_item
.select(dsl::max(device_queue_item::f_cnt_down)) .select(dsl::max(device_queue_item::f_cnt_down))
.filter(device_queue_item::dsl::dev_eui.eq(dev_eui)) .filter(device_queue_item::dsl::dev_eui.eq(dev_eui))
.first(&mut c)?) .first(&mut c)
} .await?)
})
.await?
} }
#[cfg(test)] #[cfg(test)]
@ -281,7 +237,7 @@ pub mod test {
// get for dev eui // get for dev eui
let queue = get_for_dev_eui(&d.dev_eui).await.unwrap(); let queue = get_for_dev_eui(&d.dev_eui).await.unwrap();
assert_eq!(&qi, queue.first().unwrap()); assert_eq!(qi, queue[0]);
// next next queue item for dev eui // next next queue item for dev eui
let resp = get_next_for_dev_eui(&d.dev_eui).await.unwrap(); let resp = get_next_for_dev_eui(&d.dev_eui).await.unwrap();

View File

@ -2,16 +2,15 @@ use std::collections::HashMap;
use anyhow::Result; use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::dsl; use diesel::{dsl, prelude::*};
use diesel::prelude::*; use diesel_async::RunQueryDsl;
use tokio::task;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
use lrwn::EUI64; use lrwn::EUI64;
use super::schema::{gateway, multicast_group_gateway, tenant}; use super::schema::{gateway, multicast_group_gateway, tenant};
use super::{error::Error, fields, get_db_conn}; use super::{error::Error, fields, get_async_db_conn};
#[derive(Queryable, Insertable, PartialEq, Debug)] #[derive(Queryable, Insertable, PartialEq, Debug)]
#[diesel(table_name = gateway)] #[diesel(table_name = gateway)]
@ -110,15 +109,17 @@ impl Default for Gateway {
pub async fn create(gw: Gateway) -> Result<Gateway, Error> { pub async fn create(gw: Gateway) -> Result<Gateway, Error> {
gw.validate()?; gw.validate()?;
let gw = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Gateway, Error> { let gw: Gateway = c
let mut c = get_db_conn()?; .build_transaction()
c.transaction::<Gateway, Error, _>(|c| { .run::<Gateway, Error, _>(|c| {
Box::pin(async move {
// use for_update to lock the tenant. // use for_update to lock the tenant.
let t: super::tenant::Tenant = tenant::dsl::tenant let t: super::tenant::Tenant = tenant::dsl::tenant
.find(&gw.tenant_id) .find(&gw.tenant_id)
.for_update() .for_update()
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, gw.tenant_id.to_string()))?; .map_err(|e| Error::from_diesel(e, gw.tenant_id.to_string()))?;
if !t.can_have_gateways { if !t.can_have_gateways {
@ -128,7 +129,8 @@ pub async fn create(gw: Gateway) -> Result<Gateway, Error> {
let gw_count: i64 = gateway::dsl::gateway let gw_count: i64 = gateway::dsl::gateway
.select(dsl::count_star()) .select(dsl::count_star())
.filter(gateway::dsl::tenant_id.eq(&gw.tenant_id)) .filter(gateway::dsl::tenant_id.eq(&gw.tenant_id))
.first(c)?; .first(c)
.await?;
if t.max_gateway_count != 0 && gw_count as i32 >= t.max_gateway_count { if t.max_gateway_count != 0 && gw_count as i32 >= t.max_gateway_count {
return Err(Error::NotAllowed( return Err(Error::NotAllowed(
@ -139,11 +141,11 @@ pub async fn create(gw: Gateway) -> Result<Gateway, Error> {
diesel::insert_into(gateway::table) diesel::insert_into(gateway::table)
.values(&gw) .values(&gw)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, gw.gateway_id.to_string())) .map_err(|e| Error::from_diesel(e, gw.gateway_id.to_string()))
}) })
}
}) })
.await??; .await?;
info!( info!(
gateway_id = %gw.gateway_id, gateway_id = %gw.gateway_id,
"Gateway created" "Gateway created"
@ -152,26 +154,19 @@ pub async fn create(gw: Gateway) -> Result<Gateway, Error> {
} }
pub async fn get(gateway_id: &EUI64) -> Result<Gateway, Error> { pub async fn get(gateway_id: &EUI64) -> Result<Gateway, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let gateway_id = *gateway_id;
move || -> Result<Gateway, Error> {
let mut c = get_db_conn()?;
let gw = gateway::dsl::gateway let gw = gateway::dsl::gateway
.find(&gateway_id) .find(&gateway_id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?; .map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?;
Ok(gw) Ok(gw)
} }
})
.await?
}
pub async fn update(gw: Gateway) -> Result<Gateway, Error> { pub async fn update(gw: Gateway) -> Result<Gateway, Error> {
gw.validate()?; gw.validate()?;
let gw = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Gateway, Error> { let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&gw.gateway_id))
let mut c = get_db_conn()?;
diesel::update(gateway::dsl::gateway.find(&gw.gateway_id))
.set(( .set((
gateway::updated_at.eq(Utc::now()), gateway::updated_at.eq(Utc::now()),
gateway::name.eq(&gw.name), gateway::name.eq(&gw.name),
@ -183,10 +178,8 @@ pub async fn update(gw: Gateway) -> Result<Gateway, Error> {
gateway::tags.eq(&gw.tags), gateway::tags.eq(&gw.tags),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, gw.gateway_id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, gw.gateway_id.to_string()))?;
})
.await??;
info!( info!(
gateway_id = %gw.gateway_id, gateway_id = %gw.gateway_id,
"Gateway updated" "Gateway updated"
@ -195,24 +188,17 @@ pub async fn update(gw: Gateway) -> Result<Gateway, Error> {
} }
pub async fn update_state(id: &EUI64, props: &HashMap<String, String>) -> Result<Gateway, Error> { pub async fn update_state(id: &EUI64, props: &HashMap<String, String>) -> Result<Gateway, Error> {
let gw = task::spawn_blocking({
let id = *id;
let props = fields::KeyValue::new(props.clone()); let props = fields::KeyValue::new(props.clone());
move || -> Result<Gateway, Error> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?;
let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&id)) let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&id))
.set(( .set((
gateway::last_seen_at.eq(Some(Utc::now())), gateway::last_seen_at.eq(Some(Utc::now())),
gateway::properties.eq(props), gateway::properties.eq(props),
)) ))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(gw)
}
})
.await??;
info!( info!(
gateway_id = %id, gateway_id = %id,
"Gateway state updated" "Gateway state updated"
@ -228,11 +214,8 @@ pub async fn update_state_and_loc(
alt: f32, alt: f32,
props: &HashMap<String, String>, props: &HashMap<String, String>,
) -> Result<Gateway, Error> { ) -> Result<Gateway, Error> {
let gw = task::spawn_blocking({
let id = *id;
let props = fields::KeyValue::new(props.clone()); let props = fields::KeyValue::new(props.clone());
move || -> Result<Gateway, Error> { let mut c = get_async_db_conn().await?;
let mut c = get_db_conn()?;
let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&id)) let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&id))
.set(( .set((
gateway::last_seen_at.eq(Some(Utc::now())), gateway::last_seen_at.eq(Some(Utc::now())),
@ -242,13 +225,9 @@ pub async fn update_state_and_loc(
gateway::properties.eq(props), gateway::properties.eq(props),
)) ))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(gw)
}
})
.await??;
info!( info!(
gateway_id = %id, gateway_id = %id,
"Gateway state and location updated" "Gateway state and location updated"
@ -258,20 +237,12 @@ pub async fn update_state_and_loc(
} }
pub async fn update_tls_cert(id: &EUI64, cert: &[u8]) -> Result<Gateway, Error> { pub async fn update_tls_cert(id: &EUI64, cert: &[u8]) -> Result<Gateway, Error> {
let gw = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
let cert = cert.to_vec();
move || -> Result<Gateway, Error> {
let mut c = get_db_conn()?;
let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&id)) let gw: Gateway = diesel::update(gateway::dsl::gateway.find(&id))
.set(gateway::tls_certificate.eq(cert)) .set(gateway::tls_certificate.eq(cert))
.get_result(&mut c) .get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(gw)
}
})
.await??;
info!( info!(
gateway_id = %id, gateway_id = %id,
"Gateway tls certificate updated" "Gateway tls certificate updated"
@ -281,18 +252,13 @@ pub async fn update_tls_cert(id: &EUI64, cert: &[u8]) -> Result<Gateway, Error>
} }
pub async fn delete(gateway_id: &EUI64) -> Result<(), Error> { pub async fn delete(gateway_id: &EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let gateway_id = *gateway_id; let ra = diesel::delete(gateway::dsl::gateway.find(&gateway_id))
move || -> Result<(), Error> { .execute(&mut c)
let mut c = get_db_conn()?; .await?;
let ra = diesel::delete(gateway::dsl::gateway.find(&gateway_id)).execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(gateway_id.to_string())); return Err(Error::NotFound(gateway_id.to_string()));
} }
Ok(())
}
})
.await??;
info!( info!(
gateway_id = %gateway_id, gateway_id = %gateway_id,
"Gateway deleted" "Gateway deleted"
@ -301,10 +267,7 @@ pub async fn delete(gateway_id: &EUI64) -> Result<(), Error> {
} }
pub async fn get_count(filters: &Filters) -> Result<i64, Error> { pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = gateway::dsl::gateway let mut q = gateway::dsl::gateway
.select(dsl::count_star()) .select(dsl::count_star())
.distinct() .distinct()
@ -316,19 +279,14 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
} }
if let Some(multicast_group_id) = &filters.multicast_group_id { if let Some(multicast_group_id) = &filters.multicast_group_id {
q = q.filter( q = q.filter(multicast_group_gateway::dsl::multicast_group_id.eq(multicast_group_id));
multicast_group_gateway::dsl::multicast_group_id.eq(multicast_group_id),
);
} }
if let Some(search) = &filters.search { if let Some(search) = &filters.search {
q = q.filter(gateway::dsl::name.ilike(format!("%{}%", search))); q = q.filter(gateway::dsl::name.ilike(format!("%{}%", search)));
} }
Ok(q.first(&mut c)?) Ok(q.first(&mut c).await?)
}
})
.await?
} }
pub async fn list( pub async fn list(
@ -336,10 +294,7 @@ pub async fn list(
offset: i64, offset: i64,
filters: &Filters, filters: &Filters,
) -> Result<Vec<GatewayListItem>, Error> { ) -> Result<Vec<GatewayListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<GatewayListItem>, Error> {
let mut c = get_db_conn()?;
let mut q = gateway::dsl::gateway let mut q = gateway::dsl::gateway
.left_join(multicast_group_gateway::table) .left_join(multicast_group_gateway::table)
.select(( .select((
@ -368,27 +323,20 @@ pub async fn list(
} }
if let Some(multicast_group_id) = &filters.multicast_group_id { if let Some(multicast_group_id) = &filters.multicast_group_id {
q = q.filter( q = q.filter(multicast_group_gateway::dsl::multicast_group_id.eq(multicast_group_id));
multicast_group_gateway::dsl::multicast_group_id.eq(multicast_group_id),
);
} }
let items = q let items = q
.order_by(gateway::dsl::name) .order_by(gateway::dsl::name)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
pub async fn get_meta(gateway_id: &EUI64) -> Result<GatewayMeta, Error> { pub async fn get_meta(gateway_id: &EUI64) -> Result<GatewayMeta, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let gateway_id = *gateway_id;
move || -> Result<GatewayMeta, Error> {
let mut c = get_db_conn()?;
let meta = gateway::dsl::gateway let meta = gateway::dsl::gateway
.inner_join(tenant::table) .inner_join(tenant::table)
.select(( .select((
@ -402,19 +350,13 @@ pub async fn get_meta(gateway_id: &EUI64) -> Result<GatewayMeta, Error> {
)) ))
.filter(gateway::dsl::gateway_id.eq(&gateway_id)) .filter(gateway::dsl::gateway_id.eq(&gateway_id))
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?; .map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?;
Ok(meta) Ok(meta)
} }
})
.await?
}
pub async fn get_counts_by_state(tenant_id: &Option<Uuid>) -> Result<GatewayCountsByState, Error> { pub async fn get_counts_by_state(tenant_id: &Option<Uuid>) -> Result<GatewayCountsByState, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
move || -> Result<GatewayCountsByState, Error> {
let mut c = get_db_conn()?;
let counts: GatewayCountsByState = diesel::sql_query(r#" let counts: GatewayCountsByState = diesel::sql_query(r#"
select select
coalesce(sum(case when last_seen_at is null then 1 end), 0) as never_seen_count, coalesce(sum(case when last_seen_at is null then 1 end), 0) as never_seen_count,
@ -424,11 +366,9 @@ pub async fn get_counts_by_state(tenant_id: &Option<Uuid>) -> Result<GatewayCoun
gateway gateway
where where
$1 is null or tenant_id = $1 $1 is null or tenant_id = $1
"#).bind::<diesel::sql_types::Nullable<diesel::sql_types::Uuid>, _>(tenant_id).get_result(&mut c)?; "#).bind::<diesel::sql_types::Nullable<diesel::sql_types::Uuid>, _>(tenant_id).get_result(&mut c).await?;
Ok(counts) Ok(counts)
} }
}).await?
}
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {

View File

@ -1,28 +1,24 @@
use diesel::prelude::*; use diesel::prelude::*;
use tokio::task; use diesel_async::RunQueryDsl;
use super::schema::{application, device, device_profile, tenant}; use super::schema::{application, device, device_profile, tenant};
use super::{ use super::{
application::Application, device::Device, device_profile::DeviceProfile, tenant::Tenant, application::Application, device::Device, device_profile::DeviceProfile, tenant::Tenant,
}; };
use super::{error::Error, get_db_conn}; use super::{error::Error, get_async_db_conn};
use lrwn::EUI64; use lrwn::EUI64;
pub async fn get_all_device_data( pub async fn get_all_device_data(
dev_eui: EUI64, dev_eui: EUI64,
) -> Result<(Device, Application, Tenant, DeviceProfile), Error> { ) -> Result<(Device, Application, Tenant, DeviceProfile), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<(Device, Application, Tenant, DeviceProfile), Error> {
let mut c = get_db_conn()?;
let res = device::table let res = device::table
.inner_join(application::table) .inner_join(application::table)
.inner_join(tenant::table.on(application::dsl::tenant_id.eq(tenant::dsl::id))) .inner_join(tenant::table.on(application::dsl::tenant_id.eq(tenant::dsl::id)))
.inner_join(device_profile::table) .inner_join(device_profile::table)
.filter(device::dsl::dev_eui.eq(&dev_eui)) .filter(device::dsl::dev_eui.eq(&dev_eui))
.first::<(Device, Application, Tenant, DeviceProfile)>(&mut c) .first::<(Device, Application, Tenant, DeviceProfile)>(&mut c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
Ok(res) Ok(res)
} }
})
.await?
}

View File

@ -1,12 +1,21 @@
use std::fs::File;
use std::io::BufReader;
use std::ops::{Deref, DerefMut}; use std::ops::{Deref, DerefMut};
use std::sync::RwLock; use std::sync::RwLock;
use anyhow::Context; use anyhow::Context;
use anyhow::Result; use anyhow::Result;
use diesel::pg::PgConnection; use diesel::{ConnectionError, ConnectionResult};
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection}; use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
use diesel_async::pooled_connection::deadpool::{Object as DeadpoolObject, Pool as DeadpoolPool};
use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
use diesel_async::AsyncPgConnection;
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use tracing::info; use futures_util::future::BoxFuture;
use futures_util::FutureExt;
use r2d2::{Pool, PooledConnection};
use tokio::task;
use tracing::{error, info};
use crate::config; use crate::config;
@ -34,11 +43,11 @@ pub mod search;
pub mod tenant; pub mod tenant;
pub mod user; pub mod user;
pub type PgPool = Pool<ConnectionManager<PgConnection>>; pub type AsyncPgPool = DeadpoolPool<AsyncPgConnection>;
pub type PgPoolConnection = PooledConnection<ConnectionManager<PgConnection>>; pub type AsyncPgPoolConnection = DeadpoolObject<AsyncPgConnection>;
lazy_static! { lazy_static! {
static ref PG_POOL: RwLock<Option<PgPool>> = RwLock::new(None); static ref ASYNC_PG_POOL: RwLock<Option<AsyncPgPool>> = RwLock::new(None);
static ref REDIS_POOL: RwLock<Option<RedisPool>> = RwLock::new(None); static ref REDIS_POOL: RwLock<Option<RedisPool>> = RwLock::new(None);
static ref REDIS_PREFIX: RwLock<String> = RwLock::new("".to_string()); static ref REDIS_PREFIX: RwLock<String> = RwLock::new("".to_string());
} }
@ -170,21 +179,18 @@ pub async fn setup() -> Result<()> {
let conf = config::get(); let conf = config::get();
info!("Setting up PostgreSQL connection pool"); info!("Setting up PostgreSQL connection pool");
let pg_pool = PgPool::builder() let mut config = ManagerConfig::default();
.max_size(conf.postgresql.max_open_connections) config.custom_setup = Box::new(pg_establish_connection);
.min_idle(match conf.postgresql.min_idle_connections {
0 => None,
_ => Some(conf.postgresql.min_idle_connections),
})
.build(ConnectionManager::new(&conf.postgresql.dsn))
.context("Setup PostgreSQL connection pool error")?;
set_db_pool(pg_pool);
let mut pg_conn = get_db_conn()?;
info!("Applying schema migrations"); let mgr = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
pg_conn &conf.postgresql.dsn,
.run_pending_migrations(MIGRATIONS) config,
.map_err(|e| anyhow!("{}", e))?; );
let pool = DeadpoolPool::builder(mgr)
.max_size(conf.postgresql.max_open_connections as usize)
.build()?;
set_async_db_pool(pool);
run_db_migrations().await?;
info!("Setting up Redis client"); info!("Setting up Redis client");
if conf.redis.cluster { if conf.redis.cluster {
@ -221,18 +227,66 @@ pub async fn setup() -> Result<()> {
Ok(()) Ok(())
} }
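The pool itself moves from r2d2 to deadpool through diesel-async's AsyncDieselConnectionManager. Stripped of the custom rustls setup defined below, the construction reduces to roughly the following sketch; dsn and max_open_connections stand in for the values read from the ChirpStack configuration, and the default manager setup connects without TLS:

use anyhow::Result;
use diesel_async::pooled_connection::deadpool::Pool;
use diesel_async::pooled_connection::AsyncDieselConnectionManager;
use diesel_async::AsyncPgConnection;

fn build_pool(dsn: &str, max_open_connections: usize) -> Result<Pool<AsyncPgConnection>> {
    let mgr = AsyncDieselConnectionManager::<AsyncPgConnection>::new(dsn);
    // Only a maximum size is configured here, matching the hunk above.
    let pool = Pool::builder(mgr).max_size(max_open_connections).build()?;
    Ok(pool)
}

Checking a connection out of the pool is itself async now (pool.get().await?), and the returned object dereferences to AsyncPgConnection, so it can be handed straight to the diesel_async::RunQueryDsl methods.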
pub fn get_db_pool() -> Result<PgPool> { // Source:
let pool_r = PG_POOL.read().unwrap(); // https://github.com/weiznich/diesel_async/blob/main/examples/postgres/pooled-with-rustls/src/main.rs
let pool = pool_r fn pg_establish_connection(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
let fut = async {
let root_certs =
pg_root_certs().map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
let rustls_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
let (client, conn) = tokio_postgres::connect(config, tls)
.await
.map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
tokio::spawn(async move {
if let Err(e) = conn.await {
error!(error = %e, "PostgreSQL connection error");
}
});
AsyncPgConnection::try_from(client).await
};
fut.boxed()
}
fn pg_root_certs() -> Result<rustls::RootCertStore> {
let conf = config::get();
let mut roots = rustls::RootCertStore::empty();
let certs = rustls_native_certs::load_native_certs()?;
let certs: Vec<_> = certs.into_iter().map(|cert| cert.0).collect();
roots.add_parsable_certificates(&certs);
if !conf.postgresql.ca_cert.is_empty() {
let f = File::open(&conf.postgresql.ca_cert).context("Open ca certificate")?;
let mut reader = BufReader::new(f);
let certs = rustls_pemfile::certs(&mut reader)?;
for cert in certs
.into_iter()
.map(rustls::Certificate)
.collect::<Vec<_>>()
{
roots.add(&cert)?;
}
}
Ok(roots)
}
pub fn get_async_db_pool() -> Result<AsyncPgPool> {
let pool_r = ASYNC_PG_POOL.read().unwrap();
let pool: AsyncPgPool = pool_r
.as_ref()
.ok_or_else(|| anyhow!("PostgreSQL connection pool is not initialized"))?
.clone();
Ok(pool)
}

pub async fn get_async_db_conn() -> Result<AsyncPgPoolConnection> {
let pool = get_async_db_pool()?;
Ok(pool.get().await?)
}
pub fn get_redis_conn() -> Result<RedisPoolConnection> {
@ -246,11 +300,28 @@ pub fn get_redis_conn() -> Result<RedisPoolConnection> {
})
}
pub fn set_async_db_pool(p: AsyncPgPool) {
let mut pool_w = ASYNC_PG_POOL.write().unwrap();
*pool_w = Some(p);
}
pub async fn run_db_migrations() -> Result<()> {
info!("Applying schema migrations");
let c = get_async_db_conn().await?;
let mut c_wrapped: AsyncConnectionWrapper<AsyncPgPoolConnection> =
AsyncConnectionWrapper::from(c);
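// Note (added comment): the diesel_migrations harness is synchronous, so the pooled async
// connection is wrapped in AsyncConnectionWrapper and driven from a blocking thread below.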
task::spawn_blocking(move || -> Result<()> {
c_wrapped
.run_pending_migrations(MIGRATIONS)
.map_err(|e| anyhow!("{}", e))?;
Ok(())
})
.await?
}
pub fn set_redis_pool(p: RedisPool) {
let mut pool_w = REDIS_POOL.write().unwrap();
*pool_w = Some(p);

@ -262,14 +333,22 @@ pub fn redis_key(s: String) -> String {
}

#[cfg(test)]
pub async fn reset_db() -> Result<()> {
let c = get_async_db_conn().await?;
let mut c_wrapped: AsyncConnectionWrapper<AsyncPgPoolConnection> =
AsyncConnectionWrapper::from(c);

tokio::task::spawn_blocking(move || -> Result<()> {
c_wrapped
.revert_all_migrations(MIGRATIONS)
.map_err(|e| anyhow!("{}", e))?;
c_wrapped
.run_pending_migrations(MIGRATIONS)
.map_err(|e| anyhow!("{}", e))?;
Ok(())
})
.await?
}

#[cfg(test)]
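The storage modules in the rest of this commit all follow the same call-site shape once the pool is async. A minimal sketch of that pattern, not part of the commit itself (the device_count helper is hypothetical; the device table, Error type, and get_async_db_conn come from the surrounding modules):

use diesel::{dsl, prelude::*};
use diesel_async::RunQueryDsl;

use super::schema::device;
use super::{error::Error, get_async_db_conn};

// Hypothetical helper illustrating the call-site pattern: await a pooled
// connection, then await the query itself via diesel_async::RunQueryDsl.
pub async fn device_count() -> Result<i64, Error> {
    let mut c = get_async_db_conn().await?;
    device::dsl::device
        .select(dsl::count_star())
        .first(&mut c)
        .await
        .map_err(|e| Error::from_diesel(e, "".into()))
}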


@ -1,8 +1,7 @@
use anyhow::{Context, Result};
use chrono::{DateTime, Duration, Utc};
use diesel::{dsl, prelude::*};
use diesel_async::RunQueryDsl;
use tracing::info;
use uuid::Uuid;
@ -14,7 +13,7 @@ use super::schema::{
application, device, gateway, multicast_group, multicast_group_device, multicast_group_gateway,
multicast_group_queue_item,
};
use super::{fields, get_async_db_conn};
use crate::downlink::classb;
use crate::{config, gpstime::ToDateTime, gpstime::ToGpsTime};
@ -133,41 +132,29 @@ impl Default for MulticastGroupQueueItem {
pub async fn create(mg: MulticastGroup) -> Result<MulticastGroup, Error> {
mg.validate()?;
let mut c = get_async_db_conn().await?;
let mg: MulticastGroup = diesel::insert_into(multicast_group::table)
.values(&mg)
.get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, mg.id.to_string()))?;
info!(id = %mg.id, "Multicast-group created");
Ok(mg)
}
pub async fn get(id: &Uuid) -> Result<MulticastGroup, Error> {
let mut c = get_async_db_conn().await?;
multicast_group::dsl::multicast_group
.find(&id)
.first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))
}
pub async fn update(mg: MulticastGroup) -> Result<MulticastGroup, Error> {
mg.validate()?;
let mut c = get_async_db_conn().await?;
let mg: MulticastGroup = diesel::update(multicast_group::dsl::multicast_group.find(&mg.id))
.set((
multicast_group::updated_at.eq(Utc::now()),
multicast_group::name.eq(&mg.name),
@ -183,37 +170,26 @@ pub async fn update(mg: MulticastGroup) -> Result<MulticastGroup, Error> {
multicast_group::class_c_scheduling_type.eq(&mg.class_c_scheduling_type),
))
.get_result(&mut c)
.await
.map_err(|e| Error::from_diesel(e, mg.id.to_string()))?;
info!(id = %mg.id, "Multicast-group updated");
Ok(mg)
}
pub async fn delete(id: &Uuid) -> Result<(), Error> {
let mut c = get_async_db_conn().await?;
let ra = diesel::delete(multicast_group::dsl::multicast_group.find(&id))
.execute(&mut c)
.await?;
if ra == 0 {
return Err(Error::NotFound(id.to_string()));
}
info!(id = %id, "Multicast-group deleted");
Ok(())
}
pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
let mut c = get_async_db_conn().await?;
let mut q = multicast_group::dsl::multicast_group
.select(dsl::count_star())
.into_boxed();
@ -227,21 +203,16 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
}

q.first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, "".into()))
}
pub async fn list(
limit: i64,
offset: i64,
filters: &Filters,
) -> Result<Vec<MulticastGroupListItem>, Error> {
let mut c = get_async_db_conn().await?;
let mut q = multicast_group::dsl::multicast_group
.select((
multicast_group::id,
@ -265,29 +236,27 @@ pub async fn list(
.limit(limit)
.offset(offset)
.load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, "".into()))
}
pub async fn add_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error> {
let mut c = get_async_db_conn().await?;
c.build_transaction()
.run::<(), Error, _>(|c| {
Box::pin(async move {
let d: super::device::Device = device::dsl::device
.find(&dev_eui)
.for_update()
.get_result(c)
.await
.map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
let mg: MulticastGroup = multicast_group::dsl::multicast_group
.find(&group_id)
.for_update()
.get_result(c)
.await
.map_err(|e| Error::from_diesel(e, group_id.to_string()))?;
if d.application_id != mg.application_id {
@ -302,65 +271,59 @@ pub async fn add_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error> {
multicast_group_device::created_at.eq(Utc::now()),
))
.execute(c)
.await
.map_err(|e| Error::from_diesel(e, "".into()))?;
Ok(())
})
})
.await?;
info!(multicast_group_id = %group_id, dev_eui = %dev_eui, "Device added to multicast-group");
Ok(())
}
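The multi-statement operations in this module (add_device, add_gateway, enqueue) share the same diesel-async transaction shape. A condensed sketch of just that idiom, with a hypothetical in_transaction helper and an empty body standing in for the real queries:

use super::error::Error;
use super::AsyncPgPoolConnection;

// Hypothetical helper showing only the transaction shape used above:
// build_transaction().run(...) takes a closure that returns a pinned async
// block, and every query inside that block is awaited on the borrowed `c`.
async fn in_transaction(c: &mut AsyncPgPoolConnection) -> Result<(), Error> {
    c.build_transaction()
        .run::<(), Error, _>(|c| {
            Box::pin(async move {
                let _ = c; // real code runs queries here, e.g. .execute(c).await?
                Ok(())
            })
        })
        .await
}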
pub async fn remove_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error> {
let mut c = get_async_db_conn().await?;
let ra = diesel::delete(
multicast_group_device::dsl::multicast_group_device
.filter(multicast_group_device::multicast_group_id.eq(&group_id))
.filter(multicast_group_device::dev_eui.eq(&dev_eui)),
)
.execute(&mut c)
.await?;
if ra == 0 {
return Err(Error::NotFound(format!(
"multicast-group: {}, device: {}",
group_id, dev_eui
)));
}
info!(multicast_group_id = %group_id, dev_eui = %dev_eui, "Device removed from multicast-group");
Ok(())
}
pub async fn add_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Error> { pub async fn add_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let group_id = *group_id; c.build_transaction()
let gateway_id = *gateway_id; .run::<(), Error, _>(|c| {
move || -> Result<(), Error> { Box::pin(async move {
let mut c = get_db_conn()?;
c.transaction::<(), Error, _>(|c| {
let gw: super::gateway::Gateway = gateway::dsl::gateway let gw: super::gateway::Gateway = gateway::dsl::gateway
.find(&gateway_id) .find(&gateway_id)
.for_update() .for_update()
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?; .map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?;
let mg: MulticastGroup = multicast_group::dsl::multicast_group let mg: MulticastGroup = multicast_group::dsl::multicast_group
.find(&group_id) .find(&group_id)
.for_update() .for_update()
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, group_id.to_string()))?; .map_err(|e| Error::from_diesel(e, group_id.to_string()))?;
let a: super::application::Application = application::dsl::application let a: super::application::Application = application::dsl::application
.find(&mg.application_id) .find(&mg.application_id)
.for_update() .for_update()
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, mg.application_id.to_string()))?; .map_err(|e| Error::from_diesel(e, mg.application_id.to_string()))?;
if a.tenant_id != gw.tenant_id { if a.tenant_id != gw.tenant_id {
@ -375,71 +338,54 @@ pub async fn add_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Erro
multicast_group_gateway::created_at.eq(Utc::now()), multicast_group_gateway::created_at.eq(Utc::now()),
)) ))
.execute(c) .execute(c)
.await
.map_err(|e| Error::from_diesel(e, "".into()))?; .map_err(|e| Error::from_diesel(e, "".into()))?;
Ok(()) Ok(())
}) })
}
}) })
.await??; .await?;
info!(multicast_group_id = %group_id, gateway_id = %gateway_id, "Gateway added to multicast-group"); info!(multicast_group_id = %group_id, gateway_id = %gateway_id, "Gateway added to multicast-group");
Ok(()) Ok(())
} }
pub async fn remove_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Error> { pub async fn remove_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let group_id = *group_id;
let gateway_id = *gateway_id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete( let ra = diesel::delete(
multicast_group_gateway::dsl::multicast_group_gateway multicast_group_gateway::dsl::multicast_group_gateway
.filter(multicast_group_gateway::multicast_group_id.eq(&group_id)) .filter(multicast_group_gateway::multicast_group_id.eq(&group_id))
.filter(multicast_group_gateway::gateway_id.eq(&gateway_id)), .filter(multicast_group_gateway::gateway_id.eq(&gateway_id)),
) )
.execute(&mut c)?; .execute(&mut c)
.await?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(format!( return Err(Error::NotFound(format!(
"multicast-group: {}, gateway: {}", "multicast-group: {}, gateway: {}",
group_id, gateway_id group_id, gateway_id
))); )));
} }
Ok(())
}
})
.await??;
info!(multicast_group_id = %group_id, gateway_id = %gateway_id, "Gateway removed from multicast-group"); info!(multicast_group_id = %group_id, gateway_id = %gateway_id, "Gateway removed from multicast-group");
Ok(()) Ok(())
} }
pub async fn get_dev_euis(group_id: &Uuid) -> Result<Vec<EUI64>, Error> { pub async fn get_dev_euis(group_id: &Uuid) -> Result<Vec<EUI64>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let group_id = *group_id;
move || -> Result<Vec<EUI64>, Error> {
let mut c = get_db_conn()?;
multicast_group_device::dsl::multicast_group_device multicast_group_device::dsl::multicast_group_device
.select(multicast_group_device::dev_eui) .select(multicast_group_device::dev_eui)
.filter(multicast_group_device::dsl::multicast_group_id.eq(&group_id)) .filter(multicast_group_device::dsl::multicast_group_id.eq(&group_id))
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, group_id.to_string())) .map_err(|e| Error::from_diesel(e, group_id.to_string()))
} }
})
.await?
}
pub async fn get_gateway_ids(group_id: &Uuid) -> Result<Vec<EUI64>, Error> { pub async fn get_gateway_ids(group_id: &Uuid) -> Result<Vec<EUI64>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let group_id = *group_id;
move || -> Result<Vec<EUI64>, Error> {
let mut c = get_db_conn()?;
multicast_group_gateway::dsl::multicast_group_gateway multicast_group_gateway::dsl::multicast_group_gateway
.select(multicast_group_gateway::gateway_id) .select(multicast_group_gateway::gateway_id)
.filter(multicast_group_gateway::dsl::multicast_group_id.eq(&group_id)) .filter(multicast_group_gateway::dsl::multicast_group_id.eq(&group_id))
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, group_id.to_string())) .map_err(|e| Error::from_diesel(e, group_id.to_string()))
} }
})
.await?
}
// This enqueues a multicast-group queue item for the given gateways and returns the frame-counter // This enqueues a multicast-group queue item for the given gateways and returns the frame-counter
// of the multicast downlink. // of the multicast downlink.
@ -451,17 +397,18 @@ pub async fn enqueue(
) -> Result<(Vec<Uuid>, u32), Error> { ) -> Result<(Vec<Uuid>, u32), Error> {
qi.validate()?; qi.validate()?;
let (ids, f_cnt) = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let gateway_ids = gateway_ids.to_vec();
move || -> Result<(Vec<Uuid>, u32), Error> {
let mut c = get_db_conn()?;
let conf = config::get(); let conf = config::get();
c.transaction::<(Vec<Uuid>, u32), Error, _>(|c| { let (ids, f_cnt) = c
.build_transaction()
.run::<(Vec<Uuid>, u32), Error, _>(|c| {
Box::pin(async move {
let mut ids: Vec<Uuid> = Vec::new(); let mut ids: Vec<Uuid> = Vec::new();
let mg: MulticastGroup = multicast_group::dsl::multicast_group let mg: MulticastGroup = multicast_group::dsl::multicast_group
.find(&qi.multicast_group_id) .find(&qi.multicast_group_id)
.for_update() .for_update()
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?; .map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?;
match mg.group_type.as_ref() { match mg.group_type.as_ref() {
@ -483,7 +430,8 @@ pub async fn enqueue(
multicast_group_queue_item::dsl::multicast_group_id multicast_group_queue_item::dsl::multicast_group_id
.eq(&qi.multicast_group_id), .eq(&qi.multicast_group_id),
) )
.first(c)?; .first(c)
.await?;
// Get timestamp after which we must generate the next ping-slot. // Get timestamp after which we must generate the next ping-slot.
let ping_slot_after_gps_time = match res { let ping_slot_after_gps_time = match res {
@ -505,7 +453,7 @@ pub async fn enqueue(
let scheduler_run_after_ts = emit_at_time_since_gps_epoch.to_date_time() let scheduler_run_after_ts = emit_at_time_since_gps_epoch.to_date_time()
- Duration::from_std(2 * conf.network.scheduler.interval).unwrap(); - Duration::from_std(2 * conf.network.scheduler.interval).unwrap();
for gateway_id in &gateway_ids { for gateway_id in gateway_ids {
let qi = MulticastGroupQueueItem { let qi = MulticastGroupQueueItem {
scheduler_run_after: scheduler_run_after_ts, scheduler_run_after: scheduler_run_after_ts,
multicast_group_id: mg.id, multicast_group_id: mg.id,
@ -523,6 +471,7 @@ pub async fn enqueue(
diesel::insert_into(multicast_group_queue_item::table) diesel::insert_into(multicast_group_queue_item::table)
.values(&qi) .values(&qi)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, mg.id.to_string()))?; .map_err(|e| Error::from_diesel(e, mg.id.to_string()))?;
ids.push(qi.id); ids.push(qi.id);
} }
@ -538,7 +487,8 @@ pub async fn enqueue(
multicast_group_queue_item::dsl::multicast_group_id multicast_group_queue_item::dsl::multicast_group_id
.eq(&qi.multicast_group_id), .eq(&qi.multicast_group_id),
) )
.first(c)?; .first(c)
.await?;
let mut scheduler_run_after_ts = match res { let mut scheduler_run_after_ts = match res {
Some(v) => { Some(v) => {
@ -563,7 +513,7 @@ pub async fn enqueue(
None None
}; };
for gateway_id in &gateway_ids { for gateway_id in gateway_ids {
let qi = MulticastGroupQueueItem { let qi = MulticastGroupQueueItem {
scheduler_run_after: scheduler_run_after_ts, scheduler_run_after: scheduler_run_after_ts,
multicast_group_id: mg.id, multicast_group_id: mg.id,
@ -579,6 +529,7 @@ pub async fn enqueue(
diesel::insert_into(multicast_group_queue_item::table) diesel::insert_into(multicast_group_queue_item::table)
.values(&qi) .values(&qi)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, mg.id.to_string()))?; .map_err(|e| Error::from_diesel(e, mg.id.to_string()))?;
ids.push(qi.id); ids.push(qi.id);
@ -604,77 +555,58 @@ pub async fn enqueue(
diesel::update(multicast_group::dsl::multicast_group.find(&qi.multicast_group_id)) diesel::update(multicast_group::dsl::multicast_group.find(&qi.multicast_group_id))
.set(multicast_group::f_cnt.eq(mg.f_cnt + 1)) .set(multicast_group::f_cnt.eq(mg.f_cnt + 1))
.execute(c) .execute(c)
.await
.map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?; .map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?;
// Return value before it was incremented // Return value before it was incremented
Ok((ids, mg.f_cnt as u32)) Ok((ids, mg.f_cnt as u32))
}) })
}
}) })
.await??; .await?;
info!(multicast_group_id = %qi.multicast_group_id, f_cnt = f_cnt, "Multicast-group queue item created"); info!(multicast_group_id = %qi.multicast_group_id, f_cnt = f_cnt, "Multicast-group queue item created");
Ok((ids, f_cnt)) Ok((ids, f_cnt))
} }
pub async fn delete_queue_item(id: &Uuid) -> Result<(), Error> { pub async fn delete_queue_item(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id; let ra = diesel::delete(multicast_group_queue_item::dsl::multicast_group_queue_item.find(&id))
move || -> Result<(), Error> { .execute(&mut c)
let mut c = get_db_conn()?; .await?;
let ra = diesel::delete(
multicast_group_queue_item::dsl::multicast_group_queue_item.find(&id),
)
.execute(&mut c)?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(id.to_string())); return Err(Error::NotFound(id.to_string()));
} }
Ok(())
}
})
.await??;
info!(id = %id, "Multicast-group queue item deleted"); info!(id = %id, "Multicast-group queue item deleted");
Ok(()) Ok(())
} }
pub async fn flush_queue(multicast_group_id: &Uuid) -> Result<(), Error> { pub async fn flush_queue(multicast_group_id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let multicast_group_id = *multicast_group_id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let _ = diesel::delete( let _ = diesel::delete(
multicast_group_queue_item::dsl::multicast_group_queue_item multicast_group_queue_item::dsl::multicast_group_queue_item
.filter(multicast_group_queue_item::multicast_group_id.eq(&multicast_group_id)), .filter(multicast_group_queue_item::multicast_group_id.eq(&multicast_group_id)),
) )
.execute(&mut c) .execute(&mut c)
.await
.map_err(|e| Error::from_diesel(e, multicast_group_id.to_string()))?; .map_err(|e| Error::from_diesel(e, multicast_group_id.to_string()))?;
Ok(())
}
})
.await??;
info!(multicast_group_id = %multicast_group_id, "Multicast-group queue flushed"); info!(multicast_group_id = %multicast_group_id, "Multicast-group queue flushed");
Ok(()) Ok(())
} }
pub async fn get_queue(multicast_group_id: &Uuid) -> Result<Vec<MulticastGroupQueueItem>, Error> { pub async fn get_queue(multicast_group_id: &Uuid) -> Result<Vec<MulticastGroupQueueItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let multicast_group_id = *multicast_group_id;
move || -> Result<Vec<MulticastGroupQueueItem>, Error> {
let mut c = get_db_conn()?;
multicast_group_queue_item::dsl::multicast_group_queue_item multicast_group_queue_item::dsl::multicast_group_queue_item
.filter(multicast_group_queue_item::dsl::multicast_group_id.eq(&multicast_group_id)) .filter(multicast_group_queue_item::dsl::multicast_group_id.eq(&multicast_group_id))
.order_by(multicast_group_queue_item::created_at) .order_by(multicast_group_queue_item::created_at)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, multicast_group_id.to_string())) .map_err(|e| Error::from_diesel(e, multicast_group_id.to_string()))
} }
})
.await?
}
pub async fn get_schedulable_queue_items(limit: usize) -> Result<Vec<MulticastGroupQueueItem>> { pub async fn get_schedulable_queue_items(limit: usize) -> Result<Vec<MulticastGroupQueueItem>> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Vec<MulticastGroupQueueItem>> { c.build_transaction()
let mut c = get_db_conn()?; .run::<Vec<MulticastGroupQueueItem>, Error, _>(|c| {
c.transaction::<Vec<MulticastGroupQueueItem>, Error, _>(|c| { Box::pin(async move {
let conf = config::get(); let conf = config::get();
diesel::sql_query( diesel::sql_query(
r#" r#"
@ -703,12 +635,12 @@ pub async fn get_schedulable_queue_items(limit: usize) -> Result<Vec<MulticastGr
Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(), Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(),
) )
.load(c) .load(c)
.await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
}) })
.context("Get schedulable multicast-group queue items")
}
}) })
.await? .await
.context("Get schedulable multicast-group queue items")
} }
#[cfg(test)] #[cfg(test)]
@ -718,18 +650,13 @@ pub mod test {
use crate::test; use crate::test;
pub async fn get_queue_item(id: &Uuid) -> Result<MulticastGroupQueueItem, Error> { pub async fn get_queue_item(id: &Uuid) -> Result<MulticastGroupQueueItem, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<MulticastGroupQueueItem, Error> {
let mut c = get_db_conn()?;
multicast_group_queue_item::dsl::multicast_group_queue_item multicast_group_queue_item::dsl::multicast_group_queue_item
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string())) .map_err(|e| Error::from_diesel(e, id.to_string()))
} }
})
.await?
}
struct FilterTest<'a> { struct FilterTest<'a> {
filters: Filters, filters: Filters,


@ -1,15 +1,14 @@
use anyhow::Result;
use chrono::{DateTime, Utc};
use diesel::{dsl, prelude::*};
use diesel_async::RunQueryDsl;
use tracing::info;
use uuid::Uuid;

use lrwn::{DevAddr, EUI64};

use super::schema::{device, device_profile, relay_device};
use super::{device::Device, error::Error, get_async_db_conn};
// This is set to 15, because the FilterList must contain a "catch-all" record to filter all // This is set to 15, because the FilterList must contain a "catch-all" record to filter all
// uplinks that do not match the remaining records. This means that we can use 16 - 1 FilterList // uplinks that do not match the remaining records. This means that we can use 16 - 1 FilterList
@ -44,10 +43,7 @@ pub struct DeviceListItem {
} }
pub async fn get_relay_count(filters: &RelayFilters) -> Result<i64, Error> { pub async fn get_relay_count(filters: &RelayFilters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = device::dsl::device let mut q = device::dsl::device
.select(dsl::count_star()) .select(dsl::count_star())
.inner_join(device_profile::table) .inner_join(device_profile::table)
@ -58,10 +54,7 @@ pub async fn get_relay_count(filters: &RelayFilters) -> Result<i64, Error> {
q = q.filter(device::dsl::application_id.eq(application_id)); q = q.filter(device::dsl::application_id.eq(application_id));
} }
Ok(q.first(&mut c)?) Ok(q.first(&mut c).await?)
}
})
.await?
} }
pub async fn list_relays( pub async fn list_relays(
@ -69,10 +62,7 @@ pub async fn list_relays(
offset: i64, offset: i64,
filters: &RelayFilters, filters: &RelayFilters,
) -> Result<Vec<RelayListItem>, Error> { ) -> Result<Vec<RelayListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<RelayListItem>, Error> {
let mut c = get_db_conn()?;
let mut q = device::dsl::device let mut q = device::dsl::device
.inner_join(device_profile::table) .inner_join(device_profile::table)
.select((device::dev_eui, device::name)) .select((device::dev_eui, device::name))
@ -87,17 +77,12 @@ pub async fn list_relays(
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
} }
})
.await?
}
pub async fn get_device_count(filters: &DeviceFilters) -> Result<i64, Error> { pub async fn get_device_count(filters: &DeviceFilters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = relay_device::dsl::relay_device let mut q = relay_device::dsl::relay_device
.select(dsl::count_star()) .select(dsl::count_star())
.into_boxed(); .into_boxed();
@ -107,26 +92,20 @@ pub async fn get_device_count(filters: &DeviceFilters) -> Result<i64, Error> {
} }
q.first(&mut c) q.first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
} }
})
.await?
}
pub async fn list_devices( pub async fn list_devices(
limit: i64, limit: i64,
offset: i64, offset: i64,
filters: &DeviceFilters, filters: &DeviceFilters,
) -> Result<Vec<DeviceListItem>, Error> { ) -> Result<Vec<DeviceListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<DeviceListItem>, Error> {
let mut c = get_db_conn()?;
let mut q = relay_device::dsl::relay_device let mut q = relay_device::dsl::relay_device
.inner_join(device::table.on(relay_device::dsl::dev_eui.eq(device::dsl::dev_eui))) .inner_join(device::table.on(relay_device::dsl::dev_eui.eq(device::dsl::dev_eui)))
.inner_join( .inner_join(
device_profile::table device_profile::table.on(device::dsl::device_profile_id.eq(device_profile::dsl::id)),
.on(device::dsl::device_profile_id.eq(device_profile::dsl::id)),
) )
.select(( .select((
relay_device::dev_eui, relay_device::dev_eui,
@ -147,28 +126,28 @@ pub async fn list_devices(
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c) .load(&mut c)
.await
.map_err(|e| Error::from_diesel(e, "".into())) .map_err(|e| Error::from_diesel(e, "".into()))
} }
})
.await?
}
pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(), Error> { pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<(), Error> { c.build_transaction()
let mut c = get_db_conn()?; .run::<(), Error, _>(|c| {
c.transaction::<(), Error, _>(|c| { Box::pin(async move {
// We lock the relay device to avoid race-conditions in the validation. // We lock the relay device to avoid race-conditions in the validation.
let rd: Device = device::dsl::device let rd: Device = device::dsl::device
.find(&relay_dev_eui) .find(&relay_dev_eui)
.for_update() .for_update()
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, relay_dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, relay_dev_eui.to_string()))?;
// Is the given relay_dev_eui a Relay? // Is the given relay_dev_eui a Relay?
let rdp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile let rdp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile
.find(&rd.device_profile_id) .find(&rd.device_profile_id)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, rd.device_profile_id.to_string()))?; .map_err(|e| Error::from_diesel(e, rd.device_profile_id.to_string()))?;
if !rdp.is_relay { if !rdp.is_relay {
return Err(Error::Validation("Device is not a relay".to_string())); return Err(Error::Validation("Device is not a relay".to_string()));
@ -178,6 +157,7 @@ pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(
let d: Device = device::dsl::device let d: Device = device::dsl::device
.find(&device_dev_eui) .find(&device_dev_eui)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, device_dev_eui.to_string()))?; .map_err(|e| Error::from_diesel(e, device_dev_eui.to_string()))?;
if rd.application_id != d.application_id { if rd.application_id != d.application_id {
@ -190,6 +170,7 @@ pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(
let dp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile let dp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile
.find(&d.device_profile_id) .find(&d.device_profile_id)
.get_result(c) .get_result(c)
.await
.map_err(|e| Error::from_diesel(e, d.device_profile_id.to_string()))?; .map_err(|e| Error::from_diesel(e, d.device_profile_id.to_string()))?;
if rdp.region != dp.region { if rdp.region != dp.region {
return Err(Error::Validation( return Err(Error::Validation(
@ -207,6 +188,7 @@ pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(
.select(dsl::count_star()) .select(dsl::count_star())
.filter(relay_device::dsl::relay_dev_eui.eq(&relay_dev_eui)) .filter(relay_device::dsl::relay_dev_eui.eq(&relay_dev_eui))
.first(c) .first(c)
.await
.map_err(|e| Error::from_diesel(e, "".into()))?; .map_err(|e| Error::from_diesel(e, "".into()))?;
if count > RELAY_MAX_DEVICES { if count > RELAY_MAX_DEVICES {
@ -223,13 +205,13 @@ pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(
relay_device::created_at.eq(Utc::now()), relay_device::created_at.eq(Utc::now()),
)) ))
.execute(c) .execute(c)
.await
.map_err(|e| Error::from_diesel(e, "".into()))?; .map_err(|e| Error::from_diesel(e, "".into()))?;
Ok(()) Ok(())
}) })
}
}) })
.await??; .await?;
info!(relay_dev_eui = %relay_dev_eui, device_dev_eui = %device_dev_eui, "Device added to relay"); info!(relay_dev_eui = %relay_dev_eui, device_dev_eui = %device_dev_eui, "Device added to relay");
@ -237,25 +219,20 @@ pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(
} }
pub async fn remove_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(), Error> { pub async fn remove_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete( let ra = diesel::delete(
relay_device::dsl::relay_device relay_device::dsl::relay_device
.filter(relay_device::relay_dev_eui.eq(&relay_dev_eui)) .filter(relay_device::relay_dev_eui.eq(&relay_dev_eui))
.filter(relay_device::dev_eui.eq(&device_dev_eui)), .filter(relay_device::dev_eui.eq(&device_dev_eui)),
) )
.execute(&mut c)?; .execute(&mut c)
.await?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(format!( return Err(Error::NotFound(format!(
"relay_dev_eui: {}, device_dev_eui: {}", "relay_dev_eui: {}, device_dev_eui: {}",
relay_dev_eui, device_dev_eui relay_dev_eui, device_dev_eui
))); )));
} }
Ok(())
}
})
.await??;
info!(relay_dev_eui = %relay_dev_eui, device_dev_eui = %device_dev_eui, "Device removed from relay"); info!(relay_dev_eui = %relay_dev_eui, device_dev_eui = %device_dev_eui, "Device removed from relay");


@ -1,13 +1,12 @@
use std::collections::HashMap;

use anyhow::{Context, Result};
use diesel_async::RunQueryDsl;
use regex::Regex;
use uuid::Uuid;

use super::error::Error;
use super::get_async_db_conn;
use lrwn::EUI64;
lazy_static! { lazy_static! {
@ -45,16 +44,12 @@ pub async fn global_search(
limit: usize,
offset: usize,
) -> Result<Vec<SearchResult>, Error> {
let (query, tags) = parse_search_query(search);
let query = format!("%{}%", query);
let tags = serde_json::to_value(tags).context("To serde_json value")?;
let mut c = get_async_db_conn().await?;
let res: Vec<SearchResult> = diesel::sql_query(
r#"
-- device
select
@ -154,8 +149,7 @@ pub async fn global_search(
score desc
limit $5
offset $6
"#)
.bind::<diesel::sql_types::Text, _>(&search)
.bind::<diesel::sql_types::Text, _>(&query)
.bind::<diesel::sql_types::Bool, _>(global_admin)
@ -163,13 +157,10 @@ pub async fn global_search(
.bind::<diesel::sql_types::BigInt, _>(limit as i64)
.bind::<diesel::sql_types::BigInt, _>(offset as i64)
.bind::<diesel::sql_types::Jsonb, _>(tags)
.load(&mut c).await?;
Ok(res)
}

fn parse_search_query(q: &str) -> (String, HashMap<String, String>) {
let mut tags: HashMap<String, String> = HashMap::new();


@ -2,15 +2,14 @@ use std::collections::HashMap;
use anyhow::Result;
use chrono::{DateTime, Utc};
use diesel::{dsl, prelude::*};
use diesel_async::RunQueryDsl;
use tracing::info;
use uuid::Uuid;

use super::error::Error;
use super::schema::{tenant, tenant_user, user};
use super::{fields, get_async_db_conn};
#[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)] #[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)]
#[diesel(table_name = tenant)] #[diesel(table_name = tenant)]
@ -105,41 +104,30 @@ pub struct Filters {
pub async fn create(t: Tenant) -> Result<Tenant, Error> { pub async fn create(t: Tenant) -> Result<Tenant, Error> {
t.validate()?; t.validate()?;
let t = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Tenant, Error> { let t: Tenant = diesel::insert_into(tenant::table)
let mut c = get_db_conn()?;
diesel::insert_into(tenant::table)
.values(&t) .values(&t)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, t.id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, t.id.to_string()))?;
})
.await??;
info!(id = %t.id, "Tenant created"); info!(id = %t.id, "Tenant created");
Ok(t) Ok(t)
} }
pub async fn get(id: &Uuid) -> Result<Tenant, Error> { pub async fn get(id: &Uuid) -> Result<Tenant, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<Tenant, Error> {
let mut c = get_db_conn()?;
let t = tenant::dsl::tenant let t = tenant::dsl::tenant
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(t) Ok(t)
} }
})
.await?
}
pub async fn update(t: Tenant) -> Result<Tenant, Error> { pub async fn update(t: Tenant) -> Result<Tenant, Error> {
t.validate()?; t.validate()?;
let t = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Tenant, Error> { let t: Tenant = diesel::update(tenant::dsl::tenant.find(&t.id))
let mut c = get_db_conn()?;
diesel::update(tenant::dsl::tenant.find(&t.id))
.set(( .set((
tenant::updated_at.eq(Utc::now()), tenant::updated_at.eq(Utc::now()),
tenant::name.eq(&t.name), tenant::name.eq(&t.name),
@ -152,39 +140,28 @@ pub async fn update(t: Tenant) -> Result<Tenant, Error> {
tenant::tags.eq(&t.tags), tenant::tags.eq(&t.tags),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, t.id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, t.id.to_string()))?;
})
.await??;
info!(id = %t.id, "Tenant updated"); info!(id = %t.id, "Tenant updated");
Ok(t) Ok(t)
} }
pub async fn delete(id: &Uuid) -> Result<(), Error> { pub async fn delete(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete(tenant::dsl::tenant.find(&id)) let ra = diesel::delete(tenant::dsl::tenant.find(&id))
.execute(&mut c) .execute(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(id.to_string())); return Err(Error::NotFound(id.to_string()));
} }
Ok(())
}
})
.await??;
info!(id = %id, "Tenant deleted"); info!(id = %id, "Tenant deleted");
Ok(()) Ok(())
} }
pub async fn get_count(filters: &Filters) -> Result<i64, Error> { pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let mut q = tenant::dsl::tenant let mut q = tenant::dsl::tenant
.left_join(tenant_user::table) .left_join(tenant_user::table)
.into_boxed(); .into_boxed();
@ -199,18 +176,13 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
Ok( Ok(
q.select(dsl::sql::<diesel::sql_types::BigInt>("count(distinct id)")) q.select(dsl::sql::<diesel::sql_types::BigInt>("count(distinct id)"))
.first(&mut c)?, .first(&mut c)
.await?,
) )
} }
})
.await?
}
pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<Tenant>, Error> { pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<Tenant>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let filters = filters.clone();
move || -> Result<Vec<Tenant>, Error> {
let mut c = get_db_conn()?;
let mut q = tenant::dsl::tenant let mut q = tenant::dsl::tenant
.left_join(tenant_user::table) .left_join(tenant_user::table)
.select(tenant::all_columns) .select(tenant::all_columns)
@ -228,25 +200,17 @@ pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<Tena
q = q.filter(tenant::dsl::name.ilike(format!("%{}%", search))); q = q.filter(tenant::dsl::name.ilike(format!("%{}%", search)));
} }
let items = q.load(&mut c)?; let items = q.load(&mut c).await?;
Ok(items) Ok(items)
} }
})
.await?
}
pub async fn add_user(tu: TenantUser) -> Result<TenantUser, Error> { pub async fn add_user(tu: TenantUser) -> Result<TenantUser, Error> {
let tu = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<TenantUser, Error> { let tu: TenantUser = diesel::insert_into(tenant_user::table)
let mut c = get_db_conn()?;
diesel::insert_into(tenant_user::table)
.values(&tu) .values(&tu)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, tu.user_id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, tu.user_id.to_string()))?;
})
.await??;
info!( info!(
tenant_id = %tu.tenant_id, tenant_id = %tu.tenant_id,
user_id = %tu.user_id, user_id = %tu.user_id,
@ -256,20 +220,16 @@ pub async fn add_user(tu: TenantUser) -> Result<TenantUser, Error> {
} }
pub async fn update_user(tu: TenantUser) -> Result<TenantUser, Error> { pub async fn update_user(tu: TenantUser) -> Result<TenantUser, Error> {
let tu = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<TenantUser, Error> { let tu: TenantUser = diesel::update(
let mut c = get_db_conn()?;
diesel::update(
tenant_user::dsl::tenant_user tenant_user::dsl::tenant_user
.filter(tenant_user::dsl::tenant_id.eq(&tu.tenant_id)) .filter(tenant_user::dsl::tenant_id.eq(&tu.tenant_id))
.filter(tenant_user::dsl::user_id.eq(&tu.user_id)), .filter(tenant_user::dsl::user_id.eq(&tu.user_id)),
) )
.set(&tu) .set(&tu)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, tu.user_id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, tu.user_id.to_string()))?;
})
.await??;
info!( info!(
tenant_id = %tu.tenant_id, tenant_id = %tu.tenant_id,
user_id = %tu.user_id, user_id = %tu.user_id,
@ -279,46 +239,32 @@ pub async fn update_user(tu: TenantUser) -> Result<TenantUser, Error> {
} }
pub async fn get_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<TenantUser, Error> { pub async fn get_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<TenantUser, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
let user_id = *user_id;
move || -> Result<TenantUser, Error> {
let mut c = get_db_conn()?;
let tu: TenantUser = tenant_user::dsl::tenant_user let tu: TenantUser = tenant_user::dsl::tenant_user
.filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) .filter(tenant_user::dsl::tenant_id.eq(&tenant_id))
.filter(tenant_user::dsl::user_id.eq(&user_id)) .filter(tenant_user::dsl::user_id.eq(&user_id))
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, user_id.to_string()))?; .map_err(|e| Error::from_diesel(e, user_id.to_string()))?;
Ok(tu) Ok(tu)
} }
})
.await?
}
pub async fn get_user_count(tenant_id: &Uuid) -> Result<i64, Error> { pub async fn get_user_count(tenant_id: &Uuid) -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
move || -> Result<i64, Error> {
let mut c = get_db_conn()?;
let count = tenant_user::dsl::tenant_user let count = tenant_user::dsl::tenant_user
.select(dsl::count_star()) .select(dsl::count_star())
.filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) .filter(tenant_user::dsl::tenant_id.eq(&tenant_id))
.first(&mut c)?; .first(&mut c)
.await?;
Ok(count) Ok(count)
} }
})
.await?
}
pub async fn get_users( pub async fn get_users(
tenant_id: &Uuid, tenant_id: &Uuid,
limit: i64, limit: i64,
offset: i64, offset: i64,
) -> Result<Vec<TenantUserListItem>, Error> { ) -> Result<Vec<TenantUserListItem>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
move || -> Result<Vec<TenantUserListItem>, Error> {
let mut c = get_db_conn()?;
let items = tenant_user::dsl::tenant_user let items = tenant_user::dsl::tenant_user
.inner_join(user::table) .inner_join(user::table)
.select(( .select((
@ -335,33 +281,24 @@ pub async fn get_users(
.order_by(user::dsl::email) .order_by(user::dsl::email)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
pub async fn delete_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<(), Error> { pub async fn delete_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let tenant_id = *tenant_id;
let user_id = *user_id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete( let ra = diesel::delete(
tenant_user::dsl::tenant_user tenant_user::dsl::tenant_user
.filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) .filter(tenant_user::dsl::tenant_id.eq(&tenant_id))
.filter(tenant_user::dsl::user_id.eq(&user_id)), .filter(tenant_user::dsl::user_id.eq(&user_id)),
) )
.execute(&mut c)?; .execute(&mut c)
.await?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(user_id.to_string())); return Err(Error::NotFound(user_id.to_string()));
} }
Ok(())
}
})
.await??;
info!( info!(
tenant_id = %tenant_id, tenant_id = %tenant_id,
user_id = %user_id, user_id = %user_id,
@ -371,18 +308,13 @@ pub async fn delete_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<(), Error>
} }
pub async fn get_tenant_users_for_user(user_id: &Uuid) -> Result<Vec<TenantUser>, Error> { pub async fn get_tenant_users_for_user(user_id: &Uuid) -> Result<Vec<TenantUser>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let user_id = *user_id;
move || -> Result<Vec<TenantUser>, Error> {
let mut c = get_db_conn()?;
let items = tenant_user::dsl::tenant_user let items = tenant_user::dsl::tenant_user
.filter(tenant_user::dsl::user_id.eq(&user_id)) .filter(tenant_user::dsl::user_id.eq(&user_id))
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {


@ -1,19 +1,18 @@
use anyhow::Result;
use chrono::{DateTime, Utc};
use diesel::{dsl, prelude::*};
use diesel_async::RunQueryDsl;
use pbkdf2::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Algorithm, Pbkdf2,
};
use rand_core::OsRng;
use tracing::info;
use uuid::Uuid;
use validator::validate_email;

use super::error::Error;
use super::get_async_db_conn;
use super::schema::user;
#[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)] #[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)]
@ -67,76 +66,54 @@ impl User {
pub async fn create(u: User) -> Result<User, Error> { pub async fn create(u: User) -> Result<User, Error> {
u.validate()?; u.validate()?;
let u = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<User, Error> {
let mut c = get_db_conn()?;
diesel::insert_into(user::table) let u: User = diesel::insert_into(user::table)
.values(&u) .values(&u)
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, u.id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, u.id.to_string()))?;
})
.await??;
info!(id = %u.id, "User created"); info!(id = %u.id, "User created");
Ok(u) Ok(u)
} }
pub async fn get(id: &Uuid) -> Result<User, Error> { pub async fn get(id: &Uuid) -> Result<User, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<User, Error> {
let mut c = get_db_conn()?;
let u = user::dsl::user let u = user::dsl::user
.find(&id) .find(&id)
.first(&mut c) .first(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
Ok(u) Ok(u)
} }
})
.await?
}
pub async fn get_by_email(email: &str) -> Result<User, Error> { pub async fn get_by_email(email: &str) -> Result<User, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let email = email.to_string();
move || -> Result<User, Error> {
let mut c = get_db_conn()?;
let u = user::dsl::user let u = user::dsl::user
.filter(user::dsl::email.eq(&email)) .filter(user::dsl::email.eq(email))
.first(&mut c) .first(&mut c)
.map_err(|e| Error::from_diesel(e, email))?; .await
.map_err(|e| Error::from_diesel(e, email.to_string()))?;
Ok(u) Ok(u)
} }
})
.await?
}
pub async fn get_by_external_id(external_id: &str) -> Result<User, Error> { pub async fn get_by_external_id(external_id: &str) -> Result<User, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let external_id = external_id.to_string();
move || -> Result<User, Error> {
let mut c = get_db_conn()?;
let u = user::dsl::user let u = user::dsl::user
.filter(user::dsl::external_id.eq(&external_id)) .filter(user::dsl::external_id.eq(external_id))
.first(&mut c) .first(&mut c)
.map_err(|e| Error::from_diesel(e, external_id))?; .await
.map_err(|e| Error::from_diesel(e, external_id.to_string()))?;
Ok(u) Ok(u)
} }
})
.await?
}
pub async fn get_by_email_and_pw(email: &str, pw: &str) -> Result<User, Error> { pub async fn get_by_email_and_pw(email: &str, pw: &str) -> Result<User, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let email = email.to_string();
let pw = pw.to_string();
move || -> Result<User, Error> {
let mut c = get_db_conn()?;
let u: User = match user::dsl::user let u: User = match user::dsl::user
.filter(user::dsl::email.eq(&email)) .filter(user::dsl::email.eq(email))
.first(&mut c) .first(&mut c)
.map_err(|e| Error::from_diesel(e, email)) .await
.map_err(|e| Error::from_diesel(e, email.to_string()))
{ {
Ok(v) => v, Ok(v) => v,
Err(Error::NotFound(_)) => { Err(Error::NotFound(_)) => {
@ -153,16 +130,11 @@ pub async fn get_by_email_and_pw(email: &str, pw: &str) -> Result<User, Error> {
Err(Error::InvalidUsernameOrPassword) Err(Error::InvalidUsernameOrPassword)
} }
})
.await?
}
pub async fn update(u: User) -> Result<User, Error> { pub async fn update(u: User) -> Result<User, Error> {
u.validate()?; u.validate()?;
let u = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<User, Error> { let u: User = diesel::update(user::dsl::user.find(&u.id))
let mut c = get_db_conn()?;
diesel::update(user::dsl::user.find(&u.id))
.set(( .set((
user::updated_at.eq(Utc::now()), user::updated_at.eq(Utc::now()),
user::is_admin.eq(&u.is_admin), user::is_admin.eq(&u.is_admin),
@ -173,76 +145,56 @@ pub async fn update(u: User) -> Result<User, Error> {
user::external_id.eq(&u.external_id), user::external_id.eq(&u.external_id),
)) ))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, u.id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, u.id.to_string()))?;
})
.await??;
info!(user_id = %u.id, "User updated"); info!(user_id = %u.id, "User updated");
Ok(u) Ok(u)
} }
pub async fn set_password_hash(id: &Uuid, hash: &str) -> Result<User, Error> { pub async fn set_password_hash(id: &Uuid, hash: &str) -> Result<User, Error> {
let u = task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id; let u: User = diesel::update(user::dsl::user.find(&id))
let hash = hash.to_string();
move || -> Result<User, Error> {
let mut c = get_db_conn()?;
diesel::update(user::dsl::user.find(&id))
.set(user::password_hash.eq(&hash)) .set(user::password_hash.eq(&hash))
.get_result(&mut c) .get_result(&mut c)
.map_err(|e| Error::from_diesel(e, id.to_string())) .await
} .map_err(|e| Error::from_diesel(e, id.to_string()))?;
})
.await??;
info!(id = %id, "Password set"); info!(id = %id, "Password set");
Ok(u) Ok(u)
} }
pub async fn delete(id: &Uuid) -> Result<(), Error> { pub async fn delete(id: &Uuid) -> Result<(), Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
let id = *id;
move || -> Result<(), Error> {
let mut c = get_db_conn()?;
let ra = diesel::delete(user::dsl::user.find(&id)) let ra = diesel::delete(user::dsl::user.find(&id))
.execute(&mut c) .execute(&mut c)
.await
.map_err(|e| Error::from_diesel(e, id.to_string()))?; .map_err(|e| Error::from_diesel(e, id.to_string()))?;
if ra == 0 { if ra == 0 {
return Err(Error::NotFound(id.to_string())); return Err(Error::NotFound(id.to_string()));
} }
Ok(())
}
})
.await??;
info!(user_id = %id, "User deleted"); info!(user_id = %id, "User deleted");
Ok(()) Ok(())
} }
pub async fn get_count() -> Result<i64, Error> { pub async fn get_count() -> Result<i64, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<i64, Error> { let count = user::dsl::user
let mut c = get_db_conn()?; .select(dsl::count_star())
let count = user::dsl::user.select(dsl::count_star()).first(&mut c)?; .first(&mut c)
.await?;
Ok(count) Ok(count)
} }
})
.await?
}
pub async fn list(limit: i64, offset: i64) -> Result<Vec<User>, Error> { pub async fn list(limit: i64, offset: i64) -> Result<Vec<User>, Error> {
task::spawn_blocking({ let mut c = get_async_db_conn().await?;
move || -> Result<Vec<User>, Error> {
let mut c = get_db_conn()?;
let items = user::dsl::user let items = user::dsl::user
.order_by(user::dsl::email) .order_by(user::dsl::email)
.limit(limit) .limit(limit)
.offset(offset) .offset(offset)
.load(&mut c)?; .load(&mut c)
.await?;
Ok(items) Ok(items)
} }
})
.await?
}
// The output format is documented here: // The output format is documented here:
// https://github.com/P-H-C/phc-string-format/blob/master/phc-sf-spec.md#specification // https://github.com/P-H-C/phc-string-format/blob/master/phc-sf-spec.md#specification


@ -80,7 +80,7 @@ pub async fn prepare<'a>() -> std::sync::MutexGuard<'a, ()> {
storage::setup().await.unwrap();

// reset db
storage::reset_db().await.unwrap();

// flush redis db
storage::reset_redis().await.unwrap();


@ -1,17 +1,13 @@
FROM ghcr.io/cross-rs/aarch64-unknown-linux-musl:latest

ENV OPENSSL_VERSION=3.1.2
ENV OPENSSL_TARGET=linux-aarch64
ENV MUSL_PREFIX=aarch64-linux-musl

RUN apt-get update && \
apt-get --assume-yes install \
protobuf-compiler \
libprotobuf-dev

RUN echo "Building OpenSSL" && \
cd /tmp && \
@ -23,24 +19,4 @@ RUN echo "Building OpenSSL" && \
make install_sw && \ make install_sw && \
rm -r /tmp/* rm -r /tmp/*
RUN echo "Building zlib" && \
cd /tmp && \
curl -fLO "https://zlib.net/zlib-$ZLIB_VERSION.tar.gz" && \
tar xzf "zlib-$ZLIB_VERSION.tar.gz" && cd "zlib-$ZLIB_VERSION" && \
CC=$MUSL_PREFIX-gcc ./configure --static --prefix=/usr/local/$MUSL_PREFIX-target && \
make && make install && \
rm -r /tmp/*
RUN echo "Building libpq" && \
cd /tmp && \
curl -fLO "https://ftp.postgresql.org/pub/source/v$POSTGRESQL_VERSION/postgresql-$POSTGRESQL_VERSION.tar.gz" && \
tar xzf "postgresql-$POSTGRESQL_VERSION.tar.gz" && cd "postgresql-$POSTGRESQL_VERSION" && \
CC=$MUSL_PREFIX-gcc CPPFLAGS="-I/usr/local/$MUSL_PREFIX/include -I/usr/local/$MUSL_PREFIX-target/include" LDFLAGS="-L/usr/local/$MUSL_PREFIX/lib -L/usr/local/$MUSL_PREFIX-target/lib" ./configure --with-openssl --without-readline --prefix=/usr/local/$MUSL_PREFIX-target --host $POSTGRESQL_HOST && \
cd src/interfaces/libpq && make all-static-lib && make install-lib-static && \
rm -r /tmp/*
# Workaround for re-defined unicode_to_utf8 which also exists in quickjs library.
RUN /usr/aarch64-linux-gnu/bin/objcopy --redefine-sym unicode_to_utf8=unicode_to_utf8_rename /usr/local/$MUSL_PREFIX-target/lib/libpq.a
ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig

View File

@@ -1,17 +1,13 @@
 FROM ghcr.io/cross-rs/armv7-unknown-linux-musleabihf:latest
 
-ENV ZLIB_VERSION=1.3
-ENV POSTGRESQL_VERSION=11.21
 ENV OPENSSL_VERSION=3.1.2
 ENV OPENSSL_TARGET=linux-generic32
 ENV MUSL_PREFIX=arm-linux-musleabihf
-ENV POSTGRESQL_HOST=armv7-unknown-linux-musleabihf
 
 RUN apt-get update && \
     apt-get --assume-yes install \
     protobuf-compiler \
-    libprotobuf-dev \
-    binutils-arm-linux-gnueabihf
+    libprotobuf-dev
 
 RUN echo "Building OpenSSL" && \
     cd /tmp && \
@@ -23,24 +19,4 @@ RUN echo "Building OpenSSL" && \
     make install_sw && \
     rm -r /tmp/*
 
-RUN echo "Building zlib" && \
-    cd /tmp && \
-    curl -fLO "https://zlib.net/zlib-$ZLIB_VERSION.tar.gz" && \
-    tar xzf "zlib-$ZLIB_VERSION.tar.gz" && cd "zlib-$ZLIB_VERSION" && \
-    CC=$MUSL_PREFIX-gcc ./configure --static --prefix=/usr/local/$MUSL_PREFIX-target && \
-    make && make install && \
-    rm -r /tmp/*
-
-RUN echo "Building libpq" && \
-    cd /tmp && \
-    curl -fLO "https://ftp.postgresql.org/pub/source/v$POSTGRESQL_VERSION/postgresql-$POSTGRESQL_VERSION.tar.gz" && \
-    tar xzf "postgresql-$POSTGRESQL_VERSION.tar.gz" && cd "postgresql-$POSTGRESQL_VERSION" && \
-    CC=$MUSL_PREFIX-gcc CPPFLAGS="-I/usr/local/$MUSL_PREFIX/include -I/usr/local/$MUSL_PREFIX-target/include" LDFLAGS="-L/usr/local/$MUSL_PREFIX/lib -L/usr/local/$MUSL_PREFIX-target/lib" ./configure --with-openssl --without-readline --prefix=/usr/local/$MUSL_PREFIX-target --host $POSTGRESQL_HOST && \
-    cd src/interfaces/libpq && make all-static-lib && make install-lib-static && \
-    rm -r /tmp/*
-
-# Workaround for re-defined unicode_to_utf8 which also exists in quickjs library.
-RUN /usr/bin/arm-linux-gnueabihf-objcopy --redefine-sym unicode_to_utf8=unicode_to_utf8_rename /usr/local/$MUSL_PREFIX-target/lib/libpq.a
-
 ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig

View File

@@ -1,7 +1,5 @@
 FROM ghcr.io/cross-rs/x86_64-unknown-linux-musl:latest
 
-ENV ZLIB_VERSION=1.3
-ENV POSTGRESQL_VERSION=11.21
 ENV OPENSSL_VERSION=3.1.2
 ENV OPENSSL_TARGET=linux-x86_64
 ENV MUSL_PREFIX=x86_64-linux-musl
@@ -21,24 +19,4 @@ RUN echo "Building OpenSSL" && \
     make install_sw && \
     rm -r /tmp/*
 
-RUN echo "Building zlib" && \
-    cd /tmp && \
-    curl -fLO "https://zlib.net/zlib-$ZLIB_VERSION.tar.gz" && \
-    tar xzf "zlib-$ZLIB_VERSION.tar.gz" && cd "zlib-$ZLIB_VERSION" && \
-    CC=$MUSL_PREFIX-gcc ./configure --static --prefix=/usr/local/$MUSL_PREFIX-target && \
-    make && make install && \
-    rm -r /tmp/*
-
-RUN echo "Building libpq" && \
-    cd /tmp && \
-    curl -fLO "https://ftp.postgresql.org/pub/source/v$POSTGRESQL_VERSION/postgresql-$POSTGRESQL_VERSION.tar.gz" && \
-    tar xzf "postgresql-$POSTGRESQL_VERSION.tar.gz" && cd "postgresql-$POSTGRESQL_VERSION" && \
-    CC=$MUSL_PREFIX-gcc CPPFLAGS="-I/usr/local/$MUSL_PREFIX/include -I/usr/local/$MUSL_PREFIX-target/include" LDFLAGS="-L/usr/local/$MUSL_PREFIX/lib -L/usr/local/$MUSL_PREFIX-target/lib -L/usr/local/$MUSL_PREFIX-target/lib64" ./configure --with-openssl --without-readline --prefix=/usr/local/$MUSL_PREFIX-target && \
-    cd src/interfaces/libpq && make all-static-lib && make install-lib-static && \
-    rm -r /tmp/*
-
-# Workaround for re-defined unicode_to_utf8 which also exists in quickjs library.
-RUN objcopy --redefine-sym unicode_to_utf8=unicode_to_utf8_rename /usr/local/$MUSL_PREFIX-target/lib/libpq.a
-
 ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig:/usr/local/$MUSL_PREFIX-target/lib64/pkgconfig
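The three cross-build Dockerfiles above can drop the zlib and libpq builds (and the objcopy workaround) because the Postgres driver underneath diesel-async is tokio-postgres, a pure-Rust client, with TLS handled by rustls instead of an OpenSSL-linked libpq; rustls, rustls-native-certs and tokio-postgres-rustls all appear in the updated Cargo.lock. A rough sketch of that style of connection, assuming rustls 0.21-era APIs and not necessarily matching what this codebase does:

use rustls::{ClientConfig, RootCertStore};
use tokio_postgres_rustls::MakeRustlsConnect;

async fn connect(dsn: &str) -> Result<tokio_postgres::Client, Box<dyn std::error::Error>> {
    // Trust the platform certificate store via rustls-native-certs (no libpq, no OpenSSL).
    let mut roots = RootCertStore::empty();
    for cert in rustls_native_certs::load_native_certs()? {
        roots.add(&rustls::Certificate(cert.0))?;
    }
    let tls = MakeRustlsConnect::new(
        ClientConfig::builder()
            .with_safe_defaults()
            .with_root_certificates(roots)
            .with_no_client_auth(),
    );

    let (client, connection) = tokio_postgres::connect(dsn, tls).await?;
    // tokio-postgres hands back a connection future that must be polled separately.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("postgres connection error: {e}");
        }
    });
    Ok(client)
}

With no C client library left to link, only the OpenSSL build the images already had survives in these Dockerfiles.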

View File

@@ -16,7 +16,7 @@ hex = "0.4"
 cmac = { version = "0.7", optional = true }
 aes = { version = "0.8", optional = true }
 serde = { version = "1.0", features = ["derive"], optional = true }
-diesel = { version = "2.1", features = ["postgres"], optional = true }
+diesel = { version = "2.1", features = ["postgres_backend"], optional = true }
 
 # Error handling
 thiserror = "1.0"
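Switching this crate from the `postgres` feature to `postgres_backend` drops the libpq-backed `PgConnection` and keeps only the `Pg` backend and trait definitions, which is all a types-only crate needs to implement diesel's serialization traits; the connection itself now lives behind diesel-async. A sketch of the kind of impl this still allows, using a hypothetical `DevEui` newtype rather than one of the crate's real types and assuming diesel 2.x signatures:

use diesel::backend::Backend;
use diesel::deserialize::{self, FromSql};
use diesel::pg::Pg;
use diesel::serialize::{self, Output, ToSql};
use diesel::sql_types::Binary;

// Hypothetical newtype; real code would also derive AsExpression/FromSqlRow with
// #[diesel(sql_type = Binary)] so the type can be used directly in queries.
#[derive(Debug)]
pub struct DevEui(pub [u8; 8]);

impl ToSql<Binary, Pg> for DevEui {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        // Delegate to the existing byte-slice impl.
        <[u8] as ToSql<Binary, Pg>>::to_sql(&self.0, &mut out.reborrow())
    }
}

impl FromSql<Binary, Pg> for DevEui {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let bytes = <Vec<u8> as FromSql<Binary, Pg>>::from_sql(value)?;
        Ok(DevEui(bytes.as_slice().try_into()?))
    }
}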

View File

@@ -11,7 +11,6 @@ pkgs.mkShell {
     pkgs.perl
     pkgs.cmake
     pkgs.clang
-    pkgs.postgresql
     pkgs.openssl
   ];
   LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib";