mirror of https://github.com/chirpstack/chirpstack.git (synced 2025-06-10 19:51:37 +00:00)

Refactor code to use async redis.

This commit is contained in:
parent 8e2eda3d5b
commit 345d0d8462

Cargo.lock (generated): 63 changes
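For orientation: the pattern repeated throughout the diff below is that every blocking Redis call that used to run inside tokio::task::spawn_blocking with a pooled synchronous connection now awaits an async connection and uses query_async. A minimal sketch of the new call shape (illustrative only, not code from this commit; any type implementing redis::aio::ConnectionLike works, such as the deadpool-redis connection introduced here):

    use redis::aio::ConnectionLike;

    // Illustrative sketch: drive an async Redis connection with query_async + .await
    // instead of a blocking query() executed on a spawn_blocking worker thread.
    async fn read_key<C: ConnectionLike>(con: &mut C, key: &str) -> redis::RedisResult<Option<String>> {
        redis::cmd("GET").arg(key).query_async(con).await
    }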
@@ -758,6 +758,8 @@ dependencies = [
  "chirpstack_api",
  "chrono",
  "clap",
+ "deadpool-redis",
+ "deadpool-redis-cluster",
  "diesel",
  "diesel-async",
  "diesel_migrations",
@@ -789,7 +791,6 @@ dependencies = [
  "prometheus-client",
  "prost",
  "prost-types",
- "r2d2",
  "rand",
  "rand_core",
  "rdkafka",
@@ -1191,11 +1192,35 @@ dependencies = [
  "tokio",
 ]

+[[package]]
+name = "deadpool-redis"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84930e585871d35b8e06d3e03d03e3a8a4c5dc71afa4376c7cd5f9223e1da1ea"
+dependencies = [
+ "deadpool",
+ "redis",
+]
+
+[[package]]
+name = "deadpool-redis-cluster"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "857c968579c82072dff24f48969d24fb1daab4970d94bb740d87ebb13bc8c2c4"
+dependencies = [
+ "deadpool",
+ "redis",
+ "redis_cluster_async",
+]
+
 [[package]]
 name = "deadpool-runtime"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49"
+dependencies = [
+ "tokio",
+]

 [[package]]
 name = "der"
@@ -3354,17 +3379,6 @@ dependencies = [
  "proc-macro2",
 ]

-[[package]]
-name = "r2d2"
-version = "0.8.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
-dependencies = [
- "log",
- "parking_lot",
- "scheduled-thread-pool",
-]
-
 [[package]]
 name = "rand"
 version = "0.8.5"
@@ -3461,7 +3475,6 @@ dependencies = [
  "log",
  "percent-encoding",
  "pin-project-lite",
- "r2d2",
  "rand",
  "rustls",
  "rustls-native-certs",
@@ -3474,6 +3487,21 @@ dependencies = [
  "url",
 ]
+
+[[package]]
+name = "redis_cluster_async"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "093073cc58cbe376f3308c530edcda1a49ef980de1c32f3fa63622fc5c6f0fb9"
+dependencies = [
+ "crc16",
+ "futures",
+ "log",
+ "pin-project-lite",
+ "rand",
+ "redis",
+ "tokio",
+]

 [[package]]
 name = "redox_syscall"
 version = "0.4.1"
@@ -3832,15 +3860,6 @@ dependencies = [
  "windows-sys",
 ]

-[[package]]
-name = "scheduled-thread-pool"
-version = "0.2.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
-dependencies = [
- "parking_lot",
-]
-
 [[package]]
 name = "scoped-futures"
 version = "0.1.3"
@@ -2,9 +2,7 @@
 extern crate anyhow;

 use std::fs::File;
-use std::future::Future;
 use std::io::Read;
-use std::pin::Pin;
 use std::time::Duration;

 use aes_kw::Kek;
@@ -13,6 +11,7 @@ use chrono::{DateTime, Utc};
 use reqwest::header::{HeaderMap, AUTHORIZATION, CONTENT_TYPE};
 use reqwest::{Certificate, Identity};
 use serde::{Deserialize, Serialize};
+use tokio::sync::mpsc::Sender;
 use tokio::sync::oneshot::Receiver;
 use tracing::{debug, error, info, span, trace, Instrument, Level};

@@ -35,14 +34,6 @@ pub trait BasePayloadResultProvider {
     fn base_payload(&self) -> &BasePayloadResult;
 }

-pub type RequestLogFn = Box<
-    dyn Fn(
-            stream::BackendInterfacesRequest,
-        ) -> Pin<Box<dyn Future<Output = Result<()>> + Sync + Send>>
-        + Sync
-        + Send,
->;
-
 pub struct ClientConfig {
     pub sender_id: Vec<u8>,
     pub receiver_id: Vec<u8>,
@@ -63,7 +54,7 @@ pub struct ClientConfig {
     pub use_target_role_suffix: bool,

     // Request log function.
-    pub request_log_fn: Option<RequestLogFn>,
+    pub request_log_sender: Option<Sender<stream::BackendInterfacesRequest>>,
 }

 impl Default for ClientConfig {
@@ -78,7 +69,7 @@ impl Default for ClientConfig {
             authorization: None,
             async_timeout: Duration::from_secs(0),
             use_target_role_suffix: false,
-            request_log_fn: None,
+            request_log_sender: None,
         }
     }
 }
@@ -345,9 +336,12 @@ impl Client {
             be_req_log.request_error = format!("{:#}", e);
         }

-        if let Some(log_fn) = &self.config.request_log_fn {
-            if let Err(e) = log_fn(be_req_log).await {
-                error!(error = %e, "Log request error");
+        if let Some(tx) = &self.config.request_log_sender {
+            // We use try_send here as we don't want to delay the response in case
+            // there is no channel capacity. This would also log an error, proving
+            // feedback that there is a channel capacity issue.
+            if let Err(e) = tx.try_send(be_req_log) {
+                error!(error = %e, "Sending request-log to stream error");
             }
         }

@@ -1452,10 +1446,7 @@ pub mod test {
         let c = Client::new(ClientConfig {
             sender_id: vec![1, 2, 3],
             server: server.url("/"),
-            request_log_fn: Some(Box::new(move |log| {
-                let tx = tx.clone();
-                Box::pin(async move { tx.send(log).await.map_err(|e| anyhow!("{}", e)) })
-            })),
+            request_log_sender: Some(tx),
             ..Default::default()
         })
         .unwrap();
@@ -1517,10 +1508,7 @@ pub mod test {
         let c = Client::new(ClientConfig {
             sender_id: vec![1, 2, 3],
             server: server.url("/"),
-            request_log_fn: Some(Box::new(move |log| {
-                let tx = tx.clone();
-                Box::pin(async move { tx.send(log).await.map_err(|e| anyhow!("{}", e)) })
-            })),
+            request_log_sender: Some(tx),
             ..Default::default()
         })
         .unwrap();
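The ClientConfig change above replaces the boxed async RequestLogFn callback with a plain tokio mpsc Sender plus a non-blocking try_send. A hedged sketch of that pattern; the RequestLog struct and channel capacity are illustrative stand-ins, not the actual chirpstack_api types:

    use tokio::sync::mpsc;

    // Illustrative stand-in for stream::BackendInterfacesRequest.
    #[derive(Debug)]
    struct RequestLog {
        request_error: String,
    }

    // try_send returns immediately when the channel is full or closed, so logging
    // can never delay the HTTP response; the failure itself is only logged.
    fn forward_log(tx: &mpsc::Sender<RequestLog>, log: RequestLog) {
        if let Err(e) = tx.try_send(log) {
            eprintln!("Sending request-log to stream error: {}", e);
        }
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::channel::<RequestLog>(10);
        tokio::spawn(async move {
            while let Some(log) = rx.recv().await {
                // A background task drains the channel, e.g. into a Redis stream.
                println!("got request log: {:?}", log);
            }
        });
        forward_log(&tx, RequestLog { request_error: String::new() });
        // In a real program the receiver task outlives this function.
    }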
@@ -36,9 +36,10 @@ diesel_migrations = { version = "2.1" }
 diesel-async = { version = "0.4", features = ["deadpool", "postgres", "async-connection-wrapper"] }
 tokio-postgres = "0.7"
 tokio-postgres-rustls = "0.10.0"
-r2d2 = "0.8"
 bigdecimal = "0.4"
-redis = { version = "0.23", features = ["r2d2", "cluster", "tls-rustls"] }
+redis = { version = "0.23", features = ["cluster", "tls-rustls", "tokio-rustls-comp"] }
+deadpool-redis = "0.13"
+deadpool-redis-cluster = "0.1"

 # Logging
 tracing = "0.1"
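The new deadpool-redis / deadpool-redis-cluster dependencies provide the async connection pool behind get_async_redis_conn(). A minimal sketch of the non-cluster path, assuming the deadpool-redis 0.13 API (Config::from_url / create_pool); the real setup also has to cover cluster mode, TLS and key prefixes:

    use deadpool_redis::{Config, Runtime};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Build a pool once at startup...
        let cfg = Config::from_url("redis://127.0.0.1/");
        let pool = cfg.create_pool(Some(Runtime::Tokio1))?;

        // ...then check out async connections wherever they are needed.
        let mut conn = pool.get().await?;
        let _: () = redis::cmd("SET")
            .arg("example:key")
            .arg("value")
            .query_async(&mut conn)
            .await?;
        let v: String = redis::cmd("GET")
            .arg("example:key")
            .query_async(&mut conn)
            .await?;
        assert_eq!(v, "value");
        Ok(())
    }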
@@ -18,7 +18,7 @@ use crate::backend::{joinserver, keywrap, roaming};
 use crate::downlink::data_fns;
 use crate::helpers::errors::PrintFullError;
 use crate::storage::{
-    device_session, error::Error as StorageError, get_redis_conn, passive_roaming, redis_key,
+    device_session, error::Error as StorageError, get_async_redis_conn, passive_roaming, redis_key,
 };
 use crate::uplink::{
     data_sns, error::Error as UplinkError, helpers, join_sns, RoamingMetaData, UplinkFrameSet,
@@ -127,7 +127,7 @@ pub async fn _handle_request(bp: BasePayload, b: Vec<u8>) -> http::Response<hype
         }
     };

-    match roaming::get(&sender_id) {
+    match roaming::get(&sender_id).await {
         Ok(v) => v,
         Err(_) => {
             warn!("Unknown SenderID");
@@ -523,14 +523,11 @@ async fn _handle_xmit_data_req(
 }

 async fn handle_async_ans(bp: &BasePayload, b: &[u8]) -> Result<http::Response<hyper::Body>> {
-    task::spawn_blocking({
-        let b = b.to_vec();
     let transaction_id = bp.transaction_id;
-        move || -> Result<()> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let key = redis_key(format!("backend:async:{}", transaction_id));

-    c.new_pipeline()
+    redis::pipe()
         .atomic()
         .cmd("XADD")
         .arg(&key)
@@ -538,18 +535,14 @@ async fn handle_async_ans(bp: &BasePayload, b: &[u8]) -> Result<http::Response<h
         .arg(1_i64)
         .arg("*")
         .arg("pl")
-        .arg(&b)
+        .arg(b)
         .ignore()
         .cmd("EXPIRE")
         .arg(&key)
         .arg(30_i64)
         .ignore()
-        .query(&mut c)?;
-            Ok(())
-        }
-    })
-    .await??;
+        .query_async(&mut c)
+        .await?;

     Ok(warp::reply().into_response())
 }
@@ -560,11 +553,17 @@ pub async fn get_async_receiver(
 ) -> Result<oneshot::Receiver<Vec<u8>>> {
     let (tx, rx) = oneshot::channel();

-    task::spawn_blocking(move || -> Result<()> {
-        let mut c = get_redis_conn()?;
+    task::spawn(async move {
+        let mut c = match get_async_redis_conn().await {
+            Ok(v) => v,
+            Err(e) => {
+                error!(error = %e, "Get Redis connection error");
+                return;
+            }
+        };
         let key = redis_key(format!("backend:async:{}", transaction_id));

-        let srr: StreamReadReply = redis::cmd("XREAD")
+        let srr: StreamReadReply = match redis::cmd("XREAD")
             .arg("BLOCK")
             .arg(timeout.as_millis() as u64)
             .arg("COUNT")
@@ -572,7 +571,15 @@ pub async fn get_async_receiver(
             .arg("STREAMS")
             .arg(&key)
             .arg("0")
-            .query(&mut *c)?;
+            .query_async(&mut c)
+            .await
+        {
+            Ok(v) => v,
+            Err(e) => {
+                error!(error = %e, "Read from Redis Stream error");
+                return;
+            }
+        };

         for stream_key in &srr.keys {
             for stream_id in &stream_key.ids {
@@ -581,7 +588,7 @@ pub async fn get_async_receiver(
                     "pl" => {
                         if let redis::Value::Data(b) = v {
                             let _ = tx.send(b.to_vec());
-                            return Ok(());
+                            return;
                         }
                     }
                     _ => {
@@ -595,8 +602,6 @@ pub async fn get_async_receiver(
                 }
             }
         }
-
-        Ok(())
     });

     Ok(rx)
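The get_async_receiver() rewrite above swaps task::spawn_blocking for a plain tokio::task::spawn, because the XREAD BLOCK call now awaits on the async connection instead of parking a worker thread. A reduced sketch of the same shape (connection acquisition and error logging are simplified; the "pl" field name follows the code above):

    use redis::streams::StreamReadReply;
    use tokio::sync::oneshot;

    // A spawned async task waits on XREAD BLOCK (server side), then hands the
    // "pl" payload back through a oneshot channel.
    fn wait_for_payload<C>(mut c: C, key: String, block_ms: u64) -> oneshot::Receiver<Vec<u8>>
    where
        C: redis::aio::ConnectionLike + Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        tokio::task::spawn(async move {
            let srr: StreamReadReply = match redis::cmd("XREAD")
                .arg("BLOCK")
                .arg(block_ms)
                .arg("COUNT")
                .arg(1_usize)
                .arg("STREAMS")
                .arg(&key)
                .arg("0")
                .query_async(&mut c)
                .await
            {
                Ok(v) => v,
                Err(_) => return, // the real code logs the error and returns
            };
            for stream_key in &srr.keys {
                for stream_id in &stream_key.ids {
                    for (k, v) in &stream_id.map {
                        if k.as_str() == "pl" {
                            if let redis::Value::Data(b) = v {
                                let _ = tx.send(b.to_vec());
                                return;
                            }
                        }
                    }
                }
            }
        });
        rx
    }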
@@ -63,12 +63,6 @@ impl ToStatus for uuid::Error {
     }
 }

-impl ToStatus for r2d2::Error {
-    fn status(&self) -> Status {
-        Status::new(Code::Internal, format!("{:#}", self))
-    }
-}
-
 impl ToStatus for lrwn::Error {
     fn status(&self) -> Status {
         Status::new(Code::Internal, format!("{:#}", self))
@@ -3,13 +3,12 @@ use std::net::SocketAddr;

 use anyhow::{Context, Result};
 use diesel_async::RunQueryDsl;
-use tokio::task;
 use tracing::info;
 use warp::{http::Response, http::StatusCode, Filter};

 use crate::config;
 use crate::monitoring::prometheus;
-use crate::storage::{get_async_db_conn, get_redis_conn};
+use crate::storage::{get_async_db_conn, get_async_redis_conn};

 pub async fn setup() {
     let conf = config::get();
@@ -56,12 +55,8 @@ async fn _health_handler() -> Result<()> {
         .await
         .context("PostgreSQL connection error")?;

-    task::spawn_blocking(move || -> Result<()> {
-        let mut r = get_redis_conn()?;
-        if !r.check_connection() {
-            return Err(anyhow!("Redis connection error"));
-        }
+    let mut r = get_async_redis_conn().await?;
+    let _: String = redis::cmd("PING").query_async(&mut r).await?;
+
     Ok(())
-    })
-    .await?
 }
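The health handler now simply PINGs over the async connection. The same check in isolation, generic over any async Redis connection:

    use anyhow::Result;

    // PING returns the string "PONG" on a healthy connection; transport or pool
    // errors surface as Err and fail the health check.
    async fn redis_health(c: &mut impl redis::aio::ConnectionLike) -> Result<()> {
        let pong: String = redis::cmd("PING").query_async(c).await?;
        anyhow::ensure!(pong == "PONG", "unexpected PING reply: {}", pong);
        Ok(())
    }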
@@ -13,13 +13,12 @@ use openidconnect::{
 };
 use openidconnect::{EmptyAdditionalClaims, UserInfoClaims};
 use serde::{Deserialize, Serialize};
-use tokio::task;
 use tracing::{error, trace};
 use warp::{Rejection, Reply};

 use crate::config;
 use crate::helpers::errors::PrintFullError;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};

 pub type User = UserInfoClaims<EmptyAdditionalClaims, CoreGenderClaim>;

@@ -133,41 +132,30 @@ async fn get_client() -> Result<CoreClient> {
 }

 async fn store_nonce(state: &CsrfToken, nonce: &Nonce) -> Result<()> {
-    task::spawn_blocking({
-        let state = state.clone();
-        let nonce = nonce.clone();
-        move || -> Result<()> {
     trace!("Storing nonce");
     let key = redis_key(format!("auth:oidc:{}", state.secret()));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     redis::cmd("PSETEX")
         .arg(key)
         .arg(Duration::minutes(5).num_milliseconds())
         .arg(nonce.secret())
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;

     Ok(())
 }
-        }
-    })
-    .await?
-}

 async fn get_nonce(state: &CsrfToken) -> Result<Nonce> {
-    task::spawn_blocking({
-        let state = state.clone();
-        move || -> Result<Nonce> {
     trace!("Getting nonce");
     let key = redis_key(format!("auth:oidc:{}", state.secret()));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     let v: String = redis::cmd("GET")
         .arg(&key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Get nonce")?;

     Ok(Nonce::new(v))
 }
-        }
-    })
-    .await?
-}
@@ -11,7 +11,7 @@ lazy_static! {
     static ref CLIENTS: RwLock<Vec<(EUI64Prefix, Arc<Client>)>> = RwLock::new(vec![]);
 }

-pub fn setup() -> Result<()> {
+pub async fn setup() -> Result<()> {
     info!("Setting up Join Server clients");
     let conf = config::get();

@@ -28,9 +28,7 @@ pub fn setup() -> Result<()> {
             tls_cert: js.tls_cert.clone(),
             tls_key: js.tls_key.clone(),
             async_timeout: js.async_timeout,
-            request_log_fn: Some(Box::new(move |log| {
-                Box::pin(async move { stream::backend_interfaces::log_request(log).await })
-            })),
+            request_log_sender: stream::backend_interfaces::get_log_sender().await,
             ..Default::default()
         })?;

@@ -4,9 +4,9 @@ pub mod joinserver;
 pub mod keywrap;
 pub mod roaming;

-pub fn setup() -> Result<()> {
-    joinserver::setup()?;
-    roaming::setup()?;
+pub async fn setup() -> Result<()> {
+    joinserver::setup().await?;
+    roaming::setup().await?;

     Ok(())
 }
@@ -1,11 +1,12 @@
 use std::collections::HashMap;
 use std::io::Cursor;
 use std::str::FromStr;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;

 use anyhow::Result;
 use chrono::{Duration, DurationRound};
 use prost::Message;
+use tokio::sync::RwLock;
 use tracing::{debug, info, span, Level};

 use crate::gpstime::ToGpsTime;
@@ -18,7 +19,7 @@ lazy_static! {
     static ref CLIENTS: RwLock<HashMap<NetID, Arc<Client>>> = RwLock::new(HashMap::new());
 }

-pub fn setup() -> Result<()> {
+pub async fn setup() -> Result<()> {
     info!("Setting up roaming clients");
     let conf = config::get();

@@ -56,26 +57,24 @@ pub fn setup() -> Result<()> {
                 Some(s.authorization_header.clone())
             },
             async_timeout: s.async_timeout,
-            request_log_fn: Some(Box::new(move |log| {
-                Box::pin(async move { stream::backend_interfaces::log_request(log).await })
-            })),
+            request_log_sender: stream::backend_interfaces::get_log_sender().await,
         })?;

-        set(&s.net_id, c);
+        set(&s.net_id, c).await;
     }

     Ok(())
 }

-pub fn set(net_id: &NetID, c: Client) {
-    let mut clients_w = CLIENTS.write().unwrap();
+pub async fn set(net_id: &NetID, c: Client) {
+    let mut clients_w = CLIENTS.write().await;
     clients_w.insert(*net_id, Arc::new(c));
 }

-pub fn get(net_id: &NetID) -> Result<Arc<Client>> {
-    let clients_r = CLIENTS.write().unwrap();
+pub async fn get(net_id: &NetID) -> Result<Arc<Client>> {
+    let mut clients_w = CLIENTS.write().await;

-    if let Some(client) = clients_r.get(net_id) {
+    if let Some(client) = clients_w.get(net_id) {
         return Ok(client.clone());
     }

@@ -106,12 +105,14 @@ pub fn get(net_id: &NetID) -> Result<Arc<Client>> {
                 Some(conf.roaming.default.authorization_header.clone())
             },
             async_timeout: conf.roaming.default.async_timeout,
-            request_log_fn: Some(Box::new(move |log| {
-                Box::pin(async move { stream::backend_interfaces::log_request(log).await })
-            })),
+            request_log_sender: stream::backend_interfaces::get_log_sender().await,
         })?;

-        return Ok(Arc::new(c));
+        let c = Arc::new(c);
+        let c_out = c.clone();
+        clients_w.insert(*net_id, c);
+
+        return Ok(c_out);
     }

     Err(anyhow!(
@@ -330,8 +331,8 @@ pub fn dl_meta_data_to_uplink_rx_info(
 }

 #[cfg(test)]
-pub fn reset() {
-    let mut clients_w = CLIENTS.write().unwrap();
+pub async fn reset() {
+    let mut clients_w = CLIENTS.write().await;
     *clients_w = HashMap::new();
 }
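The roaming client registry above switches its lazy_static CLIENTS map from std::sync::RwLock to tokio::sync::RwLock, so the write guard can be held across .await while a new Client is built and cached. A toy sketch of the locking pattern, with u32 and String standing in for the real NetID and Client types:

    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::RwLock;

    lazy_static::lazy_static! {
        static ref CLIENTS: RwLock<HashMap<u32, Arc<String>>> = RwLock::new(HashMap::new());
    }

    // The async guard is acquired with .write().await instead of .write().unwrap(),
    // and it is legal to keep holding it while awaiting other futures.
    async fn get_or_insert(id: u32) -> Arc<String> {
        let mut w = CLIENTS.write().await;
        if let Some(c) = w.get(&id) {
            return c.clone();
        }
        let c = Arc::new(format!("client-{}", id)); // stand-in for building a Client
        w.insert(id, c.clone());
        c
    }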
@@ -13,7 +13,7 @@ pub async fn run() -> Result<()> {

     storage::setup().await?;
     region::setup()?;
-    backend::setup()?;
+    backend::setup().await?;
     adr::setup().await?;
     integration::setup().await?;
     gateway::backend::setup().await?;
@@ -1044,7 +1044,7 @@ impl Data {
         let roaming_meta = ufs.roaming_meta_data.as_ref().unwrap();

         let net_id = NetID::from_slice(&roaming_meta.base_payload.sender_id)?;
-        let client = roaming::get(&net_id)?;
+        let client = roaming::get(&net_id).await?;

         let mut req = backend::XmitDataReqPayload {
             phy_payload: self.downlink_frame.items[0].phy_payload.clone(),
@@ -19,13 +19,12 @@ use prost::Message;
 use rand::Rng;
 use serde::Serialize;
 use tokio::sync::mpsc;
-use tokio::task;
 use tracing::{error, info, trace};

 use super::GatewayBackend;
 use crate::config::GatewayBackendMqtt;
 use crate::monitoring::prometheus;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use crate::{downlink, uplink};
 use lrwn::region::CommonName;

@@ -462,9 +461,7 @@ async fn message_callback(
 }

 async fn is_locked(key: String) -> Result<bool> {
-    task::spawn_blocking({
-        move || -> Result<bool> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     let set: bool = redis::cmd("SET")
         .arg(key)
@@ -472,13 +469,11 @@ async fn is_locked(key: String) -> Result<bool> {
         .arg("PX")
         .arg(5000)
         .arg("NX")
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;

     Ok(!set)
 }
-    })
-    .await?
-}

 fn gateway_is_json(gateway_id: &str) -> bool {
     let gw_json_r = GATEWAY_JSON.read().unwrap();
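The is_locked() helper above keeps its SET ... PX ... NX deduplication lock, only executed asynchronously now. The same check in isolation; the lock value "lock" is an illustrative placeholder for whatever value the caller stores:

    use anyhow::Result;

    // SET key value PX 5000 NX succeeds (true) only for the first caller within
    // the 5 s window; every later caller sees false and treats the event as locked.
    async fn is_locked(c: &mut impl redis::aio::ConnectionLike, key: &str) -> Result<bool> {
        let set: bool = redis::cmd("SET")
            .arg(key)
            .arg("lock")
            .arg("PX")
            .arg(5000)
            .arg("NX")
            .query_async(c)
            .await?;
        Ok(!set)
    }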
@@ -3,33 +3,28 @@ use std::io::Cursor;
 use anyhow::{Context, Result};
 use chrono::{DateTime, Duration, Utc};
 use prost::Message;
-use tokio::task;
 use tracing::{info, trace};

-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use chirpstack_api::{gw, internal};
 use lrwn::EUI64;

 pub async fn get_geoloc_buffer(
     dev_eui: &EUI64,
-    ttl: &Duration,
+    ttl: Duration,
 ) -> Result<Vec<Vec<gw::UplinkRxInfo>>> {
-    if *ttl == Duration::zero() {
+    if ttl == Duration::zero() {
         return Ok(Vec::new());
     }

-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        let ttl = *ttl;
-
-        move || -> Result<Vec<Vec<gw::UplinkRxInfo>>> {
     trace!(dev_eui = %dev_eui, "Getting geolocation buffer");
     let key = redis_key(format!("device:{{{}}}:loracloud:buffer", dev_eui));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     let b: Vec<u8> = redis::cmd("GET")
         .arg(key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Get geolocation buffer")?;
     if b.is_empty() {
         return Ok(Vec::new());
@@ -70,9 +65,6 @@ pub async fn get_geoloc_buffer(

     Ok(out)
 }
-    })
-    .await?
-}

 pub async fn save_geoloc_buffer(
     dev_eui: &EUI64,
@@ -83,15 +75,9 @@ pub async fn save_geoloc_buffer(
         return Ok(());
     }

-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        let ttl = *ttl;
-        let items = items.to_vec();
-
-        move || -> Result<()> {
     trace!(dev_eui = %dev_eui, "Saving geolocation buffer");
     let key = redis_key(format!("device:{{{}}}:loracloud:buffer", dev_eui));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     let buffer = internal::LoraCloudGeolocBuffer {
         uplinks: items
@@ -106,12 +92,10 @@ pub async fn save_geoloc_buffer(
         .arg(key)
         .arg(ttl.num_milliseconds())
         .arg(b)
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;

     info!(dev_eui = %dev_eui, "Geolocation buffer saved");

     Ok(())
 }
-    })
-    .await?
-}
@@ -461,7 +461,7 @@ impl Integration {
         );

         let mut buf = vec![pl.rx_info.clone()];
-        buf.extend_from_slice(&buffer::get_geoloc_buffer(&dev_eui, &ttl).await?);
+        buf.extend_from_slice(&buffer::get_geoloc_buffer(&dev_eui, ttl).await?);
         buf.truncate(
             (self
                 .config
@@ -128,7 +128,7 @@ impl IntegrationTrait for Integration {
 #[cfg(test)]
 pub mod test {
     use super::*;
-    use crate::storage::get_redis_conn;
+    use crate::storage::get_async_redis_conn;
     use crate::test;
     use chirpstack_api::integration;
     use redis::streams::StreamReadReply;
@@ -150,7 +150,7 @@ pub mod test {
             ..Default::default()
         };
         i.uplink_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "up", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "up", &pl.encode_to_vec()).await;

         // join
         let pl = integration::JoinEvent {
@@ -162,7 +162,7 @@ pub mod test {
             ..Default::default()
         };
         i.join_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "join", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "join", &pl.encode_to_vec()).await;

         // ack
         let pl = integration::AckEvent {
@@ -174,7 +174,7 @@ pub mod test {
             ..Default::default()
         };
         i.ack_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "ack", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "ack", &pl.encode_to_vec()).await;

         // txack
         let pl = integration::TxAckEvent {
@@ -186,7 +186,7 @@ pub mod test {
             ..Default::default()
         };
         i.txack_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "txack", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "txack", &pl.encode_to_vec()).await;

         // log
         let pl = integration::LogEvent {
@@ -198,7 +198,7 @@ pub mod test {
             ..Default::default()
         };
         i.log_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "log", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "log", &pl.encode_to_vec()).await;

         // status
         let pl = integration::StatusEvent {
@@ -210,7 +210,7 @@ pub mod test {
             ..Default::default()
         };
         i.status_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "status", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "status", &pl.encode_to_vec()).await;

         // location
         let pl = integration::LocationEvent {
@@ -222,7 +222,7 @@ pub mod test {
             ..Default::default()
         };
         i.location_event(&HashMap::new(), &pl).await.unwrap();
-        last_id = assert_reply(&last_id, "location", &pl.encode_to_vec());
+        last_id = assert_reply(&last_id, "location", &pl.encode_to_vec()).await;

         // integration
         let pl = integration::IntegrationEvent {
@@ -234,18 +234,19 @@ pub mod test {
             ..Default::default()
         };
         i.integration_event(&HashMap::new(), &pl).await.unwrap();
-        let _ = assert_reply(&last_id, "integration", &pl.encode_to_vec());
+        let _ = assert_reply(&last_id, "integration", &pl.encode_to_vec()).await;
     }

-    fn assert_reply(last_id: &str, event: &str, b: &[u8]) -> String {
-        let mut c = get_redis_conn().unwrap();
+    async fn assert_reply(last_id: &str, event: &str, b: &[u8]) -> String {
+        let mut c = get_async_redis_conn().await.unwrap();
         let srr: StreamReadReply = redis::cmd("XREAD")
             .arg("COUNT")
             .arg(1 as usize)
             .arg("STREAMS")
             .arg("device:stream:event")
             .arg(&last_id)
-            .query(&mut *c)
+            .query_async(&mut c)
+            .await
             .unwrap();
         assert_eq!(1, srr.keys.len());

@@ -2,60 +2,46 @@ use std::io::Cursor;

 use anyhow::{Context, Result};
 use prost::Message;
-use tokio::task;
 use tracing::info;

-use super::{error::Error, get_redis_conn, redis_key};
+use super::{error::Error, get_async_redis_conn, redis_key};
 use crate::config;
 use chirpstack_api::internal;
 use lrwn::EUI64;

 pub async fn save_rx_info(rx_info: &internal::DeviceGatewayRxInfo) -> Result<()> {
     let dev_eui = EUI64::from_slice(&rx_info.dev_eui)?;
-    task::spawn_blocking({
-        let rx_info = rx_info.clone();
-        move || -> Result<()> {
     let conf = config::get();
     let key = redis_key(format!("device:{{{}}}:gwrx", dev_eui));
     let ttl = conf.network.device_session_ttl.as_millis() as usize;
     let b = rx_info.encode_to_vec();
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     redis::cmd("PSETEX")
         .arg(key)
         .arg(ttl)
         .arg(b)
-        .query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+        .query_async(&mut c)
+        .await?;

     info!(dev_eui = %dev_eui, "Gateway rx-info saved");
     Ok(())
 }

 pub async fn get_rx_info(dev_eui: &EUI64) -> Result<internal::DeviceGatewayRxInfo, Error> {
-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        move || -> Result<internal::DeviceGatewayRxInfo, Error> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let key = redis_key(format!("device:{{{}}}:gwrx", dev_eui));

     let b: Vec<u8> = redis::cmd("GET")
         .arg(key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Get rx-info")?;
     if b.is_empty() {
         return Err(Error::NotFound(dev_eui.to_string()));
     }

-    Ok(internal::DeviceGatewayRxInfo::decode(&mut Cursor::new(b))
-        .context("Decode rx-info")?)
-        }
-    })
-    .await?
+    Ok(internal::DeviceGatewayRxInfo::decode(&mut Cursor::new(b)).context("Decode rx-info")?)
 }

 pub async fn get_rx_info_for_dev_euis(
@@ -65,18 +51,16 @@ pub async fn get_rx_info_for_dev_euis(
         return Ok(Vec::new());
     }

-    task::spawn_blocking({
-        let dev_euis = dev_euis.to_vec();
-        move || -> Result<Vec<internal::DeviceGatewayRxInfo>, Error> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let mut keys: Vec<String> = Vec::new();
-    for dev_eui in &dev_euis {
+    for dev_eui in dev_euis {
         keys.push(redis_key(format!("device:{{{}}}:gwrx", dev_eui)));
     }

     let bb: Vec<Vec<u8>> = redis::cmd("MGET")
         .arg(keys)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("MGET")?;
     let mut out: Vec<internal::DeviceGatewayRxInfo> = Vec::new();
     for b in bb {
@@ -85,15 +69,11 @@ pub async fn get_rx_info_for_dev_euis(
         }

         out.push(
-            internal::DeviceGatewayRxInfo::decode(&mut Cursor::new(b))
-                .context("Decode rx-info")?,
+            internal::DeviceGatewayRxInfo::decode(&mut Cursor::new(b)).context("Decode rx-info")?,
         );
     }
     Ok(out)
 }
-    })
-    .await?
-}

 #[cfg(test)]
 pub mod test {
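Several storage helpers above share one shape: GET a byte blob, treat an empty reply as not-found, then prost-decode it. A generic sketch of that shape (the mapping to the crate's storage::Error type is omitted and the function name is illustrative):

    use std::io::Cursor;

    use anyhow::{Context, Result};
    use prost::Message;

    // Fetch a protobuf blob (e.g. internal::DeviceGatewayRxInfo) from Redis and
    // decode it; an empty reply is reported as None rather than a decode error.
    async fn get_proto<T: Message + Default>(
        c: &mut impl redis::aio::ConnectionLike,
        key: &str,
    ) -> Result<Option<T>> {
        let b: Vec<u8> = redis::cmd("GET")
            .arg(key)
            .query_async(c)
            .await
            .context("GET")?;
        if b.is_empty() {
            return Ok(None);
        }
        Ok(Some(T::decode(&mut Cursor::new(b)).context("Decode")?))
    }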
@@ -3,11 +3,10 @@ use std::io::Cursor;

 use anyhow::{Context, Result};
 use prost::Message;
-use tokio::task;
 use tracing::{error, info, trace};

 use super::error::Error;
-use super::{get_redis_conn, redis_key};
+use super::{get_async_redis_conn, redis_key};
 use crate::api::helpers::FromProto;
 use crate::config;
 use crate::helpers::errors::PrintFullError;
@@ -24,18 +23,15 @@ pub async fn save(ds: &internal::DeviceSession) -> Result<()> {
     let eui = EUI64::from_slice(&ds.dev_eui)?;
     let addr = DevAddr::from_slice(&ds.dev_addr)?;

-    task::spawn_blocking({
-        let ds = ds.clone();
-        move || -> Result<()> {
     let conf = config::get();
     let addr_key = redis_key(format!("devaddr:{{{}}}", addr));
     let ds_key = redis_key(format!("device:{{{}}}:ds", eui));
     let b = ds.encode_to_vec();
     let ttl = conf.network.device_session_ttl.as_millis() as usize;
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     // Atomic add and pexpire.
-    c.new_pipeline()
+    redis::pipe()
         .atomic()
         .cmd("SADD")
         .arg(&addr_key)
@@ -45,7 +41,8 @@ pub async fn save(ds: &internal::DeviceSession) -> Result<()> {
         .arg(&addr_key)
         .arg(ttl)
         .ignore()
-        .query(&mut c)?;
+        .query_async(&mut c)
+        .await?;

     // In case there is a pending rejoin session, make sure that the new
     // DevAddr also resolves to the device-session.
@@ -53,7 +50,7 @@ pub async fn save(ds: &internal::DeviceSession) -> Result<()> {
         let pending_addr = DevAddr::from_slice(&pending_ds.dev_addr)?;
         let pending_addr_key = redis_key(format!("devaddr:{{{}}}", pending_addr));

-        c.new_pipeline()
+        redis::pipe()
             .atomic()
             .cmd("SADD")
             .arg(&pending_addr_key)
@@ -63,33 +60,28 @@ pub async fn save(ds: &internal::DeviceSession) -> Result<()> {
             .arg(&pending_addr_key)
             .arg(ttl)
             .ignore()
-            .query(&mut c)?;
+            .query_async(&mut c)
+            .await?;
     }

     redis::cmd("PSETEX")
         .arg(ds_key)
         .arg(ttl)
         .arg(b)
-        .query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+        .query_async(&mut c)
+        .await?;

     info!(dev_eui = %eui, dev_addr = %addr, "Device-session saved");
     Ok(())
 }

 pub async fn get(dev_eui: &EUI64) -> Result<chirpstack_api::internal::DeviceSession, Error> {
-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        move || -> Result<chirpstack_api::internal::DeviceSession, Error> {
     let key = redis_key(format!("device:{{{}}}:ds", dev_eui));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let v: Vec<u8> = redis::cmd("GET")
         .arg(key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Get device-session")?;
     if v.is_empty() {
         return Err(Error::NotFound(dev_eui.to_string()));
@@ -98,22 +90,12 @@ pub async fn get(dev_eui: &EUI64) -> Result<chirpstack_api::internal::DeviceSess
         .context("Decode device-session")?;
     Ok(ds)
 }
-    })
-    .await?
-}

 pub async fn delete(dev_eui: &EUI64) -> Result<()> {
-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        move || -> Result<()> {
     let key = redis_key(format!("device:{{{}}}:ds", dev_eui));
-            let mut c = get_redis_conn()?;
-            redis::cmd("DEL").arg(&key).query(&mut *c)?;
+    let mut c = get_async_redis_conn().await?;
+    redis::cmd("DEL").arg(&key).query_async(&mut c).await?;

-            Ok(())
-        }
-    })
-    .await??;
     info!(dev_eui = %dev_eui, "Device-session deleted");
     Ok(())
 }
@@ -204,7 +186,7 @@ pub async fn get_for_phypayload_and_incr_f_cnt_up(
     // Make sure that in case of concurrent calls for the same uplink only one will
     // pass. Either the concurrent call would read the incremented uplink frame-counter
     // or it is unable to aquire the lock.
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let lock_key = redis_key(format!(
         "device:{{{}}}:ds:lock:{}",
         hex::encode(&ds.dev_eui),
@@ -216,7 +198,8 @@ pub async fn get_for_phypayload_and_incr_f_cnt_up(
         .arg("EX")
         .arg(1_usize)
         .arg("NX")
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;

     if !set {
         return Ok(ValidationStatus::Retransmission(full_f_cnt, ds));
@@ -311,41 +294,32 @@ pub async fn get_for_phypayload(
 }

 async fn get_dev_euis_for_dev_addr(dev_addr: DevAddr) -> Result<Vec<EUI64>> {
-    task::spawn_blocking({
-        move || -> Result<Vec<EUI64>> {
     let key = redis_key(format!("devaddr:{{{}}}", dev_addr));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let dev_euis: HashSet<Vec<u8>> = redis::cmd("SMEMBERS")
         .arg(key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Get DevEUIs for DevAddr")?;

     let mut out = Vec::new();
     for dev_eui in &dev_euis {
         out.push(EUI64::from_slice(dev_eui)?);
     }

     Ok(out)
 }
-    })
-    .await?
-}

 async fn remove_dev_eui_from_dev_addr_set(dev_addr: DevAddr, dev_eui: EUI64) -> Result<()> {
-    task::spawn_blocking({
-        move || -> Result<()> {
     let key = redis_key(format!("devaddr:{{{}}}", dev_addr));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     redis::cmd("SREM")
         .arg(key)
         .arg(&dev_eui.to_be_bytes())
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;

     Ok(())
 }
-    })
-    .await?
-}

 async fn get_for_dev_addr(dev_addr: DevAddr) -> Result<Vec<internal::DeviceSession>> {
     trace!(dev_addr = %dev_addr, "Getting device-session for DevAddr");
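The device-session writes above replace the pool-specific c.new_pipeline() helper with the stock redis::pipe() builder, executed atomically (MULTI/EXEC) via query_async. The same SADD + PEXPIRE pairing in isolation, with the key and TTL passed in by the caller:

    use anyhow::Result;

    // Atomically add a member to a set and refresh the set's TTL in one round trip.
    async fn sadd_with_ttl(
        c: &mut impl redis::aio::ConnectionLike,
        key: &str,
        member: &[u8],
        ttl_ms: usize,
    ) -> Result<()> {
        let _: () = redis::pipe()
            .atomic()
            .cmd("SADD")
            .arg(key)
            .arg(member)
            .ignore()
            .cmd("PEXPIRE")
            .arg(key)
            .arg(ttl_ms)
            .ignore()
            .query_async(c)
            .await?;
        Ok(())
    }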
@@ -2,43 +2,36 @@ use std::io::Cursor;

 use anyhow::Result;
 use prost::Message;
-use tokio::task;
 use tracing::info;

-use super::{error::Error, get_redis_conn, redis_key};
+use super::{error::Error, get_async_redis_conn, redis_key};
 use chirpstack_api::internal;

 pub async fn save(df: &internal::DownlinkFrame) -> Result<()> {
-    task::spawn_blocking({
-        let df = df.clone();
-        move || -> Result<()> {
     let b = df.encode_to_vec();
     let key = redis_key(format!("frame:{}", df.downlink_id));
-            let mut c = get_redis_conn()?;
-            redis::cmd("SETEX").arg(key).arg(30).arg(b).query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+    let mut c = get_async_redis_conn().await?;
+    redis::cmd("SETEX")
+        .arg(key)
+        .arg(30)
+        .arg(b)
+        .query_async(&mut c)
+        .await?;

     info!(downlink_id = df.downlink_id, "Downlink-frame saved");
     Ok(())
 }

 pub async fn get(id: u32) -> Result<internal::DownlinkFrame, Error> {
-    task::spawn_blocking({
-        move || -> Result<internal::DownlinkFrame, Error> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let key = redis_key(format!("frame:{}", id));
-    let v: Vec<u8> = redis::cmd("GET").arg(key).query(&mut *c)?;
+    let v: Vec<u8> = redis::cmd("GET").arg(key).query_async(&mut c).await?;
     if v.is_empty() {
         return Err(Error::NotFound(format!("{}", id)));
     }
     let df = internal::DownlinkFrame::decode(&mut Cursor::new(v))?;
     Ok(df)
 }
-    })
-    .await?
-}

 #[cfg(test)]
 pub mod test {
@@ -1,18 +1,13 @@
 use anyhow::Result;
-use tokio::task;
 use tracing::info;
 
-use super::{get_redis_conn, redis_key};
+use super::{get_async_redis_conn, redis_key};
 use crate::config;
 use lrwn::EUI64;
 
 pub async fn set_pending(dev_eui: &EUI64, cid: lrwn::CID, set: &lrwn::MACCommandSet) -> Result<()> {
-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        let set = set.clone();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     let key = redis_key(format!("device:{}:mac:pending:{}", dev_eui, cid.to_u8()));
     let ttl = conf.network.device_session_ttl.as_millis() as usize;
@@ -22,22 +17,17 @@ pub async fn set_pending(dev_eui: &EUI64, cid: lrwn::CID, set: &lrwn::MACCommand
         .arg(key)
         .arg(ttl)
         .arg(b)
-        .query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+        .query_async(&mut c)
+        .await?;
 
     info!(dev_eui = %dev_eui, cid = %cid, "Pending mac-command block set");
     Ok(())
 }
 
 pub async fn get_pending(dev_eui: &EUI64, cid: lrwn::CID) -> Result<Option<lrwn::MACCommandSet>> {
-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        move || -> Result<Option<lrwn::MACCommandSet>> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let key = redis_key(format!("device:{}:mac:pending:{}", dev_eui, cid.to_u8()));
-    let b: Vec<u8> = redis::cmd("GET").arg(key).query(&mut *c)?;
+    let b: Vec<u8> = redis::cmd("GET").arg(key).query_async(&mut c).await?;
 
     let out = if !b.is_empty() {
         let mut mac = lrwn::MACCommandSet::from_slice(&b);
@@ -53,22 +43,13 @@ pub async fn get_pending(dev_eui: &EUI64, cid: lrwn::CID) -> Result<Option<lrwn:
 
     Ok(out)
-        }
-    })
-    .await?
 }
 
 pub async fn delete_pending(dev_eui: &EUI64, cid: lrwn::CID) -> Result<()> {
-    task::spawn_blocking({
-        let dev_eui = *dev_eui;
-        move || -> Result<()> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let key = redis_key(format!("device:{}:mac:pending:{}", dev_eui, cid.to_u8()));
 
-    redis::cmd("DEL").arg(key).query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+    redis::cmd("DEL").arg(key).query_async(&mut c).await?;
 
     info!(dev_eui = %dev_eui, cid = %cid, "Pending mac-command block deleted");
     Ok(())
 }
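The hunk above shows the transformation this commit applies everywhere: the synchronous pooled connection plus the task::spawn_blocking wrapper (and the argument clones it forced) are replaced by an async pooled connection and query_async. A minimal, hedged sketch of that shape, generic over any redis::aio::ConnectionLike; the key, value and TTL parameters below are illustrative and not ChirpStack APIs:

use anyhow::Result;
use redis::aio::ConnectionLike;

// Store a value under a key with a millisecond TTL, entirely on the async API.
// No spawn_blocking wrapper and no 'static closure capture is needed anymore.
async fn psetex<C: ConnectionLike>(c: &mut C, key: &str, value: &[u8], ttl_ms: usize) -> Result<()> {
    redis::cmd("PSETEX")
        .arg(key)
        .arg(ttl_ms)
        .arg(value)
        .query_async::<_, ()>(c)
        .await?;
    Ok(())
}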
@@ -5,10 +5,9 @@ use std::time::Duration;
 use anyhow::Result;
 use chrono::{DateTime, Datelike, Duration as ChronoDuration, Local, TimeZone, Timelike};
 use serde::{Deserialize, Serialize};
-use tokio::task;
 use tracing::info;
 
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 
 #[allow(clippy::upper_case_acronyms)]
 #[allow(non_camel_case_types)]
@@ -69,23 +68,16 @@ fn get_key(name: &str, a: Aggregation, dt: DateTime<Local>) -> String {
 }
 
 pub async fn save_state(name: &str, state: &str) -> Result<()> {
-    task::spawn_blocking({
     let key = redis_key(format!("metrics:{{{}}}", name));
-        let state = state.to_string();
     let ttl = get_ttl(Aggregation::MONTH);
-
-        move || -> Result<()> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     redis::cmd("PSETEX")
         .arg(key)
        .arg(ttl.as_millis() as usize)
        .arg(state)
-        .query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+        .query_async(&mut c)
+        .await?;
 
     info!(state = %state, "State saved");
     Ok(())
@@ -104,9 +96,6 @@ async fn save_for_interval(a: Aggregation, name: &str, record: &Record) -> Resul
         return Ok(());
     }
 
-    task::spawn_blocking({
-        let name = name.to_string();
-        let record = record.clone();
     let ttl = get_ttl(a);
 
     let ts: DateTime<Local> = match a {
@@ -135,10 +124,9 @@ async fn save_for_interval(a: Aggregation, name: &str, record: &Record) -> Resul
             .unwrap(),
     };
 
-        move || -> Result<()> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let key = get_key(&name, a, ts);
-    let mut pipe = c.new_pipeline();
+    let mut pipe = redis::pipe();
     pipe.atomic();
 
     for (k, v) in &record.metrics {
@@ -168,27 +156,19 @@ async fn save_for_interval(a: Aggregation, name: &str, record: &Record) -> Resul
        .arg(&key)
        .arg(ttl.as_millis() as usize)
        .ignore()
-        .query(&mut c)?;
+        .query_async(&mut c)
+        .await?;
 
-            Ok(())
-        }
-    })
-    .await??;
-
     info!(name = %name, aggregation = %a, "Metrics saved");
     Ok(())
 }
 
 pub async fn get_state(name: &str) -> Result<String> {
-    task::spawn_blocking({
     let key = redis_key(format!("metrics:{{{}}}", name));
-        move || -> Result<String> {
-            let mut c = get_redis_conn()?;
-            let v: Option<String> = redis::cmd("GET").arg(key).query(&mut *c)?;
+    let mut c = get_async_redis_conn().await?;
+    let v: Option<String> = redis::cmd("GET").arg(key).query_async(&mut c).await?;
 
     Ok(v.unwrap_or_default())
-        }
-    })
-    .await?
 }
 
 pub async fn get(
     name: &str,
@@ -278,17 +258,14 @@ pub async fn get(
         return Ok(Vec::new());
     }
 
-    task::spawn_blocking({
-        let keys = keys.clone();
-        move || -> Result<Vec<Record>> {
-            let mut c = get_redis_conn()?;
-            let mut pipe = c.new_pipeline();
+    let mut c = get_async_redis_conn().await?;
+    let mut pipe = redis::pipe();
 
     for k in &keys {
        pipe.cmd("HGETALL").arg(k);
     }
 
-    let res: Vec<HashMap<String, f64>> = pipe.query(&mut c)?;
+    let res: Vec<HashMap<String, f64>> = pipe.query_async(&mut c).await?;
     let mut out: Vec<Record> = Vec::new();
 
     for (i, r) in res.iter().enumerate() {
@@ -324,9 +301,6 @@ pub async fn get(
 
     Ok(out)
-        }
-    })
-    .await?
 }
 
 #[cfg(test)]
 pub mod test {
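save_for_interval above now builds a plain redis::pipe() instead of the connection-specific pipeline wrapper that the old code needed. A hedged sketch of that atomic HINCRBYFLOAT + PEXPIRE pipeline over an async connection; the key, field and TTL names are illustrative:

use anyhow::Result;
use redis::aio::ConnectionLike;

// Increment one hash field and refresh the key's TTL in a single MULTI/EXEC pipeline.
async fn incr_metric<C: ConnectionLike>(c: &mut C, key: &str, field: &str, v: f64, ttl_ms: usize) -> Result<()> {
    redis::pipe()
        .atomic()
        .cmd("HINCRBYFLOAT")
        .arg(key)
        .arg(field)
        .arg(v)
        .ignore()
        .cmd("PEXPIRE")
        .arg(key)
        .arg(ttl_ms)
        .ignore()
        .query_async::<_, ()>(c)
        .await?;
    Ok(())
}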
@@ -1,6 +1,5 @@
 use std::fs::File;
 use std::io::BufReader;
-use std::ops::{Deref, DerefMut};
 use std::sync::RwLock;
 
 use anyhow::Context;
@@ -13,7 +12,8 @@ use diesel_async::AsyncPgConnection;
 use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
 use futures_util::future::BoxFuture;
 use futures_util::FutureExt;
-use r2d2::{Pool, PooledConnection};
+use redis::aio::ConnectionLike;
+use tokio::sync::RwLock as TokioRwLock;
 use tokio::task;
 use tracing::{error, info};
 
@@ -48,129 +48,48 @@ pub type AsyncPgPoolConnection = DeadpoolObject<AsyncPgConnection>;
 
 lazy_static! {
     static ref ASYNC_PG_POOL: RwLock<Option<AsyncPgPool>> = RwLock::new(None);
-    static ref REDIS_POOL: RwLock<Option<RedisPool>> = RwLock::new(None);
+    static ref ASYNC_REDIS_POOL: TokioRwLock<Option<AsyncRedisPool>> = TokioRwLock::new(None);
     static ref REDIS_PREFIX: RwLock<String> = RwLock::new("".to_string());
 }
 
 pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
 
-pub enum RedisPool {
-    Client(Pool<redis::Client>),
-    ClusterClient(Pool<redis::cluster::ClusterClient>),
+#[derive(Clone)]
+pub enum AsyncRedisPool {
+    Client(deadpool_redis::Pool),
+    ClusterClient(deadpool_redis_cluster::Pool),
 }
 
-pub enum RedisPoolConnection {
-    Client(PooledConnection<redis::Client>),
-    ClusterClient(PooledConnection<redis::cluster::ClusterClient>),
+pub enum AsyncRedisPoolConnection {
+    Client(deadpool_redis::Connection),
+    ClusterClient(deadpool_redis_cluster::Connection),
 }
 
-impl RedisPoolConnection {
-    pub fn new_pipeline(&self) -> RedisPipeline {
+impl ConnectionLike for AsyncRedisPoolConnection {
+    fn req_packed_command<'a>(
+        &'a mut self,
+        cmd: &'a redis::Cmd,
+    ) -> redis::RedisFuture<'a, redis::Value> {
         match self {
-            RedisPoolConnection::Client(_) => RedisPipeline::Pipeline(redis::pipe()),
-            RedisPoolConnection::ClusterClient(_) => {
-                RedisPipeline::ClusterPipeline(redis::cluster::cluster_pipe())
-            }
+            AsyncRedisPoolConnection::Client(v) => v.req_packed_command(cmd),
+            AsyncRedisPoolConnection::ClusterClient(v) => v.req_packed_command(cmd),
         }
     }
-}
-
-impl Deref for RedisPoolConnection {
-    type Target = dyn redis::ConnectionLike;
-
-    fn deref(&self) -> &Self::Target {
+    fn req_packed_commands<'a>(
+        &'a mut self,
+        cmd: &'a redis::Pipeline,
+        offset: usize,
+        count: usize,
+    ) -> redis::RedisFuture<'a, Vec<redis::Value>> {
         match self {
-            RedisPoolConnection::Client(v) => v.deref() as &dyn redis::ConnectionLike,
-            RedisPoolConnection::ClusterClient(v) => v.deref() as &dyn redis::ConnectionLike,
+            AsyncRedisPoolConnection::Client(v) => v.req_packed_commands(cmd, offset, count),
+            AsyncRedisPoolConnection::ClusterClient(v) => v.req_packed_commands(cmd, offset, count),
         }
     }
-}
-
-impl DerefMut for RedisPoolConnection {
-    fn deref_mut(&mut self) -> &mut Self::Target {
+    fn get_db(&self) -> i64 {
         match self {
-            RedisPoolConnection::Client(v) => v.deref_mut() as &mut dyn redis::ConnectionLike,
-            RedisPoolConnection::ClusterClient(v) => {
-                v.deref_mut() as &mut dyn redis::ConnectionLike
-            }
+            AsyncRedisPoolConnection::Client(v) => v.get_db(),
+            AsyncRedisPoolConnection::ClusterClient(v) => v.get_db(),
         }
     }
-}
-
-pub enum RedisPipeline {
-    Pipeline(redis::Pipeline),
-    ClusterPipeline(redis::cluster::ClusterPipeline),
-}
-
-impl RedisPipeline {
-    pub fn cmd(&mut self, name: &str) -> &mut Self {
-        match self {
-            RedisPipeline::Pipeline(p) => {
-                p.cmd(name);
-            }
-            RedisPipeline::ClusterPipeline(p) => {
-                p.cmd(name);
-            }
-        }
-        self
-    }
-
-    pub fn arg<T: redis::ToRedisArgs>(&mut self, arg: T) -> &mut Self {
-        match self {
-            RedisPipeline::Pipeline(p) => {
-                p.arg(arg);
-            }
-            RedisPipeline::ClusterPipeline(p) => {
-                p.arg(arg);
-            }
-        }
-        self
-    }
-
-    pub fn ignore(&mut self) -> &mut Self {
-        match self {
-            RedisPipeline::Pipeline(p) => {
-                p.ignore();
-            }
-            RedisPipeline::ClusterPipeline(p) => {
-                p.ignore();
-            }
-        }
-        self
-    }
-
-    pub fn atomic(&mut self) -> &mut Self {
-        match self {
-            RedisPipeline::Pipeline(p) => {
-                p.atomic();
-            }
-            RedisPipeline::ClusterPipeline(_) => {
-                // TODO: ClusterPipeline does not (yet?) provide .atomic() method.
-                // https://github.com/redis-rs/redis-rs/issues/731
-            }
-        }
-        self
-    }
-
-    pub fn query<T: redis::FromRedisValue>(
-        &mut self,
-        con: &mut RedisPoolConnection,
-    ) -> redis::RedisResult<T> {
-        match self {
-            RedisPipeline::Pipeline(p) => {
-                if let RedisPoolConnection::Client(c) = con {
-                    p.query(&mut **c)
-                } else {
-                    panic!("Mismatch between RedisPipeline and RedisPoolConnection")
-                }
-            }
-            RedisPipeline::ClusterPipeline(p) => {
-                if let RedisPoolConnection::ClusterClient(c) = con {
-                    p.query(c)
-                } else {
-                    panic!("Mismatch between RedisPipeline and RedisPoolConnection")
-                }
-            }
-        }
     }
 }
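Because AsyncRedisPoolConnection now implements redis::aio::ConnectionLike, every redis::Cmd and redis::Pipeline can be sent to it directly, and the hand-written RedisPipeline wrapper becomes unnecessary. A hedged usage sketch, assuming it is compiled inside the chirpstack crate where the get_async_redis_conn function added by this commit is in scope:

use anyhow::Result;

// The caller never needs to know whether the pool wraps a single-node client
// or a cluster client; both variants run the same command path.
async fn redis_ping() -> Result<String> {
    let mut c = crate::storage::get_async_redis_conn().await?;
    let pong: String = redis::cmd("PING").query_async(&mut c).await?;
    Ok(pong)
}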
@@ -194,29 +113,17 @@ pub async fn setup() -> Result<()> {
 
     info!("Setting up Redis client");
     if conf.redis.cluster {
-        let client = redis::cluster::ClusterClientBuilder::new(conf.redis.servers.clone())
-            .build()
-            .context("ClusterClient open")?;
-        let pool: r2d2::Pool<redis::cluster::ClusterClient> = r2d2::Pool::builder()
-            .max_size(conf.redis.max_open_connections)
-            .min_idle(match conf.redis.min_idle_connections {
-                0 => None,
-                _ => Some(conf.redis.min_idle_connections),
-            })
-            .build(client)
-            .context("Building Redis pool")?;
-        set_redis_pool(RedisPool::ClusterClient(pool));
+        let pool = deadpool_redis_cluster::Config::from_urls(conf.redis.servers.clone())
+            .builder()?
+            .max_size(conf.redis.max_open_connections as usize)
+            .build()?;
+        set_async_redis_pool(AsyncRedisPool::ClusterClient(pool)).await;
     } else {
-        let client = redis::Client::open(conf.redis.servers[0].clone()).context("Redis client")?;
-        let pool: r2d2::Pool<redis::Client> = r2d2::Pool::builder()
-            .max_size(conf.redis.max_open_connections)
-            .min_idle(match conf.redis.min_idle_connections {
-                0 => None,
-                _ => Some(conf.redis.min_idle_connections),
-            })
-            .build(client)
-            .context("Building Redis pool")?;
-        set_redis_pool(RedisPool::Client(pool));
+        let pool = deadpool_redis::Config::from_url(conf.redis.servers[0].clone())
+            .builder()?
+            .max_size(conf.redis.max_open_connections as usize)
+            .build()?;
+        set_async_redis_pool(AsyncRedisPool::Client(pool)).await;
     }
 
     if !conf.redis.key_prefix.is_empty() {
@@ -289,14 +196,23 @@ pub async fn get_async_db_conn() -> Result<AsyncPgPoolConnection> {
     Ok(pool.get().await?)
 }
 
-pub fn get_redis_conn() -> Result<RedisPoolConnection> {
-    let pool_r = REDIS_POOL.read().unwrap();
-    let pool = pool_r
+async fn get_async_redis_pool() -> Result<AsyncRedisPool> {
+    let pool_r = ASYNC_REDIS_POOL.read().await;
+    let pool: AsyncRedisPool = pool_r
         .as_ref()
-        .ok_or_else(|| anyhow!("Redis connection pool is not initialized (yet)"))?;
+        .ok_or_else(|| anyhow!("Redis connection pool is not initialized"))?
+        .clone();
+    Ok(pool)
+}
+
+pub async fn get_async_redis_conn() -> Result<AsyncRedisPoolConnection> {
+    let pool = get_async_redis_pool().await?;
+
     Ok(match pool {
-        RedisPool::Client(v) => RedisPoolConnection::Client(v.get()?),
-        RedisPool::ClusterClient(v) => RedisPoolConnection::ClusterClient(v.get()?),
+        AsyncRedisPool::Client(v) => AsyncRedisPoolConnection::Client(v.get().await?),
+        AsyncRedisPool::ClusterClient(v) => {
+            AsyncRedisPoolConnection::ClusterClient(v.clone().get().await?)
+        }
     })
 }
 
@@ -322,8 +238,8 @@ pub async fn run_db_migrations() -> Result<()> {
     .await?
 }
 
-pub fn set_redis_pool(p: RedisPool) {
-    let mut pool_w = REDIS_POOL.write().unwrap();
+async fn set_async_redis_pool(p: AsyncRedisPool) {
+    let mut pool_w = ASYNC_REDIS_POOL.write().await;
     *pool_w = Some(p);
 }
 
@@ -353,8 +269,8 @@ pub async fn reset_db() -> Result<()> {
 
 #[cfg(test)]
 pub async fn reset_redis() -> Result<()> {
-    let mut c = get_redis_conn()?;
-    redis::cmd("FLUSHDB").query(&mut *c)?;
+    let mut c = get_async_redis_conn().await?;
+    redis::cmd("FLUSHDB").query_async(&mut c).await?;
    Ok(())
 }
 
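The pool construction in setup() above maps the existing max_open_connections setting onto deadpool's builder, while the old min_idle_connections handling is simply dropped. A hedged, standalone sketch of the single-node branch; the URL and pool size below are placeholders, and the builder error types convert into anyhow::Error via `?`:

use anyhow::Result;

// Build a deadpool-redis pool the same way setup() does for the non-cluster case.
fn build_redis_pool() -> Result<deadpool_redis::Pool> {
    let pool = deadpool_redis::Config::from_url("redis://127.0.0.1/")
        .builder()?
        .max_size(16)
        .build()?;
    Ok(pool)
}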
@@ -4,12 +4,11 @@ use std::str::FromStr;
 use anyhow::{Context, Result};
 use chrono::{DateTime, Duration, Utc};
 use prost::Message;
-use tokio::task;
 use tracing::{debug, info};
 use uuid::Uuid;
 
 use super::error::Error;
-use super::{get_redis_conn, redis_key};
+use super::{get_async_redis_conn, redis_key};
 use crate::config;
 use chirpstack_api::internal;
 use lrwn::{AES128Key, DevAddr, EUI64};
@@ -37,9 +36,6 @@ pub async fn save(ds: &internal::PassiveRoamingDeviceSession) -> Result<()> {
         return Ok(());
     }
 
-    task::spawn_blocking({
-        let ds = ds.clone();
-        move || -> Result<()> {
     let conf = config::get();
 
     let dev_addr_key = redis_key(format!("pr:devaddr:{{{}}}", dev_addr));
@@ -49,7 +45,7 @@ pub async fn save(ds: &internal::PassiveRoamingDeviceSession) -> Result<()> {
     let ttl = conf.network.device_session_ttl.as_millis() as usize;
     let pr_ttl = lifetime.num_milliseconds() as usize;
 
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     // We need to store a pointer from both the DevAddr and DevEUI to the
     // passive-roaming device-session ID. This is needed:
@@ -61,7 +57,7 @@ pub async fn save(ds: &internal::PassiveRoamingDeviceSession) -> Result<()> {
     // * We need to be able to lookup the session using the DevAddr (potentially
     //   using the MIC validation).
     // * We need to be able to stop a passive-roaming session given a DevEUI.
-    c.new_pipeline()
+    redis::pipe()
         .atomic()
         .cmd("SADD")
         .arg(&dev_addr_key)
@@ -84,12 +80,8 @@ pub async fn save(ds: &internal::PassiveRoamingDeviceSession) -> Result<()> {
         .arg(pr_ttl)
         .arg(b)
         .ignore()
-        .query(&mut c)?;
-            Ok(())
-        }
-    })
-    .await??;
+        .query_async(&mut c)
+        .await?;
 
     info!(id = %sess_id, "Passive-roaming device-session saved");
 
@@ -97,13 +89,12 @@ pub async fn save(ds: &internal::PassiveRoamingDeviceSession) -> Result<()> {
 }
 
 pub async fn get(id: Uuid) -> Result<internal::PassiveRoamingDeviceSession, Error> {
-    task::spawn_blocking({
-        move || -> Result<internal::PassiveRoamingDeviceSession, Error> {
     let key = redis_key(format!("pr:sess:{{{}}}", id));
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let v: Vec<u8> = redis::cmd("GET")
         .arg(key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Get passive-roaming device-session")?;
     if v.is_empty() {
         return Err(Error::NotFound(id.to_string()));
@@ -112,20 +103,12 @@ pub async fn get(id: Uuid) -> Result<internal::PassiveRoamingDeviceSession, Erro
         .context("Decode passive-roaming device-session")?;
     Ok(ds)
-        }
-    })
-    .await?
 }
 
 pub async fn delete(id: Uuid) -> Result<()> {
-    task::spawn_blocking({
-        move || -> Result<()> {
     let key = redis_key(format!("pr:sess:{{{}}}", id));
-            let mut c = get_redis_conn()?;
-            redis::cmd("DEL").arg(&key).query(&mut *c)?;
-            Ok(())
-        }
-    })
-    .await??;
+    let mut c = get_async_redis_conn().await?;
+    redis::cmd("DEL").arg(&key).query_async(&mut c).await?;
     info!(id = %id, "Passive-roaming device-session deleted");
     Ok(())
 }
@@ -197,11 +180,9 @@ async fn get_sessions_for_dev_addr(
 }
 
 async fn get_session_ids_for_dev_addr(dev_addr: DevAddr) -> Result<Vec<Uuid>> {
-    task::spawn_blocking({
-        move || -> Result<Vec<Uuid>> {
     let key = redis_key(format!("pr:devaddr:{{{}}}", dev_addr));
-            let mut c = get_redis_conn()?;
-            let v: Vec<String> = redis::cmd("SMEMBERS").arg(key).query(&mut *c)?;
+    let mut c = get_async_redis_conn().await?;
+    let v: Vec<String> = redis::cmd("SMEMBERS").arg(key).query_async(&mut c).await?;
 
     let mut out: Vec<Uuid> = Vec::new();
     for id in &v {
@@ -210,16 +191,11 @@ async fn get_session_ids_for_dev_addr(dev_addr: DevAddr) -> Result<Vec<Uuid>> {
 
     Ok(out)
-        }
-    })
-    .await?
 }
 
 pub async fn get_session_ids_for_dev_eui(dev_eui: EUI64) -> Result<Vec<Uuid>> {
-    task::spawn_blocking({
-        move || -> Result<Vec<Uuid>> {
     let key = redis_key(format!("pr:dev:{{{}}}", dev_eui));
-            let mut c = get_redis_conn()?;
-            let v: Vec<String> = redis::cmd("SMEMBERS").arg(key).query(&mut *c)?;
+    let mut c = get_async_redis_conn().await?;
+    let v: Vec<String> = redis::cmd("SMEMBERS").arg(key).query_async(&mut c).await?;
 
     let mut out: Vec<Uuid> = Vec::new();
     for id in &v {
@@ -228,9 +204,6 @@ pub async fn get_session_ids_for_dev_eui(dev_eui: EUI64) -> Result<Vec<Uuid>> {
 
     Ok(out)
-        }
-    })
-    .await?
 }
 
 fn get_full_f_cnt_up(next_expected_full_fcnt: u32, truncated_f_cnt: u32) -> u32 {
     // Handle re-transmission.
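The passive-roaming storage above keeps a Redis set of session IDs per DevAddr and per DevEUI and reads it back with SMEMBERS. A hedged sketch of that read path on the async API; the key parameter is illustrative:

use anyhow::Result;
use redis::aio::ConnectionLike;

// Return all member strings of a pointer set (e.g. the session ids stored for one DevAddr).
async fn session_ids<C: ConnectionLike>(c: &mut C, key: &str) -> Result<Vec<String>> {
    let ids: Vec<String> = redis::cmd("SMEMBERS").arg(key).query_async(c).await?;
    Ok(ids)
}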
@@ -1,18 +1,13 @@
 use anyhow::Result;
 use prost::Message;
-use tokio::task;
 
 use crate::config;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use chirpstack_api::stream;
 
 pub async fn log_request(pl: &stream::ApiRequestLog) -> Result<()> {
-    task::spawn_blocking({
-        let pl = pl.clone();
-
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     if conf.monitoring.api_request_log_max_history == 0 {
         return Ok(());
@@ -27,13 +22,11 @@ pub async fn log_request(pl: &stream::ApiRequestLog) -> Result<()> {
         .arg("*")
         .arg("request")
         .arg(&b)
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 #[cfg(test)]
 mod tests {
@@ -56,7 +49,7 @@ mod tests {
         };
         log_request(&pl).await.unwrap();
 
-        let mut c = get_redis_conn().unwrap();
+        let mut c = get_async_redis_conn().await.unwrap();
         let key = redis_key("api:stream:request".to_string());
         let srr: StreamReadReply = redis::cmd("XREAD")
             .arg("COUNT")
@@ -64,7 +57,8 @@ mod tests {
             .arg("STREAMS")
             .arg(&key)
             .arg("0")
-            .query(&mut *c)
+            .query_async(&mut c)
+            .await
             .unwrap();
 
         assert_eq!(1, srr.keys.len());
@@ -1,16 +1,36 @@
 use anyhow::Result;
 use prost::Message;
-use tokio::task;
+use tokio::sync::mpsc::{self, Sender};
+use tracing::error;
 
 use crate::config;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use chirpstack_api::stream;
 
-pub async fn log_request(pl: stream::BackendInterfacesRequest) -> Result<()> {
-    task::spawn_blocking({
-        move || -> Result<()> {
-            let conf = config::get();
-            let mut c = get_redis_conn()?;
+pub async fn get_log_sender() -> Option<Sender<stream::BackendInterfacesRequest>> {
+    let conf = config::get();
+    if conf.monitoring.backend_interfaces_log_max_history == 0 {
+        return None;
+    }
+
+    let (tx, mut rx) = mpsc::channel(100);
+
+    tokio::spawn(async move {
+        while let Some(pl) = rx.recv().await {
+            tokio::spawn(async move {
+                if let Err(e) = log_request(pl).await {
+                    error!(error = %e, "Log request error");
+                }
+            });
+        }
+    });
+
+    Some(tx)
+}
+
+pub async fn log_request(pl: stream::BackendInterfacesRequest) -> Result<()> {
+    let conf = config::get();
+    let mut c = get_async_redis_conn().await?;
 
     if conf.monitoring.backend_interfaces_log_max_history == 0 {
         return Ok(());
@@ -25,10 +45,8 @@ pub async fn log_request(pl: stream::BackendInterfacesRequest) -> Result<()> {
         .arg("*")
         .arg("request")
         .arg(&b)
-        .query(&mut *c)?;
+        .query_async(&mut c)
+        .await?;
 
     Ok(())
-        }
-    })
-    .await?
 }
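The new get_log_sender above decouples request handling from the Redis write: payloads go into a bounded mpsc queue, and the receiver fans each one out to its own task. A hedged, self-contained sketch of that channel pattern; Payload and handle stand in for stream::BackendInterfacesRequest and log_request:

use tokio::sync::mpsc;
use tracing::error;

#[derive(Clone)]
struct Payload;

async fn handle(_p: Payload) -> anyhow::Result<()> {
    Ok(())
}

// Spawn a background consumer and return the sender side of the queue.
fn spawn_logger() -> mpsc::Sender<Payload> {
    let (tx, mut rx) = mpsc::channel::<Payload>(100);
    tokio::spawn(async move {
        while let Some(pl) = rx.recv().await {
            // Each payload is handled on its own task so one slow write
            // does not block the queue.
            tokio::spawn(async move {
                if let Err(e) = handle(pl).await {
                    error!(error = %e, "Log request error");
                }
            });
        }
    });
    tx
}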
@@ -6,43 +6,38 @@ use anyhow::{Context, Result};
 use prost::Message;
 use redis::streams::StreamReadReply;
 use tokio::sync::mpsc;
-use tokio::task;
 use tracing::{debug, error, trace};
 
 use crate::config;
 use crate::helpers::errors::PrintFullError;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use chirpstack_api::{api, integration};
 
 #[allow(clippy::enum_variant_names)]
 
 pub async fn log_event_for_device(typ: &str, dev_eui: &str, b: &[u8]) -> Result<()> {
-    task::spawn_blocking({
-        let typ = typ.to_string();
-        let dev_eui = dev_eui.to_string();
-        let b = b.to_vec();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     // per device stream
     if conf.monitoring.per_device_event_log_max_history > 0 {
         let key = redis_key(format!("device:{{{}}}:stream:event", dev_eui));
-        c.new_pipeline()
+        redis::pipe()
             .atomic()
             .cmd("XADD")
             .arg(&key)
             .arg("MAXLEN")
             .arg(conf.monitoring.per_device_event_log_max_history)
             .arg("*")
-            .arg(&typ)
-            .arg(&b)
+            .arg(typ)
+            .arg(b)
             .ignore()
             .cmd("PEXPIRE")
             .arg(&key)
             .arg(conf.monitoring.per_device_event_log_ttl.as_millis() as usize)
             .ignore()
-            .query(&mut c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     // global device stream
@@ -53,29 +48,22 @@ pub async fn log_event_for_device(typ: &str, dev_eui: &str, b: &[u8]) -> Result<
             .arg("MAXLEN")
             .arg(conf.monitoring.device_event_log_max_history)
             .arg("*")
-            .arg(&typ)
-            .arg(&b)
-            .query(&mut *c)?;
+            .arg(typ)
+            .arg(b)
+            .query_async(&mut c)
+            .await?;
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 pub async fn get_event_logs(
     key: String,
     count: usize,
     channel: mpsc::Sender<api::LogItem>,
 ) -> Result<()> {
-    task::spawn_blocking({
-        let key = key.clone();
-        let channel = channel.clone();
-
-        move || -> Result<()> {
     let mut last_id = "0".to_string();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     loop {
         if channel.is_closed() {
@@ -89,7 +77,8 @@ pub async fn get_event_logs(
             .arg("STREAMS")
             .arg(&key)
             .arg(&last_id)
-            .query(&mut *c)
+            .query_async(&mut c)
+            .await
             .context("XREAD event stream")?;
 
         for stream_key in &srr.keys {
@@ -243,7 +232,8 @@ pub async fn get_event_logs(
                     "location" => {
                         trace!(key = %k, id=%last_id, "Event-log received from stream");
                         if let redis::Value::Data(b) = v {
-                            let pl = integration::LocationEvent::decode(&mut Cursor::new(b))?;
+                            let pl =
+                                integration::LocationEvent::decode(&mut Cursor::new(b))?;
                             let pl = api::LogItem {
                                 id: stream_id.id.clone(),
                                 time: pl.time.as_ref().map(|v| prost_types::Timestamp {
@@ -286,7 +276,6 @@ pub async fn get_event_logs(
                     _ => {
                         error!(key = %k, "Unexpected key in in event-log stream");
                     }
-
                 }
 
                 Ok(())
@@ -295,7 +284,9 @@ pub async fn get_event_logs(
             if let Err(e) = res {
                 // Return in case of channel error, in any other case we just log
                 // the error.
-                if e.downcast_ref::<mpsc::error::SendError<api::LogItem>>().is_some() {
+                if e.downcast_ref::<mpsc::error::SendError<api::LogItem>>()
+                    .is_some()
+                {
                     return Err(e);
                 }
 
@@ -308,6 +299,3 @@ pub async fn get_event_logs(
         sleep(Duration::from_secs(1));
     }
 }
-    })
-    .await?
-}
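get_event_logs above polls a Redis stream with XREAD from the last seen ID and sleeps between rounds. A hedged sketch of a single poll on the async API; it assumes the redis crate's "streams" feature for StreamReadReply, and the key, last_id and count parameters are illustrative:

use anyhow::{Context, Result};
use redis::aio::ConnectionLike;
use redis::streams::StreamReadReply;

// Read up to `count` entries after `last_id` from one stream key.
async fn read_once<C: ConnectionLike>(
    c: &mut C,
    key: &str,
    last_id: &str,
    count: usize,
) -> Result<StreamReadReply> {
    let srr: StreamReadReply = redis::cmd("XREAD")
        .arg("COUNT")
        .arg(count)
        .arg("STREAMS")
        .arg(key)
        .arg(last_id)
        .query_async(c)
        .await
        .context("XREAD event stream")?;
    Ok(srr)
}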
@@ -8,22 +8,18 @@ use prost::Message;
 use redis::streams::StreamReadReply;
 use serde_json::json;
 use tokio::sync::mpsc;
-use tokio::task;
 use tracing::{debug, error, trace, warn};
 
 use lrwn::EUI64;
 
 use crate::config;
 use crate::helpers::errors::PrintFullError;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use chirpstack_api::{api, stream};
 
 pub async fn log_uplink_for_gateways(ufl: &stream::UplinkFrameLog) -> Result<()> {
-    task::spawn_blocking({
-        let ufl = ufl.clone();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     for rx_info in &ufl.rx_info {
         let gateway_id = EUI64::from_str(&rx_info.gateway_id)?;
@@ -46,7 +42,7 @@ pub async fn log_uplink_for_gateways(ufl: &stream::UplinkFrameLog) -> Result<()>
         if conf.monitoring.per_gateway_frame_log_max_history > 0 {
             let key = redis_key(format!("gw:{{{}}}:stream:frame", gateway_id));
 
-            c.new_pipeline()
+            redis::pipe()
                 .atomic()
                 .cmd("XADD")
                 .arg(&key)
@@ -60,7 +56,8 @@ pub async fn log_uplink_for_gateways(ufl: &stream::UplinkFrameLog) -> Result<()>
                 .arg(&key)
                 .arg(conf.monitoring.per_gateway_frame_log_ttl.as_millis() as usize)
                 .ignore()
-                .query(&mut c)?;
+                .query_async(&mut c)
+                .await?;
         }
 
         // global gateway stream
@@ -73,33 +70,27 @@ pub async fn log_uplink_for_gateways(ufl: &stream::UplinkFrameLog) -> Result<()>
                 .arg("*")
                 .arg("up")
                 .arg(&b)
-                .query(&mut *c)?;
+                .query_async(&mut c)
+                .await?;
         }
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 pub async fn log_downlink_for_gateway(dfl: &stream::DownlinkFrameLog) -> Result<()> {
     if dfl.gateway_id.is_empty() {
         return Err(anyhow!("gateway_id must be set"));
     }
 
-    task::spawn_blocking({
-        let dfl = dfl.clone();
-
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let b = dfl.encode_to_vec();
 
     // per gateway stream
     if conf.monitoring.per_gateway_frame_log_max_history > 0 {
         let key = redis_key(format!("gw:{{{}}}:stream:frame", dfl.gateway_id));
-        c.new_pipeline()
+        redis::pipe()
             .atomic()
             .cmd("XADD")
             .arg(&key)
@@ -113,7 +104,8 @@ pub async fn log_downlink_for_gateway(dfl: &stream::DownlinkFrameLog) -> Result<
             .arg(&key)
             .arg(conf.monitoring.per_gateway_frame_log_ttl.as_millis() as usize)
             .ignore()
-            .query(&mut c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     // global gateway stream
@@ -126,32 +118,27 @@ pub async fn log_downlink_for_gateway(dfl: &stream::DownlinkFrameLog) -> Result<
             .arg("*")
             .arg("down")
             .arg(&b)
-            .query(&mut *c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 pub async fn log_uplink_for_device(ufl: &stream::UplinkFrameLog) -> Result<()> {
     if ufl.dev_eui.is_empty() {
         return Err(anyhow!("dev_eui must be set"));
     }
 
-    task::spawn_blocking({
-        let ufl = ufl.clone();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let b = ufl.encode_to_vec();
 
     // per device stream
     if conf.monitoring.per_device_frame_log_max_history > 0 {
         let key = redis_key(format!("device:{{{}}}:stream:frame", ufl.dev_eui));
 
-        c.new_pipeline()
+        redis::pipe()
            .atomic()
            .cmd("XADD")
            .arg(&key)
@@ -165,7 +152,8 @@ pub async fn log_uplink_for_device(ufl: &stream::UplinkFrameLog) -> Result<()> {
            .arg(&key)
            .arg(conf.monitoring.per_device_frame_log_ttl.as_millis() as usize)
            .ignore()
-            .query(&mut c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     // global device stream
@@ -178,32 +166,27 @@ pub async fn log_uplink_for_device(ufl: &stream::UplinkFrameLog) -> Result<()> {
            .arg("*")
            .arg("up")
            .arg(&b)
-            .query(&mut *c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 pub async fn log_downlink_for_device(dfl: &stream::DownlinkFrameLog) -> Result<()> {
     if dfl.dev_eui.is_empty() {
         return Err(anyhow!("dev_eui must be set"));
     }
 
-    task::spawn_blocking({
-        let dfl = dfl.clone();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let b = dfl.encode_to_vec();
 
     // per device stream
     if conf.monitoring.per_device_frame_log_max_history > 0 {
         let key = redis_key(format!("device:{{{}}}:stream:frame", dfl.dev_eui));
 
-        c.new_pipeline()
+        redis::pipe()
            .atomic()
            .cmd("XADD")
            .arg(&key)
@@ -217,7 +200,8 @@ pub async fn log_downlink_for_device(dfl: &stream::DownlinkFrameLog) -> Result<(
            .arg(&key)
            .arg(conf.monitoring.per_device_frame_log_ttl.as_millis() as usize)
            .ignore()
-            .query(&mut c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     // global device stream
@@ -230,27 +214,20 @@ pub async fn log_downlink_for_device(dfl: &stream::DownlinkFrameLog) -> Result<(
            .arg("*")
            .arg("down")
            .arg(&b)
-            .query(&mut *c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 pub async fn get_frame_logs(
     key: String,
     count: usize,
     channel: mpsc::Sender<api::LogItem>,
 ) -> Result<()> {
-    task::spawn_blocking({
-        let key = key.clone();
-        let channel = channel.clone();
-
-        move || -> Result<()> {
     let mut last_id = "0".to_string();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     loop {
         if channel.is_closed() {
@@ -264,7 +241,8 @@ pub async fn get_frame_logs(
            .arg("STREAMS")
            .arg(&key)
            .arg(&last_id)
-            .query(&mut *c)
+            .query_async(&mut c)
+            .await
            .context("XREAD frame stream")?;
 
         for stream_key in &srr.keys {
@@ -366,7 +344,9 @@ pub async fn get_frame_logs(
             if let Err(e) = res {
                 // Return in case of channel error, in any other case we just log
                 // the error.
-                if e.downcast_ref::<mpsc::error::SendError<api::LogItem>>().is_some() {
+                if e.downcast_ref::<mpsc::error::SendError<api::LogItem>>()
+                    .is_some()
+                {
                     return Err(e);
                 }
 
@@ -380,7 +360,4 @@ pub async fn get_frame_logs(
             // check every 1 second if there are new messages, which should be sufficient.
             sleep(Duration::from_secs(1));
         }
-
-        }
-    }).await?
 }
@@ -1,17 +1,13 @@
 use anyhow::Result;
 use prost::Message;
-use tokio::task;
 
 use crate::config;
-use crate::storage::{get_redis_conn, redis_key};
+use crate::storage::{get_async_redis_conn, redis_key};
 use chirpstack_api::stream;
 
 pub async fn log_uplink(up: &stream::UplinkMeta) -> Result<()> {
-    task::spawn_blocking({
-        let up = up.clone();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     if conf.monitoring.meta_log_max_history > 0 {
         let key = redis_key("stream:meta".to_string());
@@ -23,21 +19,16 @@ pub async fn log_uplink(up: &stream::UplinkMeta) -> Result<()> {
            .arg("*")
            .arg("up")
            .arg(&b)
-            .query(&mut *c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
 
 pub async fn log_downlink(down: &stream::DownlinkMeta) -> Result<()> {
-    task::spawn_blocking({
-        let down = down.clone();
-        move || -> Result<()> {
     let conf = config::get();
-    let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
 
     if conf.monitoring.meta_log_max_history > 0 {
         let key = redis_key("stream:meta".to_string());
@@ -50,11 +41,9 @@ pub async fn log_downlink(down: &stream::DownlinkMeta) -> Result<()> {
            .arg("*")
            .arg("down")
            .arg(&b)
-            .query(&mut *c)?;
+            .query_async(&mut c)
+            .await?;
     }
 
     Ok(())
-        }
-    })
-    .await?
 }
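The frame and meta logging functions above all write to capped Redis streams: XADD with MAXLEN limits the number of entries, and PEXPIRE bounds the key's lifetime. A hedged sketch of that write as one atomic pipeline; the key, field name and limits are illustrative:

use anyhow::Result;
use redis::aio::ConnectionLike;

// Append one payload to a stream, trimming it to `max_history` entries and
// refreshing its TTL in the same MULTI/EXEC pipeline.
async fn log_to_stream<C: ConnectionLike>(
    c: &mut C,
    key: &str,
    payload: &[u8],
    max_history: usize,
    ttl_ms: usize,
) -> Result<()> {
    redis::pipe()
        .atomic()
        .cmd("XADD")
        .arg(key)
        .arg("MAXLEN")
        .arg(max_history)
        .arg("*")
        .arg("up")
        .arg(payload)
        .ignore()
        .cmd("PEXPIRE")
        .arg(key)
        .arg(ttl_ms)
        .ignore()
        .query_async::<_, ()>(c)
        .await?;
    Ok(())
}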
@ -12,7 +12,7 @@ use crate::gateway::backend::mock as gateway_mock;
|
|||||||
use crate::integration::mock;
|
use crate::integration::mock;
|
||||||
use crate::storage::{
|
use crate::storage::{
|
||||||
device::{self, DeviceClass},
|
device::{self, DeviceClass},
|
||||||
device_queue, device_session, downlink_frame, get_redis_conn, redis_key,
|
device_queue, device_session, downlink_frame, get_async_redis_conn, redis_key,
|
||||||
};
|
};
|
||||||
use chirpstack_api::{gw, integration as integration_pb, internal, stream};
|
use chirpstack_api::{gw, integration as integration_pb, internal, stream};
|
||||||
use lrwn::EUI64;
|
use lrwn::EUI64;
|
||||||
@ -397,7 +397,7 @@ pub fn uplink_meta_log(um: stream::UplinkMeta) -> Validator {
|
|||||||
Box::new(move || {
|
Box::new(move || {
|
||||||
let um = um.clone();
|
let um = um.clone();
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
let mut c = get_redis_conn().unwrap();
|
let mut c = get_async_redis_conn().await.unwrap();
|
||||||
let key = redis_key("stream:meta".to_string());
|
let key = redis_key("stream:meta".to_string());
|
||||||
let srr: StreamReadReply = redis::cmd("XREAD")
|
let srr: StreamReadReply = redis::cmd("XREAD")
|
||||||
.arg("COUNT")
|
.arg("COUNT")
|
||||||
@ -405,7 +405,8 @@ pub fn uplink_meta_log(um: stream::UplinkMeta) -> Validator {
|
|||||||
.arg("STREAMS")
|
.arg("STREAMS")
|
||||||
.arg(&key)
|
.arg(&key)
|
||||||
.arg("0")
|
.arg("0")
|
||||||
.query(&mut *c)
|
.query_async(&mut c)
|
||||||
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
for stream_key in &srr.keys {
|
for stream_key in &srr.keys {
|
||||||
@ -433,7 +434,7 @@ pub fn device_uplink_frame_log(uf: stream::UplinkFrameLog) -> Validator {
|
|||||||
Box::new(move || {
|
Box::new(move || {
|
||||||
let uf = uf.clone();
|
let uf = uf.clone();
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
let mut c = get_redis_conn().unwrap();
|
let mut c = get_async_redis_conn().await.unwrap();
|
||||||
let key = redis_key(format!("device:{{{}}}:stream:frame", uf.dev_eui));
|
let key = redis_key(format!("device:{{{}}}:stream:frame", uf.dev_eui));
|
||||||
let srr: StreamReadReply = redis::cmd("XREAD")
|
let srr: StreamReadReply = redis::cmd("XREAD")
|
||||||
.arg("COUNT")
|
.arg("COUNT")
|
||||||
@ -441,7 +442,8 @@ pub fn device_uplink_frame_log(uf: stream::UplinkFrameLog) -> Validator {
|
|||||||
.arg("STREAMS")
|
.arg("STREAMS")
|
||||||
.arg(&key)
|
.arg(&key)
|
||||||
.arg("0")
|
.arg("0")
|
||||||
.query(&mut *c)
|
.query_async(&mut c)
|
||||||
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
for stream_key in &srr.keys {
|
for stream_key in &srr.keys {
|
||||||
|
@ -37,8 +37,8 @@ async fn test_fns_uplink() {
|
|||||||
});
|
});
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let t = tenant::create(tenant::Tenant {
|
let t = tenant::create(tenant::Tenant {
|
||||||
name: "tenant".into(),
|
name: "tenant".into(),
|
||||||
@ -168,7 +168,6 @@ async fn test_fns_uplink() {
|
|||||||
sns_pr_start_req_mock.delete();
|
sns_pr_start_req_mock.delete();
|
||||||
|
|
||||||
joinserver::reset();
|
joinserver::reset();
|
||||||
roaming::reset();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
@ -188,8 +187,8 @@ async fn test_sns_uplink() {
|
|||||||
});
|
});
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let t = tenant::create(tenant::Tenant {
|
let t = tenant::create(tenant::Tenant {
|
||||||
name: "tenant".into(),
|
name: "tenant".into(),
|
||||||
@ -436,8 +435,8 @@ async fn test_sns_roaming_not_allowed() {
|
|||||||
});
|
});
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let t = tenant::create(tenant::Tenant {
|
let t = tenant::create(tenant::Tenant {
|
||||||
name: "tenant".into(),
|
name: "tenant".into(),
|
||||||
@ -618,8 +617,8 @@ async fn test_sns_dev_not_found() {
|
|||||||
});
|
});
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let mut dev_addr = lrwn::DevAddr::from_be_bytes([0, 0, 0, 0]);
|
let mut dev_addr = lrwn::DevAddr::from_be_bytes([0, 0, 0, 0]);
|
||||||
dev_addr.set_dev_addr_prefix(lrwn::NetID::from_str("000505").unwrap().dev_addr_prefix());
|
dev_addr.set_dev_addr_prefix(lrwn::NetID::from_str("000505").unwrap().dev_addr_prefix());
|
||||||
|
@ -368,7 +368,7 @@ async fn run_test(t: &Test) {
|
|||||||
}];
|
}];
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
region::setup().unwrap();
|
region::setup().unwrap();
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
|
|
||||||
integration::set_mock().await;
|
integration::set_mock().await;
|
||||||
gateway_backend::set_backend(&"eu868", Box::new(gateway_backend::mock::Backend {})).await;
|
gateway_backend::set_backend(&"eu868", Box::new(gateway_backend::mock::Backend {})).await;
|
||||||
|
@ -46,8 +46,8 @@ async fn test_fns() {
|
|||||||
}];
|
}];
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let t = tenant::create(tenant::Tenant {
|
let t = tenant::create(tenant::Tenant {
|
||||||
name: "tenant".into(),
|
name: "tenant".into(),
|
||||||
@ -247,7 +247,6 @@ async fn test_fns() {
|
|||||||
.await;
|
.await;
|
||||||
|
|
||||||
joinserver::reset();
|
joinserver::reset();
|
||||||
roaming::reset();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
@ -269,8 +268,8 @@ async fn test_sns() {
|
|||||||
});
|
});
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let t = tenant::create(tenant::Tenant {
|
let t = tenant::create(tenant::Tenant {
|
||||||
name: "tenant".into(),
|
name: "tenant".into(),
|
||||||
@ -432,7 +431,6 @@ async fn test_sns() {
|
|||||||
);
|
);
|
||||||
|
|
||||||
joinserver::reset();
|
joinserver::reset();
|
||||||
roaming::reset();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
@ -454,8 +452,8 @@ async fn test_sns_roaming_not_allowed() {
|
|||||||
});
|
});
|
||||||
|
|
||||||
config::set(conf);
|
config::set(conf);
|
||||||
joinserver::setup().unwrap();
|
joinserver::setup().await.unwrap();
|
||||||
roaming::setup().unwrap();
|
roaming::setup().await.unwrap();
|
||||||
|
|
||||||
let t = tenant::create(tenant::Tenant {
|
let t = tenant::create(tenant::Tenant {
|
||||||
name: "tenant".into(),
|
name: "tenant".into(),
|
||||||
@ -591,5 +589,4 @@ async fn test_sns_roaming_not_allowed() {
|
|||||||
);
|
);
|
||||||
|
|
||||||
joinserver::reset();
|
joinserver::reset();
|
||||||
roaming::reset();
|
|
||||||
}
|
}
|
||||||
|
@ -125,7 +125,7 @@ impl Data {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let net_id = NetID::from_slice(&ds.net_id)?;
|
let net_id = NetID::from_slice(&ds.net_id)?;
|
||||||
let client = roaming::get(&net_id)?;
|
let client = roaming::get(&net_id).await?;
|
||||||
let async_receiver = match client.is_async() {
|
let async_receiver = match client.is_async() {
|
||||||
false => None,
|
false => None,
|
||||||
true => Some(
|
true => Some(
|
||||||
@ -183,7 +183,7 @@ impl Data {
|
|||||||
pr_req.base.transaction_id = 1234;
|
pr_req.base.transaction_id = 1234;
|
||||||
}
|
}
|
||||||
|
|
||||||
let client = roaming::get(&net_id)?;
|
let client = roaming::get(&net_id).await?;
|
||||||
let async_receiver = match client.is_async() {
|
let async_receiver = match client.is_async() {
|
||||||
false => None,
|
false => None,
|
||||||
true => Some(
|
true => Some(
|
||||||
|
@ -40,7 +40,7 @@ impl JoinRequest {
|
|||||||
|
|
||||||
ctx.filter_rx_info_by_public_only()?;
|
ctx.filter_rx_info_by_public_only()?;
|
||||||
ctx.get_home_net_id().await?;
|
ctx.get_home_net_id().await?;
|
||||||
ctx.get_client()?;
|
ctx.get_client().await?;
|
||||||
ctx.start_roaming().await?;
|
ctx.start_roaming().await?;
|
||||||
ctx.save_roaming_session().await?;
|
ctx.save_roaming_session().await?;
|
||||||
|
|
||||||
@@ -94,10 +94,10 @@ impl JoinRequest {
         Ok(())
     }

-    fn get_client(&mut self) -> Result<()> {
+    async fn get_client(&mut self) -> Result<()> {
         let net_id = self.home_net_id.as_ref().unwrap();
         trace!(net_id = %net_id, "Getting backend interfaces client");
-        self.client = Some(roaming::get(net_id)?);
+        self.client = Some(roaming::get(net_id).await?);
         Ok(())
     }

@@ -10,7 +10,6 @@ use prometheus_client::encoding::EncodeLabelSet;
 use prometheus_client::metrics::counter::Counter;
 use prometheus_client::metrics::family::Family;
 use prost::Message;
-use tokio::task;
 use tokio::time::sleep;
 use tracing::{debug, error, info, span, trace, warn, Instrument, Level};
 use uuid::Uuid;
@@ -19,7 +18,7 @@ use crate::config;
 use crate::helpers::errors::PrintFullError;
 use crate::monitoring::prometheus;
 use crate::storage::{
-    device, device_profile, error::Error as StorageError, gateway, get_redis_conn, redis_key,
+    device, device_profile, error::Error as StorageError, gateway, get_async_redis_conn, redis_key,
 };
 use crate::stream;
 use chirpstack_api::{common, gw, internal, stream as stream_pb};
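The storage import swaps get_redis_conn for get_async_redis_conn. Its definition is outside this excerpt; a minimal sketch, assuming the helper checks a connection out of a deadpool-redis pool (names and error handling are illustrative):

    use anyhow::{anyhow, Result};
    use deadpool_redis::{Config, Connection, Pool, Runtime};
    use once_cell::sync::OnceCell;

    // Hypothetical process-wide pool; cluster support and configuration omitted.
    static POOL: OnceCell<Pool> = OnceCell::new();

    pub fn setup(url: &str) -> Result<()> {
        let pool = Config::from_url(url).create_pool(Some(Runtime::Tokio1))?;
        POOL.set(pool).map_err(|_| anyhow!("pool already initialized"))?;
        Ok(())
    }

    // Checks out a pooled connection without blocking the tokio runtime,
    // which is what allows the spawn_blocking wrappers below to be dropped.
    pub async fn get_async_redis_conn() -> Result<Connection> {
        let pool = POOL.get().ok_or_else(|| anyhow!("redis pool is not set up"))?;
        Ok(pool.get().await?)
    }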
@@ -221,36 +220,28 @@ async fn _deduplicate_uplink(event: gw::UplinkFrame) -> Result<()> {
 }

 async fn deduplicate_put(key: &str, ttl: Duration, event: &gw::UplinkFrame) -> Result<()> {
-    task::spawn_blocking({
-        let key = key.to_string();
     let event_b = event.encode_to_vec();
-        move || -> Result<()> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

-    c.new_pipeline()
+    redis::pipe()
         .atomic()
         .cmd("SADD")
-        .arg(&key)
+        .arg(key)
         .arg(event_b)
         .ignore()
         .cmd("PEXPIRE")
-        .arg(&key)
+        .arg(key)
         .arg(ttl.as_millis() as usize)
         .ignore()
-        .query(&mut c)
+        .query_async(&mut c)
+        .await
         .context("Deduplication put")?;

     Ok(())
-        }
-    })
-    .await?
 }

 async fn deduplicate_locked(key: &str, ttl: Duration) -> Result<bool> {
-    task::spawn_blocking({
-        let key = key.to_string();
-        move || -> Result<bool> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;

     let set: bool = redis::cmd("SET")
         .arg(key)
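The rewritten deduplicate_put shows the core pattern of this refactor: the blocking connection and its task::spawn_blocking wrapper are replaced by an async connection, and the pipeline is awaited via query_async. A self-contained sketch of the same pattern against any async redis connection (function and argument names are illustrative):

    use std::time::Duration;

    use anyhow::{Context, Result};

    // Adds a member to a Redis set and refreshes the set's TTL in one atomic
    // pipeline. `C` can be any async connection, e.g. one checked out of a
    // deadpool-redis pool.
    async fn put_set_member<C: redis::aio::ConnectionLike + Send>(
        c: &mut C,
        key: &str,
        ttl: Duration,
        value: &[u8],
    ) -> Result<()> {
        let _: () = redis::pipe()
            .atomic()
            .cmd("SADD")
            .arg(key)
            .arg(value)
            .ignore()
            .cmd("PEXPIRE")
            .arg(key)
            .arg(ttl.as_millis() as u64)
            .ignore()
            .query_async(c)
            .await
            .context("SADD + PEXPIRE pipeline")?;
        Ok(())
    }

Because the redis future is awaited directly on the tokio runtime, the per-call key.to_string() clone and the move closure of the blocking version are no longer needed.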
@@ -258,23 +249,19 @@ async fn deduplicate_locked(key: &str, ttl: Duration) -> Result<bool> {
         .arg("PX")
         .arg(ttl.as_millis() as usize)
         .arg("NX")
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Deduplication locked")?;

     Ok(!set)
-        }
-    })
-    .await?
 }

 async fn deduplicate_collect(key: &str) -> Result<gw::UplinkFrameSet> {
-    task::spawn_blocking({
-        let key = key.to_string();
-        move || -> Result<gw::UplinkFrameSet> {
-            let mut c = get_redis_conn()?;
+    let mut c = get_async_redis_conn().await?;
     let items_b: Vec<Vec<u8>> = redis::cmd("SMEMBERS")
         .arg(&key)
-        .query(&mut *c)
+        .query_async(&mut c)
+        .await
         .context("Deduplication collect")?;

     if items_b.is_empty() {
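deduplicate_locked keeps its SET ... PX ... NX semantics; only the transport becomes async. A sketch of that first-writer-wins check (names are illustrative):

    use std::time::Duration;

    use anyhow::{Context, Result};

    // Returns true if this caller created the key (i.e. it won the race);
    // false means another worker already set it within the TTL window.
    async fn acquire_once<C: redis::aio::ConnectionLike + Send>(
        c: &mut C,
        key: &str,
        ttl: Duration,
    ) -> Result<bool> {
        let set: bool = redis::cmd("SET")
            .arg(key)
            .arg("lock")
            .arg("PX")
            .arg(ttl.as_millis() as u64)
            .arg("NX")
            .query_async(c)
            .await
            .context("SET PX NX")?;
        Ok(set)
    }

deduplicate_locked itself returns the negation (Ok(!set)): a failed SET NX means another worker already holds the deduplication key.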
@@ -286,8 +273,7 @@ async fn deduplicate_collect(key: &str) -> Result<gw::UplinkFrameSet> {
     };

     for b in items_b {
-        let event =
-            gw::UplinkFrame::decode(&mut Cursor::new(b)).context("Decode UplinkFrame")?;
+        let event = gw::UplinkFrame::decode(&mut Cursor::new(b)).context("Decode UplinkFrame")?;

         if event.tx_info.is_none() {
             warn!("tx_info of uplink event is empty, skipping");
@@ -305,9 +291,6 @@ async fn deduplicate_collect(key: &str) -> Result<gw::UplinkFrameSet> {

     Ok(pl)
-        }
-    })
-    .await?
 }

 pub async fn handle_uplink(deduplication_id: Uuid, uplink: gw::UplinkFrameSet) -> Result<()> {
     let rx_info = &uplink
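deduplicate_collect follows the same shape: a single awaited SMEMBERS replaces the blocking query, and each member is still decoded with prost. A self-contained sketch of that collect step (names are illustrative):

    use std::io::Cursor;

    use anyhow::{Context, Result};
    use chirpstack_api::gw;
    use prost::Message;

    // Reads every member of the deduplication set and decodes it into an
    // UplinkFrame; a member that fails to decode aborts the collect with an error.
    async fn collect_uplinks<C: redis::aio::ConnectionLike + Send>(
        c: &mut C,
        key: &str,
    ) -> Result<Vec<gw::UplinkFrame>> {
        let items_b: Vec<Vec<u8>> = redis::cmd("SMEMBERS")
            .arg(key)
            .query_async(c)
            .await
            .context("SMEMBERS")?;

        let mut out = Vec::with_capacity(items_b.len());
        for b in items_b {
            out.push(gw::UplinkFrame::decode(&mut Cursor::new(b)).context("Decode UplinkFrame")?);
        }
        Ok(out)
    }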