Mirror of https://github.com/chirpstack/chirpstack.git (synced 2024-12-19 13:17:55 +00:00)
This feature makes it possible to select between PostgreSQL and SQLite as the database backend using a compile-time feature flag. It is not possible to enable both at the same time.

---------

Co-authored-by: Momo Bel <plopyomomo@gmail.com>
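The mutual exclusivity is enforced through Cargo features rather than runtime configuration. As a hedged illustration (not code from this commit), a crate can surface an invalid feature combination at compile time with `compile_error!`; the `postgres`/`sqlite` feature names mirror the ones this commit introduces:

```rust
// Illustrative sketch only: reject invalid backend selections at compile
// time. Whether ChirpStack itself adds such a guard is an assumption.
#[cfg(all(feature = "postgres", feature = "sqlite"))]
compile_error!("features `postgres` and `sqlite` are mutually exclusive");

#[cfg(not(any(feature = "postgres", feature = "sqlite")))]
compile_error!("either the `postgres` or the `sqlite` feature must be enabled");

fn main() {
    #[cfg(feature = "postgres")]
    println!("compiled with the PostgreSQL backend");
    #[cfg(feature = "sqlite")]
    println!("compiled with the SQLite backend");
}
```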
This commit is contained in:
parent 800d7d0efe · commit e63296573b
.github/workflows/main.yml (vendored, 9 changes)
@@ -13,6 +13,13 @@ env:
 jobs:
   tests:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        database:
+          - postgres
+          - sqlite
+    env:
+      DATABASE: ${{ matrix.database }}
     steps:
       -
         name: Checkout
@@ -32,7 +39,7 @@ jobs:
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
             target/
-          key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}
+          key: ${{ runner.os }}-cargo-test-${{ matrix.database }}-${{ hashFiles('**/Cargo.lock') }}
       -
         name: Start dependency services
         run: docker compose up -d
.gitignore (vendored, 4 changes)
@@ -11,8 +11,12 @@
 # Binary packages
 /dist
 
+# SQLite databases
+*.sqlite
+
 # Rust target directory
 **/target
+**/target-sqlite
 
 # Certificates
 /chirpstack/configuration/certs/*
Cargo.lock (generated, 24 changes)
@@ -876,6 +876,7 @@ dependencies = [
 "rustls 0.23.12",
 "rustls-native-certs",
 "rustls-pemfile",
+"scoped-futures",
 "serde",
 "serde_json",
 "serde_urlencoded",
@@ -907,7 +908,6 @@ dependencies = [
 name = "chirpstack_api"
 version = "4.9.0"
 dependencies = [
-"diesel",
 "hex",
 "pbjson",
 "pbjson-build",
@@ -1303,10 +1303,12 @@ dependencies = [
 "chrono",
 "diesel_derives",
 "itoa",
+"libsqlite3-sys",
 "num-bigint",
 "num-integer",
 "num-traits",
 "serde_json",
+"time",
 "uuid",
 ]
 
@@ -2165,9 +2167,9 @@ dependencies = [
 
 [[package]]
 name = "hyper-util"
-version = "0.1.6"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956"
+checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9"
 dependencies = [
 "bytes",
 "futures-channel",
@@ -2466,6 +2468,16 @@ dependencies = [
 "libc",
 ]
 
+[[package]]
+name = "libsqlite3-sys"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4588d65215825ee71ebff9e1c9982067833b1355d7546845ffdb3165cbd7456"
+dependencies = [
+ "pkg-config",
+ "vcpkg",
+]
+
 [[package]]
 name = "linux-raw-sys"
 version = "0.3.8"
@@ -5019,6 +5031,12 @@ version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101"
 
+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
 [[package]]
 name = "version_check"
 version = "0.9.5"
Makefile (2 changes)
@@ -8,7 +8,7 @@ dist:
 
 # Install dev dependencies
 dev-dependencies:
 	cargo install cross --version 0.2.5
-	cargo install diesel_cli --version 2.2.1 --no-default-features --features postgres
+	cargo install diesel_cli --version 2.2.1 --no-default-features --features postgres,sqlite
 	cargo install cargo-deb --version 1.43.1
 	cargo install cargo-generate-rpm --version 0.12.1
README.md (32 changes)
@@ -84,7 +84,11 @@ docker compose up -d
 Run the following command to run the ChirpStack tests:
 
 ```bash
+# Test (with PostgreSQL database backend)
 make test
+
+# Test with SQLite database backend
+DATABASE=sqlite make test
 ```
 
 ### Building ChirpStack binaries
@@ -109,6 +113,34 @@ make release-amd64
 make dist
 ```
 
+By default the above commands will build ChirpStack with the PostgreSQL
+database backend. Set the `DATABASE=sqlite` env. variable to compile ChirpStack
+with the SQLite database backend.
+
+### Database migrations
+
+To create a new database migration, execute:
+
+```
+make migration-generate NAME=test-migration
+```
+
+To apply migrations, execute:
+
+```
+make migration-run
+```
+
+To revert a migration, execute:
+
+```
+make migration-revert
+```
+
+By default the above commands will execute the migration commands using the
+PostgreSQL database backend. To execute migration commands for the SQLite
+database backend, set the `DATABASE=sqlite` env. variable.
 
 ## License
 
 ChirpStack Network Server is distributed under the MIT license. See also
api/rust/Cargo.toml (vendored, 2 changes)
@@ -12,7 +12,6 @@
 default = ["api", "json"]
 api = ["tonic/transport", "tonic-build/transport", "tokio"]
 json = ["pbjson", "pbjson-types", "serde"]
-diesel = ["dep:diesel"]
 internal = []
 
 [dependencies]
@@ -29,7 +28,6 @@
 pbjson = { version = "0.7", optional = true }
 pbjson-types = { version = "0.7", optional = true }
 serde = { version = "1.0", optional = true }
-diesel = { version = "2.2", features = ["postgres_backend"], optional = true }
 
 [build-dependencies]
 tonic-build = { version = "0.12", features = [
api/rust/src/internal.rs (vendored, 32 changes)
@@ -2,13 +2,6 @@ include!(concat!(env!("OUT_DIR"), "/internal/internal.rs"));
 #[cfg(feature = "json")]
 include!(concat!(env!("OUT_DIR"), "/internal/internal.serde.rs"));
 
-#[cfg(feature = "diesel")]
-use diesel::{backend::Backend, deserialize, serialize, sql_types::Binary};
-#[cfg(feature = "diesel")]
-use prost::Message;
-#[cfg(feature = "diesel")]
-use std::io::Cursor;
-
 impl DeviceSession {
     pub fn get_a_f_cnt_down(&self) -> u32 {
         if self.mac_version().to_string().starts_with("1.0") {
@@ -30,28 +23,3 @@ impl DeviceSession {
         }
     }
 }
-
-#[cfg(feature = "diesel")]
-impl<ST, DB> deserialize::FromSql<ST, DB> for DeviceSession
-where
-    DB: Backend,
-    *const [u8]: deserialize::FromSql<ST, DB>,
-{
-    fn from_sql(value: DB::RawValue<'_>) -> deserialize::Result<Self> {
-        let bytes = <Vec<u8> as deserialize::FromSql<ST, DB>>::from_sql(value)?;
-        Ok(DeviceSession::decode(&mut Cursor::new(bytes))?)
-    }
-}
-
-#[cfg(feature = "diesel")]
-impl serialize::ToSql<Binary, diesel::pg::Pg> for DeviceSession
-where
-    [u8]: serialize::ToSql<Binary, diesel::pg::Pg>,
-{
-    fn to_sql(&self, out: &mut serialize::Output<'_, '_, diesel::pg::Pg>) -> serialize::Result {
-        <[u8] as serialize::ToSql<Binary, diesel::pg::Pg>>::to_sql(
-            &self.encode_to_vec(),
-            &mut out.reborrow(),
-        )
-    }
-}
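Removing these impls leaves the vendored API crate with no dependency on a specific Diesel backend. Judging by the `.into()` conversions that appear throughout the service diffs below, the serialization responsibility presumably moved to a wrapper type inside the chirpstack crate; a minimal sketch of that pattern (names hypothetical, not taken from this commit):

```rust
// Hypothetical newtype wrapper: the chirpstack crate can implement
// backend-specific Diesel traits on its own type while the API crate
// stays backend-agnostic. `internal::DeviceSession` is the prost message
// seen above; the wrapper name is an assumption.
use chirpstack_api::internal;

pub struct DeviceSession(internal::DeviceSession);

impl From<internal::DeviceSession> for DeviceSession {
    fn from(ds: internal::DeviceSession) -> Self {
        DeviceSession(ds)
    }
}

impl std::ops::Deref for DeviceSession {
    type Target = internal::DeviceSession;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
```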
chirpstack/Cargo.toml

@@ -26,20 +26,16 @@
 email_address = "0.2"
 diesel = { version = "2.2", features = [
     "chrono",
-    "uuid",
-    "serde_json",
     "numeric",
     "64-column-tables",
-    "postgres_backend",
 ] }
 diesel_migrations = { version = "2.2" }
 diesel-async = { version = "0.5", features = [
     "deadpool",
-    "postgres",
     "async-connection-wrapper",
 ] }
-tokio-postgres = "0.7"
-tokio-postgres-rustls = "0.12"
+tokio-postgres = { version = "0.7", optional = true }
+tokio-postgres-rustls = { version = "0.12", optional = true }
 bigdecimal = "0.4"
 redis = { version = "0.26", features = ["tls-rustls", "tokio-rustls-comp"] }
 deadpool-redis = { version = "0.16", features = ["cluster"] }
@@ -53,11 +49,7 @@
 ], default-features = true }
 
 # ChirpStack API definitions
-chirpstack_api = { path = "../api/rust", features = [
-    "default",
-    "internal",
-    "diesel",
-] }
+chirpstack_api = { path = "../api/rust", features = ["default", "internal"] }
 lrwn = { path = "../lrwn", features = [
     "serde",
     "diesel",
@@ -161,6 +153,7 @@
 petgraph = "0.6"
 prometheus-client = "0.22"
 pin-project = "1.1"
+scoped-futures = { version = "0.1", features = ["std"] }
 signal-hook = "0.3"
 signal-hook-tokio = { version = "0.3", features = ["futures-v0_3"] }
@@ -171,6 +164,23 @@
 dotenv = "0.15"
 
 [features]
+default = ["postgres"]
+postgres = [
+    "tokio-postgres",
+    "tokio-postgres-rustls",
+    "diesel/postgres_backend",
+    "diesel/serde_json",
+    "diesel/uuid",
+    "diesel-async/postgres",
+    "lrwn/postgres",
+]
+sqlite = [
+    "diesel/sqlite",
+    "diesel/returning_clauses_for_sqlite_3_35",
+    "lrwn/sqlite",
+    "diesel-async/sync-connection-wrapper",
+    "diesel-async/sqlite",
+]
 test-all-integrations = [
     "test-integration-amqp",
     "test-integration-kafka",
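The two feature sets select different diesel-async connection types. A hedged sketch of how downstream code typically consumes such features; the alias name is an assumption, though `SyncConnectionWrapper` is the diesel-async type enabled by the `sync-connection-wrapper` feature listed above:

```rust
// Sketch: one cfg-gated alias lets the rest of the storage layer stay
// backend-agnostic. The alias name `AsyncDbConnection` is illustrative,
// not taken from this commit.
#[cfg(feature = "postgres")]
pub type AsyncDbConnection = diesel_async::AsyncPgConnection;

#[cfg(feature = "sqlite")]
pub type AsyncDbConnection =
    diesel_async::sync_connection_wrapper::SyncConnectionWrapper<diesel::sqlite::SqliteConnection>;
```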
chirpstack/Makefile

@@ -1,20 +1,21 @@
 .PHONY: dist
 
 PKG_VERSION := $(shell cargo metadata --no-deps --format-version 1 | jq -r '.packages[0].version')
+DATABASE ?= postgres
 
 debug-amd64:
-	cross build --target x86_64-unknown-linux-musl
+	cross build --target x86_64-unknown-linux-musl --no-default-features --features="$(DATABASE)"
 
 release-amd64:
-	cross build --target x86_64-unknown-linux-musl --release
+	cross build --target x86_64-unknown-linux-musl --release --no-default-features --features="$(DATABASE)"
 
 dist:
 	# Keep these in this order, as aarch64 is based on Debian Buster (older),
 	# the others on Bullseye. For some build scripts we want to build against
 	# least recent LIBC.
-	cross build --target aarch64-unknown-linux-musl --release
-	cross build --target x86_64-unknown-linux-musl --release
-	cross build --target armv7-unknown-linux-musleabihf --release
+	cross build --target aarch64-unknown-linux-musl --release --no-default-features --features="$(DATABASE)"
+	cross build --target x86_64-unknown-linux-musl --release --no-default-features --features="$(DATABASE)"
+	cross build --target armv7-unknown-linux-musleabihf --release --no-default-features --features="$(DATABASE)"
 
 	cargo deb --target x86_64-unknown-linux-musl --no-build --no-strip
 	cargo deb --target armv7-unknown-linux-musleabihf --no-build --no-strip
@@ -40,10 +41,38 @@ dist:
 
 test:
 	cargo fmt --check
-	cargo clippy --no-deps
-	TZ=UTC cargo test
+	cargo clippy --no-deps --no-default-features --features="$(DATABASE)"
+	TZ=UTC cargo test --no-default-features --features="$(DATABASE)"
 
 test-all:
 	cargo fmt --check
-	cargo clippy --no-deps
-	TZ=UTC cargo test --features test-all-integrations
+	cargo clippy --no-deps --no-default-features --features="$(DATABASE)"
+	TZ=UTC cargo test --no-default-features --features="$(DATABASE),test-all-integrations"
+
+migration-generate:
+ifeq ($(NAME),)
+	@echo "You must provide a NAME parameter, e.g. make migration-generate NAME=test-migration"
+else
+	diesel --config-file diesel_$(DATABASE).toml migration --migration-dir migrations_$(DATABASE) generate $(NAME)
+endif
+
+migration-run: chirpstack_test.sqlite
+ifeq ($(DATABASE),postgres)
+	diesel --config-file diesel_postgres.toml migration --migration-dir migrations_postgres run
+endif
+ifeq ($(DATABASE),sqlite)
+	DATABASE_URL="chirpstack_test.sqlite" diesel --config-file diesel_sqlite.toml migration --migration-dir migrations_sqlite run
+	sed -i 's/Timestamp/TimestamptzSqlite/g' src/storage/schema_sqlite.rs
+endif
+
+migration-revert: chirpstack_test.sqlite
+ifeq ($(DATABASE),postgres)
+	diesel --config-file diesel_postgres.toml migration --migration-dir migrations_postgres revert
+endif
+ifeq ($(DATABASE),sqlite)
+	DATABASE_URL="chirpstack_test.sqlite" diesel --config-file diesel_sqlite.toml migration --migration-dir migrations_sqlite revert
+	sed -i 's/Timestamp/TimestamptzSqlite/g' src/storage/schema_sqlite.rs
+endif
+
+chirpstack_test.sqlite:
+	DATABASE_URL=chirpstack_test.sqlite diesel --config-file diesel_sqlite.toml setup --migration-dir migrations_sqlite
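With per-backend migration directories, the crate can embed the matching set at compile time. A sketch using `diesel_migrations` (already a dependency above); whether ChirpStack wires it exactly this way is an assumption:

```rust
// Sketch: embed the migration directory that matches the compiled backend.
// `embed_migrations!` resolves the path at compile time, so each feature
// build ships only its own migrations.
use diesel_migrations::{embed_migrations, EmbeddedMigrations};

#[cfg(feature = "postgres")]
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations_postgres");

#[cfg(feature = "sqlite")]
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations_sqlite");
```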
chirpstack/diesel_postgres.toml (renamed from chirpstack/diesel.toml)

@@ -2,4 +2,4 @@
 # see diesel.rs/guides/configuring-diesel-cli
 
 [print_schema]
-file = "src/storage/schema.rs"
+file = "src/storage/schema_postgres.rs"
chirpstack/diesel_sqlite.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli

[print_schema]
file = "src/storage/schema_sqlite.rs"
chirpstack/migrations_sqlite/00000000000000_initial/down.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
drop table relay_gateway;
drop table multicast_group_gateway;
drop table multicast_group_queue_item;
drop table multicast_group_device;
drop table multicast_group;
drop table device_queue_item;
drop table device_keys;
drop table device;
drop table device_profile;
drop table api_key;
drop table application_integration;
drop table application;
drop table gateway;
drop table tenant_user;
drop table tenant;
drop table "user";
drop table relay_device;
drop table device_profile_template;
chirpstack/migrations_sqlite/00000000000000_initial/up.sql (new file, 392 lines)
@@ -0,0 +1,392 @@
-- user
create table "user" (
    id text not null primary key,
    external_id text null,
    created_at datetime not null,
    updated_at datetime not null,
    is_admin boolean not null,
    is_active boolean not null,
    email text not null,
    email_verified boolean not null,
    password_hash varchar(200) not null,
    note text not null
);

create unique index idx_user_email on "user"(email);
create unique index idx_user_external_id on "user"(external_id);

insert into "user" (
    id,
    created_at,
    updated_at,
    is_admin,
    is_active,
    email,
    email_verified,
    password_hash,
    note
) values (
    '05244f12-6daf-4e1f-8315-c66783a0ab56',
    datetime('now'),
    datetime('now'),
    TRUE,
    TRUE,
    'admin',
    FALSE,
    '$pbkdf2-sha512$i=1,l=64$l8zGKtxRESq3PA2kFhHRWA$H3lGMxOt55wjwoc+myeOoABofJY9oDpldJa7fhqdjbh700V6FLPML75UmBOt9J5VFNjAL1AvqCozA1HJM0QVGA',
    ''
);

-- tenant
create table tenant (
    id text not null primary key,
    created_at datetime not null,
    updated_at datetime not null,
    name varchar(100) not null,
    description text not null,
    can_have_gateways boolean not null,
    max_device_count integer not null,
    max_gateway_count integer not null,
    private_gateways_up boolean not null,
    private_gateways_down boolean not null default FALSE,
    tags text not null default '{}'
);

-- sqlite has advanced text search with https://www.sqlite.org/fts5.html
-- but looks like it is for a full table and not specific per column, to investigate
create index idx_tenant_name_trgm on "tenant"(name);

insert into "tenant" (
    id,
    created_at,
    updated_at,
    name,
    description,
    can_have_gateways,
    max_device_count,
    max_gateway_count,
    private_gateways_up
) values (
    '52f14cd4-c6f1-4fbd-8f87-4025e1d49242',
    datetime('now'),
    datetime('now'),
    'ChirpStack',
    '',
    TRUE,
    0,
    0,
    FALSE
);

-- tenant user
create table tenant_user (
    tenant_id text not null references tenant on delete cascade,
    user_id text not null references "user" on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    is_admin boolean not null,
    is_device_admin boolean not null,
    is_gateway_admin boolean not null,
    primary key (tenant_id, user_id)
);

create index idx_tenant_user_user_id on tenant_user (user_id);

-- gateway
create table gateway (
    gateway_id blob not null primary key,
    tenant_id text not null references tenant on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    last_seen_at datetime,
    name varchar(100) not null,
    description text not null,
    latitude double precision not null,
    longitude double precision not null,
    altitude real not null,
    stats_interval_secs integer not null,
    tls_certificate blob,
    tags text not null,
    properties text not null
);

create index idx_gateway_tenant_id on gateway (tenant_id);
create index idx_gateway_name_trgm on gateway (name);
create index idx_gateway_id_trgm on gateway (hex(gateway_id));
create index idx_gateway_tags on gateway (tags);

-- application
create table application (
    id text not null primary key,
    tenant_id text not null references tenant on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    name varchar(100) not null,
    description text not null,
    mqtt_tls_cert blob,
    tags text not null default '{}'
);

create index idx_application_tenant_id on application (tenant_id);
create index idx_application_name_trgm on application (name);

-- application integration
create table application_integration (
    application_id text not null references application on delete cascade,
    kind varchar(20) not null,
    created_at datetime not null,
    updated_at datetime not null,
    configuration text not null,

    primary key (application_id, kind)
);

-- api-key
create table api_key (
    id text not null primary key,
    created_at datetime not null,
    name varchar(100) not null,
    is_admin boolean not null,
    tenant_id text null references tenant on delete cascade
);

create index idx_api_key_tenant_id on api_key (tenant_id);

-- device-profile
create table device_profile (
    id text not null primary key,
    tenant_id text not null references tenant on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    name varchar(100) not null,
    region varchar(10) not null,
    mac_version varchar(10) not null,
    reg_params_revision varchar(20) not null,
    adr_algorithm_id varchar(100) not null,
    payload_codec_runtime varchar(20) not null,
    uplink_interval integer not null,
    device_status_req_interval integer not null,
    supports_otaa boolean not null,
    supports_class_b boolean not null,
    supports_class_c boolean not null,
    class_b_timeout integer not null,
    class_b_ping_slot_nb_k integer not null,
    class_b_ping_slot_dr smallint not null,
    class_b_ping_slot_freq bigint not null,
    class_c_timeout integer not null,
    abp_rx1_delay smallint not null,
    abp_rx1_dr_offset smallint not null,
    abp_rx2_dr smallint not null,
    abp_rx2_freq bigint not null,
    tags text not null,
    payload_codec_script text not null default '',
    flush_queue_on_activate boolean not null default FALSE,
    description text not null default '',
    measurements text not null default '{}',
    auto_detect_measurements boolean not null default TRUE,
    region_config_id varchar(100) null,
    is_relay boolean not null default FALSE,
    is_relay_ed boolean not null default FALSE,
    relay_ed_relay_only boolean not null default FALSE,
    relay_enabled boolean not null default FALSE,
    relay_cad_periodicity smallint not null default 0,
    relay_default_channel_index smallint not null default 0,
    relay_second_channel_freq bigint not null default 0,
    relay_second_channel_dr smallint not null default 0,
    relay_second_channel_ack_offset smallint not null default 0,
    relay_ed_activation_mode smallint not null default 0,
    relay_ed_smart_enable_level smallint not null default 0,
    relay_ed_back_off smallint not null default 0,
    relay_ed_uplink_limit_bucket_size smallint not null default 0,
    relay_ed_uplink_limit_reload_rate smallint not null default 0,
    relay_join_req_limit_reload_rate smallint not null default 0,
    relay_notify_limit_reload_rate smallint not null default 0,
    relay_global_uplink_limit_reload_rate smallint not null default 0,
    relay_overall_limit_reload_rate smallint not null default 0,
    relay_join_req_limit_bucket_size smallint not null default 0,
    relay_notify_limit_bucket_size smallint not null default 0,
    relay_global_uplink_limit_bucket_size smallint not null default 0,
    relay_overall_limit_bucket_size smallint not null default 0,
    allow_roaming boolean not null default TRUE,
    rx1_delay smallint not null default 0
);

create index idx_device_profile_tenant_id on device_profile (tenant_id);
create index idx_device_profile_name_trgm on device_profile (name);
create index idx_device_profile_tags on device_profile (tags);

-- device
create table device (
    dev_eui blob not null primary key,
    application_id text not null references application on delete cascade,
    device_profile_id text not null references device_profile on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    last_seen_at datetime,
    scheduler_run_after datetime,
    name varchar(100) not null,
    description text not null,
    external_power_source boolean not null,
    battery_level numeric(5, 2),
    margin int,
    dr smallint,
    latitude double precision,
    longitude double precision,
    altitude real,
    dev_addr blob,
    enabled_class char(1) not null,
    skip_fcnt_check boolean not null,
    is_disabled boolean not null,
    tags text not null,
    variables text not null,
    join_eui blob not null default x'00000000',
    secondary_dev_addr blob,
    device_session blob
);

create index idx_device_application_id on device (application_id);
create index idx_device_device_profile_id on device (device_profile_id);
create index idx_device_name_trgm on device (name);
create index idx_device_dev_eui_trgm on device (hex(dev_eui));
create index idx_device_dev_addr_trgm on device (hex(dev_addr));
create index idx_device_tags on device (tags);

create table device_keys (
    dev_eui blob not null primary key references device on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    nwk_key blob not null,
    app_key blob not null,
    dev_nonces text not null,
    join_nonce int not null
);

create table device_queue_item (
    id text not null primary key,
    dev_eui blob references device on delete cascade not null,
    created_at datetime not null,
    f_port smallint not null,
    confirmed boolean not null,
    data blob not null,
    is_pending boolean not null,
    f_cnt_down bigint null,
    timeout_after datetime,
    is_encrypted boolean default FALSE not null
);

create index idx_device_queue_item_dev_eui on device_queue_item (dev_eui);
create index idx_device_queue_item_created_at on device_queue_item (created_at);
create index idx_device_queue_item_timeout_after on device_queue_item (timeout_after);

-- multicast groups
create table multicast_group (
    id text not null primary key,
    application_id text not null references application on delete cascade,
    created_at datetime not null,
    updated_at datetime not null,
    name varchar(100) not null,
    region varchar(10) not null,
    mc_addr blob not null,
    mc_nwk_s_key blob not null,
    mc_app_s_key blob not null,
    f_cnt bigint not null,
    group_type char(1) not null,
    dr smallint not null,
    frequency bigint not null,
    class_b_ping_slot_nb_k smallint not null,
    class_c_scheduling_type varchar(20) not null default 'delay'
);

create index idx_multicast_group_application_id on multicast_group (application_id);
create index idx_multicast_group_name_trgm on multicast_group (name);

create table multicast_group_device (
    multicast_group_id text not null references multicast_group on delete cascade,
    dev_eui blob not null references device on delete cascade,
    created_at datetime not null,
    primary key (multicast_group_id, dev_eui)
);

create table multicast_group_queue_item (
    id text not null primary key,
    created_at datetime not null,
    scheduler_run_after datetime not null,
    multicast_group_id text not null references multicast_group on delete cascade,
    gateway_id blob not null references gateway on delete cascade,
    f_cnt bigint not null,
    f_port smallint not null,
    data blob not null,
    emit_at_time_since_gps_epoch bigint
);

create index idx_multicast_group_queue_item_multicast_group_id on multicast_group_queue_item (multicast_group_id);
create index idx_multicast_group_queue_item_scheduler_run_after on multicast_group_queue_item (scheduler_run_after);

create table device_profile_template (
    id text not null primary key,
    created_at datetime not null,
    updated_at datetime not null,
    name varchar(100) not null,
    description text not null,
    vendor varchar(100) not null,
    firmware varchar(100) not null,
    region varchar(10) not null,
    mac_version varchar(10) not null,
    reg_params_revision varchar(20) not null,
    adr_algorithm_id varchar(100) not null,
    payload_codec_runtime varchar(20) not null,
    payload_codec_script text not null,
    uplink_interval integer not null,
    device_status_req_interval integer not null,
    flush_queue_on_activate boolean not null,
    supports_otaa boolean not null,
    supports_class_b boolean not null,
    supports_class_c boolean not null,
    class_b_timeout integer not null,
    class_b_ping_slot_nb_k integer not null,
    class_b_ping_slot_dr smallint not null,
    class_b_ping_slot_freq bigint not null,
    class_c_timeout integer not null,
    abp_rx1_delay smallint not null,
    abp_rx1_dr_offset smallint not null,
    abp_rx2_dr smallint not null,
    abp_rx2_freq bigint not null,
    tags text not null,
    measurements text not null default '{}',
    auto_detect_measurements boolean not null default TRUE
);

create table multicast_group_gateway (
    multicast_group_id text not null references multicast_group on delete cascade,
    gateway_id blob not null references gateway on delete cascade,
    created_at datetime not null,
    primary key (multicast_group_id, gateway_id)
);

create table relay_device (
    relay_dev_eui blob not null references device on delete cascade,
    dev_eui blob not null references device on delete cascade,
    created_at datetime not null,
    primary key (relay_dev_eui, dev_eui)
);

create index idx_tenant_tags on tenant (tags);
create index idx_application_tags on application (tags);
create index idx_device_dev_addr on device (dev_addr);
create index idx_device_secondary_dev_addr on device (secondary_dev_addr);

-- relay gateway
create table relay_gateway (
    tenant_id text not null references tenant on delete cascade,
    relay_id blob not null,
    created_at datetime not null,
    updated_at datetime not null,
    last_seen_at datetime,
    name varchar(100) not null,
    description text not null,
    stats_interval_secs integer not null,
    region_config_id varchar(100) not null,

    primary key (tenant_id, relay_id)
);
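The schema stores timestamps in SQLite `datetime` columns, which carry no zone information; this is why the Makefile above rewrites `Timestamp` to `TimestamptzSqlite` in the generated `schema_sqlite.rs`. A hedged sketch of what such a custom SQL type marker can look like in Diesel 2 (the real definition in ChirpStack may differ):

```rust
// Sketch: a custom SQL type marker for zone-aware timestamps on SQLite.
// Mapping it to TEXT is an assumption about ChirpStack's implementation.
#[derive(diesel::sql_types::SqlType, diesel::query_builder::QueryId)]
#[diesel(sqlite_type(name = "Text"))]
pub struct TimestamptzSqlite;
```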
chirpstack/src/api/application.rs

@@ -44,7 +44,7 @@ impl ApplicationService for Application {
         .await?;
 
         let a = application::Application {
-            tenant_id,
+            tenant_id: tenant_id.into(),
             name: req_app.name.clone(),
             description: req_app.description.clone(),
             tags: fields::KeyValue::new(req_app.tags.clone()),
@@ -119,7 +119,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update(application::Application {
-            id: app_id,
+            id: app_id.into(),
             name: req_app.name.to_string(),
             description: req_app.description.to_string(),
             tags: fields::KeyValue::new(req_app.tags.clone()),
@@ -279,7 +279,7 @@ impl ApplicationService for Application {
         .await?;
 
         let i = application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::Http,
             configuration: application::IntegrationConfiguration::Http(
                 application::HttpConfiguration {
@@ -367,7 +367,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::Http,
             configuration: application::IntegrationConfiguration::Http(
                 application::HttpConfiguration {
@@ -438,7 +438,7 @@ impl ApplicationService for Application {
         .await?;
 
         let i = application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::InfluxDb,
             configuration: application::IntegrationConfiguration::InfluxDb(
                 application::InfluxDbConfiguration {
@@ -535,7 +535,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::InfluxDb,
             configuration: application::IntegrationConfiguration::InfluxDb(
                 application::InfluxDbConfiguration {
@@ -610,7 +610,7 @@ impl ApplicationService for Application {
         .await?;
 
         let i = application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::ThingsBoard,
             configuration: application::IntegrationConfiguration::ThingsBoard(
                 application::ThingsBoardConfiguration {
@@ -689,7 +689,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::ThingsBoard,
             configuration: application::IntegrationConfiguration::ThingsBoard(
                 application::ThingsBoardConfiguration {
@@ -755,7 +755,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::MyDevices,
             configuration: application::IntegrationConfiguration::MyDevices(
                 application::MyDevicesConfiguration {
@@ -832,7 +832,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::MyDevices,
             configuration: application::IntegrationConfiguration::MyDevices(
                 application::MyDevicesConfiguration {
@@ -907,7 +907,7 @@ impl ApplicationService for Application {
         };
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::LoraCloud,
             configuration: application::IntegrationConfiguration::LoraCloud(
                 application::LoraCloudConfiguration {
@@ -1032,7 +1032,7 @@ impl ApplicationService for Application {
         };
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::LoraCloud,
             configuration: application::IntegrationConfiguration::LoraCloud(
                 application::LoraCloudConfiguration {
@@ -1119,7 +1119,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::GcpPubSub,
             configuration: application::IntegrationConfiguration::GcpPubSub(
                 application::GcpPubSubConfiguration {
@@ -1202,7 +1202,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::GcpPubSub,
             configuration: application::IntegrationConfiguration::GcpPubSub(
                 application::GcpPubSubConfiguration {
@@ -1271,7 +1271,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::AwsSns,
             configuration: application::IntegrationConfiguration::AwsSns(
                 application::AwsSnsConfiguration {
@@ -1354,7 +1354,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::AwsSns,
             configuration: application::IntegrationConfiguration::AwsSns(
                 application::AwsSnsConfiguration {
@@ -1424,7 +1424,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::AzureServiceBus,
             configuration: application::IntegrationConfiguration::AzureServiceBus(
                 application::AzureServiceBusConfiguration {
@@ -1506,7 +1506,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::AzureServiceBus,
             configuration: application::IntegrationConfiguration::AzureServiceBus(
                 application::AzureServiceBusConfiguration {
@@ -1574,7 +1574,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::PilotThings,
             configuration: application::IntegrationConfiguration::PilotThings(
                 application::PilotThingsConfiguration {
@@ -1653,7 +1653,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::PilotThings,
             configuration: application::IntegrationConfiguration::PilotThings(
                 application::PilotThingsConfiguration {
@@ -1730,7 +1730,7 @@ impl ApplicationService for Application {
         }
 
         let _ = application::create_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::Ifttt,
             configuration: application::IntegrationConfiguration::Ifttt(
                 application::IftttConfiguration {
@@ -1814,7 +1814,7 @@ impl ApplicationService for Application {
         .await?;
 
         let _ = application::update_integration(application::Integration {
-            application_id: app_id,
+            application_id: app_id.into(),
             kind: application::IntegrationKind::Ifttt,
             configuration: application::IntegrationConfiguration::Ifttt(
                 application::IftttConfiguration {
@@ -1945,7 +1945,9 @@ pub mod test {
             }),
         };
         let mut create_req = Request::new(create_req);
-        create_req.extensions_mut().insert(AuthID::User(u.id));
+        create_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let create_resp = service.create(create_req).await.unwrap();
         let create_resp = create_resp.get_ref();
 
@@ -1954,7 +1956,9 @@ pub mod test {
             id: create_resp.id.clone(),
         };
         let mut get_req = Request::new(get_req);
-        get_req.extensions_mut().insert(AuthID::User(u.id));
+        get_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let get_resp = service.get(get_req).await.unwrap();
        assert_eq!(
            Some(api::Application {
@@ -1976,7 +1980,9 @@ pub mod test {
             }),
         };
         let mut up_req = Request::new(up_req);
-        up_req.extensions_mut().insert(AuthID::User(u.id));
+        up_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let _ = service.update(up_req).await.unwrap();
 
         //get
@@ -1984,7 +1990,9 @@ pub mod test {
             id: create_resp.id.clone(),
         };
         let mut get_req = Request::new(get_req);
-        get_req.extensions_mut().insert(AuthID::User(u.id));
+        get_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let get_resp = service.get(get_req).await.unwrap();
         assert_eq!(
             Some(api::Application {
@@ -2004,7 +2012,9 @@ pub mod test {
             offset: 0,
         };
         let mut list_req = Request::new(list_req);
-        list_req.extensions_mut().insert(AuthID::User(u.id));
+        list_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let list_resp = service.list(list_req).await.unwrap();
         assert_eq!(1, list_resp.get_ref().total_count);
         assert_eq!(1, list_resp.get_ref().result.len());
@@ -2014,14 +2024,18 @@ pub mod test {
             id: create_resp.id.clone(),
         };
         let mut del_req = Request::new(del_req);
-        del_req.extensions_mut().insert(AuthID::User(u.id));
+        del_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let _ = service.delete(del_req).await.unwrap();
 
         let del_req = api::DeleteApplicationRequest {
             id: create_resp.id.clone(),
         };
         let mut del_req = Request::new(del_req);
-        del_req.extensions_mut().insert(AuthID::User(u.id));
+        del_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let del_resp = service.delete(del_req).await;
         assert!(del_resp.is_err());
     }
(File diff suppressed because it is too large.)
chirpstack/src/api/device.rs

@@ -64,8 +64,8 @@ impl DeviceService for Device {
 
         let d = device::Device {
             dev_eui,
-            application_id: app_id,
-            device_profile_id: dp_id,
+            application_id: app_id.into(),
+            device_profile_id: dp_id.into(),
             name: req_d.name.clone(),
             description: req_d.description.clone(),
             skip_fcnt_check: req_d.skip_fcnt_check,
@@ -191,8 +191,8 @@ impl DeviceService for Device {
         // update
         let _ = device::update(device::Device {
             dev_eui,
-            application_id: app_id,
-            device_profile_id: dp_id,
+            application_id: app_id.into(),
+            device_profile_id: dp_id.into(),
             name: req_d.name.clone(),
             description: req_d.description.clone(),
             skip_fcnt_check: req_d.skip_fcnt_check,
@@ -533,7 +533,7 @@ impl DeviceService for Device {
         dp.reset_session_to_boot_params(&mut ds);
 
         let mut device_changeset = device::DeviceChangeset {
-            device_session: Some(Some(ds)),
+            device_session: Some(Some(ds.into())),
             dev_addr: Some(Some(dev_addr)),
             secondary_dev_addr: Some(None),
             ..Default::default()
@@ -1085,7 +1085,7 @@ impl DeviceService for Device {
         }
 
         let qi = device_queue::DeviceQueueItem {
-            id: Uuid::new_v4(),
+            id: Uuid::new_v4().into(),
             dev_eui,
             f_port: req_qi.f_port as i16,
             confirmed: req_qi.confirmed,
@@ -1539,11 +1539,14 @@ pub mod test {
             dev.dev_eui,
             &device::DeviceChangeset {
                 dev_addr: Some(Some(DevAddr::from_be_bytes([1, 2, 3, 4]))),
-                device_session: Some(Some(internal::DeviceSession {
-                    dev_addr: vec![1, 2, 3, 4],
-                    js_session_key_id: vec![8, 7, 6, 5, 4, 3, 2, 1],
-                    ..Default::default()
-                })),
+                device_session: Some(Some(
+                    internal::DeviceSession {
+                        dev_addr: vec![1, 2, 3, 4],
+                        js_session_key_id: vec![8, 7, 6, 5, 4, 3, 2, 1],
+                        ..Default::default()
+                    }
+                    .into(),
+                )),
                 ..Default::default()
             },
         )
@@ -1568,14 +1571,17 @@ pub mod test {
         device::partial_update(
             dev.dev_eui,
             &device::DeviceChangeset {
-                device_session: Some(Some(internal::DeviceSession {
-                    dev_addr: vec![1, 2, 3, 4],
-                    app_s_key: Some(common::KeyEnvelope {
-                        kek_label: "test-key".into(),
-                        aes_key: vec![8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1],
-                    }),
-                    ..Default::default()
-                })),
+                device_session: Some(Some(
+                    internal::DeviceSession {
+                        dev_addr: vec![1, 2, 3, 4],
+                        app_s_key: Some(common::KeyEnvelope {
+                            kek_label: "test-key".into(),
+                            aes_key: vec![8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1],
+                        }),
+                        ..Default::default()
+                    }
+                    .into(),
+                )),
                 ..Default::default()
             },
         )
chirpstack/src/api/device_profile.rs

@@ -45,7 +45,7 @@ impl DeviceProfileService for DeviceProfile {
         .await?;
 
         let mut dp = device_profile::DeviceProfile {
-            tenant_id,
+            tenant_id: tenant_id.into(),
             name: req_dp.name.clone(),
             description: req_dp.description.clone(),
             region: req_dp.region().from_proto(),
@@ -247,7 +247,7 @@ impl DeviceProfileService for DeviceProfile {
 
         // update
         let _ = device_profile::update(device_profile::DeviceProfile {
-            id: dp_id,
+            id: dp_id.into(),
             name: req_dp.name.clone(),
             description: req_dp.description.clone(),
             region: req_dp.region().from_proto(),
chirpstack/src/api/gateway.rs

@@ -58,7 +58,7 @@ impl GatewayService for Gateway {
 
         let gw = gateway::Gateway {
             gateway_id: EUI64::from_str(&req_gw.gateway_id).map_err(|e| e.status())?,
-            tenant_id,
+            tenant_id: tenant_id.into(),
             name: req_gw.name.clone(),
             description: req_gw.description.clone(),
             latitude: lat,
@@ -851,8 +851,8 @@ impl GatewayService for Gateway {
         .await?;
 
         let _ = gateway::update_relay_gateway(gateway::RelayGateway {
-            tenant_id,
             relay_id,
+            tenant_id: tenant_id.into(),
             name: req_relay.name.clone(),
             description: req_relay.description.clone(),
             stats_interval_secs: req_relay.stats_interval as i32,
@@ -1028,7 +1028,9 @@ pub mod test {
             }),
         };
         let mut create_req = Request::new(create_req);
-        create_req.extensions_mut().insert(AuthID::User(u.id));
+        create_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let _ = service.create(create_req).await.unwrap();
 
         // get
@@ -1036,7 +1038,9 @@ pub mod test {
             gateway_id: "0102030405060708".into(),
         };
         let mut get_req = Request::new(get_req);
-        get_req.extensions_mut().insert(AuthID::User(u.id));
+        get_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let get_resp = service.get(get_req).await.unwrap();
         assert_eq!(
             Some(api::Gateway {
@@ -1070,7 +1074,9 @@ pub mod test {
             }),
         };
         let mut up_req = Request::new(up_req);
-        up_req.extensions_mut().insert(AuthID::User(u.id));
+        up_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let _ = service.update(up_req).await.unwrap();
 
         // get
@@ -1078,7 +1084,9 @@ pub mod test {
             gateway_id: "0102030405060708".into(),
         };
         let mut get_req = Request::new(get_req);
-        get_req.extensions_mut().insert(AuthID::User(u.id));
+        get_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let get_resp = service.get(get_req).await.unwrap();
         assert_eq!(
             Some(api::Gateway {
@@ -1105,7 +1113,9 @@ pub mod test {
             ..Default::default()
         };
         let mut list_req = Request::new(list_req);
-        list_req.extensions_mut().insert(AuthID::User(u.id));
+        list_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let list_resp = service.list(list_req).await.unwrap();
         assert_eq!(1, list_resp.get_ref().total_count);
         assert_eq!(1, list_resp.get_ref().result.len());
@@ -1115,14 +1125,18 @@ pub mod test {
             gateway_id: "0102030405060708".into(),
         };
         let mut del_req = Request::new(del_req);
-        del_req.extensions_mut().insert(AuthID::User(u.id));
+        del_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let _ = service.delete(del_req).await.unwrap();
 
         let del_req = api::DeleteGatewayRequest {
             gateway_id: "0102030405060708".into(),
         };
         let mut del_req = Request::new(del_req);
-        del_req.extensions_mut().insert(AuthID::User(u.id));
+        del_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let del_resp = service.delete(del_req).await;
         assert!(del_resp.is_err());
     }
@@ -1198,7 +1212,9 @@ pub mod test {
             aggregation: common::Aggregation::Day.into(),
         };
         let mut stats_req = Request::new(stats_req);
-        stats_req.extensions_mut().insert(AuthID::User(u.id));
+        stats_req
+            .extensions_mut()
+            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
         let stats_resp = service.get_metrics(stats_req).await.unwrap();
         let stats_resp = stats_resp.get_ref();
         assert_eq!(
@@ -1289,7 +1305,7 @@ pub mod test {
             end: Some(now_st.into()),
         };
         let mut stats_req = Request::new(stats_req);
-        stats_req.extensions_mut().insert(AuthID::User(u.id));
+        stats_req.extensions_mut().insert(AuthID::User(u.id.into()));
         let stats_resp = service.get_duty_cycle_metrics(stats_req).await.unwrap();
         let stats_resp = stats_resp.get_ref();
         assert_eq!(
@@ -1363,7 +1379,9 @@ pub mod test {
             relay_id: "01020304".into(),
         };
         let mut get_relay_req = Request::new(get_relay_req);
-        get_relay_req.extensions_mut().insert(AuthID::User(u.id));
+        get_relay_req
+            .extensions_mut()
+            .insert(AuthID::User(u.id.into()));
         let get_relay_resp = service.get_relay_gateway(get_relay_req).await.unwrap();
         assert_eq!(
             Some(api::RelayGateway {
@@ -1389,7 +1407,9 @@ pub mod test {
             }),
         };
         let mut up_relay_req = Request::new(up_relay_req);
-        up_relay_req.extensions_mut().insert(AuthID::User(u.id));
+        up_relay_req
+            .extensions_mut()
+            .insert(AuthID::User(u.id.into()));
         let _ = service.update_relay_gateway(up_relay_req).await.unwrap();
 
         // get relay gateway
@@ -1398,7 +1418,9 @@ pub mod test {
             relay_id: "01020304".into(),
         };
         let mut get_relay_req = Request::new(get_relay_req);
-        get_relay_req.extensions_mut().insert(AuthID::User(u.id));
+        get_relay_req
+            .extensions_mut()
+            .insert(AuthID::User(u.id.into()));
         let get_relay_resp = service.get_relay_gateway(get_relay_req).await.unwrap();
         assert_eq!(
             Some(api::RelayGateway {
@@ -1419,7 +1441,9 @@ pub mod test {
             offset: 0,
         };
         let mut list_relay_req = Request::new(list_relay_req);
-        list_relay_req.extensions_mut().insert(AuthID::User(u.id));
+        list_relay_req
+            .extensions_mut()
+            .insert(AuthID::User(u.id.into()));
         let list_relay_resp = service.list_relay_gateways(list_relay_req).await.unwrap();
         assert_eq!(1, list_relay_resp.get_ref().total_count);
         assert_eq!(1, list_relay_resp.get_ref().result.len());
@@ -1430,7 +1454,9 @@ pub mod test {
             relay_id: "01020304".into(),
         };
         let mut del_relay_req = Request::new(del_relay_req);
-        del_relay_req.extensions_mut().insert(AuthID::User(u.id));
+        del_relay_req
+            .extensions_mut()
+            .insert(AuthID::User(u.id.into()));
         let del_relay_resp = service.delete_relay_gateway(del_relay_req).await;
         assert!(del_relay_resp.is_ok());
 
@@ -1439,7 +1465,9 @@ pub mod test {
             relay_id: "01020304".into(),
         };
         let mut del_relay_req = Request::new(del_relay_req);
-        del_relay_req.extensions_mut().insert(AuthID::User(u.id));
+        del_relay_req
+            .extensions_mut()
+            .insert(AuthID::User(u.id.into()));
         let del_relay_resp = service.delete_relay_gateway(del_relay_req).await;
         assert!(del_relay_resp.is_err());
     }
chirpstack/src/api/internal.rs

@@ -287,7 +287,11 @@ impl InternalService for Internal {
         let tenant_id = if req_key.tenant_id.is_empty() {
             None
         } else {
-            Some(Uuid::from_str(&req_key.tenant_id).map_err(|e| e.status())?)
+            Some(
+                Uuid::from_str(&req_key.tenant_id)
+                    .map_err(|e| e.status())?
+                    .into(),
+            )
         };
 
         if req_key.is_admin && tenant_id.is_some() {
@@ -312,7 +316,7 @@ impl InternalService for Internal {
         let ak = api_key::ApiKey {
             name: req_key.name.clone(),
             is_admin: req_key.is_admin,
-            tenant_id,
+            tenant_id: tenant_id.map(|u| u.into()),
             ..Default::default()
         };
chirpstack/src/api/multicast.rs

@@ -47,7 +47,7 @@ impl MulticastGroupService for MulticastGroup {
         .await?;
 
         let mg = multicast::MulticastGroup {
-            application_id: app_id,
+            application_id: app_id.into(),
             name: req_mg.name.clone(),
             region: req_mg.region().from_proto(),
             mc_addr: DevAddr::from_str(&req_mg.mc_addr).map_err(|e| e.status())?,
@@ -154,7 +154,7 @@ impl MulticastGroupService for MulticastGroup {
         .await?;
 
         let _ = multicast::update(multicast::MulticastGroup {
-            id: mg_id,
+            id: mg_id.into(),
             name: req_mg.name.clone(),
             region: req_mg.region().from_proto(),
             mc_addr: DevAddr::from_str(&req_mg.mc_addr).map_err(|e| e.status())?,
@@ -408,7 +408,7 @@ impl MulticastGroupService for MulticastGroup {
         .await?;
 
         let f_cnt = downlink::multicast::enqueue(multicast::MulticastGroupQueueItem {
-            multicast_group_id: mg_id,
+            multicast_group_id: mg_id.into(),
             f_port: req_enq.f_port as i16,
             data: req_enq.data.clone(),
             ..Default::default()
@ -122,7 +122,7 @@ impl TenantService for Tenant {

        // update
        let _ = tenant::update(tenant::Tenant {
            id: tenant_id,
            id: tenant_id.into(),
            name: req_tenant.name.clone(),
            description: req_tenant.description.clone(),
            can_have_gateways: req_tenant.can_have_gateways,
@ -190,7 +190,7 @@ impl TenantService for Tenant {
                let u = user::get(id).await.map_err(|e| e.status())?;

                if !u.is_admin {
                    filters.user_id = Some(u.id);
                    filters.user_id = Some(u.id.into());
                }
            }
            AuthID::Key(_) => {
@ -258,8 +258,8 @@ impl TenantService for Tenant {
            .await?;

        let _ = tenant::add_user(tenant::TenantUser {
            tenant_id,
            user_id,
            tenant_id: tenant_id.into(),
            user_id: user_id.into(),
            is_admin: req_user.is_admin,
            is_device_admin: req_user.is_device_admin,
            is_gateway_admin: req_user.is_gateway_admin,
@ -342,8 +342,8 @@ impl TenantService for Tenant {
            .await?;

        tenant::update_user(tenant::TenantUser {
            tenant_id,
            user_id,
            tenant_id: tenant_id.into(),
            user_id: user_id.into(),
            is_admin: req_user.is_admin,
            is_device_admin: req_user.is_device_admin,
            is_gateway_admin: req_user.is_gateway_admin,
@ -482,7 +482,9 @@ pub mod test {
            }),
        };
        let mut create_req = Request::new(create_req);
        create_req.extensions_mut().insert(AuthID::User(u.id));
        create_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let create_resp = service.create(create_req).await.unwrap();

        // get
@ -490,7 +492,9 @@ pub mod test {
            id: create_resp.get_ref().id.clone(),
        };
        let mut get_req = Request::new(get_req);
        get_req.extensions_mut().insert(AuthID::User(u.id));
        get_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let get_resp = service.get(get_req).await.unwrap();
        assert_eq!(
            Some(api::Tenant {
@ -518,7 +522,9 @@ pub mod test {
            }),
        };
        let mut up_req = Request::new(up_req);
        up_req.extensions_mut().insert(AuthID::User(u.id));
        up_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let _ = service.update(up_req).await.unwrap();

        // get
@ -526,7 +532,9 @@ pub mod test {
            id: create_resp.get_ref().id.clone(),
        };
        let mut get_req = Request::new(get_req);
        get_req.extensions_mut().insert(AuthID::User(u.id));
        get_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let get_resp = service.get(get_req).await.unwrap();
        assert_eq!(
            Some(api::Tenant {
@ -549,7 +557,9 @@ pub mod test {
            user_id: "".into(),
        };
        let mut list_req = Request::new(list_req);
        list_req.extensions_mut().insert(AuthID::User(u.id));
        list_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let list_resp = service.list(list_req).await.unwrap();
        assert_eq!(1, list_resp.get_ref().total_count);
        assert_eq!(1, list_resp.get_ref().result.len());
@ -559,14 +569,18 @@ pub mod test {
            id: create_resp.get_ref().id.clone(),
        };
        let mut del_req = Request::new(del_req);
        del_req.extensions_mut().insert(AuthID::User(u.id));
        del_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let _ = service.delete(del_req).await.unwrap();

        let del_req = api::DeleteTenantRequest {
            id: create_resp.get_ref().id.clone(),
        };
        let mut del_req = Request::new(del_req);
        del_req.extensions_mut().insert(AuthID::User(u.id));
        del_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let del_resp = service.delete(del_req).await;
        assert!(del_resp.is_err());
    }

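Note on the service changes above: the storage models now use the backend-agnostic fields::Uuid wrapper, while API-side types such as AuthID::User still carry a plain uuid::Uuid, hence the explicit Into::<uuid::Uuid>::into(u.id) conversions in the tests. A minimal sketch of such a conversion pair (illustrative names only; the real wrapper lives in the storage fields module and additionally derives the Diesel traits):

use std::ops::Deref;

// Hypothetical stand-in for fields::Uuid: a newtype over uuid::Uuid that
// converts losslessly in both directions, so `.into()` works at call-sites.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DbUuid(uuid::Uuid);

impl From<uuid::Uuid> for DbUuid {
    fn from(u: uuid::Uuid) -> Self {
        DbUuid(u)
    }
}

impl From<DbUuid> for uuid::Uuid {
    fn from(u: DbUuid) -> Self {
        u.0
    }
}

impl Deref for DbUuid {
    type Target = uuid::Uuid;

    fn deref(&self) -> &uuid::Uuid {
        &self.0
    }
}
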
@ -64,8 +64,8 @@ impl UserService for User {
            let tenant_id = Uuid::from_str(&tu.tenant_id).map_err(|e| e.status())?;

            tenant::add_user(tenant::TenantUser {
                tenant_id,
                user_id: u.id,
                tenant_id: tenant_id.into(),
                user_id: u.id.into(),
                is_admin: tu.is_admin,
                is_device_admin: tu.is_device_admin,
                is_gateway_admin: tu.is_gateway_admin,
@ -138,7 +138,7 @@ impl UserService for User {

        // update
        let _ = user::update(user::User {
            id: user_id,
            id: user_id.into(),
            is_admin: req_user.is_admin,
            is_active: req_user.is_active,
            email: req_user.email.clone(),
@ -292,7 +292,9 @@ pub mod test {
            }),
        };
        let mut create_req = Request::new(create_req);
        create_req.extensions_mut().insert(AuthID::User(u.id));
        create_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let create_resp = service.create(create_req).await.unwrap();

        // get
@ -300,7 +302,9 @@ pub mod test {
            id: create_resp.get_ref().id.clone(),
        };
        let mut get_req = Request::new(get_req);
        get_req.extensions_mut().insert(AuthID::User(u.id));
        get_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let get_resp = service.get(get_req).await.unwrap();
        assert_eq!(
            Some(api::User {
@ -326,7 +330,9 @@ pub mod test {
            }),
        };
        let mut up_req = Request::new(up_req);
        up_req.extensions_mut().insert(AuthID::User(u.id));
        up_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let _ = service.update(up_req).await.unwrap();

        // get
@ -334,7 +340,9 @@ pub mod test {
            id: create_resp.get_ref().id.clone(),
        };
        let mut get_req = Request::new(get_req);
        get_req.extensions_mut().insert(AuthID::User(u.id));
        get_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let get_resp = service.get(get_req).await.unwrap();
        assert_eq!(
            Some(api::User {
@ -354,7 +362,9 @@ pub mod test {
            password: "newpassword".into(),
        };
        let mut up_req = Request::new(up_req);
        up_req.extensions_mut().insert(AuthID::User(u.id));
        up_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let _ = service.update_password(up_req).await.unwrap();

        // list
@ -363,7 +373,9 @@ pub mod test {
            limit: 10,
        };
        let mut list_req = Request::new(list_req);
        list_req.extensions_mut().insert(AuthID::User(u.id));
        list_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let list_resp = service.list(list_req).await.unwrap();
        // * Admin from migrations
        // * User that we created for auth
@ -376,14 +388,18 @@ pub mod test {
            id: create_resp.get_ref().id.clone(),
        };
        let mut del_req = Request::new(del_req);
        del_req.extensions_mut().insert(AuthID::User(u.id));
        del_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let _ = service.delete(del_req).await.unwrap();

        let del_req = api::DeleteUserRequest {
            id: create_resp.get_ref().id.clone(),
        };
        let mut del_req = Request::new(del_req);
        del_req.extensions_mut().insert(AuthID::User(u.id));
        del_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let del_resp = service.delete(del_req).await;
        assert!(del_resp.is_err());

@ -391,7 +407,9 @@ pub mod test {
            id: u.id.to_string(),
        };
        let mut del_req = Request::new(del_req);
        del_req.extensions_mut().insert(AuthID::User(u.id));
        del_req
            .extensions_mut()
            .insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
        let del_resp = service.delete(del_req).await;
        assert!(del_resp.is_err());
    }

@ -3,7 +3,8 @@ use handlebars::{no_escape, Handlebars};
use super::super::config;

pub fn run() {
    let template = r#"
    let template = vec![
        r#"
# Logging configuration
[logging]

@ -20,7 +21,9 @@ pub fn run() {

  # Log as JSON.
  json={{ logging.json }}

"#,
        #[cfg(feature = "postgres")]
        r#"
# PostgreSQL configuration.
[postgresql]

@ -46,8 +49,36 @@ pub fn run() {
  # the server-certificate is not signed by a CA in the platform certificate
  # store.
  ca_cert="{{ postgresql.ca_cert }}"
"#,
        #[cfg(feature = "sqlite")]
        r#"
# SQLite configuration.
[sqlite]

  # Sqlite DB path.
  #
  # Format example: sqlite:///<DATABASE>.
  #
  path="{{ sqlite.path }}"

  # Max open connections.
  #
  # This sets the max. number of open connections that are allowed in the
  # SQLite connection pool.
  max_open_connections={{ sqlite.max_open_connections }}

  # PRAGMAs.
  #
  # This configures the list of PRAGMAs that are executed to prepare the
  # SQLite library. For a full list of available PRAGMAs see:
  # https://www.sqlite.org/pragma.html
  pragmas=[
    {{#each sqlite.pragmas}}
    "{{this}}",
    {{/each}}
  ]
"#,
        r#"
# Redis configuration.
[redis]

@ -944,6 +975,7 @@ pub fn run() {
    kek="{{ this.kek }}"
  {{/each}}


# UI configuration.
[ui]
  # Tileserver URL.
@ -958,14 +990,14 @@ pub fn run() {
  # default tileserver_url (OSM). If you configure a different tile-server, you
  # might need to update the map_attribution.
  map_attribution="{{ui.map_attribution}}"
"#;
"#].join("\n");

    let mut reg = Handlebars::new();
    reg.register_escape_fn(no_escape);
    let conf = config::get();
    println!(
        "{}",
        reg.render_template(template, &conf)
        reg.render_template(&template, &conf)
            .expect("render configfile error")
    );
}

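Note: the configfile template above is now assembled from a Vec of fragments so that entire sections can be compiled out; #[cfg(...)] attributes on the vec! elements drop the PostgreSQL or SQLite block at compile time, and join("\n") restores a single template string for Handlebars. A stripped-down sketch of the same mechanism (illustrative content only):

// Each fragment is kept only when its feature is enabled; join() then
// produces the one template string that gets rendered.
fn build_template() -> String {
    vec![
        "# Logging configuration",
        #[cfg(feature = "postgres")]
        "# PostgreSQL configuration.",
        #[cfg(feature = "sqlite")]
        "# SQLite configuration.",
        "# Redis configuration.",
    ]
    .join("\n")
}
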
@ -43,7 +43,7 @@ pub async fn run() -> Result<()> {
            *dev_eui,
            &storage::device::DeviceChangeset {
                dev_addr: Some(Some(DevAddr::from_slice(&ds.dev_addr)?)),
                device_session: Some(Some(ds)),
                device_session: Some(Some(ds.into())),
                ..Default::default()
            },
        )

@ -5,8 +5,11 @@ use std::str::FromStr;
use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use diesel::backend::Backend;
#[cfg(feature = "postgres")]
use diesel::pg::Pg;
use diesel::sql_types::Text;
#[cfg(feature = "sqlite")]
use diesel::sqlite::Sqlite;
use diesel::{deserialize, serialize};
use serde::{Deserialize, Serialize};

@ -40,6 +43,7 @@ where
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Text, Pg> for Codec
where
    str: serialize::ToSql<Text, Pg>,
@ -49,6 +53,14 @@ where
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for Codec {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result {
        out.set_value(self.to_string());
        Ok(serialize::IsNull::No)
    }
}

impl FromStr for Codec {
    type Err = anyhow::Error;

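Note: the two ToSql implementations differ because of how each Diesel backend consumes bound values: PostgreSQL serializes into a byte buffer, while the SQLite backend requires handing over an owned value through out.set_value. A generic sketch of the same split for a hypothetical enum (assumes a crate with diesel and both backend features enabled; Display supplies to_string):

use std::fmt;

use diesel::serialize::{self, IsNull, Output, ToSql};
use diesel::sql_types::Text;

#[derive(Debug)]
pub enum Mode {
    A,
    B,
}

impl fmt::Display for Mode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

// PostgreSQL: write the textual form into the output byte buffer.
#[cfg(feature = "postgres")]
impl ToSql<Text, diesel::pg::Pg> for Mode {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, diesel::pg::Pg>) -> serialize::Result {
        use std::io::Write;
        out.write_all(self.to_string().as_bytes())?;
        Ok(IsNull::No)
    }
}

// SQLite: the backend stores owned values, so hand over a String.
#[cfg(feature = "sqlite")]
impl ToSql<Text, diesel::sqlite::Sqlite> for Mode {
    fn to_sql(&self, out: &mut Output<'_, '_, diesel::sqlite::Sqlite>) -> serialize::Result {
        out.set_value(self.to_string());
        Ok(IsNull::No)
    }
}
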
@ -19,6 +19,7 @@ pub struct Configuration {
    pub logging: Logging,
    pub postgresql: Postgresql,
    pub redis: Redis,
    pub sqlite: Sqlite,
    pub api: Api,
    pub gateway: Gateway,
    pub network: Network,
@ -90,6 +91,29 @@ impl Default for Redis {
    }
}

#[derive(Serialize, Deserialize, Clone)]
#[serde(default)]
pub struct Sqlite {
    pub path: String,
    pub pragmas: Vec<String>,
    pub max_open_connections: u32,
}

impl Default for Sqlite {
    fn default() -> Self {
        Sqlite {
            path: "sqlite://chirpstack.sqlite".into(),
            pragmas: vec![
                // Set busy_timeout to avoid manually managing transaction business/contention
                "busy_timeout = 1000".to_string(),
                // Enable foreign-keys since it is off by default
                "foreign_keys = ON".to_string(),
            ],
            max_open_connections: 4,
        }
    }
}

#[derive(Serialize, Deserialize, Clone)]
#[serde(default)]
pub struct Api {

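Note: the pragmas configured above are plain fragments ("busy_timeout = 1000"); they take effect once wrapped into PRAGMA statements and executed on every new pool connection. A standalone illustration of that translation (the actual execution hook lives in the storage setup code, which this diff excerpt does not show):

// Turns configured pragma fragments into executable statements,
// e.g. "busy_timeout = 1000" -> "PRAGMA busy_timeout = 1000;".
fn pragma_statements(pragmas: &[String]) -> Vec<String> {
    pragmas.iter().map(|p| format!("PRAGMA {};", p)).collect()
}

fn main() {
    let pragmas = vec![
        "busy_timeout = 1000".to_string(),
        "foreign_keys = ON".to_string(),
    ];
    for stmt in pragma_statements(&pragmas) {
        println!("{}", stmt); // would run once per fresh SQLite connection
    }
}
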
@ -363,7 +363,7 @@ impl Data {
        trace!("Selecting downlink gateway");

        let gw_down = helpers::select_downlink_gateway(
            Some(self.tenant.id),
            Some(self.tenant.id.into()),
            &self.device.get_device_session()?.region_config_id,
            self.network_conf.gateway_prefer_min_margin,
            self.device_gateway_rx_info.as_mut().unwrap(),
@ -519,7 +519,8 @@ impl Data {
                },
            };

            integration::ack_event(self.application.id, &self.device.variables, &pl).await;
            integration::ack_event(self.application.id.into(), &self.device.variables, &pl)
                .await;
            warn!(dev_eui = %self.device.dev_eui, device_queue_item_id = %qi.id, "Device queue-item discarded because of timeout");

            continue;
@ -549,7 +550,8 @@ impl Data {
                    .collect(),
            };

            integration::log_event(self.application.id, &self.device.variables, &pl).await;
            integration::log_event(self.application.id.into(), &self.device.variables, &pl)
                .await;
            warn!(dev_eui = %self.device.dev_eui, device_queue_item_id = %qi.id, "Device queue-item discarded because of max. payload size");

            continue;
@ -585,7 +587,8 @@ impl Data {
                    .collect(),
            };

            integration::log_event(self.application.id, &self.device.variables, &pl).await;
            integration::log_event(self.application.id.into(), &self.device.variables, &pl)
                .await;
            warn!(dev_eui = %self.device.dev_eui, device_queue_item_id = %qi.id, "Device queue-item discarded because of invalid frame-counter");

            continue;
@ -2728,7 +2731,7 @@ mod test {
                name: "max payload size error".into(),
                max_payload_size: 10,
                queue_items: vec![device_queue::DeviceQueueItem {
                    id: qi_id,
                    id: qi_id.into(),
                    dev_eui: d.dev_eui,
                    f_port: 1,
                    data: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
@ -2768,7 +2771,7 @@ mod test {
                name: "is pending".into(),
                max_payload_size: 10,
                queue_items: vec![device_queue::DeviceQueueItem {
                    id: qi_id,
                    id: qi_id.into(),
                    dev_eui: d.dev_eui,
                    f_port: 1,
                    f_cnt_down: Some(10),
@ -2800,7 +2803,7 @@ mod test {
                name: "invalid frame-counter".into(),
                max_payload_size: 10,
                queue_items: vec![device_queue::DeviceQueueItem {
                    id: qi_id,
                    id: qi_id.into(),
                    dev_eui: d.dev_eui,
                    f_port: 1,
                    data: vec![1, 2, 3],
@ -2841,14 +2844,14 @@ mod test {
                name: "valid payload".into(),
                max_payload_size: 10,
                queue_items: vec![device_queue::DeviceQueueItem {
                    id: qi_id,
                    id: qi_id.into(),
                    dev_eui: d.dev_eui,
                    f_port: 1,
                    data: vec![1, 2, 3],
                    ..Default::default()
                }],
                expected_queue_item: Some(device_queue::DeviceQueueItem {
                    id: qi_id,
                    id: qi_id.into(),
                    dev_eui: d.dev_eui,
                    f_port: 1,
                    data: vec![1, 2, 3],
@ -2874,7 +2877,7 @@ mod test {
        let d = device::partial_update(
            d.dev_eui,
            &device::DeviceChangeset {
                device_session: Some(Some(ds.clone())),
                device_session: Some(Some(ds.clone().into())),
                ..Default::default()
            },
        )
@ -3418,11 +3421,14 @@ mod test {
            dev_addr: Some(*dev_addr),
            application_id: app.id,
            device_profile_id: dp_ed.id,
            device_session: Some(internal::DeviceSession {
                dev_addr: dev_addr.to_vec(),
                nwk_s_enc_key: vec![0; 16],
                ..Default::default()
            }),
            device_session: Some(
                internal::DeviceSession {
                    dev_addr: dev_addr.to_vec(),
                    nwk_s_enc_key: vec![0; 16],
                    ..Default::default()
                }
                .into(),
            ),
            ..Default::default()
        })
        .await
@ -3435,7 +3441,7 @@ mod test {
        let d_relay = device::partial_update(
            d_relay.dev_eui,
            &device::DeviceChangeset {
                device_session: Some(Some(test.device_session.clone())),
                device_session: Some(Some(test.device_session.clone().into())),
                ..Default::default()
            },
        )
@ -3884,7 +3890,7 @@ mod test {
        let d_relay = device::partial_update(
            d_relay.dev_eui,
            &device::DeviceChangeset {
                device_session: Some(Some(test.device_session.clone())),
                device_session: Some(Some(test.device_session.clone().into())),
                ..Default::default()
            },
        )
@ -4015,7 +4021,7 @@ mod test {
            application: application::Application::default(),
            device_profile: test.device_profile.clone(),
            device: device::Device {
                device_session: Some(test.device_session.clone()),
                device_session: Some(test.device_session.clone().into()),
                ..Default::default()
            },
            network_conf: config::get_region_network("eu868").unwrap(),
@ -4126,7 +4132,7 @@ mod test {
            application: application::Application::default(),
            device_profile: test.device_profile.clone(),
            device: device::Device {
                device_session: Some(test.device_session.clone()),
                device_session: Some(test.device_session.clone().into()),
                ..Default::default()
            },
            network_conf: config::get_region_network("eu868").unwrap(),
@ -4247,7 +4253,7 @@ mod test {
            application: application::Application::default(),
            device_profile: test.device_profile.clone(),
            device: device::Device {
                device_session: Some(test.device_session.clone()),
                device_session: Some(test.device_session.clone().into()),
                ..Default::default()
            },
            network_conf: config::get_region_network("eu868").unwrap(),
@ -4504,7 +4510,7 @@ mod test {
        let d_relay = device::partial_update(
            d_relay.dev_eui,
            &device::DeviceChangeset {
                device_session: Some(Some(test.device_session.clone())),
                device_session: Some(Some(test.device_session.clone().into())),
                ..Default::default()
            },
        )

@ -239,7 +239,7 @@ mod tests {
            },
            // is_private_down is set, first gateway matches tenant.
            Test {
                tenant_id: Some(t.id),
                tenant_id: Some(t.id.into()),
                min_snr_margin: 0.0,
                rx_info: internal::DeviceGatewayRxInfo {
                    items: vec![
@ -262,7 +262,7 @@ mod tests {
            },
            // is_private_down is set, second gateway matches tenant.
            Test {
                tenant_id: Some(t.id),
                tenant_id: Some(t.id.into()),
                min_snr_margin: 0.0,
                rx_info: internal::DeviceGatewayRxInfo {
                    items: vec![

@ -182,7 +182,7 @@ impl JoinAccept<'_> {
        trace!("Select downlink gateway");

        let gw_down = helpers::select_downlink_gateway(
            Some(self.tenant.id),
            Some(self.tenant.id.into()),
            &self.uplink_frame_set.region_config_id,
            self.network_conf.gateway_prefer_min_margin,
            self.device_gateway_rx_info.as_mut().unwrap(),

@ -434,7 +434,7 @@ impl TxAck {
            ..Default::default()
        };

        integration::log_event(app.id, &dev.variables, &pl).await;
        integration::log_event(app.id.into(), &dev.variables, &pl).await;

        Ok(())
    }
@ -483,7 +483,7 @@ impl TxAck {
            tx_info: self.downlink_frame_item.as_ref().unwrap().tx_info.clone(),
        };

        integration::txack_event(app.id, &dev.variables, &pl).await;
        integration::txack_event(app.id.into(), &dev.variables, &pl).await;

        Ok(())
    }
@ -532,7 +532,7 @@ impl TxAck {
            tx_info: self.downlink_frame_item.as_ref().unwrap().tx_info.clone(),
        };

        integration::txack_event(app.id, &dev.variables, &pl).await;
        integration::txack_event(app.id.into(), &dev.variables, &pl).await;

        Ok(())
    }

@ -28,6 +28,7 @@ pub mod mock;
mod mqtt;
mod mydevices;
mod pilot_things;
#[cfg(feature = "postgres")]
mod postgresql;
mod redis;
mod thingsboard;
@ -54,6 +55,7 @@ pub async fn setup() -> Result<()> {
                .context("Setup MQTT integration")?,
            ));
        }
        #[cfg(feature = "postgres")]
        "postgresql" => integrations.push(Box::new(
            postgresql::Integration::new(&conf.integration.postgresql)
                .await
@ -533,7 +535,7 @@ async fn handle_down_command(application_id: String, pl: integration::DownlinkCo
    // Validate that the application_id from the topic is indeed the application ID to which
    // the device belongs.
    let dev = device::get(&dev_eui).await?;
    if dev.application_id != app_id {
    if Into::<Uuid>::into(dev.application_id) != app_id {
        return Err(anyhow!(
            "Application ID from topic does not match application ID from device"
        ));
@ -555,8 +557,8 @@ async fn handle_down_command(application_id: String, pl: integration::DownlinkCo

    let qi = device_queue::DeviceQueueItem {
        id: match pl.id.is_empty() {
            true => Uuid::new_v4(),
            false => Uuid::from_str(&pl.id)?,
            true => Uuid::new_v4().into(),
            false => Uuid::from_str(&pl.id)?.into(),
        },
        f_port: pl.f_port as i16,
        confirmed: pl.confirmed,

@ -118,7 +118,7 @@ mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -277,7 +277,7 @@ mod test {
            device::partial_update(
                dev.dev_eui,
                &device::DeviceChangeset {
                    device_session: Some(Some(tst.device_session_ed.clone())),
                    device_session: Some(Some(tst.device_session_ed.clone().into())),
                    ..Default::default()
                },
            )
@ -285,7 +285,7 @@
            .unwrap();

            let mut relay_dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };

@ -1,11 +1,10 @@
use anyhow::Result;
use bigdecimal::BigDecimal;
use chrono::{DateTime, Utc};
use tracing::info;

use crate::api::helpers::ToProto;
use crate::integration;
use crate::storage::{application, device, device_profile, tenant};
use crate::storage::{application, device, device_profile, fields, tenant};
use crate::uplink::{helpers, UplinkFrameSet};
use chirpstack_api::integration as integration_pb;

@ -29,8 +28,8 @@ pub async fn handle(
        margin: Some(pl.margin as i32),
        external_power_source: Some(pl.battery == 0),
        battery_level: Some(if pl.battery > 0 && pl.battery < 255 {
            let v: BigDecimal = ((pl.battery as f32) / 254.0 * 100.0).try_into()?;
            Some(v.with_scale(2))
            let v: fields::BigDecimal = ((pl.battery as f32) / 254.0 * 100.0).try_into()?;
            Some(v.with_scale(2).into())
        } else {
            None
        }),
@ -47,7 +46,7 @@ pub async fn handle(
        helpers::get_rx_timestamp(&uplink_frame_set.rx_info_set).into();

    integration::status_event(
        app.id,
        app.id.into(),
        &dev.variables,
        &integration_pb::StatusEvent {
            deduplication_id: uplink_frame_set.uplink_set_id.to_string(),
@ -203,7 +202,7 @@ pub mod test {
        assert_eq!(Some(10), d.margin);
        assert!(!d.external_power_source);
        assert_eq!(
            Some(BigDecimal::from_str("100.00").unwrap()),
            Some(bigdecimal::BigDecimal::from_str("100.00").unwrap().into()),
            d.battery_level
        );
    }

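Note on the battery handling above: in the LoRaWAN DevStatusAns payload the battery byte is 0 for external power, 255 for unknown, and 1..=254 maps linearly onto 0..100 %, which is exactly what the (battery / 254) * 100 expression computes before rounding to two decimals. A standalone check of the arithmetic:

// Battery byte semantics from DevStatusAns: 0 = external power,
// 255 = unavailable, 1..=254 = linear charge level.
fn battery_percentage(battery: u8) -> Option<f32> {
    match battery {
        0 | 255 => None,
        b => Some((b as f32) / 254.0 * 100.0),
    }
}

fn main() {
    assert_eq!(battery_percentage(254), Some(100.0));
    assert_eq!(battery_percentage(127), Some(50.0));
    assert_eq!(battery_percentage(0), None);
    assert_eq!(battery_percentage(255), None);
}
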
@ -178,7 +178,7 @@ mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -216,7 +216,7 @@ mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(&mut dev, &tst.filter_list_ans, tst.filter_list_req.as_ref());

@ -361,7 +361,7 @@ pub mod test {
        for tst in &tests {
            let mut dev = device::Device {
                dev_eui: lrwn::EUI64::from_str("0102030405060708").unwrap(),
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let block = lrwn::MACCommandSet::new(vec![lrwn::MACCommand::LinkADRAns(

@ -207,9 +207,12 @@ pub mod test {
        let dp: device_profile::DeviceProfile = Default::default();
        let mut dev = device::Device {
            dev_eui: EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
            device_session: Some(internal::DeviceSession {
                ..Default::default()
            }),
            device_session: Some(
                internal::DeviceSession {
                    ..Default::default()
                }
                .into(),
            ),
            ..Default::default()
        };

@ -472,7 +472,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };

@ -65,7 +65,7 @@ pub async fn handle(
            .collect(),
    };

    integration::log_event(app.id, &dev.variables, &log_event).await;
    integration::log_event(app.id.into(), &dev.variables, &log_event).await;

    Ok(None)
}
@ -88,19 +88,19 @@ mod test {
        integration::set_mock().await;

        let t = tenant::Tenant {
            id: Uuid::new_v4(),
            id: Uuid::new_v4().into(),
            name: "tenant".to_string(),
            ..Default::default()
        };

        let app = application::Application {
            id: Uuid::new_v4(),
            id: Uuid::new_v4().into(),
            name: "app".to_string(),
            ..Default::default()
        };

        let dp = device_profile::DeviceProfile {
            id: Uuid::new_v4(),
            id: Uuid::new_v4().into(),
            name: "dp".to_string(),
            tags: fields::KeyValue::new(
                [("dp_tag".to_string(), "dp_value".to_string())]

@ -183,7 +183,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -37,7 +37,7 @@ pub mod test {
    #[test]
    fn test_handle() {
        let mut dev = device::Device {
            device_session: Some(internal::DeviceSession::default()),
            device_session: Some(internal::DeviceSession::default().into()),
            ..Default::default()
        };
        let block = lrwn::MACCommandSet::new(vec![lrwn::MACCommand::PingSlotInfoReq(

@ -161,7 +161,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -180,7 +180,7 @@ mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(&mut dev, &tst.relay_conf_ans, tst.relay_conf_req.as_ref());

@ -48,21 +48,24 @@ pub mod test {
    #[test]
    fn test_handle() {
        let mut dev = device::Device {
            device_session: Some(internal::DeviceSession {
                tx_power_index: 3,
                min_supported_tx_power_index: 1,
                max_supported_tx_power_index: 5,
                extra_uplink_channels: [(3, Default::default())].iter().cloned().collect(),
                rx1_delay: 3,
                rx1_dr_offset: 1,
                rx2_dr: 5,
                rx2_frequency: 868900000,
                enabled_uplink_channel_indices: vec![0, 1],
                class_b_ping_slot_dr: 3,
                class_b_ping_slot_freq: 868100000,
                nb_trans: 3,
                ..Default::default()
            }),
            device_session: Some(
                internal::DeviceSession {
                    tx_power_index: 3,
                    min_supported_tx_power_index: 1,
                    max_supported_tx_power_index: 5,
                    extra_uplink_channels: [(3, Default::default())].iter().cloned().collect(),
                    rx1_delay: 3,
                    rx1_dr_offset: 1,
                    rx2_dr: 5,
                    rx2_frequency: 868900000,
                    enabled_uplink_channel_indices: vec![0, 1],
                    class_b_ping_slot_dr: 3,
                    class_b_ping_slot_freq: 868100000,
                    nb_trans: 3,
                    ..Default::default()
                }
                .into(),
            ),
            ..Default::default()
        };
        let dp = device_profile::DeviceProfile {

@ -184,7 +184,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -103,7 +103,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -139,7 +139,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };
            let resp = handle(

@ -126,7 +126,7 @@ pub mod test {

        for tst in &tests {
            let mut dev = device::Device {
                device_session: Some(tst.device_session.clone()),
                device_session: Some(tst.device_session.clone().into()),
                ..Default::default()
            };

@ -8,16 +8,16 @@ use uuid::Uuid;

use super::error::Error;
use super::schema::api_key;
use super::{error, get_async_db_conn};
use super::{error, fields, get_async_db_conn};

#[derive(Queryable, Insertable, PartialEq, Eq, Debug)]
#[diesel(table_name = api_key)]
pub struct ApiKey {
    pub id: Uuid,
    pub id: fields::Uuid,
    pub created_at: DateTime<Utc>,
    pub name: String,
    pub is_admin: bool,
    pub tenant_id: Option<Uuid>,
    pub tenant_id: Option<fields::Uuid>,
}

impl ApiKey {
@ -33,7 +33,7 @@ impl ApiKey {
impl Default for ApiKey {
    fn default() -> Self {
        ApiKey {
            id: Uuid::new_v4(),
            id: Uuid::new_v4().into(),
            created_at: Utc::now(),
            name: "".into(),
            is_admin: false,
@ -61,7 +61,7 @@ pub async fn create(ak: ApiKey) -> Result<ApiKey, Error> {
}

pub async fn delete(id: &Uuid) -> Result<(), Error> {
    let ra = diesel::delete(api_key::dsl::api_key.find(&id))
    let ra = diesel::delete(api_key::dsl::api_key.find(fields::Uuid::from(id)))
        .execute(&mut get_async_db_conn().await?)
        .await?;
    if ra == 0 {
@ -78,7 +78,7 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
        .into_boxed();

    if let Some(tenant_id) = &filters.tenant_id {
        q = q.filter(api_key::dsl::tenant_id.eq(tenant_id));
        q = q.filter(api_key::dsl::tenant_id.eq(fields::Uuid::from(tenant_id)));
    }

    Ok(q.first(&mut get_async_db_conn().await?).await?)
@ -90,7 +90,7 @@ pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result<Vec<ApiK
        .into_boxed();

    if let Some(tenant_id) = &filters.tenant_id {
        q = q.filter(api_key::dsl::tenant_id.eq(tenant_id));
        q = q.filter(api_key::dsl::tenant_id.eq(fields::Uuid::from(tenant_id)));
    }

    let items = q
@ -118,7 +118,7 @@ pub mod test {

    pub async fn get(id: &Uuid) -> Result<ApiKey, Error> {
        api_key::dsl::api_key
            .find(&id)
            .find(fields::Uuid::from(id))
            .first(&mut get_async_db_conn().await?)
            .await
            .map_err(|e| error::Error::from_diesel(e, id.to_string()))
@ -162,7 +162,7 @@ pub mod test {
            },
            FilterTest {
                filters: Filters {
                    tenant_id: ak_tenant.tenant_id,
                    tenant_id: ak_tenant.tenant_id.map(|u| u.into()),
                    is_admin: false,
                },
                keys: vec![&ak_tenant],

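Note: the fields::Uuid column wrapper used throughout this file implies a backend-dependent SQL type: a native UUID column on PostgreSQL and TEXT on SQLite. One plausible shape for the fields::sql_types aliases referenced later in this commit (an assumption; this excerpt does not include their definition):

// Hypothetical sketch of fields::sql_types: one alias per backend so that
// #[diesel(sql_type = ...)] attributes and .bind::<...>() calls stay
// backend-agnostic at the call-site.
#[cfg(feature = "postgres")]
pub type Uuid = diesel::sql_types::Uuid;
#[cfg(feature = "sqlite")]
pub type Uuid = diesel::sql_types::Text;
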
@ -4,14 +4,11 @@ use std::str::FromStr;

use anyhow::Result;
use chrono::{DateTime, Utc};
use diesel::{
    backend::Backend,
    deserialize, dsl,
    pg::Pg,
    prelude::*,
    serialize,
    sql_types::{Jsonb, Text},
};
#[cfg(feature = "sqlite")]
use diesel::sqlite::Sqlite;
use diesel::{backend::Backend, deserialize, dsl, prelude::*, serialize, sql_types::Text};
#[cfg(feature = "postgres")]
use diesel::{pg::Pg, sql_types::Jsonb};
use diesel_async::RunQueryDsl;
use serde::{Deserialize, Serialize};
use tracing::info;
@ -24,8 +21,8 @@ use super::{fields, get_async_db_conn};
#[derive(Clone, Queryable, Insertable, PartialEq, Eq, Debug)]
#[diesel(table_name = application)]
pub struct Application {
    pub id: Uuid,
    pub tenant_id: Uuid,
    pub id: fields::Uuid,
    pub tenant_id: fields::Uuid,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub name: String,
@ -48,8 +45,8 @@ impl Default for Application {
        let now = Utc::now();

        Application {
            id: Uuid::new_v4(),
            tenant_id: Uuid::nil(),
            id: Uuid::new_v4().into(),
            tenant_id: Uuid::nil().into(),
            created_at: now,
            updated_at: now,
            name: "".into(),
@ -68,7 +65,7 @@ pub struct Filters {

#[derive(Queryable, PartialEq, Eq, Debug)]
pub struct ApplicationListItem {
    pub id: Uuid,
    pub id: fields::Uuid,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub name: String,
@ -129,6 +126,7 @@ where
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Text, Pg> for IntegrationKind
where
    str: serialize::ToSql<Text, Pg>,
@ -138,8 +136,16 @@ where
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for IntegrationKind {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result {
        out.set_value(self.to_string());
        Ok(serialize::IsNull::No)
    }
}

#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow, Serialize, Deserialize)]
#[diesel(sql_type = Jsonb)]
#[diesel(sql_type = fields::sql_types::JsonT)]
pub enum IntegrationConfiguration {
    None,
    Http(HttpConfiguration),
@ -154,6 +160,7 @@ pub enum IntegrationConfiguration {
    Ifttt(IftttConfiguration),
}

#[cfg(feature = "postgres")]
impl deserialize::FromSql<Jsonb, Pg> for IntegrationConfiguration {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let value = <serde_json::Value as deserialize::FromSql<Jsonb, Pg>>::from_sql(value)?;
@ -161,6 +168,7 @@ impl deserialize::FromSql<Jsonb, Pg> for IntegrationConfiguration {
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for IntegrationConfiguration {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Pg>) -> serialize::Result {
        let value = serde_json::to_value(self)?;
@ -168,6 +176,23 @@ impl serialize::ToSql<Jsonb, Pg> for IntegrationConfiguration {
    }
}

#[cfg(feature = "sqlite")]
impl deserialize::FromSql<Text, Sqlite> for IntegrationConfiguration {
    fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let s =
            <*const str as deserialize::FromSql<diesel::sql_types::Text, Sqlite>>::from_sql(value)?;
        Ok(serde_json::from_str(unsafe { &*s })?)
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for IntegrationConfiguration {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result {
        out.set_value(serde_json::to_string(self)?);
        Ok(serialize::IsNull::No)
    }
}

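Note: on PostgreSQL the integration configuration keeps its native Jsonb mapping, while on SQLite the same value is persisted as serialized JSON text; the two Sqlite impls above are thin wrappers around a serde_json round-trip. Stripped of the Diesel plumbing, that delegation amounts to:

use serde::{Deserialize, Serialize};

// Illustrative payload type; the real enum is IntegrationConfiguration.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct HookConfig {
    url: String,
}

fn main() -> Result<(), serde_json::Error> {
    let cfg = HookConfig { url: "https://example.com/hook".into() };
    // ToSql<Text, Sqlite> boils down to this serialization ...
    let stored: String = serde_json::to_string(&cfg)?;
    // ... and FromSql<Text, Sqlite> to this deserialization.
    let loaded: HookConfig = serde_json::from_str(&stored)?;
    assert_eq!(cfg, loaded);
    Ok(())
}
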
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct HttpConfiguration {
    pub headers: HashMap<String, String>,
@ -268,7 +293,7 @@ pub struct IftttConfiguration {
#[derive(Clone, Queryable, Insertable, PartialEq, Eq, Debug)]
#[diesel(table_name = application_integration)]
pub struct Integration {
    pub application_id: Uuid,
    pub application_id: fields::Uuid,
    pub kind: IntegrationKind,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
@ -280,7 +305,7 @@ impl Default for Integration {
        let now = Utc::now();

        Integration {
            application_id: Uuid::nil(),
            application_id: Uuid::nil().into(),
            kind: IntegrationKind::Http,
            created_at: now,
            updated_at: now,
@ -305,7 +330,7 @@ pub async fn create(a: Application) -> Result<Application, Error> {

pub async fn get(id: &Uuid) -> Result<Application, Error> {
    let a = application::dsl::application
        .find(&id)
        .find(fields::Uuid::from(id))
        .first(&mut get_async_db_conn().await?)
        .await
        .map_err(|e| Error::from_diesel(e, id.to_string()))?;
@ -335,11 +360,12 @@ pub async fn update(a: Application) -> Result<Application, Error> {
}

pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result<Application, Error> {
    let app: Application = diesel::update(application::dsl::application.find(&id))
        .set(application::mqtt_tls_cert.eq(cert))
        .get_result(&mut get_async_db_conn().await?)
        .await
        .map_err(|e| Error::from_diesel(e, id.to_string()))?;
    let app: Application =
        diesel::update(application::dsl::application.find(fields::Uuid::from(id)))
            .set(application::mqtt_tls_cert.eq(cert))
            .get_result(&mut get_async_db_conn().await?)
            .await
            .map_err(|e| Error::from_diesel(e, id.to_string()))?;

    info!(
        application_id = %id,
@ -350,7 +376,7 @@ pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result<Application,
}

pub async fn delete(id: &Uuid) -> Result<(), Error> {
    let ra = diesel::delete(application::dsl::application.find(&id))
    let ra = diesel::delete(application::dsl::application.find(fields::Uuid::from(id)))
        .execute(&mut get_async_db_conn().await?)
        .await?;
    if ra == 0 {
@ -371,11 +397,18 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
        .into_boxed();

    if let Some(tenant_id) = &filters.tenant_id {
        q = q.filter(application::dsl::tenant_id.eq(tenant_id));
        q = q.filter(application::dsl::tenant_id.eq(fields::Uuid::from(tenant_id)));
    }

    if let Some(search) = &filters.search {
        q = q.filter(application::dsl::name.ilike(format!("%{}%", search)));
        #[cfg(feature = "postgres")]
        {
            q = q.filter(application::dsl::name.ilike(format!("%{}%", search)));
        }
        #[cfg(feature = "sqlite")]
        {
            q = q.filter(application::dsl::name.like(format!("%{}%", search)));
        }
    }

    Ok(q.first(&mut get_async_db_conn().await?).await?)

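Note: the duplicated cfg blocks encode a real semantic difference rather than noise: ilike is PostgreSQL-specific, while SQLite's plain LIKE is already case-insensitive for ASCII, so both branches give a comparable substring search. If the repetition keeps growing it could be folded into a macro; a hypothetical sketch (not part of this commit):

// $q: a boxed Diesel query, $col: a Text column, $search: the user input.
macro_rules! filter_name_search {
    ($q:expr, $col:expr, $search:expr) => {{
        #[cfg(feature = "postgres")]
        let q = $q.filter($col.ilike(format!("%{}%", $search)));
        #[cfg(feature = "sqlite")]
        let q = $q.filter($col.like(format!("%{}%", $search)));
        q
    }};
}
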
@ -397,11 +430,18 @@ pub async fn list(
        .into_boxed();

    if let Some(tenant_id) = &filters.tenant_id {
        q = q.filter(application::dsl::tenant_id.eq(tenant_id));
        q = q.filter(application::dsl::tenant_id.eq(fields::Uuid::from(tenant_id)));
    }

    if let Some(search) = &filters.search {
        q = q.filter(application::dsl::name.ilike(format!("%{}%", search)));
        #[cfg(feature = "postgres")]
        {
            q = q.filter(application::dsl::name.ilike(format!("%{}%", search)));
        }
        #[cfg(feature = "sqlite")]
        {
            q = q.filter(application::dsl::name.like(format!("%{}%", search)));
        }
    }

    let items = q
@ -431,7 +471,7 @@ pub async fn get_integration(
    let mut i: Integration = application_integration::dsl::application_integration
        .filter(
            application_integration::dsl::application_id
                .eq(application_id)
                .eq(fields::Uuid::from(application_id))
                .and(application_integration::dsl::kind.eq(kind)),
        )
        .first(&mut get_async_db_conn().await?)
@ -478,7 +518,7 @@ pub async fn delete_integration(application_id: &Uuid, kind: IntegrationKind) ->
    let ra = diesel::delete(
        application_integration::dsl::application_integration.filter(
            application_integration::dsl::application_id
                .eq(&application_id)
                .eq(fields::Uuid::from(application_id))
                .and(application_integration::dsl::kind.eq(&kind)),
        ),
    )
@ -497,20 +537,21 @@ pub async fn get_integrations_for_application(
    application_id: &Uuid,
) -> Result<Vec<Integration>, Error> {
    let items: Vec<Integration> = application_integration::dsl::application_integration
        .filter(application_integration::dsl::application_id.eq(&application_id))
        .filter(application_integration::dsl::application_id.eq(fields::Uuid::from(application_id)))
        .order_by(application_integration::dsl::kind)
        .load(&mut get_async_db_conn().await?)
        .await?;
    Ok(items)
}

pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>, Error> {
    #[derive(QueryableByName)]
    struct Measurement {
        #[diesel(sql_type = diesel::sql_types::Text)]
        pub key: String,
    }
#[derive(QueryableByName)]
struct Measurement {
    #[diesel(sql_type = diesel::sql_types::Text)]
    pub key: String,
}

#[cfg(feature = "postgres")]
pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>, Error> {
    let keys: Vec<Measurement> = diesel::sql_query(
        r#"
            select
@ -525,7 +566,28 @@ pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>,
                key
        "#,
    )
    .bind::<diesel::sql_types::Uuid, _>(application_id)
    .bind::<fields::sql_types::Uuid, _>(fields::Uuid::from(application_id))
    .load(&mut get_async_db_conn().await?)
    .await
    .map_err(|e| Error::from_diesel(e, application_id.to_string()))?;
    Ok(keys.iter().map(|k| k.key.clone()).collect())
}

#[cfg(feature = "sqlite")]
pub async fn get_measurement_keys(application_id: &Uuid) -> Result<Vec<String>, Error> {
    let keys: Vec<Measurement> = diesel::sql_query(
        r#"
            select distinct json_each.key as key
            from device_profile dp, json_each(dp.measurements)
            inner join device d
                on d.device_profile_id = dp.id
            where
                d.application_id = ?
            order by
                key
        "#,
    )
    .bind::<fields::sql_types::Uuid, _>(fields::Uuid::from(application_id))
    .load(&mut get_async_db_conn().await?)
    .await
    .map_err(|e| Error::from_diesel(e, application_id.to_string()))?;
@ -548,7 +610,7 @@ pub mod test {

    pub async fn create_application(tenant_id: Option<Uuid>) -> Application {
        let tenant_id = match tenant_id {
            Some(v) => v,
            Some(v) => v.into(),
            None => {
                let t = storage::tenant::test::create_tenant().await;
                t.id
@ -623,7 +685,7 @@ pub mod test {
            },
            FilterTest {
                filters: Filters {
                    tenant_id: Some(app.tenant_id),
                    tenant_id: Some(app.tenant_id.into()),
                    search: None,
                },
                apps: vec![&app],

@ -1,9 +1,9 @@
use std::collections::HashMap;
use std::fmt;
use std::ops::{Deref, DerefMut};
use std::str::FromStr;

use anyhow::{Context, Result};
use bigdecimal::BigDecimal;
use chrono::{DateTime, Duration, Utc};
use diesel::{backend::Backend, deserialize, dsl, prelude::*, serialize, sql_types::Text};
use diesel_async::RunQueryDsl;
@ -14,7 +14,7 @@ use chirpstack_api::internal;
use lrwn::{DevAddr, EUI64};

use super::schema::{application, device, device_profile, multicast_group_device, tenant};
use super::{error::Error, fields, get_async_db_conn};
use super::{db_transaction, error::Error, fields, get_async_db_conn};
use crate::api::helpers::FromProto;
use crate::config;

@ -62,6 +62,7 @@ where
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Text, diesel::pg::Pg> for DeviceClass
where
    str: serialize::ToSql<Text, diesel::pg::Pg>,
@ -77,12 +78,23 @@ where
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, diesel::sqlite::Sqlite> for DeviceClass {
    fn to_sql(
        &self,
        out: &mut serialize::Output<'_, '_, diesel::sqlite::Sqlite>,
    ) -> serialize::Result {
        out.set_value(self.to_string());
        Ok(serialize::IsNull::No)
    }
}

#[derive(Queryable, QueryableByName, Insertable, PartialEq, Debug, Clone)]
#[diesel(table_name = device)]
pub struct Device {
    pub dev_eui: EUI64,
    pub application_id: Uuid,
    pub device_profile_id: Uuid,
    pub application_id: fields::Uuid,
    pub device_profile_id: fields::Uuid,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub last_seen_at: Option<DateTime<Utc>>,
@ -90,7 +102,7 @@ pub struct Device {
    pub name: String,
    pub description: String,
    pub external_power_source: bool,
    pub battery_level: Option<BigDecimal>,
    pub battery_level: Option<fields::BigDecimal>,
    pub margin: Option<i32>,
    pub dr: Option<i16>,
    pub latitude: Option<f64>,
@ -104,7 +116,7 @@ pub struct Device {
    pub variables: fields::KeyValue,
    pub join_eui: EUI64,
    pub secondary_dev_addr: Option<DevAddr>,
    pub device_session: Option<internal::DeviceSession>,
    pub device_session: Option<fields::DeviceSession>,
}

#[derive(AsChangeset, Debug, Clone, Default)]
@ -116,10 +128,10 @@ pub struct DeviceChangeset {
    pub enabled_class: Option<DeviceClass>,
    pub join_eui: Option<EUI64>,
    pub secondary_dev_addr: Option<Option<DevAddr>>,
    pub device_session: Option<Option<internal::DeviceSession>>,
    pub device_session: Option<Option<fields::DeviceSession>>,
    pub margin: Option<i32>,
    pub external_power_source: Option<bool>,
    pub battery_level: Option<Option<BigDecimal>>,
    pub battery_level: Option<Option<fields::BigDecimal>>,
    pub scheduler_run_after: Option<Option<DateTime<Utc>>>,
    pub is_disabled: Option<bool>,
}
@ -135,12 +147,14 @@ impl Device {
    pub fn get_device_session(&self) -> Result<&internal::DeviceSession, Error> {
        self.device_session
            .as_ref()
            .map(|ds| ds.deref())
            .ok_or_else(|| Error::NotFound(self.dev_eui.to_string()))
    }

    pub fn get_device_session_mut(&mut self) -> Result<&mut internal::DeviceSession, Error> {
        self.device_session
            .as_mut()
            .map(|ds| ds.deref_mut())
            .ok_or_else(|| Error::NotFound(self.dev_eui.to_string()))
    }

@ -155,8 +169,8 @@ impl Default for Device {

        Device {
            dev_eui: EUI64::default(),
            application_id: Uuid::nil(),
            device_profile_id: Uuid::nil(),
            application_id: Uuid::nil().into(),
            device_profile_id: Uuid::nil().into(),
            created_at: now,
            updated_at: now,
            last_seen_at: None,
@ -188,14 +202,14 @@ pub struct DeviceListItem {
    pub dev_eui: EUI64,
    pub name: String,
    pub description: String,
    pub device_profile_id: Uuid,
    pub device_profile_id: fields::Uuid,
    pub device_profile_name: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub last_seen_at: Option<DateTime<Utc>>,
    pub margin: Option<i32>,
    pub external_power_source: bool,
    pub battery_level: Option<BigDecimal>,
    pub battery_level: Option<fields::BigDecimal>,
}

#[derive(Default, Clone)]
@ -223,52 +237,50 @@ pub struct DevicesDataRate {

pub async fn create(d: Device) -> Result<Device, Error> {
    let mut c = get_async_db_conn().await?;
    let d: Device = c
        .build_transaction()
        .run::<Device, Error, _>(|c| {
            Box::pin(async move {
                // use for update to lock the tenant
                let t: super::tenant::Tenant = tenant::dsl::tenant
                    .select((
                        tenant::dsl::id,
                        tenant::dsl::created_at,
                        tenant::dsl::updated_at,
                        tenant::dsl::name,
                        tenant::dsl::description,
                        tenant::dsl::can_have_gateways,
                        tenant::dsl::max_device_count,
                        tenant::dsl::max_gateway_count,
                        tenant::dsl::private_gateways_up,
                        tenant::dsl::private_gateways_down,
                        tenant::dsl::tags,
                    ))
                    .inner_join(application::table)
                    .filter(application::dsl::id.eq(&d.application_id))
                    .for_update()
                    .first(c)
                    .await?;
    let d: Device = db_transaction::<Device, Error, _>(&mut c, |c| {
        Box::pin(async move {
            let query = tenant::dsl::tenant
                .select((
                    tenant::dsl::id,
                    tenant::dsl::created_at,
                    tenant::dsl::updated_at,
                    tenant::dsl::name,
                    tenant::dsl::description,
                    tenant::dsl::can_have_gateways,
                    tenant::dsl::max_device_count,
                    tenant::dsl::max_gateway_count,
                    tenant::dsl::private_gateways_up,
                    tenant::dsl::private_gateways_down,
                    tenant::dsl::tags,
                ))
                .inner_join(application::table)
                .filter(application::dsl::id.eq(&d.application_id));
            // use for update to lock the tenant
            #[cfg(feature = "postgres")]
            let query = query.for_update();
            let t: super::tenant::Tenant = query.first(c).await?;

                let dev_count: i64 = device::dsl::device
                    .select(dsl::count_star())
                    .inner_join(application::table)
                    .filter(application::dsl::tenant_id.eq(&t.id))
                    .first(c)
                    .await?;
            let dev_count: i64 = device::dsl::device
                .select(dsl::count_star())
                .inner_join(application::table)
                .filter(application::dsl::tenant_id.eq(&t.id))
                .first(c)
                .await?;

                if t.max_device_count != 0 && dev_count as i32 >= t.max_device_count {
                    return Err(Error::NotAllowed(
                        "Max number of devices exceeded for tenant".into(),
                    ));
                }
            if t.max_device_count != 0 && dev_count as i32 >= t.max_device_count {
                return Err(Error::NotAllowed(
                    "Max number of devices exceeded for tenant".into(),
                ));
            }

                diesel::insert_into(device::table)
                    .values(&d)
                    .get_result(c)
                    .await
                    .map_err(|e| Error::from_diesel(e, d.dev_eui.to_string()))
            })
            diesel::insert_into(device::table)
                .values(&d)
                .get_result(c)
                .await
                .map_err(|e| Error::from_diesel(e, d.dev_eui.to_string()))
        })
        .await?;
    })
    .await?;
    info!(dev_eui = %d.dev_eui, "Device created");
    Ok(d)
}

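Note: db_transaction replaces the direct c.build_transaction().run(...) call so the same call-site compiles against either backend; the scoped-futures crate added to Cargo.lock points at diesel-async's transaction API. A hedged sketch of what such a helper can look like (illustrative signature and DbConn alias; the real definition lives elsewhere in this commit and may differ):

use diesel_async::scoped_futures::ScopedBoxFuture;
use diesel_async::AsyncConnection;

// Runs `callback` inside a transaction on whatever pooled connection type
// the enabled backend feature provides (DbConn is assumed here).
pub async fn db_transaction<'a, R, E, F>(conn: &mut DbConn, callback: F) -> Result<R, E>
where
    F: for<'r> FnOnce(&'r mut DbConn) -> ScopedBoxFuture<'a, 'r, Result<R, E>> + Send + 'a,
    E: From<diesel::result::Error> + Send + 'a,
    R: Send + 'a,
{
    conn.transaction(callback).await
}
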
@ -304,130 +316,129 @@ pub async fn get_for_phypayload_and_incr_f_cnt_up(

    let mut c = get_async_db_conn().await?;

    c.build_transaction()
        .run::<ValidationStatus, Error, _>(|c| {
            Box::pin(async move {
                let mut devices: Vec<Device> = device::dsl::device
                    .filter(
                        device::dsl::dev_addr
                            .eq(&dev_addr)
                            .or(device::dsl::secondary_dev_addr.eq(&dev_addr)),
                    )
                    .filter(device::dsl::is_disabled.eq(false))
                    .for_update()
                    .load(c)
                    .await?;
    db_transaction::<ValidationStatus, Error, _>(&mut c, |c| {
        Box::pin(async move {
            let query = device::dsl::device
                .filter(
                    device::dsl::dev_addr
                        .eq(&dev_addr)
                        .or(device::dsl::secondary_dev_addr.eq(&dev_addr)),
                )
                .filter(device::dsl::is_disabled.eq(false));
            #[cfg(feature = "postgres")]
            let query = query.for_update();
            let mut devices: Vec<Device> = query.load(c).await?;

                if devices.is_empty() {
                    return Err(Error::NotFound(dev_addr.to_string()));
            if devices.is_empty() {
                return Err(Error::NotFound(dev_addr.to_string()));
            }

            for d in &mut devices {
                let mut sessions = vec![];

                if let Some(ds) = &d.device_session {
                    sessions.push(ds.clone());
                    if let Some(ds) = &ds.pending_rejoin_device_session {
                        sessions.push(ds.as_ref().into());
                    }
                }

                for d in &mut devices {
                    let mut sessions = vec![];

                    if let Some(ds) = &d.device_session {
                        sessions.push(ds.clone());
                        if let Some(ds) = &ds.pending_rejoin_device_session {
                            sessions.push(*ds.clone());
                        }
                for ds in &mut sessions {
                    if ds.dev_addr != dev_addr.to_vec() {
                        continue;
                    }

                    for ds in &mut sessions {
                        if ds.dev_addr != dev_addr.to_vec() {
                            continue;
                    // Get the full 32bit frame-counter.
                    let full_f_cnt = get_full_f_cnt_up(ds.f_cnt_up, f_cnt_orig);
                    let f_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.f_nwk_s_int_key)?;
                    let s_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.s_nwk_s_int_key)?;

                    // Check both the full frame-counter and the received frame-counter
                    // truncated to the 16LSB.
                    // The latter is needed in case of a frame-counter reset as the
                    // GetFullFCntUp will think the 16LSB has rolled over and will
                    // increment the 16MSB bit.
                    let mut mic_ok = false;
                    for f_cnt in [full_f_cnt, f_cnt_orig] {
                        // Set the full f_cnt.
                        if let lrwn::Payload::MACPayload(pl) = &mut phy.payload {
                            pl.fhdr.f_cnt = f_cnt;
                        }

                        // Get the full 32bit frame-counter.
                        let full_f_cnt = get_full_f_cnt_up(ds.f_cnt_up, f_cnt_orig);
                        let f_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.f_nwk_s_int_key)?;
                        let s_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.s_nwk_s_int_key)?;

                        // Check both the full frame-counter and the received frame-counter
                        // truncated to the 16LSB.
                        // The latter is needed in case of a frame-counter reset as the
                        // GetFullFCntUp will think the 16LSB has rolled over and will
                        // increment the 16MSB bit.
                        let mut mic_ok = false;
                        for f_cnt in [full_f_cnt, f_cnt_orig] {
                            // Set the full f_cnt.
                            if let lrwn::Payload::MACPayload(pl) = &mut phy.payload {
                                pl.fhdr.f_cnt = f_cnt;
                            }

                            mic_ok = phy
                                .validate_uplink_data_mic(
                                    ds.mac_version().from_proto(),
                                    ds.conf_f_cnt,
                                    tx_dr,
                                    tx_ch,
                                    &f_nwk_s_int_key,
                                    &s_nwk_s_int_key,
                                )
                                .context("Validate MIC")?;

                            if mic_ok {
                                break;
                            }
                        }
                    mic_ok = phy
                        .validate_uplink_data_mic(
                            ds.mac_version().from_proto(),
                            ds.conf_f_cnt,
                            tx_dr,
                            tx_ch,
                            &f_nwk_s_int_key,
                            &s_nwk_s_int_key,
                        )
                        .context("Validate MIC")?;

                        if mic_ok {
                            let full_f_cnt = if let lrwn::Payload::MACPayload(pl) = &phy.payload {
                                pl.fhdr.f_cnt
                            } else {
                                0
                            };

                            if let Some(relay) = &ds.relay {
                                if !relayed && relay.ed_relay_only {
                                    info!(
                                        dev_eui = %d.dev_eui,
                                        "Only communication through relay is allowed"
                                    );
                                    return Err(Error::NotFound(dev_addr.to_string()));
                                }
                            }

                            if full_f_cnt >= ds.f_cnt_up {
                                // We immediately save the device-session to make sure that concurrent calls for
                                // the same uplink will fail on the frame-counter validation.
                                let ds_f_cnt_up = ds.f_cnt_up;
                                ds.f_cnt_up = full_f_cnt + 1;

                                let _ = diesel::update(device::dsl::device.find(d.dev_eui))
                                    .set(device::device_session.eq(&ds.clone()))
                                    .execute(c)
                                    .await?;

                                // We do return the device-session with original frame-counter
                                ds.f_cnt_up = ds_f_cnt_up;
                                d.device_session = Some(ds.clone());
                                return Ok(ValidationStatus::Ok(full_f_cnt, d.clone()));
                            } else if ds.skip_f_cnt_check {
                                // re-transmission or frame-counter reset
                                ds.f_cnt_up = 0;
                                d.device_session = Some(ds.clone());
                                return Ok(ValidationStatus::Ok(full_f_cnt, d.clone()));
                            } else if full_f_cnt == (ds.f_cnt_up - 1) {
                                // re-transmission, the frame-counter did not increment
                                d.device_session = Some(ds.clone());
                                return Ok(ValidationStatus::Retransmission(full_f_cnt, d.clone()));
                            } else {
                                d.device_session = Some(ds.clone());
                                return Ok(ValidationStatus::Reset(full_f_cnt, d.clone()));
                            }
                        }

                        // Restore the original f_cnt.
                        if let lrwn::Payload::MACPayload(pl) = &mut phy.payload {
                            pl.fhdr.f_cnt = f_cnt_orig;
                            break;
                        }
                    }
                }

                Err(Error::InvalidMIC)
            })
                    if mic_ok {
                        let full_f_cnt = if let lrwn::Payload::MACPayload(pl) = &phy.payload {
                            pl.fhdr.f_cnt
                        } else {
                            0
                        };

                        if let Some(relay) = &ds.relay {
                            if !relayed && relay.ed_relay_only {
                                info!(
                                    dev_eui = %d.dev_eui,
                                    "Only communication through relay is allowed"
                                );
                                return Err(Error::NotFound(dev_addr.to_string()));
                            }
                        }

                        if full_f_cnt >= ds.f_cnt_up {
                            // We immediately save the device-session to make sure that concurrent calls for
                            // the same uplink will fail on the frame-counter validation.
                            let ds_f_cnt_up = ds.f_cnt_up;
                            ds.f_cnt_up = full_f_cnt + 1;

                            let _ = diesel::update(device::dsl::device.find(d.dev_eui))
                                .set(device::device_session.eq(&ds.clone()))
                                .execute(c)
                                .await?;

                            // We do return the device-session with original frame-counter
                            ds.f_cnt_up = ds_f_cnt_up;
                            d.device_session = Some(ds.clone());
                            return Ok(ValidationStatus::Ok(full_f_cnt, d.clone()));
                        } else if ds.skip_f_cnt_check {
                            // re-transmission or frame-counter reset
                            ds.f_cnt_up = 0;
                            d.device_session = Some(ds.clone());
                            return Ok(ValidationStatus::Ok(full_f_cnt, d.clone()));
                        } else if full_f_cnt == (ds.f_cnt_up - 1) {
                            // re-transmission, the frame-counter did not increment
                            d.device_session = Some(ds.clone());
                            return Ok(ValidationStatus::Retransmission(full_f_cnt, d.clone()));
                        } else {
                            d.device_session = Some(ds.clone());
                            return Ok(ValidationStatus::Reset(full_f_cnt, d.clone()));
                        }
                    }

                    // Restore the original f_cnt.
                    if let lrwn::Payload::MACPayload(pl) = &mut phy.payload {
                        pl.fhdr.f_cnt = f_cnt_orig;
                    }
                }
            }

            Err(Error::InvalidMIC)
        })
        .await
    })
    .await
}
|
||||
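The loop above tries the MIC with the reconstructed full frame-counter first and falls back to the raw 16-bit value, so that a device whose counter was reset still validates. A minimal sketch of the 32-bit reconstruction the comments refer to; the function name and exact rollover handling here are illustrative, not the crate's implementation:

fn full_f_cnt_up(next_expected: u32, rx_16lsb: u32) -> u32 {
    // Keep the 16 MSB of the next expected counter.
    let msb = next_expected & 0xffff_0000;
    let lsb = rx_16lsb & 0x0000_ffff;
    if lsb >= (next_expected & 0x0000_ffff) {
        // Same 16 MSB epoch as the expected counter.
        msb | lsb
    } else {
        // The 16 LSB wrapped since the last uplink: bump the 16 MSB.
        msb.wrapping_add(0x0001_0000) | lsb
    }
}

For example, with an expected counter of 0x0001_fff0 a received LSB of 0x0005 yields 0x0002_0005. A genuine counter reset (the device restarts at 0) would be misread as a rollover here, which is exactly why the loop also checks the un-reconstructed 16-bit value.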
pub async fn get_for_phypayload(
@ -462,7 +473,7 @@ pub async fn get_for_phypayload(
    if let Some(ds) = &d.device_session {
        sessions.push(ds.clone());
        if let Some(ds) = &ds.pending_rejoin_device_session {
            sessions.push(*ds.clone());
            sessions.push(ds.as_ref().into());
        }
    }

@ -559,15 +570,25 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
    .into_boxed();

    if let Some(application_id) = &filters.application_id {
        q = q.filter(device::dsl::application_id.eq(application_id));
        q = q.filter(device::dsl::application_id.eq(fields::Uuid::from(application_id)));
    }

    if let Some(search) = &filters.search {
        q = q.filter(device::dsl::name.ilike(format!("%{}%", search)));
        #[cfg(feature = "postgres")]
        {
            q = q.filter(device::dsl::name.ilike(format!("%{}%", search)));
        }
        #[cfg(feature = "sqlite")]
        {
            q = q.filter(device::dsl::name.like(format!("%{}%", search)));
        }
    }
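This `ilike`/`like` split recurs in several query builders in this commit. A hypothetical helper folding the two branches into one place; `BoxedDeviceQuery` stands in for the boxed query type produced by `.into_boxed()` in this file and is an assumption, not an actual alias in the codebase. Note the behaviour is close but not identical: SQLite's LIKE is case-insensitive only for ASCII, while Postgres ILIKE covers the full character set.

// Hypothetical helper; mirrors the cfg pattern used in get_count() and list().
fn apply_name_search(q: BoxedDeviceQuery, search: &str) -> BoxedDeviceQuery {
    let pattern = format!("%{}%", search);
    #[cfg(feature = "postgres")]
    return q.filter(device::dsl::name.ilike(pattern)); // case-insensitive for all characters
    #[cfg(feature = "sqlite")]
    return q.filter(device::dsl::name.like(pattern)); // ASCII-only case-insensitivity
}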

    if let Some(multicast_group_id) = &filters.multicast_group_id {
        q = q.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id));
        q = q.filter(
            multicast_group_device::dsl::multicast_group_id
                .eq(fields::Uuid::from(multicast_group_id)),
        );
    }

    Ok(q.first(&mut get_async_db_conn().await?).await?)
@ -598,15 +619,25 @@ pub async fn list(
    .into_boxed();

    if let Some(application_id) = &filters.application_id {
        q = q.filter(device::dsl::application_id.eq(application_id));
        q = q.filter(device::dsl::application_id.eq(fields::Uuid::from(application_id)));
    }

    if let Some(search) = &filters.search {
        q = q.filter(device::dsl::name.ilike(format!("%{}%", search)));
        #[cfg(feature = "postgres")]
        {
            q = q.filter(device::dsl::name.ilike(format!("%{}%", search)));
        }
        #[cfg(feature = "sqlite")]
        {
            q = q.filter(device::dsl::name.like(format!("%{}%", search)));
        }
    }

    if let Some(multicast_group_id) = &filters.multicast_group_id {
        q = q.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id));
        q = q.filter(
            multicast_group_device::dsl::multicast_group_id
                .eq(fields::Uuid::from(multicast_group_id)),
        );
    }

    q.order_by(device::dsl::name)
@ -617,6 +648,7 @@ pub async fn list(
    .map_err(|e| Error::from_diesel(e, "".into()))
}

#[cfg(feature = "postgres")]
pub async fn get_active_inactive(tenant_id: &Option<Uuid>) -> Result<DevicesActiveInactive, Error> {
    diesel::sql_query(r#"
        with device_active_inactive as (
@ -637,11 +669,43 @@ pub async fn get_active_inactive(tenant_id: &Option<Uuid>) -> Result<DevicesActi
        from
            device_active_inactive
    "#)
    .bind::<diesel::sql_types::Nullable<diesel::sql_types::Uuid>, _>(tenant_id)
    .bind::<diesel::sql_types::Nullable<fields::sql_types::Uuid>, _>(tenant_id.map(fields::Uuid::from))
    .get_result(&mut get_async_db_conn().await?).await
    .map_err(|e| Error::from_diesel(e, "".into()))
}

#[cfg(feature = "sqlite")]
pub async fn get_active_inactive(tenant_id: &Option<Uuid>) -> Result<DevicesActiveInactive, Error> {
    diesel::sql_query(
        r#"
        with device_active_inactive as (
            select
                dp.uplink_interval * 1.5 as uplink_interval,
                d.last_seen_at as last_seen_at,
                (unixepoch('now') - unixepoch(last_seen_at)) as not_seen_duration
            from
                device d
            inner join device_profile dp
                on d.device_profile_id = dp.id
            where
                ?1 is null or dp.tenant_id = ?1
        )
        select
            coalesce(sum(case when last_seen_at is null then 1 end), 0) as never_seen_count,
            coalesce(sum(case when not_seen_duration > uplink_interval then 1 end), 0) as inactive_count,
            coalesce(sum(case when not_seen_duration <= uplink_interval then 1 end), 0) as active_count
        from
            device_active_inactive
        "#,
    )
    .bind::<diesel::sql_types::Nullable<fields::sql_types::Uuid>, _>(
        tenant_id.map(fields::Uuid::from),
    )
    .get_result(&mut get_async_db_conn().await?)
    .await
    .map_err(|e| Error::from_diesel(e, "".into()))
}

pub async fn get_data_rates(tenant_id: &Option<Uuid>) -> Result<Vec<DevicesDataRate>, Error> {
    let mut q = device::dsl::device
        .inner_join(device_profile::table)
@ -655,7 +719,7 @@ pub async fn get_data_rates(tenant_id: &Option<Uuid>) -> Result<Vec<DevicesDataR
    .into_boxed();

    if let Some(id) = &tenant_id {
        q = q.filter(device_profile::dsl::tenant_id.eq(id));
        q = q.filter(device_profile::dsl::tenant_id.eq(fields::Uuid::from(id)));
    }

    q.load(&mut get_async_db_conn().await?)
@ -665,28 +729,60 @@ pub async fn get_data_rates(tenant_id: &Option<Uuid>) -> Result<Vec<DevicesDataR

pub async fn get_with_class_b_c_queue_items(limit: usize) -> Result<Vec<Device>> {
    let mut c = get_async_db_conn().await?;
    c.build_transaction()
        .run::<Vec<Device>, Error, _>(|c| {
            Box::pin(async {
                let conf = config::get();
    db_transaction::<Vec<Device>, Error, _>(&mut c, |c| {
        Box::pin(async {
            let conf = config::get();

            // This query will:
            // * Select the devices for which a Class-B or Class-C downlink can be scheduled.
            // * Lock the device records for update with skip locked such that other
            //   ChirpStack instances are able to do the same for the remaining devices.
            // * Update the scheduler_run_after for these devices to now() + 2 * scheduler
            //   interval to avoid concurrency issues (other ChirpStack instance scheduling
            //   the same queue items).
            //
            // This way, we do not have to keep the device records locked until the scheduler
            // finishes its batch as the same set of devices will not be returned until after
            // the updated scheduler_run_after. Only if the scheduler takes more time than 2x the
            // interval (the scheduler is still working on processing the batch after 2 x interval)
            // this might cause issues.
            // The alternative would be to keep the transaction open for a long time + keep
            // the device records locked during this time which could cause issues as well.
            diesel::sql_query(
                r#"
            // This query will:
            // * Select the devices for which a Class-B or Class-C downlink can be scheduled.
            // * Lock the device records for update with skip locked such that other
            //   ChirpStack instances are able to do the same for the remaining devices.
            // * Update the scheduler_run_after for these devices to now() + 2 * scheduler
            //   interval to avoid concurrency issues (other ChirpStack instance scheduling
            //   the same queue items).
            //
            // This way, we do not have to keep the device records locked until the scheduler
            // finishes its batch as the same set of devices will not be returned until after
            // the updated scheduler_run_after. Only if the scheduler takes more time than 2x the
            // interval (the scheduler is still working on processing the batch after 2 x interval)
            // this might cause issues.
            // The alternative would be to keep the transaction open for a long time + keep
            // the device records locked during this time which could cause issues as well.
            diesel::sql_query(if cfg!(feature = "sqlite") {
                r#"
                update
                    device
                set
                    scheduler_run_after = ?3
                where
                    dev_eui in (
                        select
                            d.dev_eui
                        from
                            device d
                        where
                            d.enabled_class in ('B', 'C')
                            and (d.scheduler_run_after is null or d.scheduler_run_after < ?2)
                            and d.is_disabled = FALSE
                            and exists (
                                select
                                    1
                                from
                                    device_queue_item dq
                                where
                                    dq.dev_eui = d.dev_eui
                                    and not (
                                        -- pending queue-item with timeout_after in the future
                                        (dq.is_pending = true and dq.timeout_after > ?2)
                                    )
                            )
                        order by d.dev_eui
                        limit ?1
                    )
                returning *
                "#
            } else {
                r#"
                update
                    device
                set
@ -718,20 +814,20 @@ pub async fn get_with_class_b_c_queue_items(limit: usize) -> Result<Vec<Device>>
                    for update skip locked
                )
                returning *
                "#,
            )
            .bind::<diesel::sql_types::Integer, _>(limit as i32)
            .bind::<diesel::sql_types::Timestamptz, _>(Utc::now())
            .bind::<diesel::sql_types::Timestamptz, _>(
                Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(),
            )
            .load(c)
            .await
            .map_err(|e| Error::from_diesel(e, "".into()))
                "#
            })
            .bind::<diesel::sql_types::Integer, _>(limit as i32)
            .bind::<fields::sql_types::Timestamptz, _>(Utc::now())
            .bind::<fields::sql_types::Timestamptz, _>(
                Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(),
            )
            .load(c)
            .await
            .map_err(|e| Error::from_diesel(e, "".into()))
        })
        .await
        .context("Get with Class B/C queue-items transaction")
    })
    .await
    .context("Get with Class B/C queue-items transaction")
}
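The query claims a batch by pushing scheduler_run_after two intervals into the future instead of holding row locks for the whole batch. A minimal sketch of that claim window and the eligibility check it produces, under the stated assumption that the scheduler runs once per interval (names are illustrative):

use chrono::{DateTime, Duration, Utc};

// Rows claimed now become eligible again only after two intervals, so the very
// next scheduler pass (one interval later) skips them.
fn claim_deadline(now: DateTime<Utc>, interval: Duration) -> DateTime<Utc> {
    now + interval * 2
}

// Mirrors the SQL predicate: scheduler_run_after is null or < now.
fn eligible(scheduler_run_after: Option<DateTime<Utc>>, now: DateTime<Utc>) -> bool {
    scheduler_run_after.map_or(true, |t| t < now)
}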
// GetFullFCntUp returns the full 32bit frame-counter, given the fCntUp which
@ -786,9 +882,10 @@ pub mod test {
    };

    let application_id = match application_id {
        Some(v) => v,
        Some(v) => v.into(),
        None => {
            let a = storage::application::test::create_application(Some(tenant_id)).await;
            let a =
                storage::application::test::create_application(Some(tenant_id.into())).await;
            a.id
        }
    };
@ -797,7 +894,7 @@ pub mod test {
        name: "test-dev".into(),
        dev_eui,
        application_id,
        device_profile_id,
        device_profile_id: device_profile_id.into(),
        ..Default::default()
    };

@ -808,8 +905,12 @@ pub mod test {
    async fn test_device() {
        let _guard = test::prepare().await;
        let dp = storage::device_profile::test::create_device_profile(None).await;
        let mut d =
            create_device(EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), dp.id, None).await;
        let mut d = create_device(
            EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
            dp.id.into(),
            None,
        )
        .await;

        // get
        let d_get = get(&d.dev_eui).await.unwrap();
@ -858,7 +959,7 @@ pub mod test {
        },
        FilterTest {
            filters: Filters {
                application_id: Some(d.application_id),
                application_id: Some(d.application_id.into()),
                multicast_group_id: None,
                search: None,
            },
@ -906,7 +1007,12 @@ pub mod test {
    async fn test_get_with_class_b_c_queue_items() {
        let _guard = test::prepare().await;
        let dp = storage::device_profile::test::create_device_profile(None).await;
        let d = create_device(EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), dp.id, None).await;
        let d = create_device(
            EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
            dp.id.into(),
            None,
        )
        .await;

        // nothing in the queue
        let res = get_with_class_b_c_queue_items(10).await.unwrap();
@ -1057,24 +1163,27 @@ pub mod test {
            name: "0101010101010101".into(),
            dev_eui: EUI64::from_be_bytes([1, 1, 1, 1, 1, 1, 1, 1]),
            dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])),
            device_session: Some(internal::DeviceSession {
                dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                s_nwk_s_int_key: vec![
                    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                    0x01, 0x01, 0x01, 0x01,
                ],
                f_nwk_s_int_key: vec![
                    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                    0x01, 0x01, 0x01, 0x01,
                ],
                nwk_s_enc_key: vec![
                    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                    0x01, 0x01, 0x01, 0x01,
                ],
                f_cnt_up: 100,
                skip_f_cnt_check: true,
                ..Default::default()
            }),
            device_session: Some(
                internal::DeviceSession {
                    dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                    s_nwk_s_int_key: vec![
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01,
                    ],
                    f_nwk_s_int_key: vec![
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01,
                    ],
                    nwk_s_enc_key: vec![
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01,
                    ],
                    f_cnt_up: 100,
                    skip_f_cnt_check: true,
                    ..Default::default()
                }
                .into(),
            ),
            ..Default::default()
        },
        Device {
@ -1083,23 +1192,26 @@ pub mod test {
            name: "0202020202020202".into(),
            dev_eui: EUI64::from_be_bytes([2, 2, 2, 2, 2, 2, 2, 2]),
            dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])),
            device_session: Some(internal::DeviceSession {
                dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                s_nwk_s_int_key: vec![
                    0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                    0x02, 0x02, 0x02, 0x02,
                ],
                f_nwk_s_int_key: vec![
                    0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                    0x02, 0x02, 0x02, 0x02,
                ],
                nwk_s_enc_key: vec![
                    0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                    0x02, 0x02, 0x02, 0x02,
                ],
                f_cnt_up: 200,
                ..Default::default()
            }),
            device_session: Some(
                internal::DeviceSession {
                    dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                    s_nwk_s_int_key: vec![
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02,
                    ],
                    f_nwk_s_int_key: vec![
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02,
                    ],
                    nwk_s_enc_key: vec![
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02,
                    ],
                    f_cnt_up: 200,
                    ..Default::default()
                }
                .into(),
            ),
            ..Default::default()
        },
        Device {
@ -1109,40 +1221,43 @@ pub mod test {
            dev_eui: EUI64::from_be_bytes([3, 3, 3, 3, 3, 3, 3, 3]),
            dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])),
            secondary_dev_addr: Some(DevAddr::from_be_bytes([4, 3, 2, 1])),
            device_session: Some(internal::DeviceSession {
                dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                s_nwk_s_int_key: vec![
                    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                    0x03, 0x03, 0x03, 0x03,
                ],
                f_nwk_s_int_key: vec![
                    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                    0x03, 0x03, 0x03, 0x03,
                ],
                nwk_s_enc_key: vec![
                    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                    0x03, 0x03, 0x03, 0x03,
                ],
                f_cnt_up: 300,
                pending_rejoin_device_session: Some(Box::new(internal::DeviceSession {
                    dev_addr: vec![0x04, 0x03, 0x02, 0x01],
            device_session: Some(
                internal::DeviceSession {
                    dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                    s_nwk_s_int_key: vec![
                        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
                        0x04, 0x04, 0x04, 0x04,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03,
                    ],
                    f_nwk_s_int_key: vec![
                        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
                        0x04, 0x04, 0x04, 0x04,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03,
                    ],
                    nwk_s_enc_key: vec![
                        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
                        0x04, 0x04, 0x04, 0x04,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03,
                    ],
                    f_cnt_up: 0,
                    f_cnt_up: 300,
                    pending_rejoin_device_session: Some(Box::new(internal::DeviceSession {
                        dev_addr: vec![0x04, 0x03, 0x02, 0x01],
                        s_nwk_s_int_key: vec![
                            0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
                            0x04, 0x04, 0x04, 0x04, 0x04,
                        ],
                        f_nwk_s_int_key: vec![
                            0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
                            0x04, 0x04, 0x04, 0x04, 0x04,
                        ],
                        nwk_s_enc_key: vec![
                            0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
                            0x04, 0x04, 0x04, 0x04, 0x04,
                        ],
                        f_cnt_up: 0,
                        ..Default::default()
                    })),
                    ..Default::default()
                })),
                ..Default::default()
            }),
                }
                .into(),
            ),
            ..Default::default()
        },
        Device {
@ -1151,23 +1266,26 @@ pub mod test {
            name: "0505050505050505".into(),
            dev_eui: EUI64::from_be_bytes([5, 5, 5, 5, 5, 5, 5, 5]),
            dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])),
            device_session: Some(internal::DeviceSession {
                dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                s_nwk_s_int_key: vec![
                    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
                    0x05, 0x05, 0x05, 0x05,
                ],
                f_nwk_s_int_key: vec![
                    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
                    0x05, 0x05, 0x05, 0x05,
                ],
                nwk_s_enc_key: vec![
                    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
                    0x05, 0x05, 0x05, 0x05,
                ],
                f_cnt_up: (1 << 16) + 1,
                ..Default::default()
            }),
            device_session: Some(
                internal::DeviceSession {
                    dev_addr: vec![0x01, 0x02, 0x03, 0x04],
                    s_nwk_s_int_key: vec![
                        0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
                        0x05, 0x05, 0x05, 0x05,
                    ],
                    f_nwk_s_int_key: vec![
                        0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
                        0x05, 0x05, 0x05, 0x05,
                    ],
                    nwk_s_enc_key: vec![
                        0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
                        0x05, 0x05, 0x05, 0x05,
                    ],
                    f_cnt_up: (1 << 16) + 1,
                    ..Default::default()
                }
                .into(),
            ),
            ..Default::default()
        },
    ];
@ -7,8 +7,8 @@ use tracing::info;
use lrwn::{AES128Key, EUI64};

use super::error::Error;
use super::get_async_db_conn;
use super::schema::device_keys;
use super::{db_transaction, fields, get_async_db_conn};

#[derive(Queryable, Insertable, AsChangeset, PartialEq, Eq, Debug, Clone)]
#[diesel(table_name = device_keys)]
@ -18,7 +18,7 @@ pub struct DeviceKeys {
    pub updated_at: DateTime<Utc>,
    pub nwk_key: AES128Key,
    pub app_key: AES128Key,
    pub dev_nonces: Vec<Option<i32>>,
    pub dev_nonces: fields::DevNonces,
    pub join_nonce: i32,
}

@ -38,7 +38,7 @@ impl Default for DeviceKeys {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00,
        ]),
        dev_nonces: Vec::new(),
        dev_nonces: Vec::new().into(),
        join_nonce: 0,
    }
}
@ -94,8 +94,9 @@ pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> {
}

pub async fn set_dev_nonces(dev_eui: &EUI64, nonces: &[i32]) -> Result<DeviceKeys, Error> {
    let nonces: Vec<Option<i32>> = nonces.iter().map(|v| Some(*v)).collect();
    let dk: DeviceKeys = diesel::update(device_keys::dsl::device_keys.find(dev_eui))
        .set(device_keys::dev_nonces.eq(nonces))
        .set(device_keys::dev_nonces.eq(fields::DevNonces::from(nonces)))
        .get_result(&mut get_async_db_conn().await?)
        .await
        .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
@ -111,36 +112,35 @@ pub async fn validate_incr_join_and_store_dev_nonce(
    dev_nonce: i32,
) -> Result<DeviceKeys, Error> {
    let mut c = get_async_db_conn().await?;
    let dk: DeviceKeys = c
        .build_transaction()
        .run::<DeviceKeys, Error, _>(|c| {
            Box::pin(async move {
                let mut dk: DeviceKeys = device_keys::dsl::device_keys
                    .find(&dev_eui)
                    .for_update()
                    .first(c)
                    .await
                    .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;
    let dk: DeviceKeys = db_transaction::<DeviceKeys, Error, _>(&mut c, |c| {
        Box::pin(async move {
            let query = device_keys::dsl::device_keys.find(&dev_eui);
            #[cfg(feature = "postgres")]
            let query = query.for_update();
            let mut dk: DeviceKeys = query
                .first(c)
                .await
                .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?;

                if dk.dev_nonces.contains(&(Some(dev_nonce))) {
                    return Err(Error::InvalidDevNonce);
                }
            if dk.dev_nonces.contains(&(Some(dev_nonce))) {
                return Err(Error::InvalidDevNonce);
            }

                dk.dev_nonces.push(Some(dev_nonce));
                dk.join_nonce += 1;
            dk.dev_nonces.push(Some(dev_nonce));
            dk.join_nonce += 1;

                diesel::update(device_keys::dsl::device_keys.find(&dev_eui))
                    .set((
                        device_keys::updated_at.eq(Utc::now()),
                        device_keys::dev_nonces.eq(&dk.dev_nonces),
                        device_keys::join_nonce.eq(&dk.join_nonce),
                    ))
                    .get_result(c)
                    .await
                    .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))
            })
            diesel::update(device_keys::dsl::device_keys.find(&dev_eui))
                .set((
                    device_keys::updated_at.eq(Utc::now()),
                    device_keys::dev_nonces.eq(&dk.dev_nonces),
                    device_keys::join_nonce.eq(&dk.join_nonce),
                ))
                .get_result(c)
                .await
                .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))
        })
        .await?;
    })
    .await?;

    info!(dev_eui = %dev_eui, dev_nonce = dev_nonce, "Device-nonce validated, join-nonce incremented and stored");
    Ok(dk)
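The postgres branch keeps the SELECT ... FOR UPDATE row lock so concurrent join-requests cannot both validate the same DevNonce; SQLite has no FOR UPDATE, so that branch relies on SQLite's single-writer transaction semantics instead. An illustrative usage sketch of the duplicate-nonce guard, assuming a stored DeviceKeys row for `dev_eui` and the (dev_eui, dev_nonce) call shape shown above:

let dk = validate_incr_join_and_store_dev_nonce(dev_eui, 42).await?;
assert!(dk.dev_nonces.contains(&Some(42)));
// A replayed join-request with the same DevNonce is now rejected:
let err = validate_incr_join_and_store_dev_nonce(dev_eui, 42).await.unwrap_err();
assert!(matches!(err, Error::InvalidDevNonce));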
@ -155,7 +155,7 @@ pub mod test {
    pub async fn reset_nonces(dev_eui: &EUI64) -> Result<DeviceKeys, Error> {
        let dk: DeviceKeys = diesel::update(device_keys::dsl::device_keys.find(&dev_eui))
            .set((
                device_keys::dev_nonces.eq::<Vec<i32>>(Vec::new()),
                device_keys::dev_nonces.eq(fields::DevNonces::from(Vec::new())),
                device_keys::join_nonce.eq(0),
            ))
            .get_result(&mut get_async_db_conn().await?)
@ -19,8 +19,8 @@ use chirpstack_api::internal;
#[derive(Clone, Queryable, Insertable, Debug, PartialEq, Eq)]
#[diesel(table_name = device_profile)]
pub struct DeviceProfile {
    pub id: Uuid,
    pub tenant_id: Uuid,
    pub id: fields::Uuid,
    pub tenant_id: fields::Uuid,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub name: String,
@ -95,8 +95,8 @@ impl Default for DeviceProfile {
    let now = Utc::now();

    DeviceProfile {
        id: Uuid::new_v4(),
        tenant_id: Uuid::nil(),
        id: Uuid::new_v4().into(),
        tenant_id: Uuid::nil().into(),
        created_at: now,
        updated_at: now,
        name: "".into(),
@ -185,7 +185,7 @@ impl DeviceProfile {

#[derive(Queryable, PartialEq, Eq, Debug)]
pub struct DeviceProfileListItem {
    pub id: Uuid,
    pub id: fields::Uuid,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub name: String,
@ -217,7 +217,7 @@ pub async fn create(dp: DeviceProfile) -> Result<DeviceProfile, Error> {

pub async fn get(id: &Uuid) -> Result<DeviceProfile, Error> {
    let dp = device_profile::dsl::device_profile
        .find(&id)
        .find(&fields::Uuid::from(id))
        .first(&mut get_async_db_conn().await?)
        .await
        .map_err(|e| error::Error::from_diesel(e, id.to_string()))?;
@ -297,17 +297,18 @@ pub async fn update(dp: DeviceProfile) -> Result<DeviceProfile, Error> {
}

pub async fn set_measurements(id: Uuid, m: &fields::Measurements) -> Result<DeviceProfile, Error> {
    let dp: DeviceProfile = diesel::update(device_profile::dsl::device_profile.find(&id))
        .set(device_profile::measurements.eq(m))
        .get_result(&mut get_async_db_conn().await?)
        .await
        .map_err(|e| Error::from_diesel(e, id.to_string()))?;
    let dp: DeviceProfile =
        diesel::update(device_profile::dsl::device_profile.find(&fields::Uuid::from(id)))
            .set(device_profile::measurements.eq(m))
            .get_result(&mut get_async_db_conn().await?)
            .await
            .map_err(|e| Error::from_diesel(e, id.to_string()))?;
    info!(id = %id, "Device-profile measurements updated");
    Ok(dp)
}

pub async fn delete(id: &Uuid) -> Result<(), Error> {
    let ra = diesel::delete(device_profile::dsl::device_profile.find(&id))
    let ra = diesel::delete(device_profile::dsl::device_profile.find(&fields::Uuid::from(id)))
        .execute(&mut get_async_db_conn().await?)
        .await?;
    if ra == 0 {
@ -323,11 +324,18 @@ pub async fn get_count(filters: &Filters) -> Result<i64, Error> {
    .into_boxed();

    if let Some(tenant_id) = &filters.tenant_id {
        q = q.filter(device_profile::dsl::tenant_id.eq(tenant_id));
        q = q.filter(device_profile::dsl::tenant_id.eq(fields::Uuid::from(tenant_id)));
    }

    if let Some(search) = &filters.search {
        q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search)));
        #[cfg(feature = "postgres")]
        {
            q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search)));
        }
        #[cfg(feature = "sqlite")]
        {
            q = q.filter(device_profile::dsl::name.like(format!("%{}%", search)));
        }
    }

    Ok(q.first(&mut get_async_db_conn().await?).await?)
@ -354,11 +362,18 @@ pub async fn list(
    .into_boxed();

    if let Some(tenant_id) = &filters.tenant_id {
        q = q.filter(device_profile::dsl::tenant_id.eq(tenant_id));
        q = q.filter(device_profile::dsl::tenant_id.eq(fields::Uuid::from(tenant_id)));
    }

    if let Some(search) = &filters.search {
        q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search)));
        #[cfg(feature = "postgres")]
        {
            q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search)));
        }
        #[cfg(feature = "sqlite")]
        {
            q = q.filter(device_profile::dsl::name.like(format!("%{}%", search)));
        }
    }

    let items = q
@ -386,7 +401,7 @@ pub mod test {

    pub async fn create_device_profile(tenant_id: Option<Uuid>) -> DeviceProfile {
        let tenant_id = match tenant_id {
            Some(v) => v,
            Some(v) => v.into(),
            None => {
                let t = storage::tenant::test::create_tenant().await;
                t.id
@ -462,7 +477,7 @@ pub mod test {
        },
        FilterTest {
            filters: Filters {
                tenant_id: Some(dp.tenant_id),
                tenant_id: Some(dp.tenant_id.into()),
                search: None,
            },
            dps: vec![&dp],
@ -5,15 +5,14 @@ use diesel_async::RunQueryDsl;
use tracing::info;
use uuid::Uuid;

use super::error::Error;
use super::get_async_db_conn;
use super::schema::device_queue_item;
use super::{error::Error, fields, get_async_db_conn};
use lrwn::EUI64;

#[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)]
#[diesel(table_name = device_queue_item)]
pub struct DeviceQueueItem {
    pub id: Uuid,
    pub id: fields::Uuid,
    pub dev_eui: EUI64,
    pub created_at: DateTime<Utc>,
    pub f_port: i16,
@ -48,7 +47,7 @@ impl Default for DeviceQueueItem {
    let now = Utc::now();

    DeviceQueueItem {
        id: Uuid::new_v4(),
        id: Uuid::new_v4().into(),
        dev_eui: EUI64::from_be_bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]),
        created_at: now,
        f_port: 0,
@ -76,7 +75,7 @@ pub async fn enqueue_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error>

pub async fn get_item(id: &Uuid) -> Result<DeviceQueueItem, Error> {
    let qi = device_queue_item::dsl::device_queue_item
        .find(id)
        .find(&fields::Uuid::from(id))
        .first(&mut get_async_db_conn().await?)
        .await
        .map_err(|e| Error::from_diesel(e, id.to_string()))?;
@ -99,9 +98,10 @@ pub async fn update_item(qi: DeviceQueueItem) -> Result<DeviceQueueItem, Error>
}

pub async fn delete_item(id: &Uuid) -> Result<(), Error> {
    let ra = diesel::delete(device_queue_item::dsl::device_queue_item.find(&id))
        .execute(&mut get_async_db_conn().await?)
        .await?;
    let ra =
        diesel::delete(device_queue_item::dsl::device_queue_item.find(&fields::Uuid::from(id)))
            .execute(&mut get_async_db_conn().await?)
            .await?;
    if ra == 0 {
        return Err(Error::NotFound(id.to_string()));
    }
@ -192,7 +192,7 @@ pub mod test {
    let dp = storage::device_profile::test::create_device_profile(None).await;
    let d = storage::device::test::create_device(
        EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
        dp.id,
        dp.id.into(),
        None,
    )
    .await;
@ -253,7 +253,7 @@ pub mod test {
    let dp = storage::device_profile::test::create_device_profile(None).await;
    let d = storage::device::test::create_device(
        EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
        dp.id,
        dp.id.into(),
        None,
    )
    .await;
@ -278,7 +278,7 @@ pub mod test {
    let dp = storage::device_profile::test::create_device_profile(None).await;
    let d = storage::device::test::create_device(
        EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
        dp.id,
        dp.id.into(),
        None,
    )
    .await;
chirpstack/src/storage/fields.rs
@ -1,177 +0,0 @@
use std::collections::HashMap;
use std::fmt;
use std::ops::{Deref, DerefMut};
use std::str::FromStr;

use diesel::backend::Backend;
use diesel::pg::Pg;
use diesel::sql_types::{Jsonb, Text};
use diesel::{deserialize, serialize};
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)]
#[diesel(sql_type = Jsonb)]
pub struct KeyValue(HashMap<String, String>);

impl KeyValue {
    pub fn new(m: HashMap<String, String>) -> Self {
        KeyValue(m)
    }

    #[allow(clippy::wrong_self_convention)]
    pub fn into_hashmap(&self) -> HashMap<String, String> {
        self.0.clone()
    }
}

impl Deref for KeyValue {
    type Target = HashMap<String, String>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for KeyValue {
    fn deref_mut(&mut self) -> &mut HashMap<String, String> {
        &mut self.0
    }
}

impl deserialize::FromSql<Jsonb, Pg> for KeyValue {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let value = <serde_json::Value as deserialize::FromSql<Jsonb, Pg>>::from_sql(value)?;
        let kv: HashMap<String, String> = serde_json::from_value(value)?;
        Ok(KeyValue(kv))
    }
}

impl serialize::ToSql<Jsonb, Pg> for KeyValue {
    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
        let value = serde_json::to_value(&self.0)?;
        <serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
    }
}

#[derive(Debug, Clone, AsExpression, FromSqlRow, PartialEq, Eq)]
#[diesel(sql_type = Jsonb)]
pub struct Measurements(HashMap<String, Measurement>);

impl Measurements {
    pub fn new(m: HashMap<String, Measurement>) -> Self {
        Measurements(m)
    }

    #[allow(clippy::wrong_self_convention)]
    pub fn into_hashmap(&self) -> HashMap<String, Measurement> {
        self.0.clone()
    }
}

impl Deref for Measurements {
    type Target = HashMap<String, Measurement>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Measurements {
    fn deref_mut(&mut self) -> &mut HashMap<String, Measurement> {
        &mut self.0
    }
}

impl deserialize::FromSql<Jsonb, Pg> for Measurements {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let value = <serde_json::Value as deserialize::FromSql<Jsonb, Pg>>::from_sql(value)?;
        let kv: HashMap<String, Measurement> = serde_json::from_value(value)?;
        Ok(Measurements::new(kv))
    }
}

impl serialize::ToSql<Jsonb, Pg> for Measurements {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Pg>) -> serialize::Result {
        let value = serde_json::to_value(&self.0)?;
        <serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
    }
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Measurement {
    pub name: String,
    pub kind: MeasurementKind,
}

#[allow(clippy::upper_case_acronyms)]
#[allow(non_camel_case_types)]
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
pub enum MeasurementKind {
    // Unknown.
    UNKNOWN,
    // Incrementing counters which are not reset on each reporting.
    COUNTER,
    // Counters that do get reset upon reading.
    ABSOLUTE,
    // E.g. a temperature value.
    GAUGE,
    // E.g. a firmware version, true / false value.
    STRING,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, AsExpression, FromSqlRow)]
#[allow(clippy::upper_case_acronyms)]
#[allow(non_camel_case_types)]
#[diesel(sql_type = diesel::sql_types::Text)]
pub enum MulticastGroupSchedulingType {
    // Delay.
    DELAY,
    // GPS time.
    GPS_TIME,
}

impl fmt::Display for MulticastGroupSchedulingType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl<DB> deserialize::FromSql<Text, DB> for MulticastGroupSchedulingType
where
    DB: Backend,
    *const str: deserialize::FromSql<Text, DB>,
{
    fn from_sql(value: <DB as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let string = <*const str>::from_sql(value)?;
        Ok(Self::from_str(unsafe { &*string })?)
    }
}

impl serialize::ToSql<Text, diesel::pg::Pg> for MulticastGroupSchedulingType
where
    str: serialize::ToSql<Text, diesel::pg::Pg>,
{
    fn to_sql<'b>(
        &'b self,
        out: &mut serialize::Output<'b, '_, diesel::pg::Pg>,
    ) -> serialize::Result {
        <str as serialize::ToSql<Text, diesel::pg::Pg>>::to_sql(
            &self.to_string(),
            &mut out.reborrow(),
        )
    }
}

impl FromStr for MulticastGroupSchedulingType {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(match s {
            "DELAY" => MulticastGroupSchedulingType::DELAY,
            "GPS_TIME" => MulticastGroupSchedulingType::GPS_TIME,
            _ => {
                return Err(anyhow!("Unexpected MulticastGroupSchedulingType: {}", s));
            }
        })
    }
}
91
chirpstack/src/storage/fields/big_decimal.rs
Normal file
@ -0,0 +1,91 @@
use diesel::{
    backend::Backend,
    {deserialize, serialize},
};
#[cfg(feature = "postgres")]
use diesel::{pg::Pg, sql_types::Numeric};
#[cfg(feature = "sqlite")]
use diesel::{sql_types::Double, sqlite::Sqlite};

#[derive(Clone, Debug, Eq, PartialEq, AsExpression, FromSqlRow)]
#[cfg_attr(feature="postgres", diesel(sql_type = Numeric))]
#[cfg_attr(feature="sqlite", diesel(sql_type = Double))]
pub struct BigDecimal(bigdecimal::BigDecimal);

impl std::convert::AsRef<bigdecimal::BigDecimal> for BigDecimal {
    fn as_ref(&self) -> &bigdecimal::BigDecimal {
        &self.0
    }
}

impl std::convert::From<bigdecimal::BigDecimal> for BigDecimal {
    fn from(value: bigdecimal::BigDecimal) -> Self {
        Self(value)
    }
}

impl std::convert::TryFrom<f32> for BigDecimal {
    type Error = <bigdecimal::BigDecimal as TryFrom<f32>>::Error;
    fn try_from(value: f32) -> Result<Self, Self::Error> {
        bigdecimal::BigDecimal::try_from(value).map(|bd| bd.into())
    }
}

impl std::ops::Deref for BigDecimal {
    type Target = bigdecimal::BigDecimal;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl std::ops::DerefMut for BigDecimal {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

#[cfg(feature = "postgres")]
impl deserialize::FromSql<Numeric, Pg> for BigDecimal {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let u = <bigdecimal::BigDecimal>::from_sql(value)?;
        Ok(BigDecimal(u))
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Numeric, Pg> for BigDecimal {
    fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
        <bigdecimal::BigDecimal as serialize::ToSql<Numeric, Pg>>::to_sql(
            &self.0,
            &mut out.reborrow(),
        )
    }
}

#[cfg(feature = "sqlite")]
impl deserialize::FromSql<Double, Sqlite> for BigDecimal
where
    f64: deserialize::FromSql<Double, Sqlite>,
{
    fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        use bigdecimal::FromPrimitive;
        let bd_val =
            <f64 as deserialize::FromSql<diesel::sql_types::Double, Sqlite>>::from_sql(value)?;
        let bd = bigdecimal::BigDecimal::from_f64(bd_val)
            .ok_or_else(|| format!("Unrepresentable BigDecimal from f64 value"))?;
        Ok(BigDecimal(bd))
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Double, Sqlite> for BigDecimal {
    fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result {
        use bigdecimal::ToPrimitive;
        let value = self
            .0
            .to_f64()
            .ok_or_else(|| format!("Unrepresentable f64 value as BigDecimal"))?;
        out.set_value(value);
        Ok(serialize::IsNull::No)
    }
}
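Because the SQLite mapping goes through Double, values round-trip through f64 and can lose precision beyond what a 64-bit float represents exactly, unlike the exact Numeric mapping on Postgres. An illustrative round-trip, using the same bigdecimal traits this file imports:

use bigdecimal::{FromPrimitive, ToPrimitive};
use std::str::FromStr;

let exact = bigdecimal::BigDecimal::from_str("0.1").unwrap();
let through_f64 = bigdecimal::BigDecimal::from_f64(exact.to_f64().unwrap()).unwrap();
// 0.1 has no exact f64 representation, so the SQLite round-trip is approximate:
assert_ne!(exact, through_f64);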
92
chirpstack/src/storage/fields/dev_nonces.rs
Normal file
@ -0,0 +1,92 @@
use diesel::backend::Backend;
use diesel::{deserialize, serialize};
#[cfg(feature = "postgres")]
use diesel::{
    pg::Pg,
    sql_types::{Array, Int4, Nullable},
};
#[cfg(feature = "sqlite")]
use diesel::{sql_types::Text, sqlite::Sqlite};
use serde::{Deserialize, Serialize};

#[cfg(feature = "postgres")]
type DevNoncesPgType = Array<Nullable<Int4>>;

// Sqlite has no native array type so use text
#[derive(Deserialize, Serialize, Clone, Debug, Eq, PartialEq, AsExpression, FromSqlRow)]
#[serde(transparent)]
#[cfg_attr(feature = "postgres", diesel(sql_type = DevNoncesPgType))]
#[cfg_attr(feature = "sqlite", diesel(sql_type = Text))]
pub struct DevNonces(DevNoncesInner);

pub type DevNoncesInner = Vec<Option<i32>>;

impl std::default::Default for DevNonces {
    fn default() -> Self {
        Self(Vec::new())
    }
}

impl std::convert::AsRef<DevNoncesInner> for DevNonces {
    fn as_ref(&self) -> &DevNoncesInner {
        &self.0
    }
}

impl std::convert::From<DevNoncesInner> for DevNonces {
    fn from(value: DevNoncesInner) -> Self {
        Self(value)
    }
}

impl std::ops::Deref for DevNonces {
    type Target = DevNoncesInner;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl std::ops::DerefMut for DevNonces {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

#[cfg(feature = "postgres")]
impl deserialize::FromSql<DevNoncesPgType, Pg> for DevNonces {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let sql_val = <DevNoncesInner>::from_sql(value)?;
        Ok(DevNonces(sql_val))
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<DevNoncesPgType, Pg> for DevNonces {
    fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
        <DevNoncesInner as serialize::ToSql<DevNoncesPgType, Pg>>::to_sql(
            &self.0,
            &mut out.reborrow(),
        )
    }
}

#[cfg(feature = "sqlite")]
impl deserialize::FromSql<Text, Sqlite> for DevNonces
where
    *const str: deserialize::FromSql<Text, Sqlite>,
{
    fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let s =
            <*const str as deserialize::FromSql<diesel::sql_types::Text, Sqlite>>::from_sql(value)?;
        let nonces = serde_json::from_str::<DevNonces>(unsafe { &*s })?;
        Ok(nonces)
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for DevNonces {
    fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result {
        out.set_value(serde_json::to_string(self)?);
        Ok(serialize::IsNull::No)
    }
}
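On SQLite the nonce list is stored as a JSON string, and thanks to #[serde(transparent)] it serializes as the bare inner array. An illustrative round-trip of that Text encoding:

let nonces = DevNonces::from(vec![Some(1), Some(2)]);
let encoded = serde_json::to_string(&nonces).unwrap();
assert_eq!(encoded, "[1,2]"); // transparent: just the inner Vec<Option<i32>>
let decoded: DevNonces = serde_json::from_str(&encoded).unwrap();
assert_eq!(decoded, nonces);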
83
chirpstack/src/storage/fields/device_session.rs
Normal file
@ -0,0 +1,83 @@
use std::io::Cursor;
use std::ops::{Deref, DerefMut};

use diesel::backend::Backend;
#[cfg(feature = "postgres")]
use diesel::pg::Pg;
use diesel::sql_types::Binary;
#[cfg(feature = "sqlite")]
use diesel::sqlite::Sqlite;
use diesel::{deserialize, serialize};
use prost::Message;

use chirpstack_api::internal;

#[derive(Debug, Clone, PartialEq, AsExpression, FromSqlRow)]
#[diesel(sql_type = diesel::sql_types::Binary)]
pub struct DeviceSession(internal::DeviceSession);

impl DeviceSession {
    pub fn new(m: internal::DeviceSession) -> Self {
        DeviceSession(m)
    }
}

impl std::convert::From<internal::DeviceSession> for DeviceSession {
    fn from(u: internal::DeviceSession) -> Self {
        Self(u)
    }
}

impl std::convert::From<&internal::DeviceSession> for DeviceSession {
    fn from(u: &internal::DeviceSession) -> Self {
        Self::from(u.clone())
    }
}

impl std::convert::Into<internal::DeviceSession> for DeviceSession {
    fn into(self) -> internal::DeviceSession {
        self.0
    }
}

impl Deref for DeviceSession {
    type Target = internal::DeviceSession;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for DeviceSession {
    fn deref_mut(&mut self) -> &mut internal::DeviceSession {
        &mut self.0
    }
}

impl<DB> deserialize::FromSql<Binary, DB> for DeviceSession
where
    DB: Backend,
    *const [u8]: deserialize::FromSql<Binary, DB>,
{
    fn from_sql(value: <DB as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let bindata = <*const [u8] as deserialize::FromSql<Binary, DB>>::from_sql(value)?;
        let ds = internal::DeviceSession::decode(&mut Cursor::new(unsafe { &*bindata }))?;
        Ok(DeviceSession(ds))
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Binary, Pg> for DeviceSession {
    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
        let encoded = self.encode_to_vec();
        <Vec<u8> as serialize::ToSql<Binary, Pg>>::to_sql(&encoded, &mut out.reborrow())
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Binary, Sqlite> for DeviceSession {
    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result {
        out.set_value(self.encode_to_vec());
        Ok(serialize::IsNull::No)
    }
}
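Both backends store the same bytes: the protobuf encoding of internal::DeviceSession, so the column is backend-portable. An illustrative round-trip (encode_to_vec is reached through the Deref to internal::DeviceSession, exactly as the ToSql impls above do):

use prost::Message;

let ds = DeviceSession::from(internal::DeviceSession {
    dev_addr: vec![0x01, 0x02, 0x03, 0x04],
    f_cnt_up: 10,
    ..Default::default()
});
let bytes = ds.encode_to_vec(); // protobuf bytes, identical on postgres and sqlite
let decoded = internal::DeviceSession::decode(bytes.as_slice()).unwrap();
assert_eq!(decoded.f_cnt_up, 10);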
78
chirpstack/src/storage/fields/key_value.rs
Normal file
@ -0,0 +1,78 @@
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};

use diesel::backend::Backend;

use diesel::{deserialize, serialize};
#[cfg(feature = "postgres")]
use diesel::{pg::Pg, sql_types::Jsonb};
#[cfg(feature = "sqlite")]
use diesel::{sql_types::Text, sqlite::Sqlite};

#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)]
#[cfg_attr(feature = "postgres", diesel(sql_type = Jsonb))]
#[cfg_attr(feature = "sqlite", diesel(sql_type = Text))]
pub struct KeyValue(HashMap<String, String>);

impl KeyValue {
    pub fn new(m: HashMap<String, String>) -> Self {
        KeyValue(m)
    }

    #[allow(clippy::wrong_self_convention)]
    pub fn into_hashmap(&self) -> HashMap<String, String> {
        self.0.clone()
    }
}

impl Deref for KeyValue {
    type Target = HashMap<String, String>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for KeyValue {
    fn deref_mut(&mut self) -> &mut HashMap<String, String> {
        &mut self.0
    }
}

#[cfg(feature = "postgres")]
impl deserialize::FromSql<Jsonb, Pg> for KeyValue {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let value = <serde_json::Value as deserialize::FromSql<Jsonb, Pg>>::from_sql(value)?;
        let kv: HashMap<String, String> = serde_json::from_value(value)?;
        Ok(KeyValue(kv))
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for KeyValue {
    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
        let value = serde_json::to_value(&self.0)?;
        <serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
    }
}

#[cfg(feature = "sqlite")]
impl deserialize::FromSql<Text, Sqlite> for KeyValue
where
    *const str: deserialize::FromSql<Text, Sqlite>,
{
    fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let s =
            <*const str as deserialize::FromSql<diesel::sql_types::Text, Sqlite>>::from_sql(value)?;
        let kv: HashMap<String, String> = serde_json::from_str(unsafe { &*s })?;
        Ok(KeyValue(kv))
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for KeyValue {
    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result {
        out.set_value(serde_json::to_string(&self.0)?);
        Ok(serialize::IsNull::No)
    }
}
101
chirpstack/src/storage/fields/measurements.rs
Normal file
@ -0,0 +1,101 @@
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};

use diesel::backend::Backend;
use diesel::{deserialize, serialize};
#[cfg(feature = "postgres")]
use diesel::{pg::Pg, sql_types::Jsonb};
#[cfg(feature = "sqlite")]
use diesel::{sql_types::Text, sqlite::Sqlite};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Measurement {
    pub name: String,
    pub kind: MeasurementKind,
}

#[allow(clippy::upper_case_acronyms)]
#[allow(non_camel_case_types)]
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
pub enum MeasurementKind {
    // Unknown.
    UNKNOWN,
    // Incrementing counters which are not reset on each reporting.
    COUNTER,
    // Counters that do get reset upon reading.
    ABSOLUTE,
    // E.g. a temperature value.
    GAUGE,
    // E.g. a firmware version, true / false value.
    STRING,
}

#[derive(Debug, Clone, AsExpression, FromSqlRow, PartialEq, Eq)]
#[cfg_attr(feature = "postgres", diesel(sql_type = Jsonb))]
#[cfg_attr(feature = "sqlite", diesel(sql_type = Text))]
pub struct Measurements(HashMap<String, Measurement>);

impl Measurements {
    pub fn new(m: HashMap<String, Measurement>) -> Self {
        Measurements(m)
    }

    #[allow(clippy::wrong_self_convention)]
    pub fn into_hashmap(&self) -> HashMap<String, Measurement> {
        self.0.clone()
    }
}

impl Deref for Measurements {
    type Target = HashMap<String, Measurement>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Measurements {
    fn deref_mut(&mut self) -> &mut HashMap<String, Measurement> {
        &mut self.0
    }
}

#[cfg(feature = "postgres")]
impl deserialize::FromSql<Jsonb, Pg> for Measurements {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let value = <serde_json::Value as deserialize::FromSql<Jsonb, Pg>>::from_sql(value)?;
        let kv: HashMap<String, Measurement> = serde_json::from_value(value)?;
        Ok(Measurements::new(kv))
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for Measurements {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Pg>) -> serialize::Result {
        let value = serde_json::to_value(&self.0)?;
        <serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
    }
}

#[cfg(feature = "sqlite")]
impl deserialize::FromSql<Text, Sqlite> for Measurements
where
    *const str: deserialize::FromSql<Text, Sqlite>,
{
    fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let s =
            <*const str as deserialize::FromSql<diesel::sql_types::Text, Sqlite>>::from_sql(value)?;
        let kv: HashMap<String, Measurement> = serde_json::from_str(unsafe { &*s })?;
        Ok(Measurements::new(kv))
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for Measurements {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result {
        let value = serde_json::to_string(&self.0)?;
        out.set_value(value);
        Ok(serialize::IsNull::No)
    }
}
37
chirpstack/src/storage/fields/mod.rs
Normal file
@ -0,0 +1,37 @@
mod big_decimal;
mod dev_nonces;
mod device_session;
mod key_value;
mod measurements;
mod multicast_group_scheduling_type;
mod uuid;

pub use big_decimal::BigDecimal;
pub use dev_nonces::*;
pub use device_session::DeviceSession;
pub use key_value::KeyValue;
pub use measurements::*;
pub use multicast_group_scheduling_type::MulticastGroupSchedulingType;
pub use uuid::Uuid;

#[cfg(feature = "postgres")]
pub mod sql_types {
    pub type Timestamptz = diesel::sql_types::Timestamptz;

    pub type JsonT = diesel::sql_types::Jsonb;

    pub type Uuid = diesel::sql_types::Uuid;
}

#[cfg(feature = "sqlite")]
pub mod sql_types {
    pub type Timestamptz = diesel::sql_types::TimestamptzSqlite;

    // TODO: sqlite is adding "jsonb" support, different from postgres
    // So we may switch the column to blob?
    // see https://sqlite.org/draft/jsonb.html
    pub type JsonT = diesel::sql_types::Text;

    // Sqlite has no native uuid type so use text
    pub type Uuid = diesel::sql_types::Text;
}
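These sql_types aliases let query code bind values without a per-backend cfg at every call site, as get_with_class_b_c_queue_items() does above. An illustrative fragment; it assumes the SQLite-style "?1" placeholder, whereas Postgres SQL would use "$1", so the SQL string itself still differs per backend:

use chrono::Utc;

// Resolves to diesel::sql_types::Timestamptz on postgres and to
// diesel::sql_types::TimestamptzSqlite on sqlite; the bind call is unchanged.
let _q = diesel::sql_query("update device set scheduler_run_after = ?1")
    .bind::<fields::sql_types::Timestamptz, _>(Utc::now());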
@ -0,0 +1,75 @@
|
||||
use std::fmt;
use std::str::FromStr;

use diesel::backend::Backend;
use diesel::sql_types::Text;
#[cfg(feature = "sqlite")]
use diesel::sqlite::Sqlite;
use diesel::{deserialize, serialize};
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, AsExpression, FromSqlRow)]
#[allow(clippy::upper_case_acronyms)]
#[allow(non_camel_case_types)]
#[diesel(sql_type = diesel::sql_types::Text)]
pub enum MulticastGroupSchedulingType {
    // Delay.
    DELAY,
    // GPS time.
    GPS_TIME,
}

impl fmt::Display for MulticastGroupSchedulingType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl<DB> deserialize::FromSql<Text, DB> for MulticastGroupSchedulingType
where
    DB: Backend,
    *const str: deserialize::FromSql<Text, DB>,
{
    fn from_sql(value: <DB as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let string = <*const str>::from_sql(value)?;
        Ok(Self::from_str(unsafe { &*string })?)
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<Text, diesel::pg::Pg> for MulticastGroupSchedulingType
where
    str: serialize::ToSql<Text, diesel::pg::Pg>,
{
    fn to_sql<'b>(
        &'b self,
        out: &mut serialize::Output<'b, '_, diesel::pg::Pg>,
    ) -> serialize::Result {
        <str as serialize::ToSql<Text, diesel::pg::Pg>>::to_sql(
            &self.to_string(),
            &mut out.reborrow(),
        )
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for MulticastGroupSchedulingType {
    fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result {
        out.set_value(self.to_string());
        Ok(serialize::IsNull::No)
    }
}

impl FromStr for MulticastGroupSchedulingType {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(match s {
            "DELAY" => MulticastGroupSchedulingType::DELAY,
            "GPS_TIME" => MulticastGroupSchedulingType::GPS_TIME,
            _ => {
                return Err(anyhow!("Unexpected MulticastGroupSchedulingType: {}", s));
            }
        })
    }
}
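Both backends serialize this enum through Display and deserialize through FromStr, so a round-trip test covers the whole text mapping. A sketch (not in the diff), assuming it sits in the same module as the enum:

#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn scheduling_type_round_trip() {
        for t in [
            MulticastGroupSchedulingType::DELAY,
            MulticastGroupSchedulingType::GPS_TIME,
        ] {
            // Display renders the Debug name, e.g. "DELAY".
            let s = t.to_string();
            assert_eq!(t, MulticastGroupSchedulingType::from_str(&s).unwrap());
        }
        // Unknown values surface as an error instead of a silent default.
        assert!(MulticastGroupSchedulingType::from_str("NOPE").is_err());
    }
}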
87
chirpstack/src/storage/fields/uuid.rs
Normal file
@ -0,0 +1,87 @@
use diesel::backend::Backend;
#[cfg(feature = "postgres")]
use diesel::pg::Pg;
#[cfg(feature = "sqlite")]
use diesel::sqlite::Sqlite;
use diesel::{deserialize, serialize};

use serde::{Deserialize, Serialize};

#[derive(Deserialize, Serialize, Copy, Clone, Debug, Eq, PartialEq, AsExpression, FromSqlRow)]
#[serde(transparent)]
#[cfg_attr(feature = "postgres", diesel(sql_type = diesel::sql_types::Uuid))]
#[cfg_attr(feature = "sqlite", diesel(sql_type = diesel::sql_types::Text))]
pub struct Uuid(uuid::Uuid);

impl std::convert::From<uuid::Uuid> for Uuid {
    fn from(u: uuid::Uuid) -> Self {
        Self(u)
    }
}

impl std::convert::From<&uuid::Uuid> for Uuid {
    fn from(u: &uuid::Uuid) -> Self {
        Self::from(*u)
    }
}

impl std::convert::Into<uuid::Uuid> for Uuid {
    fn into(self) -> uuid::Uuid {
        self.0
    }
}

impl std::ops::Deref for Uuid {
    type Target = uuid::Uuid;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl std::ops::DerefMut for Uuid {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl std::fmt::Display for Uuid {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}

#[cfg(feature = "postgres")]
impl deserialize::FromSql<diesel::sql_types::Uuid, Pg> for Uuid {
    fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let u = <uuid::Uuid>::from_sql(value)?;
        Ok(Uuid(u))
    }
}

#[cfg(feature = "postgres")]
impl serialize::ToSql<diesel::sql_types::Uuid, Pg> for Uuid {
    fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
        <uuid::Uuid as serialize::ToSql<diesel::sql_types::Uuid, Pg>>::to_sql(
            &self.0,
            &mut out.reborrow(),
        )
    }
}

#[cfg(feature = "sqlite")]
impl deserialize::FromSql<diesel::sql_types::Text, Sqlite> for Uuid {
    fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
        let s =
            <*const str as deserialize::FromSql<diesel::sql_types::Text, Sqlite>>::from_sql(value)?;
        let u = uuid::Uuid::try_parse(unsafe { &*s })?;
        Ok(Uuid(u))
    }
}

#[cfg(feature = "sqlite")]
impl serialize::ToSql<diesel::sql_types::Text, Sqlite> for Uuid {
    fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result {
        out.set_value(self.0.to_string());
        Ok(serialize::IsNull::No)
    }
}
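Taken together, the conversions and Deref make the wrapper a near drop-in for uuid::Uuid at call sites. A standalone sketch (not part of this diff), assuming the wrapper is in scope via the fields re-export and that the uuid crate's v4 feature is enabled:

// Sketch only: `Uuid` below is the wrapper defined above, assumed imported;
// uuid::Uuid::new_v4() requires the uuid crate's "v4" feature.
fn main() {
    let raw = uuid::Uuid::new_v4();

    // From/Into conversions provided by the wrapper.
    let wrapped: Uuid = raw.into();
    let back: uuid::Uuid = wrapped.into();
    assert_eq!(raw, back);

    // Deref exposes uuid::Uuid methods directly on the wrapper.
    let wrapped = Uuid::from(&raw);
    println!("{} (simple: {})", wrapped, wrapped.as_simple());
}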
Some files were not shown because too many files have changed in this diff.