diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 55725a74..49262e82 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -13,6 +13,13 @@ env: jobs: tests: runs-on: ubuntu-latest + strategy: + matrix: + database: + - postgres + - sqlite + env: + DATABASE: ${{ matrix.database }} steps: - name: Checkout @@ -32,7 +39,7 @@ jobs: ~/.cargo/registry/cache/ ~/.cargo/git/db/ target/ - key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-cargo-test-${{ matrix.database }}-${{ hashFiles('**/Cargo.lock') }} - name: Start dependency services run: docker compose up -d diff --git a/.gitignore b/.gitignore index a917d4ea..80b4975d 100644 --- a/.gitignore +++ b/.gitignore @@ -11,8 +11,12 @@ # Binary packages /dist +# SQLite databases +*.sqlite + # Rust target directory **/target +**/target-sqlite # Certificates /chirpstack/configuration/certs/* diff --git a/Cargo.lock b/Cargo.lock index cb90aaf7..7c4ffa78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -876,6 +876,7 @@ dependencies = [ "rustls 0.23.12", "rustls-native-certs", "rustls-pemfile", + "scoped-futures", "serde", "serde_json", "serde_urlencoded", @@ -907,7 +908,6 @@ dependencies = [ name = "chirpstack_api" version = "4.9.0" dependencies = [ - "diesel", "hex", "pbjson", "pbjson-build", @@ -1303,10 +1303,12 @@ dependencies = [ "chrono", "diesel_derives", "itoa", + "libsqlite3-sys", "num-bigint", "num-integer", "num-traits", "serde_json", + "time", "uuid", ] @@ -2165,9 +2167,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-channel", @@ -2466,6 +2468,16 @@ dependencies = [ "libc", ] +[[package]] +name = "libsqlite3-sys" +version = "0.29.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4588d65215825ee71ebff9e1c9982067833b1355d7546845ffdb3165cbd7456" +dependencies = [ + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -5019,6 +5031,12 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.5" diff --git a/Makefile b/Makefile index 1ef1243d..0c333f14 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ dist: # Install dev dependencies dev-dependencies: cargo install cross --version 0.2.5 - cargo install diesel_cli --version 2.2.1 --no-default-features --features postgres + cargo install diesel_cli --version 2.2.1 --no-default-features --features postgres,sqlite cargo install cargo-deb --version 1.43.1 cargo install cargo-generate-rpm --version 0.12.1 diff --git a/README.md b/README.md index 9fd2e58e..464894d5 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,11 @@ docker compose up -d Run the following command to run the ChirpStack tests: ```bash +# Test (with PostgresQL database backend) make test + +# Test with SQLite database backend +DATABASE=sqlite make test ``` ### Building ChirpStack binaries @@ -109,6 +113,34 @@ make release-amd64 make dist ``` +By default the above commands will build ChirpStack with the PostgresQL database +database backend. Set the `DATABASE=sqlite` env. variable to compile ChirpStack +with the SQLite database backend. 
+ +### Database migrations + +To create a new database migration, execute: + +``` +make migration-generate NAME=test-migration +``` + +To apply migrations, execute: + +``` +make migration-run +``` + +To revert a migration, execute: + +``` +make migration-revert +``` + +By default the above commands will execute the migration commands using the +PostgreSQL database backend. To execute migration commands for the SQLite +database backend, set the `DATABASE=sqlite` env. variable. + ## License ChirpStack Network Server is distributed under the MIT license. See also diff --git a/api/rust/Cargo.toml b/api/rust/Cargo.toml index 375b83cb..96ee92bd 100644 --- a/api/rust/Cargo.toml +++ b/api/rust/Cargo.toml @@ -12,7 +12,6 @@ default = ["api", "json"] api = ["tonic/transport", "tonic-build/transport", "tokio"] json = ["pbjson", "pbjson-types", "serde"] - diesel = ["dep:diesel"] internal = [] [dependencies] @@ -29,7 +28,6 @@ pbjson = { version = "0.7", optional = true } pbjson-types = { version = "0.7", optional = true } serde = { version = "1.0", optional = true } - diesel = { version = "2.2", features = ["postgres_backend"], optional = true } [build-dependencies] tonic-build = { version = "0.12", features = [ diff --git a/api/rust/src/internal.rs b/api/rust/src/internal.rs index 9ea97098..d62ba6c1 100644 --- a/api/rust/src/internal.rs +++ b/api/rust/src/internal.rs @@ -2,13 +2,6 @@ include!(concat!(env!("OUT_DIR"), "/internal/internal.rs")); #[cfg(feature = "json")] include!(concat!(env!("OUT_DIR"), "/internal/internal.serde.rs")); -#[cfg(feature = "diesel")] -use diesel::{backend::Backend, deserialize, serialize, sql_types::Binary}; -#[cfg(feature = "diesel")] -use prost::Message; -#[cfg(feature = "diesel")] -use std::io::Cursor; - impl DeviceSession { pub fn get_a_f_cnt_down(&self) -> u32 { if self.mac_version().to_string().starts_with("1.0") { 
DB: Backend, - *const [u8]: deserialize::FromSql, -{ - fn from_sql(value: DB::RawValue<'_>) -> deserialize::Result { - let bytes = as deserialize::FromSql>::from_sql(value)?; - Ok(DeviceSession::decode(&mut Cursor::new(bytes))?) - } -} - -#[cfg(feature = "diesel")] -impl serialize::ToSql for DeviceSession -where - [u8]: serialize::ToSql, -{ - fn to_sql(&self, out: &mut serialize::Output<'_, '_, diesel::pg::Pg>) -> serialize::Result { - <[u8] as serialize::ToSql>::to_sql( - &self.encode_to_vec(), - &mut out.reborrow(), - ) - } -} diff --git a/chirpstack/Cargo.toml b/chirpstack/Cargo.toml index 4b23a11e..f964d3f0 100644 --- a/chirpstack/Cargo.toml +++ b/chirpstack/Cargo.toml @@ -26,20 +26,16 @@ email_address = "0.2" diesel = { version = "2.2", features = [ "chrono", - "uuid", - "serde_json", "numeric", "64-column-tables", - "postgres_backend", ] } diesel_migrations = { version = "2.2" } diesel-async = { version = "0.5", features = [ "deadpool", - "postgres", "async-connection-wrapper", ] } - tokio-postgres = "0.7" - tokio-postgres-rustls = "0.12" + tokio-postgres = { version = "0.7", optional = true } + tokio-postgres-rustls = { version = "0.12", optional = true } bigdecimal = "0.4" redis = { version = "0.26", features = ["tls-rustls", "tokio-rustls-comp"] } deadpool-redis = { version = "0.16", features = ["cluster"] } @@ -53,11 +49,7 @@ ], default-features = true } # ChirpStack API definitions - chirpstack_api = { path = "../api/rust", features = [ - "default", - "internal", - "diesel", - ] } + chirpstack_api = { path = "../api/rust", features = ["default", "internal"] } lrwn = { path = "../lrwn", features = [ "serde", "diesel", @@ -161,6 +153,7 @@ petgraph = "0.6" prometheus-client = "0.22" pin-project = "1.1" + scoped-futures = { version = "0.1", features = ["std"] } signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = ["futures-v0_3"] } @@ -171,6 +164,23 @@ dotenv = "0.15" [features] + default = ["postgres"] + postgres = [ + "tokio-postgres", + 
"tokio-postgres-rustls", + "diesel/postgres_backend", + "diesel/serde_json", + "diesel/uuid", + "diesel-async/postgres", + "lrwn/postgres", + ] + sqlite = [ + "diesel/sqlite", + "diesel/returning_clauses_for_sqlite_3_35", + "lrwn/sqlite", + "diesel-async/sync-connection-wrapper", + "diesel-async/sqlite", + ] test-all-integrations = [ "test-integration-amqp", "test-integration-kafka", diff --git a/chirpstack/Makefile b/chirpstack/Makefile index 2146e9e8..7cec9de6 100644 --- a/chirpstack/Makefile +++ b/chirpstack/Makefile @@ -1,20 +1,21 @@ .PHONY: dist PKG_VERSION := $(shell cargo metadata --no-deps --format-version 1 | jq -r '.packages[0].version') +DATABASE ?= postgres debug-amd64: - cross build --target x86_64-unknown-linux-musl + cross build --target x86_64-unknown-linux-musl --no-default-features --features="$(DATABASE)" release-amd64: - cross build --target x86_64-unknown-linux-musl --release + cross build --target x86_64-unknown-linux-musl --release --no-default-features --features="$(DATABASE)" dist: # Keep these in this order, as aarch64 is based on Debian Buster (older), # the others on Bullseye. For some build scripts we want to build against # least recent LIBC. 
- cross build --target aarch64-unknown-linux-musl --release - cross build --target x86_64-unknown-linux-musl --release - cross build --target armv7-unknown-linux-musleabihf --release + cross build --target aarch64-unknown-linux-musl --release --no-default-features --features="$(DATABASE)" + cross build --target x86_64-unknown-linux-musl --release --no-default-features --features="$(DATABASE)" + cross build --target armv7-unknown-linux-musleabihf --release --no-default-features --features="$(DATABASE)" cargo deb --target x86_64-unknown-linux-musl --no-build --no-strip cargo deb --target armv7-unknown-linux-musleabihf --no-build --no-strip @@ -40,10 +41,38 @@ dist: test: cargo fmt --check - cargo clippy --no-deps - TZ=UTC cargo test + cargo clippy --no-deps --no-default-features --features="$(DATABASE)" + TZ=UTC cargo test --no-default-features --features="$(DATABASE)" test-all: cargo fmt --check - cargo clippy --no-deps - TZ=UTC cargo test --features test-all-integrations \ No newline at end of file + cargo clippy --no-deps --no-default-features --features="$(DATABASE)" + TZ=UTC cargo test --no-default-features --features="$(DATABASE),test-all-integrations" + +migration-generate: +ifeq ($(NAME),) + @echo "You must provide a NAME parameter, e.g. 
make migration-generate NAME=test-migration" +else + diesel --config-file diesel_$(DATABASE).toml migration --migration-dir migrations_$(DATABASE) generate $(NAME) +endif + +migration-run: chirpstack_test.sqlite +ifeq ($(DATABASE),postgres) + diesel --config-file diesel_postgres.toml migration --migration-dir migrations_postgres run +endif +ifeq ($(DATABASE),sqlite) + DATABASE_URL="chirpstack_test.sqlite" diesel --config-file diesel_sqlite.toml migration --migration-dir migrations_sqlite run + sed -i 's/Timestamp/TimestamptzSqlite/g' src/storage/schema_sqlite.rs +endif + +migration-revert: chirpstack_test.sqlite +ifeq ($(DATABASE),postgres) + diesel --config-file diesel_postgres.toml migration --migration-dir migrations_postgres revert +endif +ifeq ($(DATABASE),sqlite) + DATABASE_URL="chirpstack_test.sqlite" diesel --config-file diesel_sqlite.toml migration --migration-dir migrations_sqlite revert + sed -i 's/Timestamp/TimestamptzSqlite/g' src/storage/schema_sqlite.rs +endif + +chirpstack_test.sqlite: + DATABASE_URL=chirpstack_test.sqlite diesel --config-file diesel_sqlite.toml setup --migration-dir migrations_sqlite diff --git a/chirpstack/diesel.toml b/chirpstack/diesel_postgres.toml similarity index 73% rename from chirpstack/diesel.toml rename to chirpstack/diesel_postgres.toml index ea03dd5f..c783e1f2 100644 --- a/chirpstack/diesel.toml +++ b/chirpstack/diesel_postgres.toml @@ -2,4 +2,4 @@ # see diesel.rs/guides/configuring-diesel-cli [print_schema] -file = "src/storage/schema.rs" +file = "src/storage/schema_postgres.rs" diff --git a/chirpstack/diesel_sqlite.toml b/chirpstack/diesel_sqlite.toml new file mode 100644 index 00000000..be457676 --- /dev/null +++ b/chirpstack/diesel_sqlite.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] + file = "src/storage/schema_sqlite.rs" diff --git a/chirpstack/migrations/00000000000000_initial/down.sql 
b/chirpstack/migrations_postgres/00000000000000_initial/down.sql similarity index 100% rename from chirpstack/migrations/00000000000000_initial/down.sql rename to chirpstack/migrations_postgres/00000000000000_initial/down.sql diff --git a/chirpstack/migrations/00000000000000_initial/up.sql b/chirpstack/migrations_postgres/00000000000000_initial/up.sql similarity index 100% rename from chirpstack/migrations/00000000000000_initial/up.sql rename to chirpstack/migrations_postgres/00000000000000_initial/up.sql diff --git a/chirpstack/migrations/2022-04-26-153628_device_profile_payload_codec_script/down.sql b/chirpstack/migrations_postgres/2022-04-26-153628_device_profile_payload_codec_script/down.sql similarity index 100% rename from chirpstack/migrations/2022-04-26-153628_device_profile_payload_codec_script/down.sql rename to chirpstack/migrations_postgres/2022-04-26-153628_device_profile_payload_codec_script/down.sql diff --git a/chirpstack/migrations/2022-04-26-153628_device_profile_payload_codec_script/up.sql b/chirpstack/migrations_postgres/2022-04-26-153628_device_profile_payload_codec_script/up.sql similarity index 100% rename from chirpstack/migrations/2022-04-26-153628_device_profile_payload_codec_script/up.sql rename to chirpstack/migrations_postgres/2022-04-26-153628_device_profile_payload_codec_script/up.sql diff --git a/chirpstack/migrations/2022-04-28-071028_device_profile_flush_queue_on_activate/down.sql b/chirpstack/migrations_postgres/2022-04-28-071028_device_profile_flush_queue_on_activate/down.sql similarity index 100% rename from chirpstack/migrations/2022-04-28-071028_device_profile_flush_queue_on_activate/down.sql rename to chirpstack/migrations_postgres/2022-04-28-071028_device_profile_flush_queue_on_activate/down.sql diff --git a/chirpstack/migrations/2022-04-28-071028_device_profile_flush_queue_on_activate/up.sql b/chirpstack/migrations_postgres/2022-04-28-071028_device_profile_flush_queue_on_activate/up.sql similarity index 100% rename from 
chirpstack/migrations/2022-04-28-071028_device_profile_flush_queue_on_activate/up.sql rename to chirpstack/migrations_postgres/2022-04-28-071028_device_profile_flush_queue_on_activate/up.sql diff --git a/chirpstack/migrations/2022-05-11-084032_device_profile_templates/down.sql b/chirpstack/migrations_postgres/2022-05-11-084032_device_profile_templates/down.sql similarity index 100% rename from chirpstack/migrations/2022-05-11-084032_device_profile_templates/down.sql rename to chirpstack/migrations_postgres/2022-05-11-084032_device_profile_templates/down.sql diff --git a/chirpstack/migrations/2022-05-11-084032_device_profile_templates/up.sql b/chirpstack/migrations_postgres/2022-05-11-084032_device_profile_templates/up.sql similarity index 100% rename from chirpstack/migrations/2022-05-11-084032_device_profile_templates/up.sql rename to chirpstack/migrations_postgres/2022-05-11-084032_device_profile_templates/up.sql diff --git a/chirpstack/migrations/2022-06-14-130020_device_profile_measurements/down.sql b/chirpstack/migrations_postgres/2022-06-14-130020_device_profile_measurements/down.sql similarity index 100% rename from chirpstack/migrations/2022-06-14-130020_device_profile_measurements/down.sql rename to chirpstack/migrations_postgres/2022-06-14-130020_device_profile_measurements/down.sql diff --git a/chirpstack/migrations/2022-06-14-130020_device_profile_measurements/up.sql b/chirpstack/migrations_postgres/2022-06-14-130020_device_profile_measurements/up.sql similarity index 100% rename from chirpstack/migrations/2022-06-14-130020_device_profile_measurements/up.sql rename to chirpstack/migrations_postgres/2022-06-14-130020_device_profile_measurements/up.sql diff --git a/chirpstack/migrations/2022-11-02-090533_auto_detect_measurements/down.sql b/chirpstack/migrations_postgres/2022-11-02-090533_auto_detect_measurements/down.sql similarity index 100% rename from chirpstack/migrations/2022-11-02-090533_auto_detect_measurements/down.sql rename to 
chirpstack/migrations_postgres/2022-11-02-090533_auto_detect_measurements/down.sql diff --git a/chirpstack/migrations/2022-11-02-090533_auto_detect_measurements/up.sql b/chirpstack/migrations_postgres/2022-11-02-090533_auto_detect_measurements/up.sql similarity index 100% rename from chirpstack/migrations/2022-11-02-090533_auto_detect_measurements/up.sql rename to chirpstack/migrations_postgres/2022-11-02-090533_auto_detect_measurements/up.sql diff --git a/chirpstack/migrations/2023-01-03-201442_device_profile_region_config_id/down.sql b/chirpstack/migrations_postgres/2023-01-03-201442_device_profile_region_config_id/down.sql similarity index 100% rename from chirpstack/migrations/2023-01-03-201442_device_profile_region_config_id/down.sql rename to chirpstack/migrations_postgres/2023-01-03-201442_device_profile_region_config_id/down.sql diff --git a/chirpstack/migrations/2023-01-03-201442_device_profile_region_config_id/up.sql b/chirpstack/migrations_postgres/2023-01-03-201442_device_profile_region_config_id/up.sql similarity index 100% rename from chirpstack/migrations/2023-01-03-201442_device_profile_region_config_id/up.sql rename to chirpstack/migrations_postgres/2023-01-03-201442_device_profile_region_config_id/up.sql diff --git a/chirpstack/migrations/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/down.sql b/chirpstack/migrations_postgres/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/down.sql similarity index 100% rename from chirpstack/migrations/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/down.sql rename to chirpstack/migrations_postgres/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/down.sql diff --git a/chirpstack/migrations/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/up.sql b/chirpstack/migrations_postgres/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/up.sql similarity index 100% rename from chirpstack/migrations/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/up.sql rename to 
chirpstack/migrations_postgres/2023-01-12-130153_rename_ping_slot_period_ping_slot_nb_k/up.sql diff --git a/chirpstack/migrations/2023-02-06-135050_multicast_gateway/down.sql b/chirpstack/migrations_postgres/2023-02-06-135050_multicast_gateway/down.sql similarity index 100% rename from chirpstack/migrations/2023-02-06-135050_multicast_gateway/down.sql rename to chirpstack/migrations_postgres/2023-02-06-135050_multicast_gateway/down.sql diff --git a/chirpstack/migrations/2023-02-06-135050_multicast_gateway/up.sql b/chirpstack/migrations_postgres/2023-02-06-135050_multicast_gateway/up.sql similarity index 100% rename from chirpstack/migrations/2023-02-06-135050_multicast_gateway/up.sql rename to chirpstack/migrations_postgres/2023-02-06-135050_multicast_gateway/up.sql diff --git a/chirpstack/migrations/2023-02-13-103316_update_private_gateways/down.sql b/chirpstack/migrations_postgres/2023-02-13-103316_update_private_gateways/down.sql similarity index 100% rename from chirpstack/migrations/2023-02-13-103316_update_private_gateways/down.sql rename to chirpstack/migrations_postgres/2023-02-13-103316_update_private_gateways/down.sql diff --git a/chirpstack/migrations/2023-02-13-103316_update_private_gateways/up.sql b/chirpstack/migrations_postgres/2023-02-13-103316_update_private_gateways/up.sql similarity index 100% rename from chirpstack/migrations/2023-02-13-103316_update_private_gateways/up.sql rename to chirpstack/migrations_postgres/2023-02-13-103316_update_private_gateways/up.sql diff --git a/chirpstack/migrations/2023-02-16-091535_add_relay_support/down.sql b/chirpstack/migrations_postgres/2023-02-16-091535_add_relay_support/down.sql similarity index 100% rename from chirpstack/migrations/2023-02-16-091535_add_relay_support/down.sql rename to chirpstack/migrations_postgres/2023-02-16-091535_add_relay_support/down.sql diff --git a/chirpstack/migrations/2023-02-16-091535_add_relay_support/up.sql 
b/chirpstack/migrations_postgres/2023-02-16-091535_add_relay_support/up.sql similarity index 100% rename from chirpstack/migrations/2023-02-16-091535_add_relay_support/up.sql rename to chirpstack/migrations_postgres/2023-02-16-091535_add_relay_support/up.sql diff --git a/chirpstack/migrations/2023-09-25-105457_encrypted_queue_items/down.sql b/chirpstack/migrations_postgres/2023-09-25-105457_encrypted_queue_items/down.sql similarity index 100% rename from chirpstack/migrations/2023-09-25-105457_encrypted_queue_items/down.sql rename to chirpstack/migrations_postgres/2023-09-25-105457_encrypted_queue_items/down.sql diff --git a/chirpstack/migrations/2023-09-25-105457_encrypted_queue_items/up.sql b/chirpstack/migrations_postgres/2023-09-25-105457_encrypted_queue_items/up.sql similarity index 100% rename from chirpstack/migrations/2023-09-25-105457_encrypted_queue_items/up.sql rename to chirpstack/migrations_postgres/2023-09-25-105457_encrypted_queue_items/up.sql diff --git a/chirpstack/migrations/2023-10-19-142614_add_tenant_and_app_tags/down.sql b/chirpstack/migrations_postgres/2023-10-19-142614_add_tenant_and_app_tags/down.sql similarity index 100% rename from chirpstack/migrations/2023-10-19-142614_add_tenant_and_app_tags/down.sql rename to chirpstack/migrations_postgres/2023-10-19-142614_add_tenant_and_app_tags/down.sql diff --git a/chirpstack/migrations/2023-10-19-142614_add_tenant_and_app_tags/up.sql b/chirpstack/migrations_postgres/2023-10-19-142614_add_tenant_and_app_tags/up.sql similarity index 100% rename from chirpstack/migrations/2023-10-19-142614_add_tenant_and_app_tags/up.sql rename to chirpstack/migrations_postgres/2023-10-19-142614_add_tenant_and_app_tags/up.sql diff --git a/chirpstack/migrations/2023-11-22-120700_device_profile_allow_roaming/down.sql b/chirpstack/migrations_postgres/2023-11-22-120700_device_profile_allow_roaming/down.sql similarity index 100% rename from chirpstack/migrations/2023-11-22-120700_device_profile_allow_roaming/down.sql 
rename to chirpstack/migrations_postgres/2023-11-22-120700_device_profile_allow_roaming/down.sql diff --git a/chirpstack/migrations/2023-11-22-120700_device_profile_allow_roaming/up.sql b/chirpstack/migrations_postgres/2023-11-22-120700_device_profile_allow_roaming/up.sql similarity index 100% rename from chirpstack/migrations/2023-11-22-120700_device_profile_allow_roaming/up.sql rename to chirpstack/migrations_postgres/2023-11-22-120700_device_profile_allow_roaming/up.sql diff --git a/chirpstack/migrations/2024-02-07-083424_add_device_session_to_device/down.sql b/chirpstack/migrations_postgres/2024-02-07-083424_add_device_session_to_device/down.sql similarity index 100% rename from chirpstack/migrations/2024-02-07-083424_add_device_session_to_device/down.sql rename to chirpstack/migrations_postgres/2024-02-07-083424_add_device_session_to_device/down.sql diff --git a/chirpstack/migrations/2024-02-07-083424_add_device_session_to_device/up.sql b/chirpstack/migrations_postgres/2024-02-07-083424_add_device_session_to_device/up.sql similarity index 100% rename from chirpstack/migrations/2024-02-07-083424_add_device_session_to_device/up.sql rename to chirpstack/migrations_postgres/2024-02-07-083424_add_device_session_to_device/up.sql diff --git a/chirpstack/migrations/2024-03-26-134652_align_multicast_ping_slot_config/down.sql b/chirpstack/migrations_postgres/2024-03-26-134652_align_multicast_ping_slot_config/down.sql similarity index 100% rename from chirpstack/migrations/2024-03-26-134652_align_multicast_ping_slot_config/down.sql rename to chirpstack/migrations_postgres/2024-03-26-134652_align_multicast_ping_slot_config/down.sql diff --git a/chirpstack/migrations/2024-03-26-134652_align_multicast_ping_slot_config/up.sql b/chirpstack/migrations_postgres/2024-03-26-134652_align_multicast_ping_slot_config/up.sql similarity index 100% rename from chirpstack/migrations/2024-03-26-134652_align_multicast_ping_slot_config/up.sql rename to 
chirpstack/migrations_postgres/2024-03-26-134652_align_multicast_ping_slot_config/up.sql diff --git a/chirpstack/migrations/2024-04-30-103242_add_rx1_delay_setting_to_ds/down.sql b/chirpstack/migrations_postgres/2024-04-30-103242_add_rx1_delay_setting_to_ds/down.sql similarity index 100% rename from chirpstack/migrations/2024-04-30-103242_add_rx1_delay_setting_to_ds/down.sql rename to chirpstack/migrations_postgres/2024-04-30-103242_add_rx1_delay_setting_to_ds/down.sql diff --git a/chirpstack/migrations/2024-04-30-103242_add_rx1_delay_setting_to_ds/up.sql b/chirpstack/migrations_postgres/2024-04-30-103242_add_rx1_delay_setting_to_ds/up.sql similarity index 100% rename from chirpstack/migrations/2024-04-30-103242_add_rx1_delay_setting_to_ds/up.sql rename to chirpstack/migrations_postgres/2024-04-30-103242_add_rx1_delay_setting_to_ds/up.sql diff --git a/chirpstack/migrations/2024-06-13-122655_add_relay_gateway/down.sql b/chirpstack/migrations_postgres/2024-06-13-122655_add_relay_gateway/down.sql similarity index 100% rename from chirpstack/migrations/2024-06-13-122655_add_relay_gateway/down.sql rename to chirpstack/migrations_postgres/2024-06-13-122655_add_relay_gateway/down.sql diff --git a/chirpstack/migrations/2024-06-13-122655_add_relay_gateway/up.sql b/chirpstack/migrations_postgres/2024-06-13-122655_add_relay_gateway/up.sql similarity index 100% rename from chirpstack/migrations/2024-06-13-122655_add_relay_gateway/up.sql rename to chirpstack/migrations_postgres/2024-06-13-122655_add_relay_gateway/up.sql diff --git a/chirpstack/migrations_sqlite/00000000000000_initial/down.sql b/chirpstack/migrations_sqlite/00000000000000_initial/down.sql new file mode 100644 index 00000000..de8fdb09 --- /dev/null +++ b/chirpstack/migrations_sqlite/00000000000000_initial/down.sql @@ -0,0 +1,18 @@ +drop table relay_gateway; +drop table multicast_group_gateway; +drop table multicast_group_queue_item; +drop table multicast_group_device; +drop table multicast_group; +drop table 
device_queue_item; +drop table device_keys; +drop table device; +drop table device_profile; +drop table api_key; +drop table application_integration; +drop table application; +drop table gateway; +drop table tenant_user; +drop table tenant; +drop table "user"; +drop table relay_device; +drop table device_profile_template; diff --git a/chirpstack/migrations_sqlite/00000000000000_initial/up.sql b/chirpstack/migrations_sqlite/00000000000000_initial/up.sql new file mode 100644 index 00000000..e730ae80 --- /dev/null +++ b/chirpstack/migrations_sqlite/00000000000000_initial/up.sql @@ -0,0 +1,392 @@ +-- user +create table "user" ( + id text not null primary key, + external_id text null, + created_at datetime not null, + updated_at datetime not null, + is_admin boolean not null, + is_active boolean not null, + email text not null, + email_verified boolean not null, + password_hash varchar(200) not null, + note text not null +); + +create unique index idx_user_email on "user"(email); +create unique index idx_user_external_id on "user"(external_id); + +insert into "user" ( + id, + created_at, + updated_at, + is_admin, + is_active, + email, + email_verified, + password_hash, + note +) values ( + '05244f12-6daf-4e1f-8315-c66783a0ab56', + datetime('now'), + datetime('now'), + TRUE, + TRUE, + 'admin', + FALSE, + '$pbkdf2-sha512$i=1,l=64$l8zGKtxRESq3PA2kFhHRWA$H3lGMxOt55wjwoc+myeOoABofJY9oDpldJa7fhqdjbh700V6FLPML75UmBOt9J5VFNjAL1AvqCozA1HJM0QVGA', + '' +); + +-- tenant +create table tenant ( + id text not null primary key, + created_at datetime not null, + updated_at datetime not null, + name varchar(100) not null, + description text not null, + can_have_gateways boolean not null, + max_device_count integer not null, + max_gateway_count integer not null, + private_gateways_up boolean not null, + private_gateways_down boolean not null default FALSE, + tags text not null default '{}' +); + +-- sqlite has advanced text search with https://www.sqlite.org/fts5.html +-- but looks like 
it is for a full table and not specific per column, to investigate +create index idx_tenant_name_trgm on "tenant"(name); + +insert into "tenant" ( + id, + created_at, + updated_at, + name, + description, + can_have_gateways, + max_device_count, + max_gateway_count, + private_gateways_up +) values ( + '52f14cd4-c6f1-4fbd-8f87-4025e1d49242', + datetime('now'), + datetime('now'), + 'ChirpStack', + '', + TRUE, + 0, + 0, + FALSE +); + +-- tenant user +create table tenant_user ( + tenant_id text not null references tenant on delete cascade, + user_id text not null references "user" on delete cascade, + created_at datetime not null, + updated_at datetime not null, + is_admin boolean not null, + is_device_admin boolean not null, + is_gateway_admin boolean not null, + primary key (tenant_id, user_id) +); + +create index idx_tenant_user_user_id on tenant_user (user_id); + +-- gateway +create table gateway ( + gateway_id blob not null primary key, + tenant_id text not null references tenant on delete cascade, + created_at datetime not null, + updated_at datetime not null, + last_seen_at datetime, + name varchar(100) not null, + description text not null, + latitude double precision not null, + longitude double precision not null, + altitude real not null, + stats_interval_secs integer not null, + tls_certificate blob, + tags text not null, + properties text not null +); + +create index idx_gateway_tenant_id on gateway (tenant_id); +create index idx_gateway_name_trgm on gateway (name); +create index idx_gateway_id_trgm on gateway (hex(gateway_id)); +create index idx_gateway_tags on gateway (tags); + +-- application +create table application ( + id text not null primary key, + tenant_id text not null references tenant on delete cascade, + created_at datetime not null, + updated_at datetime not null, + name varchar(100) not null, + description text not null, + mqtt_tls_cert blob, + tags text not null default '{}' +); + +create index idx_application_tenant_id on application 
(tenant_id); +create index idx_application_name_trgm on application (name); + +-- application integration +create table application_integration ( + application_id text not null references application on delete cascade, + kind varchar(20) not null, + created_at datetime not null, + updated_at datetime not null, + configuration text not null, + + primary key (application_id, kind) +); + +-- api-key +create table api_key ( + id text not null primary key, + created_at datetime not null, + name varchar(100) not null, + is_admin boolean not null, + tenant_id text null references tenant on delete cascade +); + +create index idx_api_key_tenant_id on api_key (tenant_id); + +-- device-profile +create table device_profile ( + id text not null primary key, + tenant_id text not null references tenant on delete cascade, + created_at datetime not null, + updated_at datetime not null, + name varchar(100) not null, + region varchar(10) not null, + mac_version varchar(10) not null, + reg_params_revision varchar(20) not null, + adr_algorithm_id varchar(100) not null, + payload_codec_runtime varchar(20) not null, + uplink_interval integer not null, + device_status_req_interval integer not null, + supports_otaa boolean not null, + supports_class_b boolean not null, + supports_class_c boolean not null, + class_b_timeout integer not null, + class_b_ping_slot_nb_k integer not null, + class_b_ping_slot_dr smallint not null, + class_b_ping_slot_freq bigint not null, + class_c_timeout integer not null, + abp_rx1_delay smallint not null, + abp_rx1_dr_offset smallint not null, + abp_rx2_dr smallint not null, + abp_rx2_freq bigint not null, + tags text not null, + payload_codec_script text not null default '', + flush_queue_on_activate boolean not null default FALSE, + description text not null default '', + measurements text not null default '{}', + auto_detect_measurements boolean not null default TRUE, + region_config_id varchar(100) null, + is_relay boolean not null default FALSE, + 
is_relay_ed boolean not null default FALSE, + relay_ed_relay_only boolean not null default FALSE, + relay_enabled boolean not null default FALSE, + relay_cad_periodicity smallint not null default 0, + relay_default_channel_index smallint not null default 0, + relay_second_channel_freq bigint not null default 0, + relay_second_channel_dr smallint not null default 0, + relay_second_channel_ack_offset smallint not null default 0, + relay_ed_activation_mode smallint not null default 0, + relay_ed_smart_enable_level smallint not null default 0, + relay_ed_back_off smallint not null default 0, + relay_ed_uplink_limit_bucket_size smallint not null default 0, + relay_ed_uplink_limit_reload_rate smallint not null default 0, + relay_join_req_limit_reload_rate smallint not null default 0, + relay_notify_limit_reload_rate smallint not null default 0, + relay_global_uplink_limit_reload_rate smallint not null default 0, + relay_overall_limit_reload_rate smallint not null default 0, + relay_join_req_limit_bucket_size smallint not null default 0, + relay_notify_limit_bucket_size smallint not null default 0, + relay_global_uplink_limit_bucket_size smallint not null default 0, + relay_overall_limit_bucket_size smallint not null default 0, + allow_roaming boolean not null default TRUE, + rx1_delay smallint not null default 0 +); + +create index idx_device_profile_tenant_id on device_profile (tenant_id); +create index idx_device_profile_name_trgm on device_profile (name); +create index idx_device_profile_tags on device_profile (tags); + +-- device +create table device ( + dev_eui blob not null primary key, + application_id text not null references application on delete cascade, + device_profile_id text not null references device_profile on delete cascade, + created_at datetime not null, + updated_at datetime not null, + last_seen_at datetime, + scheduler_run_after datetime, + name varchar(100) not null, + description text not null, + external_power_source boolean not null, + 
battery_level numeric(5, 2), + margin int, + dr smallint, + latitude double precision, + longitude double precision, + altitude real, + dev_addr blob, + enabled_class char(1) not null, + skip_fcnt_check boolean not null, + is_disabled boolean not null, + tags text not null, + variables text not null, + join_eui blob not null default x'00000000', + secondary_dev_addr blob, + device_session blob +); + +create index idx_device_application_id on device (application_id); +create index idx_device_device_profile_id on device (device_profile_id); +create index idx_device_name_trgm on device (name); +create index idx_device_dev_eui_trgm on device (hex(dev_eui)); +create index idx_device_dev_addr_trgm on device (hex(dev_addr)); +create index idx_device_tags on device (tags); + +create table device_keys ( + dev_eui blob not null primary key references device on delete cascade, + created_at datetime not null, + updated_at datetime not null, + nwk_key blob not null, + app_key blob not null, + dev_nonces text not null, + join_nonce int not null +); + +create table device_queue_item ( + id text not null primary key, + dev_eui blob references device on delete cascade not null, + created_at datetime not null, + f_port smallint not null, + confirmed boolean not null, + data blob not null, + is_pending boolean not null, + f_cnt_down bigint null, + timeout_after datetime, + is_encrypted boolean default FALSE not null +); + +create index idx_device_queue_item_dev_eui on device_queue_item (dev_eui); +create index idx_device_queue_item_created_at on device_queue_item (created_at); +create index idx_device_queue_item_timeout_after on device_queue_item (timeout_after); + +-- multicast groups +create table multicast_group ( + id text not null primary key, + application_id text not null references application on delete cascade, + created_at datetime not null, + updated_at datetime not null, + name varchar(100) not null, + region varchar(10) not null, + mc_addr blob not null, + mc_nwk_s_key 
blob not null, + mc_app_s_key blob not null, + f_cnt bigint not null, + group_type char(1) not null, + dr smallint not null, + frequency bigint not null, + class_b_ping_slot_nb_k smallint not null, + class_c_scheduling_type varchar(20) not null default 'delay' +); + +create index idx_multicast_group_application_id on multicast_group (application_id); +create index idx_multicast_group_name_trgm on multicast_group (name); + +create table multicast_group_device ( + multicast_group_id text not null references multicast_group on delete cascade, + dev_eui blob not null references device on delete cascade, + created_at datetime not null, + primary key (multicast_group_id, dev_eui) +); + +create table multicast_group_queue_item ( + id text not null primary key, + created_at datetime not null, + scheduler_run_after datetime not null, + multicast_group_id text not null references multicast_group on delete cascade, + gateway_id blob not null references gateway on delete cascade, + f_cnt bigint not null, + f_port smallint not null, + data blob not null, + emit_at_time_since_gps_epoch bigint +); + +create index idx_multicast_group_queue_item_multicast_group_id on multicast_group_queue_item (multicast_group_id); +create index idx_multicast_group_queue_item_scheduler_run_after on multicast_group_queue_item (scheduler_run_after); + +create table device_profile_template ( + id text not null primary key, + created_at datetime not null, + updated_at datetime not null, + name varchar(100) not null, + description text not null, + vendor varchar(100) not null, + firmware varchar(100) not null, + region varchar(10) not null, + mac_version varchar(10) not null, + reg_params_revision varchar(20) not null, + adr_algorithm_id varchar(100) not null, + payload_codec_runtime varchar(20) not null, + payload_codec_script text not null, + uplink_interval integer not null, + device_status_req_interval integer not null, + flush_queue_on_activate boolean not null, + supports_otaa boolean not null, + 
supports_class_b boolean not null, + supports_class_c boolean not null, + class_b_timeout integer not null, + class_b_ping_slot_nb_k integer not null, + class_b_ping_slot_dr smallint not null, + class_b_ping_slot_freq bigint not null, + class_c_timeout integer not null, + abp_rx1_delay smallint not null, + abp_rx1_dr_offset smallint not null, + abp_rx2_dr smallint not null, + abp_rx2_freq bigint not null, + tags text not null, + measurements text not null default '{}', + auto_detect_measurements boolean not null default TRUE +); + +create table multicast_group_gateway ( + multicast_group_id text not null references multicast_group on delete cascade, + gateway_id blob not null references gateway on delete cascade, + created_at datetime not null, + primary key (multicast_group_id, gateway_id) +); + +create table relay_device ( + relay_dev_eui blob not null references device on delete cascade, + dev_eui blob not null references device on delete cascade, + created_at datetime not null, + primary key (relay_dev_eui, dev_eui) +); + +create index idx_tenant_tags on tenant (tags); +create index idx_application_tags on application (tags); +create index idx_device_dev_addr on device (dev_addr); +create index idx_device_secondary_dev_addr on device (secondary_dev_addr); + + +-- relay gateway +create table relay_gateway ( + tenant_id text not null references tenant on delete cascade, + relay_id blob not null, + created_at datetime not null, + updated_at datetime not null, + last_seen_at datetime, + name varchar(100) not null, + description text not null, + stats_interval_secs integer not null, + region_config_id varchar(100) not null, + + primary key (tenant_id, relay_id) +); diff --git a/chirpstack/src/api/application.rs b/chirpstack/src/api/application.rs index 7d4c53cc..24a496b7 100644 --- a/chirpstack/src/api/application.rs +++ b/chirpstack/src/api/application.rs @@ -44,7 +44,7 @@ impl ApplicationService for Application { .await?; let a = application::Application { - 
tenant_id, + tenant_id: tenant_id.into(), name: req_app.name.clone(), description: req_app.description.clone(), tags: fields::KeyValue::new(req_app.tags.clone()), @@ -119,7 +119,7 @@ impl ApplicationService for Application { .await?; let _ = application::update(application::Application { - id: app_id, + id: app_id.into(), name: req_app.name.to_string(), description: req_app.description.to_string(), tags: fields::KeyValue::new(req_app.tags.clone()), @@ -279,7 +279,7 @@ impl ApplicationService for Application { .await?; let i = application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::Http, configuration: application::IntegrationConfiguration::Http( application::HttpConfiguration { @@ -367,7 +367,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::Http, configuration: application::IntegrationConfiguration::Http( application::HttpConfiguration { @@ -438,7 +438,7 @@ impl ApplicationService for Application { .await?; let i = application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::InfluxDb, configuration: application::IntegrationConfiguration::InfluxDb( application::InfluxDbConfiguration { @@ -535,7 +535,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::InfluxDb, configuration: application::IntegrationConfiguration::InfluxDb( application::InfluxDbConfiguration { @@ -610,7 +610,7 @@ impl ApplicationService for Application { .await?; let i = application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::ThingsBoard, configuration: 
application::IntegrationConfiguration::ThingsBoard( application::ThingsBoardConfiguration { @@ -689,7 +689,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::ThingsBoard, configuration: application::IntegrationConfiguration::ThingsBoard( application::ThingsBoardConfiguration { @@ -755,7 +755,7 @@ impl ApplicationService for Application { .await?; let _ = application::create_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::MyDevices, configuration: application::IntegrationConfiguration::MyDevices( application::MyDevicesConfiguration { @@ -832,7 +832,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::MyDevices, configuration: application::IntegrationConfiguration::MyDevices( application::MyDevicesConfiguration { @@ -907,7 +907,7 @@ impl ApplicationService for Application { }; let _ = application::create_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::LoraCloud, configuration: application::IntegrationConfiguration::LoraCloud( application::LoraCloudConfiguration { @@ -1032,7 +1032,7 @@ impl ApplicationService for Application { }; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::LoraCloud, configuration: application::IntegrationConfiguration::LoraCloud( application::LoraCloudConfiguration { @@ -1119,7 +1119,7 @@ impl ApplicationService for Application { .await?; let _ = application::create_integration(application::Integration { - application_id: app_id, + 
application_id: app_id.into(), kind: application::IntegrationKind::GcpPubSub, configuration: application::IntegrationConfiguration::GcpPubSub( application::GcpPubSubConfiguration { @@ -1202,7 +1202,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::GcpPubSub, configuration: application::IntegrationConfiguration::GcpPubSub( application::GcpPubSubConfiguration { @@ -1271,7 +1271,7 @@ impl ApplicationService for Application { .await?; let _ = application::create_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::AwsSns, configuration: application::IntegrationConfiguration::AwsSns( application::AwsSnsConfiguration { @@ -1354,7 +1354,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::AwsSns, configuration: application::IntegrationConfiguration::AwsSns( application::AwsSnsConfiguration { @@ -1424,7 +1424,7 @@ impl ApplicationService for Application { .await?; let _ = application::create_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::AzureServiceBus, configuration: application::IntegrationConfiguration::AzureServiceBus( application::AzureServiceBusConfiguration { @@ -1506,7 +1506,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::AzureServiceBus, configuration: application::IntegrationConfiguration::AzureServiceBus( application::AzureServiceBusConfiguration { @@ -1574,7 +1574,7 @@ impl ApplicationService for 
Application { .await?; let _ = application::create_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::PilotThings, configuration: application::IntegrationConfiguration::PilotThings( application::PilotThingsConfiguration { @@ -1653,7 +1653,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::PilotThings, configuration: application::IntegrationConfiguration::PilotThings( application::PilotThingsConfiguration { @@ -1730,7 +1730,7 @@ impl ApplicationService for Application { } let _ = application::create_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::Ifttt, configuration: application::IntegrationConfiguration::Ifttt( application::IftttConfiguration { @@ -1814,7 +1814,7 @@ impl ApplicationService for Application { .await?; let _ = application::update_integration(application::Integration { - application_id: app_id, + application_id: app_id.into(), kind: application::IntegrationKind::Ifttt, configuration: application::IntegrationConfiguration::Ifttt( application::IftttConfiguration { @@ -1945,7 +1945,9 @@ pub mod test { }), }; let mut create_req = Request::new(create_req); - create_req.extensions_mut().insert(AuthID::User(u.id)); + create_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let create_resp = service.create(create_req).await.unwrap(); let create_resp = create_resp.get_ref(); @@ -1954,7 +1956,9 @@ pub mod test { id: create_resp.id.clone(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::Application { 
@@ -1976,7 +1980,9 @@ pub mod test { }), }; let mut up_req = Request::new(up_req); - up_req.extensions_mut().insert(AuthID::User(u.id)); + up_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.update(up_req).await.unwrap(); //get @@ -1984,7 +1990,9 @@ pub mod test { id: create_resp.id.clone(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::Application { @@ -2004,7 +2012,9 @@ pub mod test { offset: 0, }; let mut list_req = Request::new(list_req); - list_req.extensions_mut().insert(AuthID::User(u.id)); + list_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let list_resp = service.list(list_req).await.unwrap(); assert_eq!(1, list_resp.get_ref().total_count); assert_eq!(1, list_resp.get_ref().result.len()); @@ -2014,14 +2024,18 @@ pub mod test { id: create_resp.id.clone(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.delete(del_req).await.unwrap(); let del_req = api::DeleteApplicationRequest { id: create_resp.id.clone(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let del_resp = service.delete(del_req).await; assert!(del_resp.is_err()); } diff --git a/chirpstack/src/api/auth/validator.rs b/chirpstack/src/api/auth/validator.rs index 4758d847..da122cdb 100644 --- a/chirpstack/src/api/auth/validator.rs +++ b/chirpstack/src/api/auth/validator.rs @@ -11,10 +11,10 @@ use lrwn::EUI64; use super::error::Error; use crate::api::auth::AuthID; use crate::helpers::errors::PrintFullError; -use 
crate::storage::get_async_db_conn; use crate::storage::schema::{ api_key, application, device, device_profile, gateway, multicast_group, tenant_user, user, }; +use crate::storage::{fields, get_async_db_conn}; #[derive(Copy, Clone)] pub enum Flag { @@ -94,7 +94,7 @@ impl Validator for ValidateActiveUser { async fn validate_user(&self, id: &Uuid) -> Result { let count = user::dsl::user .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .filter(user::dsl::is_active.eq(true)) .first(&mut get_async_db_conn().await?) .await?; @@ -119,7 +119,7 @@ impl Validator for ValidateIsAdmin { async fn validate_user(&self, id: &Uuid) -> Result { let count = user::dsl::user .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .filter( user::dsl::is_active .eq(true) @@ -144,7 +144,7 @@ impl Validator for ValidateActiveUserOrKey { async fn validate_key(&self, id: &Uuid) -> Result { let count = api_key::dsl::api_key .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) .await?; Ok(count) @@ -153,7 +153,7 @@ impl Validator for ValidateActiveUserOrKey { async fn validate_user(&self, id: &Uuid) -> Result { let count = user::dsl::user .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .filter(user::dsl::is_active.eq(true)) .first(&mut get_async_db_conn().await?) .await?; @@ -176,7 +176,7 @@ impl Validator for ValidateUsersAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .filter(user::dsl::is_active.eq(true)) .into_boxed(); @@ -197,7 +197,7 @@ impl Validator for ValidateUsersAccess { // admin api key let count = api_key::dsl::api_key .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .filter(api_key::dsl::is_admin.eq(true)) .first(&mut get_async_db_conn().await?) 
.await?; @@ -221,7 +221,7 @@ impl Validator for ValidateUserAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .filter(user::dsl::is_active.eq(true)) .into_boxed(); @@ -232,7 +232,7 @@ impl Validator for ValidateUserAccess { q = q.filter( user::dsl::is_admin .eq(true) - .or(user::dsl::id.eq(&self.user_id)), + .or(user::dsl::id.eq(fields::Uuid::from(self.user_id))), ); } // admin user @@ -251,7 +251,7 @@ impl Validator for ValidateUserAccess { // admin api key let count = api_key::dsl::api_key .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .filter(api_key::dsl::is_admin.eq(true)) .first(&mut get_async_db_conn().await?) .await?; @@ -281,7 +281,11 @@ impl Validator for ValidateApiKeysAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(&id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -292,7 +296,7 @@ impl Validator for ValidateApiKeysAccess { user::dsl::is_admin.eq(true).or(dsl::exists( tenant_user::dsl::tenant_user.filter( tenant_user::dsl::tenant_id - .eq(&self.tenant_id) + .eq(fields::Uuid::from(self.tenant_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and(tenant_user::dsl::is_admin.eq(true)), ), @@ -306,7 +310,7 @@ impl Validator for ValidateApiKeysAccess { user::dsl::is_admin.eq(true).or(dsl::exists( tenant_user::dsl::tenant_user.filter( tenant_user::dsl::tenant_id - .eq(&self.tenant_id) + .eq(fields::Uuid::from(self.tenant_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -341,7 +345,11 @@ impl Validator for ValidateApiKeyAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - 
.filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -358,7 +366,7 @@ impl Validator for ValidateApiKeyAccess { tenant_user::dsl::user_id .eq(user::dsl::id) .and(tenant_user::dsl::is_admin.eq(true)) - .and(api_key::dsl::id.eq(&self.id)), + .and(api_key::dsl::id.eq(fields::Uuid::from(self.id))), ), )), ); @@ -391,7 +399,7 @@ impl Validator for ValidateTenantsAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .filter(user::dsl::is_active.eq(true)) .into_boxed(); @@ -414,7 +422,7 @@ impl Validator for ValidateTenantsAccess { // admin api key let count = api_key::dsl::api_key .select(dsl::count_star()) - .find(&id) + .find(fields::Uuid::from(id)) .filter(api_key::dsl::is_admin.eq(true)) .first(&mut get_async_db_conn().await?) .await?; @@ -438,22 +446,24 @@ impl Validator for ValidateTenantAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { // global admin // tenant user Flag::Read => { - q = q.filter( - user::is_admin.eq(true).or(dsl::exists( - tenant_user::dsl::tenant_user.filter( - tenant_user::dsl::user_id - .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)), + q = q.filter(user::is_admin.eq(true).or(dsl::exists( + tenant_user::dsl::tenant_user.filter( + tenant_user::dsl::user_id.eq(user::dsl::id).and( + tenant_user::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id)), ), - )), - ); + ), + ))); } // global admin @@ -471,7 +481,7 @@ impl Validator for ValidateTenantAccess { async fn validate_key(&self, id: &Uuid) -> Result { 
let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -481,7 +491,7 @@ impl Validator for ValidateTenantAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + .or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } // admin api key @@ -513,7 +523,11 @@ impl Validator for ValidateTenantUsersAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -525,7 +539,10 @@ impl Validator for ValidateTenantUsersAccess { tenant_user::dsl::tenant_user.filter( tenant_user::dsl::user_id .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)) + .and( + tenant_user::dsl::tenant_id + .eq(fields::Uuid::from(self.tenant_id)), + ) .and(tenant_user::dsl::is_admin.eq(true)), ), )), @@ -534,15 +551,13 @@ impl Validator for ValidateTenantUsersAccess { // global admin // tenant user Flag::List => { - q = q.filter( - user::dsl::is_admin.eq(true).or(dsl::exists( - tenant_user::dsl::tenant_user.filter( - tenant_user::dsl::user_id - .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)), + q = q.filter(user::dsl::is_admin.eq(true).or(dsl::exists( + tenant_user::dsl::tenant_user.filter( + tenant_user::dsl::user_id.eq(user::dsl::id).and( + tenant_user::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id)), ), - )), - ); + ), + ))); } _ => { return Ok(0); @@ -555,7 +570,7 @@ impl Validator for ValidateTenantUsersAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -565,7 +580,7 @@ impl Validator 
for ValidateTenantUsersAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + .or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } _ => { @@ -598,7 +613,11 @@ impl Validator for ValidateTenantUserAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -611,12 +630,13 @@ impl Validator for ValidateTenantUserAccess { tenant_user::dsl::tenant_user.filter( tenant_user::dsl::user_id .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)) .and( - tenant_user::dsl::is_admin - .eq(true) - .or(tenant_user::dsl::user_id.eq(&self.user_id)), - ), + tenant_user::dsl::tenant_id + .eq(fields::Uuid::from(self.tenant_id)), + ) + .and(tenant_user::dsl::is_admin.eq(true).or( + tenant_user::dsl::user_id.eq(fields::Uuid::from(self.user_id)), + )), ), )), ); @@ -629,7 +649,10 @@ impl Validator for ValidateTenantUserAccess { tenant_user::dsl::tenant_user.filter( tenant_user::dsl::user_id .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)) + .and( + tenant_user::dsl::tenant_id + .eq(fields::Uuid::from(self.tenant_id)), + ) .and(tenant_user::dsl::is_admin.eq(true)), ), )), @@ -646,7 +669,7 @@ impl Validator for ValidateTenantUserAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -656,7 +679,7 @@ impl Validator for ValidateTenantUserAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + .or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } _ => { @@ -684,7 +707,11 @@ impl Validator for 
ValidateApplicationsAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -697,7 +724,10 @@ impl Validator for ValidateApplicationsAccess { tenant_user::dsl::tenant_user.filter( tenant_user::dsl::user_id .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)) + .and( + tenant_user::dsl::tenant_id + .eq(fields::Uuid::from(self.tenant_id)), + ) .and( tenant_user::dsl::is_admin .eq(true) @@ -710,15 +740,13 @@ impl Validator for ValidateApplicationsAccess { // global admin // tenant user Flag::List => { - q = q.filter( - user::dsl::is_admin.eq(true).or(dsl::exists( - tenant_user::dsl::tenant_user.filter( - tenant_user::dsl::user_id - .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)), + q = q.filter(user::dsl::is_admin.eq(true).or(dsl::exists( + tenant_user::dsl::tenant_user.filter( + tenant_user::dsl::user_id.eq(user::dsl::id).and( + tenant_user::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id)), ), - )), - ); + ), + ))); } _ => { return Ok(0); @@ -731,7 +759,7 @@ impl Validator for ValidateApplicationsAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -741,7 +769,7 @@ impl Validator for ValidateApplicationsAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + .or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } // admin api key @@ -750,7 +778,7 @@ impl Validator for ValidateApplicationsAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + 
.or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } _ => { @@ -781,7 +809,11 @@ impl Validator for ValidateApplicationAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -797,7 +829,7 @@ impl Validator for ValidateApplicationAccess { )) .filter( application::dsl::id - .eq(&self.application_id) + .eq(fields::Uuid::from(self.application_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -816,7 +848,7 @@ impl Validator for ValidateApplicationAccess { )) .filter( application::dsl::id - .eq(&self.application_id) + .eq(fields::Uuid::from(self.application_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -838,20 +870,25 @@ impl Validator for ValidateApplicationAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { // admin api key // tenant api key Flag::Read | Flag::Update | Flag::Delete => { - q = q.filter(api_key::dsl::is_admin.eq(true).or(dsl::exists( - application::dsl::application.filter( - application::dsl::id.eq(&self.application_id).and( - api_key::dsl::tenant_id.eq(application::dsl::tenant_id.nullable()), + q = q.filter( + api_key::dsl::is_admin.eq(true).or(dsl::exists( + application::dsl::application.filter( + application::dsl::id + .eq(fields::Uuid::from(self.application_id)) + .and( + api_key::dsl::tenant_id + .eq(application::dsl::tenant_id.nullable()), + ), ), - ), - ))); + )), + ); } _ => { return Ok(0); @@ -877,7 +914,11 @@ impl Validator for ValidateDeviceProfileTemplatesAccess { async fn validate_user(&self, id: &Uuid) -> 
Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -898,7 +939,7 @@ impl Validator for ValidateDeviceProfileTemplatesAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -932,7 +973,11 @@ impl Validator for ValidateDeviceProfileTemplateAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -953,7 +998,7 @@ impl Validator for ValidateDeviceProfileTemplateAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -988,7 +1033,11 @@ impl Validator for ValidateDeviceProfilesAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1001,7 +1050,10 @@ impl Validator for ValidateDeviceProfilesAccess { tenant_user::dsl::tenant_user.filter( tenant_user::dsl::user_id .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)) + .and( + tenant_user::dsl::tenant_id + .eq(fields::Uuid::from(self.tenant_id)), + ) .and( tenant_user::dsl::is_admin .eq(true) @@ -1014,15 +1066,13 @@ impl Validator for ValidateDeviceProfilesAccess { // global admin // 
tenant user Flag::List => { - q = q.filter( - user::dsl::is_admin.eq(true).or(dsl::exists( - tenant_user::dsl::tenant_user.filter( - tenant_user::dsl::user_id - .eq(user::dsl::id) - .and(tenant_user::dsl::tenant_id.eq(&self.tenant_id)), + q = q.filter(user::dsl::is_admin.eq(true).or(dsl::exists( + tenant_user::dsl::tenant_user.filter( + tenant_user::dsl::user_id.eq(user::dsl::id).and( + tenant_user::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id)), ), - )), - ); + ), + ))); } _ => { return Ok(0); @@ -1035,7 +1085,7 @@ impl Validator for ValidateDeviceProfilesAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -1045,7 +1095,7 @@ impl Validator for ValidateDeviceProfilesAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + .or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } _ => { @@ -1076,7 +1126,11 @@ impl Validator for ValidateDeviceProfileAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1092,7 +1146,7 @@ impl Validator for ValidateDeviceProfileAccess { )) .filter( device_profile::dsl::id - .eq(&self.device_profile_id) + .eq(fields::Uuid::from(self.device_profile_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -1111,7 +1165,7 @@ impl Validator for ValidateDeviceProfileAccess { )) .filter( device_profile::dsl::id - .eq(&self.device_profile_id) + .eq(fields::Uuid::from(self.device_profile_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -1133,20 +1187,25 @@ impl Validator for ValidateDeviceProfileAccess { async 
fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { // admin api key // tenant api key Flag::Read | Flag::Update | Flag::Delete => { - q = q.filter(api_key::dsl::is_admin.eq(true).or(dsl::exists( - device_profile::dsl::device_profile.filter( - device_profile::dsl::id.eq(&self.device_profile_id).and( - api_key::dsl::tenant_id.eq(device_profile::dsl::tenant_id.nullable()), + q = q.filter( + api_key::dsl::is_admin.eq(true).or(dsl::exists( + device_profile::dsl::device_profile.filter( + device_profile::dsl::id + .eq(fields::Uuid::from(self.device_profile_id)) + .and( + api_key::dsl::tenant_id + .eq(device_profile::dsl::tenant_id.nullable()), + ), ), - ), - ))); + )), + ); } _ => { return Ok(0); @@ -1176,7 +1235,11 @@ impl Validator for ValidateDevicesAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1193,7 +1256,7 @@ impl Validator for ValidateDevicesAccess { )) .filter( application::dsl::id - .eq(&self.application_id) + .eq(fields::Uuid::from(self.application_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -1216,7 +1279,7 @@ impl Validator for ValidateDevicesAccess { )) .filter( application::dsl::id - .eq(&self.application_id) + .eq(fields::Uuid::from(self.application_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -1233,20 +1296,25 @@ impl Validator for ValidateDevicesAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + 
.filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { // admin api key // tenant api key Flag::Create | Flag::List => { - q = q.filter(api_key::dsl::is_admin.eq(true).or(dsl::exists( - application::dsl::application.filter( - application::dsl::id.eq(&self.application_id).and( - api_key::dsl::tenant_id.eq(application::dsl::tenant_id.nullable()), + q = q.filter( + api_key::dsl::is_admin.eq(true).or(dsl::exists( + application::dsl::application.filter( + application::dsl::id + .eq(fields::Uuid::from(self.application_id)) + .and( + api_key::dsl::tenant_id + .eq(application::dsl::tenant_id.nullable()), + ), ), - ), - ))); + )), + ); } _ => { return Ok(0); @@ -1273,7 +1341,11 @@ impl Validator for ValidateDeviceAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1332,7 +1404,7 @@ impl Validator for ValidateDeviceAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { @@ -1372,7 +1444,11 @@ impl Validator for ValidateDeviceQueueAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1406,7 +1482,7 @@ impl Validator for ValidateDeviceQueueAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + 
.filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { @@ -1446,7 +1522,11 @@ impl Validator for ValidateGatewaysAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1458,7 +1538,7 @@ impl Validator for ValidateGatewaysAccess { user::dsl::is_admin.eq(true).or(dsl::exists( tenant_user::dsl::tenant_user.filter( tenant_user::dsl::tenant_id - .eq(&self.tenant_id) + .eq(fields::Uuid::from(self.tenant_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -1476,7 +1556,7 @@ impl Validator for ValidateGatewaysAccess { user::dsl::is_admin.eq(true).or(dsl::exists( tenant_user::dsl::tenant_user.filter( tenant_user::dsl::tenant_id - .eq(&self.tenant_id) + .eq(fields::Uuid::from(self.tenant_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -1493,7 +1573,7 @@ impl Validator for ValidateGatewaysAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .find(id) + .find(fields::Uuid::from(id)) .into_boxed(); match self.flag { @@ -1503,7 +1583,7 @@ impl Validator for ValidateGatewaysAccess { q = q.filter( api_key::dsl::is_admin .eq(true) - .or(api_key::dsl::tenant_id.eq(&self.tenant_id)), + .or(api_key::dsl::tenant_id.eq(fields::Uuid::from(self.tenant_id))), ); } _ => { @@ -1531,7 +1611,11 @@ impl Validator for ValidateGatewayAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1588,7 +1672,7 @@ impl 
Validator for ValidateGatewayAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { @@ -1632,7 +1716,11 @@ impl Validator for ValidateMulticastGroupsAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1649,7 +1737,7 @@ impl Validator for ValidateMulticastGroupsAccess { )) .filter( application::dsl::id - .eq(&self.application_id) + .eq(fields::Uuid::from(self.application_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -1672,7 +1760,7 @@ impl Validator for ValidateMulticastGroupsAccess { )) .filter( application::dsl::id - .eq(&self.application_id) + .eq(fields::Uuid::from(self.application_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -1689,20 +1777,25 @@ impl Validator for ValidateMulticastGroupsAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { // admin api key // tenant api key Flag::Create | Flag::List => { - q = q.filter(api_key::dsl::is_admin.eq(true).or(dsl::exists( - application::dsl::application.filter( - application::dsl::id.eq(&self.application_id).and( - api_key::dsl::tenant_id.eq(application::dsl::tenant_id.nullable()), + q = q.filter( + api_key::dsl::is_admin.eq(true).or(dsl::exists( + application::dsl::application.filter( + application::dsl::id + .eq(fields::Uuid::from(self.application_id)) + .and( + api_key::dsl::tenant_id + 
.eq(application::dsl::tenant_id.nullable()), + ), ), - ), - ))); + )), + ); } _ => { return Ok(0); @@ -1732,7 +1825,11 @@ impl Validator for ValidateMulticastGroupAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user .select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1749,7 +1846,7 @@ impl Validator for ValidateMulticastGroupAccess { )) .filter( multicast_group::dsl::id - .eq(&self.multicast_group_id) + .eq(fields::Uuid::from(self.multicast_group_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -1769,7 +1866,7 @@ impl Validator for ValidateMulticastGroupAccess { )) .filter( multicast_group::dsl::id - .eq(&self.multicast_group_id) + .eq(fields::Uuid::from(self.multicast_group_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -1791,7 +1888,7 @@ impl Validator for ValidateMulticastGroupAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { @@ -1802,9 +1899,14 @@ impl Validator for ValidateMulticastGroupAccess { api_key::dsl::is_admin.eq(true).or(dsl::exists( multicast_group::dsl::multicast_group .inner_join(application::table) - .filter(multicast_group::dsl::id.eq(&self.multicast_group_id).and( - api_key::dsl::tenant_id.eq(application::dsl::tenant_id.nullable()), - )), + .filter( + multicast_group::dsl::id + .eq(fields::Uuid::from(self.multicast_group_id)) + .and( + api_key::dsl::tenant_id + .eq(application::dsl::tenant_id.nullable()), + ), + ), )), ); } @@ -1836,7 +1938,11 @@ impl Validator for ValidateMulticastGroupQueueAccess { async fn validate_user(&self, id: &Uuid) -> Result { let mut q = user::dsl::user 
.select(dsl::count_star()) - .filter(user::dsl::id.eq(id).and(user::dsl::is_active.eq(true))) + .filter( + user::dsl::id + .eq(fields::Uuid::from(id)) + .and(user::dsl::is_active.eq(true)), + ) .into_boxed(); match self.flag { @@ -1854,7 +1960,7 @@ impl Validator for ValidateMulticastGroupQueueAccess { )) .filter( multicast_group::dsl::id - .eq(&self.multicast_group_id) + .eq(fields::Uuid::from(self.multicast_group_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)) .and( tenant_user::dsl::is_admin @@ -1878,7 +1984,7 @@ impl Validator for ValidateMulticastGroupQueueAccess { )) .filter( multicast_group::dsl::id - .eq(&self.multicast_group_id) + .eq(fields::Uuid::from(self.multicast_group_id)) .and(tenant_user::dsl::user_id.eq(user::dsl::id)), ), )), @@ -1895,7 +2001,7 @@ impl Validator for ValidateMulticastGroupQueueAccess { async fn validate_key(&self, id: &Uuid) -> Result { let mut q = api_key::dsl::api_key .select(dsl::count_star()) - .filter(api_key::dsl::id.eq(id)) + .filter(api_key::dsl::id.eq(fields::Uuid::from(id))) .into_boxed(); match self.flag { @@ -1906,9 +2012,14 @@ impl Validator for ValidateMulticastGroupQueueAccess { api_key::dsl::is_admin.eq(true).or(dsl::exists( multicast_group::dsl::multicast_group .inner_join(application::table) - .filter(multicast_group::dsl::id.eq(&self.multicast_group_id).and( - api_key::dsl::tenant_id.eq(application::dsl::tenant_id.nullable()), - )), + .filter( + multicast_group::dsl::id + .eq(fields::Uuid::from(self.multicast_group_id)) + .and( + api_key::dsl::tenant_id + .eq(application::dsl::tenant_id.nullable()), + ), + ), )), ); } @@ -1988,19 +2099,19 @@ pub mod test { // admin user ValidatorTest { validators: vec![ValidateIsAdmin::new()], - id: AuthID::User(users[0].id), + id: AuthID::User(users[0].id.into()), ok: true, }, // inactive ValidatorTest { validators: vec![ValidateIsAdmin::new()], - id: AuthID::User(users[1].id), + id: AuthID::User(users[1].id.into()), ok: false, }, // active regular user ValidatorTest { 
validators: vec![ValidateIsAdmin::new()], - id: AuthID::User(users[2].id), + id: AuthID::User(users[2].id.into()), ok: false, }, ]; @@ -2035,19 +2146,19 @@ pub mod test { // active user ValidatorTest { validators: vec![ValidateActiveUser::new()], - id: AuthID::User(users[0].id), + id: AuthID::User(users[0].id.into()), ok: true, }, // inactive user ValidatorTest { validators: vec![ValidateActiveUser::new()], - id: AuthID::User(users[1].id), + id: AuthID::User(users[1].id.into()), ok: false, }, // api key ValidatorTest { validators: vec![ValidateActiveUser::new()], - id: AuthID::Key(api_key.id), + id: AuthID::Key(api_key.id.into()), ok: false, }, ]; @@ -2083,19 +2194,19 @@ pub mod test { // active user ValidatorTest { validators: vec![ValidateActiveUserOrKey::new()], - id: AuthID::User(users[0].id), + id: AuthID::User(users[0].id.into()), ok: true, }, // inactive user ValidatorTest { validators: vec![ValidateActiveUserOrKey::new()], - id: AuthID::User(users[1].id), + id: AuthID::User(users[1].id.into()), ok: false, }, // api key ValidatorTest { validators: vec![ValidateActiveUserOrKey::new()], - id: AuthID::Key(api_key.id), + id: AuthID::Key(api_key.id.into()), ok: true, }, // non-existing key @@ -2155,7 +2266,7 @@ pub mod test { tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_user.id, + user_id: tenant_user.id.into(), ..Default::default() }) .await @@ -2163,7 +2274,7 @@ pub mod test { tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_admin.id, + user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) @@ -2178,37 +2289,37 @@ pub mod test { ValidateTenantsAccess::new(Flag::Create), ValidateTenantsAccess::new(Flag::List), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { validators: vec![ValidateTenantsAccess::new(Flag::List)], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: 
true, }, // normal user can list ValidatorTest { validators: vec![ValidateTenantsAccess::new(Flag::List)], - id: AuthID::User(user.id), + id: AuthID::User(user.id.into()), ok: true, }, // tenant user can not create ValidatorTest { validators: vec![ValidateTenantsAccess::new(Flag::Create)], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, // normal user can not create ValidatorTest { validators: vec![ValidateTenantsAccess::new(Flag::Create)], - id: AuthID::User(user.id), + id: AuthID::User(user.id.into()), ok: false, }, // inactive user can not list ValidatorTest { validators: vec![ValidateTenantsAccess::new(Flag::Create)], - id: AuthID::User(user_inactive.id), + id: AuthID::User(user_inactive.id.into()), ok: false, }, ]; @@ -2223,7 +2334,7 @@ pub mod test { ValidateTenantsAccess::new(Flag::Create), ValidateTenantsAccess::new(Flag::List), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api can not create or list @@ -2232,7 +2343,7 @@ pub mod test { ValidateTenantsAccess::new(Flag::Create), ValidateTenantsAccess::new(Flag::List), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2244,65 +2355,65 @@ pub mod test { // global admin can read, update and delete ValidatorTest { validators: vec![ - ValidateTenantAccess::new(Flag::Read, tenant_a.id), - ValidateTenantAccess::new(Flag::Update, tenant_a.id), - ValidateTenantAccess::new(Flag::Delete, tenant_a.id), + ValidateTenantAccess::new(Flag::Read, tenant_a.id.into()), + ValidateTenantAccess::new(Flag::Update, tenant_a.id.into()), + ValidateTenantAccess::new(Flag::Delete, tenant_a.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can read ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Read, tenant_a.id)], - id: AuthID::User(tenant_admin.id), + validators: 
vec![ValidateTenantAccess::new(Flag::Read, tenant_a.id.into())], + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant user can read ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Read, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateTenantAccess::new(Flag::Read, tenant_a.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant admin can not update ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Update, tenant_a.id)], - id: AuthID::User(tenant_admin.id), + validators: vec![ValidateTenantAccess::new(Flag::Update, tenant_a.id.into())], + id: AuthID::User(tenant_admin.id.into()), ok: false, }, // tenant admin can not delete ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Delete, tenant_a.id)], - id: AuthID::User(tenant_admin.id), + validators: vec![ValidateTenantAccess::new(Flag::Delete, tenant_a.id.into())], + id: AuthID::User(tenant_admin.id.into()), ok: false, }, // tenant user can not update ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Update, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateTenantAccess::new(Flag::Update, tenant_a.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // tenant user can not delete ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Delete, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateTenantAccess::new(Flag::Delete, tenant_a.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // normal user can not read ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Read, tenant_a.id)], - id: AuthID::User(user.id), + validators: vec![ValidateTenantAccess::new(Flag::Read, tenant_a.id.into())], + id: AuthID::User(user.id.into()), ok: false, }, // normal user can not update ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Update, tenant_a.id)], - id: 
AuthID::User(user.id), + validators: vec![ValidateTenantAccess::new(Flag::Update, tenant_a.id.into())], + id: AuthID::User(user.id.into()), ok: false, }, // normal user can not delete ValidatorTest { - validators: vec![ValidateTenantAccess::new(Flag::Delete, tenant_a.id)], - id: AuthID::User(user.id), + validators: vec![ValidateTenantAccess::new(Flag::Delete, tenant_a.id.into())], + id: AuthID::User(user.id.into()), ok: false, }, ]; @@ -2313,38 +2424,38 @@ pub mod test { // admin api key can read, update and delete ValidatorTest { validators: vec![ - ValidateTenantAccess::new(Flag::Read, tenant_a.id), - ValidateTenantAccess::new(Flag::Update, tenant_a.id), - ValidateTenantAccess::new(Flag::Delete, tenant_a.id), + ValidateTenantAccess::new(Flag::Read, tenant_a.id.into()), + ValidateTenantAccess::new(Flag::Update, tenant_a.id.into()), + ValidateTenantAccess::new(Flag::Delete, tenant_a.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read ValidatorTest { validators: vec![ValidateTenantAccess::new( Flag::Read, - api_key_tenant.tenant_id.unwrap(), + api_key_tenant.tenant_id.unwrap().into(), )], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not update ValidatorTest { validators: vec![ValidateTenantAccess::new( Flag::Update, - api_key_tenant.tenant_id.unwrap(), + api_key_tenant.tenant_id.unwrap().into(), )], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, // tenant api key can not delete ValidatorTest { validators: vec![ValidateTenantAccess::new( Flag::Delete, - api_key_tenant.tenant_id.unwrap(), + api_key_tenant.tenant_id.unwrap().into(), )], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2399,7 +2510,7 @@ pub mod test { tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_admin.id, + 
user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) @@ -2407,21 +2518,21 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_user.id, + user_id: tenant_user.id.into(), ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_user.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_user.id.into(), ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_user_other.id, + user_id: tenant_user_other.id.into(), ..Default::default() }) .await @@ -2432,43 +2543,55 @@ pub mod test { // admin user can create and list ValidatorTest { validators: vec![ - ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id), - ValidateTenantUsersAccess::new(Flag::List, tenant_a.id), + ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id.into()), + ValidateTenantUsersAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can create and list ValidatorTest { validators: vec![ - ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id), - ValidateTenantUsersAccess::new(Flag::List, tenant_a.id), + ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id.into()), + ValidateTenantUsersAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { - validators: vec![ValidateTenantUsersAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateTenantUsersAccess::new( + Flag::List, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not create ValidatorTest { - validators: vec![ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id)], 
- id: AuthID::User(tenant_user.id), + validators: vec![ValidateTenantUsersAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // normal user can not create ValidatorTest { - validators: vec![ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id)], - id: AuthID::User(user.id), + validators: vec![ValidateTenantUsersAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(user.id.into()), ok: false, }, // normal user can not list ValidatorTest { - validators: vec![ValidateTenantUsersAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(user.id), + validators: vec![ValidateTenantUsersAccess::new( + Flag::List, + tenant_a.id.into(), + )], + id: AuthID::User(user.id.into()), ok: false, }, ]; @@ -2479,28 +2602,34 @@ pub mod test { // admin api key can create and list ValidatorTest { validators: vec![ - ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id), - ValidateTenantUsersAccess::new(Flag::List, tenant_a.id), + ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id.into()), + ValidateTenantUsersAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create and list ValidatorTest { validators: vec![ - ValidateTenantUsersAccess::new(Flag::Create, api_key_tenant.tenant_id.unwrap()), - ValidateTenantUsersAccess::new(Flag::List, api_key_tenant.tenant_id.unwrap()), + ValidateTenantUsersAccess::new( + Flag::Create, + api_key_tenant.tenant_id.unwrap().into(), + ), + ValidateTenantUsersAccess::new( + Flag::List, + api_key_tenant.tenant_id.unwrap().into(), + ), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key for different tenant can not create or list ValidatorTest { validators: vec![ - ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id), - ValidateTenantUsersAccess::new(Flag::List, tenant_a.id), + 
ValidateTenantUsersAccess::new(Flag::Create, tenant_a.id.into()), + ValidateTenantUsersAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2511,60 +2640,104 @@ pub mod test { // admin user can read, update and delete ValidatorTest { validators: vec![ - ValidateTenantUserAccess::new(Flag::Read, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Update, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Delete, tenant_a.id, tenant_user.id), + ValidateTenantUserAccess::new( + Flag::Read, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Update, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Delete, + tenant_a.id.into(), + tenant_user.id.into(), + ), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can read, update and delete ValidatorTest { validators: vec![ - ValidateTenantUserAccess::new(Flag::Read, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Update, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Delete, tenant_a.id, tenant_user.id), + ValidateTenantUserAccess::new( + Flag::Read, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Update, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Delete, + tenant_a.id.into(), + tenant_user.id.into(), + ), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant user can read own user ValidatorTest { validators: vec![ValidateTenantUserAccess::new( Flag::Read, - tenant_a.id, - tenant_user.id, + tenant_a.id.into(), + tenant_user.id.into(), )], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not read other user 
ValidatorTest { validators: vec![ValidateTenantUserAccess::new( Flag::Read, - tenant_a.id, - tenant_user_other.id, + tenant_a.id.into(), + tenant_user_other.id.into(), )], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, // tenant user can not update or delete ValidatorTest { validators: vec![ - ValidateTenantUserAccess::new(Flag::Update, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Delete, tenant_a.id, tenant_user.id), + ValidateTenantUserAccess::new( + Flag::Update, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Delete, + tenant_a.id.into(), + tenant_user.id.into(), + ), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, // normal user can not read, update or delete ValidatorTest { validators: vec![ - ValidateTenantUserAccess::new(Flag::Read, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Update, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Delete, tenant_a.id, tenant_user.id), + ValidateTenantUserAccess::new( + Flag::Read, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Update, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Delete, + tenant_a.id.into(), + tenant_user.id.into(), + ), ], - id: AuthID::User(user.id), + id: AuthID::User(user.id.into()), ok: false, }, ]; @@ -2575,11 +2748,23 @@ pub mod test { // admin api key can read, update and delete ValidatorTest { validators: vec![ - ValidateTenantUserAccess::new(Flag::Read, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Update, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Delete, tenant_a.id, tenant_user.id), + ValidateTenantUserAccess::new( + Flag::Read, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Update, + tenant_a.id.into(), + 
tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Delete, + tenant_a.id.into(), + tenant_user.id.into(), + ), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read, update and delete @@ -2587,31 +2772,43 @@ pub mod test { validators: vec![ ValidateTenantUserAccess::new( Flag::Read, - api_key_tenant.tenant_id.unwrap(), - tenant_user.id, + api_key_tenant.tenant_id.unwrap().into(), + tenant_user.id.into(), ), ValidateTenantUserAccess::new( Flag::Update, - api_key_tenant.tenant_id.unwrap(), - tenant_user.id, + api_key_tenant.tenant_id.unwrap().into(), + tenant_user.id.into(), ), ValidateTenantUserAccess::new( Flag::Delete, - api_key_tenant.tenant_id.unwrap(), - tenant_user.id, + api_key_tenant.tenant_id.unwrap().into(), + tenant_user.id.into(), ), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not read, update or delete for other tenant ValidatorTest { validators: vec![ - ValidateTenantUserAccess::new(Flag::Read, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Update, tenant_a.id, tenant_user.id), - ValidateTenantUserAccess::new(Flag::Delete, tenant_a.id, tenant_user.id), + ValidateTenantUserAccess::new( + Flag::Read, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Update, + tenant_a.id.into(), + tenant_user.id.into(), + ), + ValidateTenantUserAccess::new( + Flag::Delete, + tenant_a.id.into(), + tenant_user.id.into(), + ), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2670,13 +2867,14 @@ pub mod test { let api_key_admin = api_key::test::create_api_key(true, false).await; let api_key_tenant = api_key::test::create_api_key(false, true).await; - let app = application::test::create_application(Some(tenant_a.id)).await; + let app = 
application::test::create_application(Some(tenant_a.id.into())).await; let app_api_key_tenant = - application::test::create_application(Some(api_key_tenant.tenant_id.unwrap())).await; + application::test::create_application(Some(api_key_tenant.tenant_id.unwrap().into())) + .await; tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_admin.id, + user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) @@ -2684,7 +2882,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_device_admin.id, + user_id: tenant_device_admin.id.into(), is_device_admin: true, ..Default::default() }) @@ -2692,7 +2890,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_gateway_admin.id, + user_id: tenant_gateway_admin.id.into(), is_gateway_admin: true, ..Default::default() }) @@ -2700,7 +2898,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_user.id, + user_id: tenant_user.id.into(), ..Default::default() }) .await @@ -2711,61 +2909,73 @@ pub mod test { // admin user can create and list ValidatorTest { validators: vec![ - ValidateApplicationsAccess::new(Flag::Create, tenant_a.id), - ValidateApplicationsAccess::new(Flag::List, tenant_a.id), + ValidateApplicationsAccess::new(Flag::Create, tenant_a.id.into()), + ValidateApplicationsAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can create and list ValidatorTest { validators: vec![ - ValidateApplicationsAccess::new(Flag::Create, tenant_a.id), - ValidateApplicationsAccess::new(Flag::List, tenant_a.id), + ValidateApplicationsAccess::new(Flag::Create, tenant_a.id.into()), + ValidateApplicationsAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, 
}, // tenant device admin can create and list ValidatorTest { validators: vec![ - ValidateApplicationsAccess::new(Flag::Create, tenant_a.id), - ValidateApplicationsAccess::new(Flag::List, tenant_a.id), + ValidateApplicationsAccess::new(Flag::Create, tenant_a.id.into()), + ValidateApplicationsAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant gateway admin can list ValidatorTest { - validators: vec![ValidateApplicationsAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(tenant_gateway_admin.id), + validators: vec![ValidateApplicationsAccess::new( + Flag::List, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_gateway_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { - validators: vec![ValidateApplicationsAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateApplicationsAccess::new( + Flag::List, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant gateway admin can not create ValidatorTest { - validators: vec![ValidateApplicationsAccess::new(Flag::Create, tenant_a.id)], - id: AuthID::User(tenant_gateway_admin.id), + validators: vec![ValidateApplicationsAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_gateway_admin.id.into()), ok: false, }, // tenant user can not create ValidatorTest { - validators: vec![ValidateApplicationsAccess::new(Flag::Create, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateApplicationsAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // normal user can not create or list ValidatorTest { validators: vec![ - ValidateApplicationsAccess::new(Flag::Create, tenant_a.id), - ValidateApplicationsAccess::new(Flag::List, tenant_a.id), + ValidateApplicationsAccess::new(Flag::Create, 
tenant_a.id.into()), + ValidateApplicationsAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -2776,10 +2986,10 @@ pub mod test { // admin api key can create and list ValidatorTest { validators: vec![ - ValidateApplicationsAccess::new(Flag::Create, tenant_a.id), - ValidateApplicationsAccess::new(Flag::List, tenant_a.id), + ValidateApplicationsAccess::new(Flag::Create, tenant_a.id.into()), + ValidateApplicationsAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create and list @@ -2787,20 +2997,23 @@ pub mod test { validators: vec![ ValidateApplicationsAccess::new( Flag::Create, - api_key_tenant.tenant_id.unwrap(), + api_key_tenant.tenant_id.unwrap().into(), + ), + ValidateApplicationsAccess::new( + Flag::List, + api_key_tenant.tenant_id.unwrap().into(), ), - ValidateApplicationsAccess::new(Flag::List, api_key_tenant.tenant_id.unwrap()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not create or list for other tenant ValidatorTest { validators: vec![ - ValidateApplicationsAccess::new(Flag::Create, tenant_a.id), - ValidateApplicationsAccess::new(Flag::List, tenant_a.id), + ValidateApplicationsAccess::new(Flag::Create, tenant_a.id.into()), + ValidateApplicationsAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2811,56 +3024,56 @@ pub mod test { // admin user can read, update and delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app.id), - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + ValidateApplicationAccess::new(Flag::Read, app.id.into()), + ValidateApplicationAccess::new(Flag::Update, 
app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin user can read, update and delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app.id), - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + ValidateApplicationAccess::new(Flag::Read, app.id.into()), + ValidateApplicationAccess::new(Flag::Update, app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can read, update and delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app.id), - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + ValidateApplicationAccess::new(Flag::Read, app.id.into()), + ValidateApplicationAccess::new(Flag::Update, app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant user can read ValidatorTest { - validators: vec![ValidateApplicationAccess::new(Flag::Read, app.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateApplicationAccess::new(Flag::Read, app.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // user can not read, update or delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app.id), - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + ValidateApplicationAccess::new(Flag::Read, app.id.into()), + ValidateApplicationAccess::new(Flag::Update, app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: 
AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, // tenant user can not update or delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + ValidateApplicationAccess::new(Flag::Update, app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, ]; @@ -2871,31 +3084,31 @@ pub mod test { // admin api key can read, update and delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app.id), - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + ValidateApplicationAccess::new(Flag::Read, app.id.into()), + ValidateApplicationAccess::new(Flag::Update, app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read update and delete ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app_api_key_tenant.id), - ValidateApplicationAccess::new(Flag::Update, app_api_key_tenant.id), - ValidateApplicationAccess::new(Flag::Delete, app_api_key_tenant.id), + ValidateApplicationAccess::new(Flag::Read, app_api_key_tenant.id.into()), + ValidateApplicationAccess::new(Flag::Update, app_api_key_tenant.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app_api_key_tenant.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not read, update or delete app from other tentant ValidatorTest { validators: vec![ - ValidateApplicationAccess::new(Flag::Read, app.id), - ValidateApplicationAccess::new(Flag::Update, app.id), - ValidateApplicationAccess::new(Flag::Delete, app.id), + 
ValidateApplicationAccess::new(Flag::Read, app.id.into()), + ValidateApplicationAccess::new(Flag::Update, app.id.into()), + ValidateApplicationAccess::new(Flag::Delete, app.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2934,19 +3147,19 @@ pub mod test { ValidateDeviceProfileTemplatesAccess::new(Flag::Create), ValidateDeviceProfileTemplatesAccess::new(Flag::List), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // user can list ValidatorTest { validators: vec![ValidateDeviceProfileTemplatesAccess::new(Flag::List)], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: true, }, // user can not create ValidatorTest { validators: vec![ValidateDeviceProfileTemplatesAccess::new(Flag::Create)], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -2960,19 +3173,19 @@ pub mod test { ValidateDeviceProfileTemplatesAccess::new(Flag::Create), ValidateDeviceProfileTemplatesAccess::new(Flag::List), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can list ValidatorTest { validators: vec![ValidateDeviceProfileTemplatesAccess::new(Flag::List)], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api can not create ValidatorTest { validators: vec![ValidateDeviceProfileTemplatesAccess::new(Flag::Create)], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -2987,13 +3200,13 @@ pub mod test { ValidateDeviceProfileTemplateAccess::new(Flag::Update), ValidateDeviceProfileTemplateAccess::new(Flag::Delete), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // user can read ValidatorTest { validators: vec![ValidateDeviceProfileTemplateAccess::new(Flag::Read)], - id: 
AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: true, }, // user can not update or delete @@ -3002,7 +3215,7 @@ pub mod test { ValidateDeviceProfileTemplateAccess::new(Flag::Update), ValidateDeviceProfileTemplateAccess::new(Flag::Delete), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3017,13 +3230,13 @@ pub mod test { ValidateDeviceProfileTemplateAccess::new(Flag::Update), ValidateDeviceProfileTemplateAccess::new(Flag::Delete), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read ValidatorTest { validators: vec![ValidateDeviceProfileTemplateAccess::new(Flag::Read)], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not update or delete @@ -3032,7 +3245,7 @@ pub mod test { ValidateDeviceProfileTemplateAccess::new(Flag::Update), ValidateDeviceProfileTemplateAccess::new(Flag::Delete), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -3108,7 +3321,7 @@ pub mod test { tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_admin.id, + user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) @@ -3116,7 +3329,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_device_admin.id, + user_id: tenant_device_admin.id.into(), is_device_admin: true, ..Default::default() }) @@ -3124,7 +3337,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_gateway_admin.id, + user_id: tenant_gateway_admin.id.into(), is_gateway_admin: true, ..Default::default() }) @@ -3132,7 +3345,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_user.id, + user_id: tenant_user.id.into(), ..Default::default() }) .await @@ 
-3143,61 +3356,73 @@ pub mod test { // admin user can create and list ValidatorTest { validators: vec![ - ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id), - ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id), + ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id.into()), + ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin user can create and list ValidatorTest { validators: vec![ - ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id), - ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id), + ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id.into()), + ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can create and list ValidatorTest { validators: vec![ - ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id), - ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id), + ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id.into()), + ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant gateway admin can list ValidatorTest { - validators: vec![ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(tenant_gateway_admin.id), + validators: vec![ValidateDeviceProfilesAccess::new( + Flag::List, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_gateway_admin.id.into()), ok: true, }, // tenant users can list ValidatorTest { - validators: vec![ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateDeviceProfilesAccess::new( + Flag::List, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: true, }, 
// tenant users can not create ValidatorTest { - validators: vec![ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateDeviceProfilesAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // tenant gateway admin can not create ValidatorTest { - validators: vec![ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id)], - id: AuthID::User(tenant_gateway_admin.id), + validators: vec![ValidateDeviceProfilesAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_gateway_admin.id.into()), ok: false, }, // non-tenant users can not list or create ValidatorTest { validators: vec![ - ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id), - ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id), + ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id.into()), + ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3208,10 +3433,10 @@ pub mod test { // admin api key can create and list ValidatorTest { validators: vec![ - ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id), - ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id), + ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id.into()), + ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create and list @@ -3219,23 +3444,23 @@ pub mod test { validators: vec![ ValidateDeviceProfilesAccess::new( Flag::Create, - api_key_tenant.tenant_id.unwrap(), + api_key_tenant.tenant_id.unwrap().into(), ), ValidateDeviceProfilesAccess::new( Flag::List, - api_key_tenant.tenant_id.unwrap(), + api_key_tenant.tenant_id.unwrap().into(), ), ], - id: AuthID::Key(api_key_tenant.id), + id: 
AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not create or list for other tenant ValidatorTest { validators: vec![ - ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id), - ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id), + ValidateDeviceProfilesAccess::new(Flag::Create, tenant_a.id.into()), + ValidateDeviceProfilesAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -3246,61 +3471,61 @@ pub mod test { // admin user can read, update and delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Read, dp.id), - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can read, update and delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Read, dp.id), - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can read, update and delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Read, dp.id), - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + 
ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant gateway admin can read ValidatorTest { - validators: vec![ValidateDeviceProfileAccess::new(Flag::Read, dp.id)], - id: AuthID::User(tenant_gateway_admin.id), + validators: vec![ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into())], + id: AuthID::User(tenant_gateway_admin.id.into()), ok: true, }, // tenant user can read ValidatorTest { - validators: vec![ValidateDeviceProfileAccess::new(Flag::Read, dp.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant gateway admin can not update or delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::User(tenant_gateway_admin.id), + id: AuthID::User(tenant_gateway_admin.id.into()), ok: false, }, // tenant user can not update or delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, ]; @@ -3311,31 +3536,31 @@ pub mod test { // admin api key can read, update and delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Read, dp.id), - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into()), + 
ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read update and delete ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Read, dp_api_key_tenant.id), - ValidateDeviceProfileAccess::new(Flag::Update, dp_api_key_tenant.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp_api_key_tenant.id), + ValidateDeviceProfileAccess::new(Flag::Read, dp_api_key_tenant.id.into()), + ValidateDeviceProfileAccess::new(Flag::Update, dp_api_key_tenant.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp_api_key_tenant.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not read, update or delete for other tenant ValidatorTest { validators: vec![ - ValidateDeviceProfileAccess::new(Flag::Read, dp.id), - ValidateDeviceProfileAccess::new(Flag::Update, dp.id), - ValidateDeviceProfileAccess::new(Flag::Delete, dp.id), + ValidateDeviceProfileAccess::new(Flag::Read, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Update, dp.id.into()), + ValidateDeviceProfileAccess::new(Flag::Delete, dp.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -3394,35 +3619,36 @@ pub mod test { let api_key_other_tenant = api_key::test::create_api_key(false, true).await; let app = - application::test::create_application(Some(api_key_tenant.tenant_id.unwrap())).await; + application::test::create_application(Some(api_key_tenant.tenant_id.unwrap().into())) + .await; tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_admin.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) .await .unwrap(); 
tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_device_admin.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_device_admin.id.into(), is_device_admin: true, ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_gateway_admin.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_gateway_admin.id.into(), is_gateway_admin: true, ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_user.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_user.id.into(), ..Default::default() }) .await @@ -3432,49 +3658,49 @@ pub mod test { // admin user can create and list ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin user can create and list ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can create and list ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: 
AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { - validators: vec![ValidateDevicesAccess::new(Flag::List, app.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateDevicesAccess::new(Flag::List, app.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not create ValidatorTest { - validators: vec![ValidateDevicesAccess::new(Flag::Create, app.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateDevicesAccess::new(Flag::Create, app.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // other users can not create or list ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3484,40 +3710,41 @@ pub mod test { // admin api key can create and list ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create and list ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not create or list for other tenant ValidatorTest { validators: vec![ - ValidateDevicesAccess::new(Flag::Create, app.id), - 
ValidateDevicesAccess::new(Flag::List, app.id), + ValidateDevicesAccess::new(Flag::Create, app.id.into()), + ValidateDevicesAccess::new(Flag::List, app.id.into()), ], - id: AuthID::Key(api_key_other_tenant.id), + id: AuthID::Key(api_key_other_tenant.id.into()), ok: false, }, ]; run_tests(tests).await; - let dp = - device_profile::test::create_device_profile(Some(api_key_tenant.tenant_id.unwrap())) - .await; + let dp = device_profile::test::create_device_profile(Some( + api_key_tenant.tenant_id.unwrap().into(), + )) + .await; let dev = device::test::create_device( EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), - dp.id, - Some(app.id), + dp.id.into(), + Some(app.id.into()), ) .await; @@ -3529,7 +3756,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can read, update and delete @@ -3539,7 +3766,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can read, update and delete @@ -3549,13 +3776,13 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant user can read ValidatorTest { validators: vec![ValidateDeviceAccess::new(Flag::Read, dev.dev_eui)], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not update or delete @@ -3564,7 +3791,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: 
false, }, // other user can not read, update and delete @@ -3574,7 +3801,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3588,7 +3815,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read, update and delete @@ -3598,7 +3825,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // other api key can not read, update or delete @@ -3608,7 +3835,7 @@ pub mod test { ValidateDeviceAccess::new(Flag::Update, dev.dev_eui), ValidateDeviceAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::Key(api_key_other_tenant.id), + id: AuthID::Key(api_key_other_tenant.id.into()), ok: false, }, ]; @@ -3645,23 +3872,25 @@ pub mod test { let api_key_other_tenant = api_key::test::create_api_key(false, true).await; let app = - application::test::create_application(Some(api_key_tenant.tenant_id.unwrap())).await; + application::test::create_application(Some(api_key_tenant.tenant_id.unwrap().into())) + .await; tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_user.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_user.id.into(), ..Default::default() }) .await .unwrap(); - let dp = - device_profile::test::create_device_profile(Some(api_key_tenant.tenant_id.unwrap())) - .await; + let dp = device_profile::test::create_device_profile(Some( + api_key_tenant.tenant_id.unwrap().into(), + )) + .await; let dev = device::test::create_device( EUI64::from_be_bytes([1, 2, 3, 4, 5, 
6, 7, 8]), - dp.id, - Some(app.id), + dp.id.into(), + Some(app.id.into()), ) .await; @@ -3673,7 +3902,7 @@ pub mod test { ValidateDeviceQueueAccess::new(Flag::List, dev.dev_eui), ValidateDeviceQueueAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant user can create list and delete @@ -3683,7 +3912,7 @@ pub mod test { ValidateDeviceQueueAccess::new(Flag::List, dev.dev_eui), ValidateDeviceQueueAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: true, }, // other user can not create, list or delete @@ -3693,7 +3922,7 @@ pub mod test { ValidateDeviceQueueAccess::new(Flag::List, dev.dev_eui), ValidateDeviceQueueAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3707,7 +3936,7 @@ pub mod test { ValidateDeviceQueueAccess::new(Flag::List, dev.dev_eui), ValidateDeviceQueueAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create, list and delete @@ -3717,7 +3946,7 @@ pub mod test { ValidateDeviceQueueAccess::new(Flag::List, dev.dev_eui), ValidateDeviceQueueAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // api key for other tenant cna not create, list or delete @@ -3727,7 +3956,7 @@ pub mod test { ValidateDeviceQueueAccess::new(Flag::List, dev.dev_eui), ValidateDeviceQueueAccess::new(Flag::Delete, dev.dev_eui), ], - id: AuthID::Key(api_key_other_tenant.id), + id: AuthID::Key(api_key_other_tenant.id.into()), ok: false, }, ]; @@ -3791,7 +4020,7 @@ pub mod test { let gw_api_key_tenant = gateway::create(gateway::Gateway { name: "test-gw-tenant".into(), gateway_id: EUI64::from_str("0202030405060708").unwrap(), - tenant_id: 
api_key_tenant.tenant_id.unwrap(), + tenant_id: api_key_tenant.tenant_id.unwrap().into(), ..Default::default() }) .await @@ -3799,7 +4028,7 @@ pub mod test { tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_admin.id, + user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) @@ -3807,7 +4036,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_gateway_admin.id, + user_id: tenant_gateway_admin.id.into(), is_gateway_admin: true, ..Default::default() }) @@ -3815,7 +4044,7 @@ pub mod test { .unwrap(); tenant::add_user(tenant::TenantUser { tenant_id: tenant_a.id, - user_id: tenant_user.id, + user_id: tenant_user.id.into(), ..Default::default() }) .await @@ -3826,49 +4055,52 @@ pub mod test { // admin user can create and list ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, tenant_a.id), - ValidateGatewaysAccess::new(Flag::List, tenant_a.id), + ValidateGatewaysAccess::new(Flag::Create, tenant_a.id.into()), + ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can create and list ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, tenant_a.id), - ValidateGatewaysAccess::new(Flag::List, tenant_a.id), + ValidateGatewaysAccess::new(Flag::Create, tenant_a.id.into()), + ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant gateway admin can create and list ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, tenant_a.id), - ValidateGatewaysAccess::new(Flag::List, tenant_a.id), + ValidateGatewaysAccess::new(Flag::Create, tenant_a.id.into()), + ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(tenant_gateway_admin.id), + id: 
AuthID::User(tenant_gateway_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { - validators: vec![ValidateGatewaysAccess::new(Flag::List, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not create ValidatorTest { - validators: vec![ValidateGatewaysAccess::new(Flag::Create, tenant_a.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateGatewaysAccess::new( + Flag::Create, + tenant_a.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // other users can not create or list ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, tenant_a.id), - ValidateGatewaysAccess::new(Flag::List, tenant_a.id), + ValidateGatewaysAccess::new(Flag::Create, tenant_a.id.into()), + ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3879,28 +4111,34 @@ pub mod test { // admin api key can create and list ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, tenant_a.id), - ValidateGatewaysAccess::new(Flag::List, tenant_a.id), + ValidateGatewaysAccess::new(Flag::Create, tenant_a.id.into()), + ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create and list ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, api_key_tenant.tenant_id.unwrap()), - ValidateGatewaysAccess::new(Flag::List, api_key_tenant.tenant_id.unwrap()), + ValidateGatewaysAccess::new( + Flag::Create, + api_key_tenant.tenant_id.unwrap().into(), + ), + ValidateGatewaysAccess::new( + Flag::List, + api_key_tenant.tenant_id.unwrap().into(), + ), ], - id: AuthID::Key(api_key_tenant.id), + id: 
AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not create or list for other tenant ValidatorTest { validators: vec![ - ValidateGatewaysAccess::new(Flag::Create, tenant_a.id), - ValidateGatewaysAccess::new(Flag::List, tenant_a.id), + ValidateGatewaysAccess::new(Flag::Create, tenant_a.id.into()), + ValidateGatewaysAccess::new(Flag::List, tenant_a.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -3915,7 +4153,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can read, update and delete @@ -3925,7 +4163,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant gateway admin can read, update and delete @@ -3935,13 +4173,13 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::User(tenant_gateway_admin.id), + id: AuthID::User(tenant_gateway_admin.id.into()), ok: true, }, // tenant user can read ValidatorTest { validators: vec![ValidateGatewayAccess::new(Flag::Read, gw.gateway_id)], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not update or delete @@ -3950,7 +4188,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, // other user can not read, update or delete @@ -3960,7 +4198,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), 
ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -3975,7 +4213,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read, update and delete @@ -3985,7 +4223,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw_api_key_tenant.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw_api_key_tenant.gateway_id), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not read, update or delete gw from other tenant @@ -3995,7 +4233,7 @@ pub mod test { ValidateGatewayAccess::new(Flag::Update, gw.gateway_id), ValidateGatewayAccess::new(Flag::Delete, gw.gateway_id), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: false, }, ]; @@ -4054,35 +4292,36 @@ pub mod test { let api_key_other_tenant = api_key::test::create_api_key(false, true).await; let app = - application::test::create_application(Some(api_key_tenant.tenant_id.unwrap())).await; + application::test::create_application(Some(api_key_tenant.tenant_id.unwrap().into())) + .await; tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_admin.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_admin.id.into(), is_admin: true, ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_device_admin.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_device_admin.id.into(), is_device_admin: true, ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - 
user_id: tenant_gateway_admin.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_gateway_admin.id.into(), is_gateway_admin: true, ..Default::default() }) .await .unwrap(); tenant::add_user(tenant::TenantUser { - tenant_id: api_key_tenant.tenant_id.unwrap(), - user_id: tenant_user.id, + tenant_id: api_key_tenant.tenant_id.unwrap().into(), + user_id: tenant_user.id.into(), ..Default::default() }) .await @@ -4093,49 +4332,55 @@ pub mod test { // admin user can create and list ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can create and list ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can create and list ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { - validators: vec![ValidateMulticastGroupsAccess::new(Flag::List, app.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateMulticastGroupsAccess::new( + Flag::List, + app.id.into(), + )], + id: 
AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not create ValidatorTest { - validators: vec![ValidateMulticastGroupsAccess::new(Flag::Create, app.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateMulticastGroupsAccess::new( + Flag::Create, + app.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: false, }, // other user can not create or list ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, app.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -4146,28 +4391,28 @@ pub mod test { // admin api key can create and list ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, app.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create and list ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, app.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // tenant api key can not create or list for other tenant ValidatorTest { validators: vec![ - ValidateMulticastGroupsAccess::new(Flag::Create, app.id), - ValidateMulticastGroupsAccess::new(Flag::List, app.id), + ValidateMulticastGroupsAccess::new(Flag::Create, app.id.into()), + ValidateMulticastGroupsAccess::new(Flag::List, 
app.id.into()), ], - id: AuthID::Key(api_key_other_tenant.id), + id: AuthID::Key(api_key_other_tenant.id.into()), ok: false, }, ]; @@ -4186,56 +4431,56 @@ pub mod test { // admin user can read, update and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can read, update and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can read, update and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant user can read ValidatorTest { - validators: vec![ValidateMulticastGroupAccess::new(Flag::Read, mg.id)], - id: AuthID::User(tenant_user.id), + validators: 
vec![ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into())], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not update or delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, // other user can not read, update or delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -4246,31 +4491,31 @@ pub mod test { // admin api key can read, update and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can read, update and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + 
ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // other api key can not read, update or delete ValidatorTest { validators: vec![ - ValidateMulticastGroupAccess::new(Flag::Read, mg.id), - ValidateMulticastGroupAccess::new(Flag::Update, mg.id), - ValidateMulticastGroupAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupAccess::new(Flag::Read, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Update, mg.id.into()), + ValidateMulticastGroupAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::Key(api_key_other_tenant.id), + id: AuthID::Key(api_key_other_tenant.id.into()), ok: false, }, ]; @@ -4281,56 +4526,59 @@ pub mod test { // admin user can create, list and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(user_admin.id), + id: AuthID::User(user_admin.id.into()), ok: true, }, // tenant admin can create, list and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(tenant_admin.id), + id: AuthID::User(tenant_admin.id.into()), ok: true, }, // tenant device admin can create, 
list and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(tenant_device_admin.id), + id: AuthID::User(tenant_device_admin.id.into()), ok: true, }, // tenant user can list ValidatorTest { - validators: vec![ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id)], - id: AuthID::User(tenant_user.id), + validators: vec![ValidateMulticastGroupQueueAccess::new( + Flag::List, + mg.id.into(), + )], + id: AuthID::User(tenant_user.id.into()), ok: true, }, // tenant user can not create or delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(tenant_user.id), + id: AuthID::User(tenant_user.id.into()), ok: false, }, // uther user can not create, list or delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::User(user_active.id), + id: AuthID::User(user_active.id.into()), ok: false, }, ]; @@ -4341,31 +4589,31 @@ pub mod test { // admin api key can create, list and delete ValidatorTest { validators: vec![ - 
ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::Key(api_key_admin.id), + id: AuthID::Key(api_key_admin.id.into()), ok: true, }, // tenant api key can create, list and delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::Key(api_key_tenant.id), + id: AuthID::Key(api_key_tenant.id.into()), ok: true, }, // other api key can not create, list or delete ValidatorTest { validators: vec![ - ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id), - ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id), + ValidateMulticastGroupQueueAccess::new(Flag::Create, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::List, mg.id.into()), + ValidateMulticastGroupQueueAccess::new(Flag::Delete, mg.id.into()), ], - id: AuthID::Key(api_key_other_tenant.id), + id: AuthID::Key(api_key_other_tenant.id.into()), ok: false, }, ]; diff --git a/chirpstack/src/api/device.rs b/chirpstack/src/api/device.rs index aaa10871..7a77a617 100644 --- a/chirpstack/src/api/device.rs +++ b/chirpstack/src/api/device.rs @@ -64,8 +64,8 @@ impl DeviceService for Device { let d = device::Device { dev_eui, - application_id: app_id, - device_profile_id: dp_id, + application_id: 
app_id.into(), + device_profile_id: dp_id.into(), name: req_d.name.clone(), description: req_d.description.clone(), skip_fcnt_check: req_d.skip_fcnt_check, @@ -191,8 +191,8 @@ impl DeviceService for Device { // update let _ = device::update(device::Device { dev_eui, - application_id: app_id, - device_profile_id: dp_id, + application_id: app_id.into(), + device_profile_id: dp_id.into(), name: req_d.name.clone(), description: req_d.description.clone(), skip_fcnt_check: req_d.skip_fcnt_check, @@ -533,7 +533,7 @@ impl DeviceService for Device { dp.reset_session_to_boot_params(&mut ds); let mut device_changeset = device::DeviceChangeset { - device_session: Some(Some(ds)), + device_session: Some(Some(ds.into())), dev_addr: Some(Some(dev_addr)), secondary_dev_addr: Some(None), ..Default::default() @@ -1085,7 +1085,7 @@ impl DeviceService for Device { } let qi = device_queue::DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui, f_port: req_qi.f_port as i16, confirmed: req_qi.confirmed, @@ -1539,11 +1539,14 @@ pub mod test { dev.dev_eui, &device::DeviceChangeset { dev_addr: Some(Some(DevAddr::from_be_bytes([1, 2, 3, 4]))), - device_session: Some(Some(internal::DeviceSession { - dev_addr: vec![1, 2, 3, 4], - js_session_key_id: vec![8, 7, 6, 5, 4, 3, 2, 1], - ..Default::default() - })), + device_session: Some(Some( + internal::DeviceSession { + dev_addr: vec![1, 2, 3, 4], + js_session_key_id: vec![8, 7, 6, 5, 4, 3, 2, 1], + ..Default::default() + } + .into(), + )), ..Default::default() }, ) @@ -1568,14 +1571,17 @@ pub mod test { device::partial_update( dev.dev_eui, &device::DeviceChangeset { - device_session: Some(Some(internal::DeviceSession { - dev_addr: vec![1, 2, 3, 4], - app_s_key: Some(common::KeyEnvelope { - kek_label: "test-key".into(), - aes_key: vec![8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1], - }), - ..Default::default() - })), + device_session: Some(Some( + internal::DeviceSession { + dev_addr: vec![1, 2, 3, 4], + app_s_key: 
Some(common::KeyEnvelope { + kek_label: "test-key".into(), + aes_key: vec![8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1], + }), + ..Default::default() + } + .into(), + )), ..Default::default() }, ) diff --git a/chirpstack/src/api/device_profile.rs b/chirpstack/src/api/device_profile.rs index 1c862407..7c7c709c 100644 --- a/chirpstack/src/api/device_profile.rs +++ b/chirpstack/src/api/device_profile.rs @@ -45,7 +45,7 @@ impl DeviceProfileService for DeviceProfile { .await?; let mut dp = device_profile::DeviceProfile { - tenant_id, + tenant_id: tenant_id.into(), name: req_dp.name.clone(), description: req_dp.description.clone(), region: req_dp.region().from_proto(), @@ -247,7 +247,7 @@ impl DeviceProfileService for DeviceProfile { // update let _ = device_profile::update(device_profile::DeviceProfile { - id: dp_id, + id: dp_id.into(), name: req_dp.name.clone(), description: req_dp.description.clone(), region: req_dp.region().from_proto(), diff --git a/chirpstack/src/api/gateway.rs b/chirpstack/src/api/gateway.rs index f2b9e4e7..a1b3b389 100644 --- a/chirpstack/src/api/gateway.rs +++ b/chirpstack/src/api/gateway.rs @@ -58,7 +58,7 @@ impl GatewayService for Gateway { let gw = gateway::Gateway { gateway_id: EUI64::from_str(&req_gw.gateway_id).map_err(|e| e.status())?, - tenant_id, + tenant_id: tenant_id.into(), name: req_gw.name.clone(), description: req_gw.description.clone(), latitude: lat, @@ -851,8 +851,8 @@ impl GatewayService for Gateway { .await?; let _ = gateway::update_relay_gateway(gateway::RelayGateway { - tenant_id, relay_id, + tenant_id: tenant_id.into(), name: req_relay.name.clone(), description: req_relay.description.clone(), stats_interval_secs: req_relay.stats_interval as i32, @@ -1028,7 +1028,9 @@ pub mod test { }), }; let mut create_req = Request::new(create_req); - create_req.extensions_mut().insert(AuthID::User(u.id)); + create_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = 
service.create(create_req).await.unwrap(); // get @@ -1036,7 +1038,9 @@ pub mod test { gateway_id: "0102030405060708".into(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::Gateway { @@ -1070,7 +1074,9 @@ pub mod test { }), }; let mut up_req = Request::new(up_req); - up_req.extensions_mut().insert(AuthID::User(u.id)); + up_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.update(up_req).await.unwrap(); // get @@ -1078,7 +1084,9 @@ pub mod test { gateway_id: "0102030405060708".into(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::Gateway { @@ -1105,7 +1113,9 @@ pub mod test { ..Default::default() }; let mut list_req = Request::new(list_req); - list_req.extensions_mut().insert(AuthID::User(u.id)); + list_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let list_resp = service.list(list_req).await.unwrap(); assert_eq!(1, list_resp.get_ref().total_count); assert_eq!(1, list_resp.get_ref().result.len()); @@ -1115,14 +1125,18 @@ pub mod test { gateway_id: "0102030405060708".into(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.delete(del_req).await.unwrap(); let del_req = api::DeleteGatewayRequest { gateway_id: "0102030405060708".into(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let del_resp = 
service.delete(del_req).await; assert!(del_resp.is_err()); } @@ -1198,7 +1212,9 @@ pub mod test { aggregation: common::Aggregation::Day.into(), }; let mut stats_req = Request::new(stats_req); - stats_req.extensions_mut().insert(AuthID::User(u.id)); + stats_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let stats_resp = service.get_metrics(stats_req).await.unwrap(); let stats_resp = stats_resp.get_ref(); assert_eq!( @@ -1289,7 +1305,7 @@ pub mod test { end: Some(now_st.into()), }; let mut stats_req = Request::new(stats_req); - stats_req.extensions_mut().insert(AuthID::User(u.id)); + stats_req.extensions_mut().insert(AuthID::User(u.id.into())); let stats_resp = service.get_duty_cycle_metrics(stats_req).await.unwrap(); let stats_resp = stats_resp.get_ref(); assert_eq!( @@ -1363,7 +1379,9 @@ pub mod test { relay_id: "01020304".into(), }; let mut get_relay_req = Request::new(get_relay_req); - get_relay_req.extensions_mut().insert(AuthID::User(u.id)); + get_relay_req + .extensions_mut() + .insert(AuthID::User(u.id.into())); let get_relay_resp = service.get_relay_gateway(get_relay_req).await.unwrap(); assert_eq!( Some(api::RelayGateway { @@ -1389,7 +1407,9 @@ pub mod test { }), }; let mut up_relay_req = Request::new(up_relay_req); - up_relay_req.extensions_mut().insert(AuthID::User(u.id)); + up_relay_req + .extensions_mut() + .insert(AuthID::User(u.id.into())); let _ = service.update_relay_gateway(up_relay_req).await.unwrap(); // get relay gateway @@ -1398,7 +1418,9 @@ pub mod test { relay_id: "01020304".into(), }; let mut get_relay_req = Request::new(get_relay_req); - get_relay_req.extensions_mut().insert(AuthID::User(u.id)); + get_relay_req + .extensions_mut() + .insert(AuthID::User(u.id.into())); let get_relay_resp = service.get_relay_gateway(get_relay_req).await.unwrap(); assert_eq!( Some(api::RelayGateway { @@ -1419,7 +1441,9 @@ pub mod test { offset: 0, }; let mut list_relay_req = Request::new(list_relay_req); - 
list_relay_req.extensions_mut().insert(AuthID::User(u.id)); + list_relay_req + .extensions_mut() + .insert(AuthID::User(u.id.into())); let list_relay_resp = service.list_relay_gateways(list_relay_req).await.unwrap(); assert_eq!(1, list_relay_resp.get_ref().total_count); assert_eq!(1, list_relay_resp.get_ref().result.len()); @@ -1430,7 +1454,9 @@ pub mod test { relay_id: "01020304".into(), }; let mut del_relay_req = Request::new(del_relay_req); - del_relay_req.extensions_mut().insert(AuthID::User(u.id)); + del_relay_req + .extensions_mut() + .insert(AuthID::User(u.id.into())); let del_relay_resp = service.delete_relay_gateway(del_relay_req).await; assert!(del_relay_resp.is_ok()); @@ -1439,7 +1465,9 @@ pub mod test { relay_id: "01020304".into(), }; let mut del_relay_req = Request::new(del_relay_req); - del_relay_req.extensions_mut().insert(AuthID::User(u.id)); + del_relay_req + .extensions_mut() + .insert(AuthID::User(u.id.into())); let del_relay_resp = service.delete_relay_gateway(del_relay_req).await; assert!(del_relay_resp.is_err()); } diff --git a/chirpstack/src/api/internal.rs b/chirpstack/src/api/internal.rs index c6639262..ce59f556 100644 --- a/chirpstack/src/api/internal.rs +++ b/chirpstack/src/api/internal.rs @@ -287,7 +287,11 @@ impl InternalService for Internal { let tenant_id = if req_key.tenant_id.is_empty() { None } else { - Some(Uuid::from_str(&req_key.tenant_id).map_err(|e| e.status())?) + Some( + Uuid::from_str(&req_key.tenant_id) + .map_err(|e| e.status())? 
+ .into(), + ) }; if req_key.is_admin && tenant_id.is_some() { @@ -312,7 +316,7 @@ impl InternalService for Internal { let ak = api_key::ApiKey { name: req_key.name.clone(), is_admin: req_key.is_admin, - tenant_id, + tenant_id: tenant_id.map(|u| u.into()), ..Default::default() }; diff --git a/chirpstack/src/api/multicast.rs b/chirpstack/src/api/multicast.rs index bc06eaf1..0a1deb85 100644 --- a/chirpstack/src/api/multicast.rs +++ b/chirpstack/src/api/multicast.rs @@ -47,7 +47,7 @@ impl MulticastGroupService for MulticastGroup { .await?; let mg = multicast::MulticastGroup { - application_id: app_id, + application_id: app_id.into(), name: req_mg.name.clone(), region: req_mg.region().from_proto(), mc_addr: DevAddr::from_str(&req_mg.mc_addr).map_err(|e| e.status())?, @@ -154,7 +154,7 @@ impl MulticastGroupService for MulticastGroup { .await?; let _ = multicast::update(multicast::MulticastGroup { - id: mg_id, + id: mg_id.into(), name: req_mg.name.clone(), region: req_mg.region().from_proto(), mc_addr: DevAddr::from_str(&req_mg.mc_addr).map_err(|e| e.status())?, @@ -408,7 +408,7 @@ impl MulticastGroupService for MulticastGroup { .await?; let f_cnt = downlink::multicast::enqueue(multicast::MulticastGroupQueueItem { - multicast_group_id: mg_id, + multicast_group_id: mg_id.into(), f_port: req_enq.f_port as i16, data: req_enq.data.clone(), ..Default::default() diff --git a/chirpstack/src/api/tenant.rs b/chirpstack/src/api/tenant.rs index 34cb211c..5d998bc4 100644 --- a/chirpstack/src/api/tenant.rs +++ b/chirpstack/src/api/tenant.rs @@ -122,7 +122,7 @@ impl TenantService for Tenant { // update let _ = tenant::update(tenant::Tenant { - id: tenant_id, + id: tenant_id.into(), name: req_tenant.name.clone(), description: req_tenant.description.clone(), can_have_gateways: req_tenant.can_have_gateways, @@ -190,7 +190,7 @@ impl TenantService for Tenant { let u = user::get(id).await.map_err(|e| e.status())?; if !u.is_admin { - filters.user_id = Some(u.id); + filters.user_id = 
Some(u.id.into()); } } AuthID::Key(_) => { @@ -258,8 +258,8 @@ impl TenantService for Tenant { .await?; let _ = tenant::add_user(tenant::TenantUser { - tenant_id, - user_id, + tenant_id: tenant_id.into(), + user_id: user_id.into(), is_admin: req_user.is_admin, is_device_admin: req_user.is_device_admin, is_gateway_admin: req_user.is_gateway_admin, @@ -342,8 +342,8 @@ impl TenantService for Tenant { .await?; tenant::update_user(tenant::TenantUser { - tenant_id, - user_id, + tenant_id: tenant_id.into(), + user_id: user_id.into(), is_admin: req_user.is_admin, is_device_admin: req_user.is_device_admin, is_gateway_admin: req_user.is_gateway_admin, @@ -482,7 +482,9 @@ pub mod test { }), }; let mut create_req = Request::new(create_req); - create_req.extensions_mut().insert(AuthID::User(u.id)); + create_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let create_resp = service.create(create_req).await.unwrap(); // get @@ -490,7 +492,9 @@ pub mod test { id: create_resp.get_ref().id.clone(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::Tenant { @@ -518,7 +522,9 @@ pub mod test { }), }; let mut up_req = Request::new(up_req); - up_req.extensions_mut().insert(AuthID::User(u.id)); + up_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.update(up_req).await.unwrap(); // get @@ -526,7 +532,9 @@ pub mod test { id: create_resp.get_ref().id.clone(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::Tenant { @@ -549,7 +557,9 @@ pub mod test { user_id: "".into(), }; let mut list_req = Request::new(list_req); 
- list_req.extensions_mut().insert(AuthID::User(u.id)); + list_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let list_resp = service.list(list_req).await.unwrap(); assert_eq!(1, list_resp.get_ref().total_count); assert_eq!(1, list_resp.get_ref().result.len()); @@ -559,14 +569,18 @@ pub mod test { id: create_resp.get_ref().id.clone(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.delete(del_req).await.unwrap(); let del_req = api::DeleteTenantRequest { id: create_resp.get_ref().id.clone(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let del_resp = service.delete(del_req).await; assert!(del_resp.is_err()); } diff --git a/chirpstack/src/api/user.rs b/chirpstack/src/api/user.rs index a21dba8f..81b2617a 100644 --- a/chirpstack/src/api/user.rs +++ b/chirpstack/src/api/user.rs @@ -64,8 +64,8 @@ impl UserService for User { let tenant_id = Uuid::from_str(&tu.tenant_id).map_err(|e| e.status())?; tenant::add_user(tenant::TenantUser { - tenant_id, - user_id: u.id, + tenant_id: tenant_id.into(), + user_id: u.id.into(), is_admin: tu.is_admin, is_device_admin: tu.is_device_admin, is_gateway_admin: tu.is_gateway_admin, @@ -138,7 +138,7 @@ impl UserService for User { // update let _ = user::update(user::User { - id: user_id, + id: user_id.into(), is_admin: req_user.is_admin, is_active: req_user.is_active, email: req_user.email.clone(), @@ -292,7 +292,9 @@ pub mod test { }), }; let mut create_req = Request::new(create_req); - create_req.extensions_mut().insert(AuthID::User(u.id)); + create_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let create_resp = service.create(create_req).await.unwrap(); // get @@ -300,7 +302,9 @@ pub mod test { 
id: create_resp.get_ref().id.clone(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::User { @@ -326,7 +330,9 @@ pub mod test { }), }; let mut up_req = Request::new(up_req); - up_req.extensions_mut().insert(AuthID::User(u.id)); + up_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.update(up_req).await.unwrap(); // get @@ -334,7 +340,9 @@ pub mod test { id: create_resp.get_ref().id.clone(), }; let mut get_req = Request::new(get_req); - get_req.extensions_mut().insert(AuthID::User(u.id)); + get_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let get_resp = service.get(get_req).await.unwrap(); assert_eq!( Some(api::User { @@ -354,7 +362,9 @@ pub mod test { password: "newpassword".into(), }; let mut up_req = Request::new(up_req); - up_req.extensions_mut().insert(AuthID::User(u.id)); + up_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.update_password(up_req).await.unwrap(); // list @@ -363,7 +373,9 @@ pub mod test { limit: 10, }; let mut list_req = Request::new(list_req); - list_req.extensions_mut().insert(AuthID::User(u.id)); + list_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let list_resp = service.list(list_req).await.unwrap(); // * Admin from migrations // * User that we created for auth @@ -376,14 +388,18 @@ pub mod test { id: create_resp.get_ref().id.clone(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let _ = service.delete(del_req).await.unwrap(); let del_req = api::DeleteUserRequest { id: create_resp.get_ref().id.clone(), }; let mut del_req = Request::new(del_req); - 
del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let del_resp = service.delete(del_req).await; assert!(del_resp.is_err()); @@ -391,7 +407,9 @@ pub mod test { id: u.id.to_string(), }; let mut del_req = Request::new(del_req); - del_req.extensions_mut().insert(AuthID::User(u.id)); + del_req + .extensions_mut() + .insert(AuthID::User(Into::::into(u.id).clone())); let del_resp = service.delete(del_req).await; assert!(del_resp.is_err()); } diff --git a/chirpstack/src/cmd/configfile.rs b/chirpstack/src/cmd/configfile.rs index d5227820..a98c9d11 100644 --- a/chirpstack/src/cmd/configfile.rs +++ b/chirpstack/src/cmd/configfile.rs @@ -3,7 +3,8 @@ use handlebars::{no_escape, Handlebars}; use super::super::config; pub fn run() { - let template = r#" + let template = vec![ +r#" # Logging configuration [logging] @@ -20,7 +21,9 @@ pub fn run() { # Log as JSON. json={{ logging.json }} - +"#, +#[cfg(feature = "postgres")] +r#" # PostgreSQL configuration. [postgresql] @@ -46,8 +49,36 @@ pub fn run() { # the server-certificate is not signed by a CA in the platform certificate # store. ca_cert="{{ postgresql.ca_cert }}" +"#, +#[cfg(feature = "sqlite")] +r#" +# SQLite configuration. +[sqlite] + # Sqlite DB path. + # + # Format example: sqlite:///. + # + path="{{ sqlite.path }}" + # Max open connections. + # + # This sets the max. number of open connections that are allowed in the + # SQLite connection pool. + max_open_connections={{ sqlite.max_open_connections }} + + # PRAGMAs. + # + # This configures the list of PRAGMAs that are executed to prepare the + # SQLite library. For a full list of available PRAGMAs see: + # https://www.sqlite.org/pragma.html + pragmas=[ + {{#each sqlite.pragmas}} + "{{this}}", + {{/each}} + ] +"#, +r#" # Redis configuration. [redis] @@ -944,6 +975,7 @@ pub fn run() { kek="{{ this.kek }}" {{/each}} + # UI configuration. [ui] # Tileserver URL. 
@@ -958,14 +990,14 @@ pub fn run() { # default tileserver_url (OSM). If you configure a different tile-server, you # might need to update the map_attribution. map_attribution="{{ui.map_attribution}}" -"#; +"#].join("\n"); let mut reg = Handlebars::new(); reg.register_escape_fn(no_escape); let conf = config::get(); println!( "{}", - reg.render_template(template, &conf) + reg.render_template(&template, &conf) .expect("render configfile error") ); } diff --git a/chirpstack/src/cmd/migrate_ds_to_pg.rs b/chirpstack/src/cmd/migrate_ds_to_pg.rs index 4b0c26cf..18e9f827 100644 --- a/chirpstack/src/cmd/migrate_ds_to_pg.rs +++ b/chirpstack/src/cmd/migrate_ds_to_pg.rs @@ -43,7 +43,7 @@ pub async fn run() -> Result<()> { *dev_eui, &storage::device::DeviceChangeset { dev_addr: Some(Some(DevAddr::from_slice(&ds.dev_addr)?)), - device_session: Some(Some(ds)), + device_session: Some(Some(ds.into())), ..Default::default() }, ) diff --git a/chirpstack/src/codec/mod.rs b/chirpstack/src/codec/mod.rs index 00da18ff..5b84b8ef 100644 --- a/chirpstack/src/codec/mod.rs +++ b/chirpstack/src/codec/mod.rs @@ -5,8 +5,11 @@ use std::str::FromStr; use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; use diesel::backend::Backend; +#[cfg(feature = "postgres")] use diesel::pg::Pg; use diesel::sql_types::Text; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; use diesel::{deserialize, serialize}; use serde::{Deserialize, Serialize}; @@ -40,6 +43,7 @@ where } } +#[cfg(feature = "postgres")] impl serialize::ToSql for Codec where str: serialize::ToSql, @@ -49,6 +53,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for Codec { + fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + impl FromStr for Codec { type Err = anyhow::Error; diff --git a/chirpstack/src/config.rs b/chirpstack/src/config.rs index 94294961..8f433d57 100644 --- a/chirpstack/src/config.rs +++ 
b/chirpstack/src/config.rs @@ -19,6 +19,7 @@ pub struct Configuration { pub logging: Logging, pub postgresql: Postgresql, pub redis: Redis, + pub sqlite: Sqlite, pub api: Api, pub gateway: Gateway, pub network: Network, @@ -90,6 +91,29 @@ impl Default for Redis { } } +#[derive(Serialize, Deserialize, Clone)] +#[serde(default)] +pub struct Sqlite { + pub path: String, + pub pragmas: Vec, + pub max_open_connections: u32, +} + +impl Default for Sqlite { + fn default() -> Self { + Sqlite { + path: "sqlite://chirpstack.sqlite".into(), + pragmas: vec![ + // Set busy_timeout to avoid manually managing transaction business/contention + "busy_timeout = 1000".to_string(), + // Enable foreign-keys since it is off by default + "foreign_keys = ON".to_string(), + ], + max_open_connections: 4, + } + } +} + #[derive(Serialize, Deserialize, Clone)] #[serde(default)] pub struct Api { diff --git a/chirpstack/src/downlink/data.rs b/chirpstack/src/downlink/data.rs index e212f57d..bd79af54 100644 --- a/chirpstack/src/downlink/data.rs +++ b/chirpstack/src/downlink/data.rs @@ -363,7 +363,7 @@ impl Data { trace!("Selecting downlink gateway"); let gw_down = helpers::select_downlink_gateway( - Some(self.tenant.id), + Some(self.tenant.id.into()), &self.device.get_device_session()?.region_config_id, self.network_conf.gateway_prefer_min_margin, self.device_gateway_rx_info.as_mut().unwrap(), @@ -519,7 +519,8 @@ impl Data { }, }; - integration::ack_event(self.application.id, &self.device.variables, &pl).await; + integration::ack_event(self.application.id.into(), &self.device.variables, &pl) + .await; warn!(dev_eui = %self.device.dev_eui, device_queue_item_id = %qi.id, "Device queue-item discarded because of timeout"); continue; @@ -549,7 +550,8 @@ impl Data { .collect(), }; - integration::log_event(self.application.id, &self.device.variables, &pl).await; + integration::log_event(self.application.id.into(), &self.device.variables, &pl) + .await; warn!(dev_eui = %self.device.dev_eui, 
device_queue_item_id = %qi.id, "Device queue-item discarded because of max. payload size"); continue; @@ -585,7 +587,8 @@ impl Data { .collect(), }; - integration::log_event(self.application.id, &self.device.variables, &pl).await; + integration::log_event(self.application.id.into(), &self.device.variables, &pl) + .await; warn!(dev_eui = %self.device.dev_eui, device_queue_item_id = %qi.id, "Device queue-item discarded because of invalid frame-counter"); continue; @@ -2728,7 +2731,7 @@ mod test { name: "max payload size error".into(), max_payload_size: 10, queue_items: vec![device_queue::DeviceQueueItem { - id: qi_id, + id: qi_id.into(), dev_eui: d.dev_eui, f_port: 1, data: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], @@ -2768,7 +2771,7 @@ mod test { name: "is pending".into(), max_payload_size: 10, queue_items: vec![device_queue::DeviceQueueItem { - id: qi_id, + id: qi_id.into(), dev_eui: d.dev_eui, f_port: 1, f_cnt_down: Some(10), @@ -2800,7 +2803,7 @@ mod test { name: "invalid frame-counter".into(), max_payload_size: 10, queue_items: vec![device_queue::DeviceQueueItem { - id: qi_id, + id: qi_id.into(), dev_eui: d.dev_eui, f_port: 1, data: vec![1, 2, 3], @@ -2841,14 +2844,14 @@ mod test { name: "valid payload".into(), max_payload_size: 10, queue_items: vec![device_queue::DeviceQueueItem { - id: qi_id, + id: qi_id.into(), dev_eui: d.dev_eui, f_port: 1, data: vec![1, 2, 3], ..Default::default() }], expected_queue_item: Some(device_queue::DeviceQueueItem { - id: qi_id, + id: qi_id.into(), dev_eui: d.dev_eui, f_port: 1, data: vec![1, 2, 3], @@ -2874,7 +2877,7 @@ mod test { let d = device::partial_update( d.dev_eui, &device::DeviceChangeset { - device_session: Some(Some(ds.clone())), + device_session: Some(Some(ds.clone().into())), ..Default::default() }, ) @@ -3418,11 +3421,14 @@ mod test { dev_addr: Some(*dev_addr), application_id: app.id, device_profile_id: dp_ed.id, - device_session: Some(internal::DeviceSession { - dev_addr: dev_addr.to_vec(), - nwk_s_enc_key: vec![0; 
16], - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + dev_addr: dev_addr.to_vec(), + nwk_s_enc_key: vec![0; 16], + ..Default::default() + } + .into(), + ), ..Default::default() }) .await @@ -3435,7 +3441,7 @@ mod test { let d_relay = device::partial_update( d_relay.dev_eui, &device::DeviceChangeset { - device_session: Some(Some(test.device_session.clone())), + device_session: Some(Some(test.device_session.clone().into())), ..Default::default() }, ) @@ -3884,7 +3890,7 @@ mod test { let d_relay = device::partial_update( d_relay.dev_eui, &device::DeviceChangeset { - device_session: Some(Some(test.device_session.clone())), + device_session: Some(Some(test.device_session.clone().into())), ..Default::default() }, ) @@ -4015,7 +4021,7 @@ mod test { application: application::Application::default(), device_profile: test.device_profile.clone(), device: device::Device { - device_session: Some(test.device_session.clone()), + device_session: Some(test.device_session.clone().into()), ..Default::default() }, network_conf: config::get_region_network("eu868").unwrap(), @@ -4126,7 +4132,7 @@ mod test { application: application::Application::default(), device_profile: test.device_profile.clone(), device: device::Device { - device_session: Some(test.device_session.clone()), + device_session: Some(test.device_session.clone().into()), ..Default::default() }, network_conf: config::get_region_network("eu868").unwrap(), @@ -4247,7 +4253,7 @@ mod test { application: application::Application::default(), device_profile: test.device_profile.clone(), device: device::Device { - device_session: Some(test.device_session.clone()), + device_session: Some(test.device_session.clone().into()), ..Default::default() }, network_conf: config::get_region_network("eu868").unwrap(), @@ -4504,7 +4510,7 @@ mod test { let d_relay = device::partial_update( d_relay.dev_eui, &device::DeviceChangeset { - device_session: Some(Some(test.device_session.clone())), + device_session: 
Some(Some(test.device_session.clone().into())), ..Default::default() }, ) diff --git a/chirpstack/src/downlink/helpers.rs b/chirpstack/src/downlink/helpers.rs index e2c079a6..e4093dcc 100644 --- a/chirpstack/src/downlink/helpers.rs +++ b/chirpstack/src/downlink/helpers.rs @@ -239,7 +239,7 @@ mod tests { }, // is_private_down is set, first gateway matches tenant. Test { - tenant_id: Some(t.id), + tenant_id: Some(t.id.into()), min_snr_margin: 0.0, rx_info: internal::DeviceGatewayRxInfo { items: vec![ @@ -262,7 +262,7 @@ mod tests { }, // is_private_down is set, second gateway matches tenant. Test { - tenant_id: Some(t.id), + tenant_id: Some(t.id.into()), min_snr_margin: 0.0, rx_info: internal::DeviceGatewayRxInfo { items: vec![ diff --git a/chirpstack/src/downlink/join.rs b/chirpstack/src/downlink/join.rs index 62b365b5..47855932 100644 --- a/chirpstack/src/downlink/join.rs +++ b/chirpstack/src/downlink/join.rs @@ -182,7 +182,7 @@ impl JoinAccept<'_> { trace!("Select downlink gateway"); let gw_down = helpers::select_downlink_gateway( - Some(self.tenant.id), + Some(self.tenant.id.into()), &self.uplink_frame_set.region_config_id, self.network_conf.gateway_prefer_min_margin, self.device_gateway_rx_info.as_mut().unwrap(), diff --git a/chirpstack/src/downlink/tx_ack.rs b/chirpstack/src/downlink/tx_ack.rs index 2149a780..61449320 100644 --- a/chirpstack/src/downlink/tx_ack.rs +++ b/chirpstack/src/downlink/tx_ack.rs @@ -434,7 +434,7 @@ impl TxAck { ..Default::default() }; - integration::log_event(app.id, &dev.variables, &pl).await; + integration::log_event(app.id.into(), &dev.variables, &pl).await; Ok(()) } @@ -483,7 +483,7 @@ impl TxAck { tx_info: self.downlink_frame_item.as_ref().unwrap().tx_info.clone(), }; - integration::txack_event(app.id, &dev.variables, &pl).await; + integration::txack_event(app.id.into(), &dev.variables, &pl).await; Ok(()) } @@ -532,7 +532,7 @@ impl TxAck { tx_info: self.downlink_frame_item.as_ref().unwrap().tx_info.clone(), }; - 
integration::txack_event(app.id, &dev.variables, &pl).await; + integration::txack_event(app.id.into(), &dev.variables, &pl).await; Ok(()) } diff --git a/chirpstack/src/integration/mod.rs b/chirpstack/src/integration/mod.rs index 06cc2b15..36c5cf05 100644 --- a/chirpstack/src/integration/mod.rs +++ b/chirpstack/src/integration/mod.rs @@ -28,6 +28,7 @@ pub mod mock; mod mqtt; mod mydevices; mod pilot_things; +#[cfg(feature = "postgres")] mod postgresql; mod redis; mod thingsboard; @@ -54,6 +55,7 @@ pub async fn setup() -> Result<()> { .context("Setup MQTT integration")?, )); } + #[cfg(feature = "postgres")] "postgresql" => integrations.push(Box::new( postgresql::Integration::new(&conf.integration.postgresql) .await @@ -533,7 +535,7 @@ async fn handle_down_command(application_id: String, pl: integration::DownlinkCo // Validate that the application_id from the topic is indeed the application ID to which // the device belongs. let dev = device::get(&dev_eui).await?; - if dev.application_id != app_id { + if Into::::into(dev.application_id) != app_id { return Err(anyhow!( "Application ID from topic does not match application ID from device" )); @@ -555,8 +557,8 @@ async fn handle_down_command(application_id: String, pl: integration::DownlinkCo let qi = device_queue::DeviceQueueItem { id: match pl.id.is_empty() { - true => Uuid::new_v4(), - false => Uuid::from_str(&pl.id)?, + true => Uuid::new_v4().into(), + false => Uuid::from_str(&pl.id)?.into(), }, f_port: pl.f_port as i16, confirmed: pl.confirmed, diff --git a/chirpstack/src/maccommand/configure_fwd_limit.rs b/chirpstack/src/maccommand/configure_fwd_limit.rs index 42bb331f..075694d8 100644 --- a/chirpstack/src/maccommand/configure_fwd_limit.rs +++ b/chirpstack/src/maccommand/configure_fwd_limit.rs @@ -118,7 +118,7 @@ mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp 
= handle( diff --git a/chirpstack/src/maccommand/ctrl_uplink_list.rs b/chirpstack/src/maccommand/ctrl_uplink_list.rs index d2c9c205..8515d4af 100644 --- a/chirpstack/src/maccommand/ctrl_uplink_list.rs +++ b/chirpstack/src/maccommand/ctrl_uplink_list.rs @@ -277,7 +277,7 @@ mod test { device::partial_update( dev.dev_eui, &device::DeviceChangeset { - device_session: Some(Some(tst.device_session_ed.clone())), + device_session: Some(Some(tst.device_session_ed.clone().into())), ..Default::default() }, ) @@ -285,7 +285,7 @@ mod test { .unwrap(); let mut relay_dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; diff --git a/chirpstack/src/maccommand/dev_status.rs b/chirpstack/src/maccommand/dev_status.rs index f608e825..d88dcbeb 100644 --- a/chirpstack/src/maccommand/dev_status.rs +++ b/chirpstack/src/maccommand/dev_status.rs @@ -1,11 +1,10 @@ use anyhow::Result; -use bigdecimal::BigDecimal; use chrono::{DateTime, Utc}; use tracing::info; use crate::api::helpers::ToProto; use crate::integration; -use crate::storage::{application, device, device_profile, tenant}; +use crate::storage::{application, device, device_profile, fields, tenant}; use crate::uplink::{helpers, UplinkFrameSet}; use chirpstack_api::integration as integration_pb; @@ -29,8 +28,8 @@ pub async fn handle( margin: Some(pl.margin as i32), external_power_source: Some(pl.battery == 0), battery_level: Some(if pl.battery > 0 && pl.battery < 255 { - let v: BigDecimal = ((pl.battery as f32) / 254.0 * 100.0).try_into()?; - Some(v.with_scale(2)) + let v: fields::BigDecimal = ((pl.battery as f32) / 254.0 * 100.0).try_into()?; + Some(v.with_scale(2).into()) } else { None }), @@ -47,7 +46,7 @@ pub async fn handle( helpers::get_rx_timestamp(&uplink_frame_set.rx_info_set).into(); integration::status_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::StatusEvent { deduplication_id: 
uplink_frame_set.uplink_set_id.to_string(), @@ -203,7 +202,7 @@ pub mod test { assert_eq!(Some(10), d.margin); assert!(!d.external_power_source); assert_eq!( - Some(BigDecimal::from_str("100.00").unwrap()), + Some(bigdecimal::BigDecimal::from_str("100.00").unwrap().into()), d.battery_level ); } diff --git a/chirpstack/src/maccommand/end_device_conf.rs b/chirpstack/src/maccommand/end_device_conf.rs index a42c7d41..1b1cd642 100644 --- a/chirpstack/src/maccommand/end_device_conf.rs +++ b/chirpstack/src/maccommand/end_device_conf.rs @@ -178,7 +178,7 @@ mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle( diff --git a/chirpstack/src/maccommand/filter_list.rs b/chirpstack/src/maccommand/filter_list.rs index 38727138..f48cf2b3 100644 --- a/chirpstack/src/maccommand/filter_list.rs +++ b/chirpstack/src/maccommand/filter_list.rs @@ -216,7 +216,7 @@ mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle(&mut dev, &tst.filter_list_ans, tst.filter_list_req.as_ref()); diff --git a/chirpstack/src/maccommand/link_adr.rs b/chirpstack/src/maccommand/link_adr.rs index ca053107..e6f37b9f 100644 --- a/chirpstack/src/maccommand/link_adr.rs +++ b/chirpstack/src/maccommand/link_adr.rs @@ -361,7 +361,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { dev_eui: lrwn::EUI64::from_str("0102030405060708").unwrap(), - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let block = lrwn::MACCommandSet::new(vec![lrwn::MACCommand::LinkADRAns( diff --git a/chirpstack/src/maccommand/mod.rs b/chirpstack/src/maccommand/mod.rs index aede19de..e880f7a8 100644 --- 
a/chirpstack/src/maccommand/mod.rs +++ b/chirpstack/src/maccommand/mod.rs @@ -207,9 +207,12 @@ pub mod test { let dp: device_profile::DeviceProfile = Default::default(); let mut dev = device::Device { dev_eui: EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), - device_session: Some(internal::DeviceSession { - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + ..Default::default() + } + .into(), + ), ..Default::default() }; diff --git a/chirpstack/src/maccommand/new_channel.rs b/chirpstack/src/maccommand/new_channel.rs index 91035c26..da5a987a 100644 --- a/chirpstack/src/maccommand/new_channel.rs +++ b/chirpstack/src/maccommand/new_channel.rs @@ -472,7 +472,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; diff --git a/chirpstack/src/maccommand/notify_new_end_device.rs b/chirpstack/src/maccommand/notify_new_end_device.rs index d5c9d193..6f0aa137 100644 --- a/chirpstack/src/maccommand/notify_new_end_device.rs +++ b/chirpstack/src/maccommand/notify_new_end_device.rs @@ -65,7 +65,7 @@ pub async fn handle( .collect(), }; - integration::log_event(app.id, &dev.variables, &log_event).await; + integration::log_event(app.id.into(), &dev.variables, &log_event).await; Ok(None) } @@ -88,19 +88,19 @@ mod test { integration::set_mock().await; let t = tenant::Tenant { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), name: "tenant".to_string(), ..Default::default() }; let app = application::Application { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), name: "app".to_string(), ..Default::default() }; let dp = device_profile::DeviceProfile { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), name: "dp".to_string(), tags: fields::KeyValue::new( [("dp_tag".to_string(), "dp_value".to_string())] diff --git a/chirpstack/src/maccommand/ping_slot_channel.rs b/chirpstack/src/maccommand/ping_slot_channel.rs 
index eff941ef..d8a6b849 100644 --- a/chirpstack/src/maccommand/ping_slot_channel.rs +++ b/chirpstack/src/maccommand/ping_slot_channel.rs @@ -183,7 +183,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle( diff --git a/chirpstack/src/maccommand/ping_slot_info.rs b/chirpstack/src/maccommand/ping_slot_info.rs index 45dbdc8e..c7a3e4f8 100644 --- a/chirpstack/src/maccommand/ping_slot_info.rs +++ b/chirpstack/src/maccommand/ping_slot_info.rs @@ -37,7 +37,7 @@ pub mod test { #[test] fn test_handle() { let mut dev = device::Device { - device_session: Some(internal::DeviceSession::default()), + device_session: Some(internal::DeviceSession::default().into()), ..Default::default() }; let block = lrwn::MACCommandSet::new(vec![lrwn::MACCommand::PingSlotInfoReq( diff --git a/chirpstack/src/maccommand/rejoin_param_setup.rs b/chirpstack/src/maccommand/rejoin_param_setup.rs index 37a26c6f..de73779a 100644 --- a/chirpstack/src/maccommand/rejoin_param_setup.rs +++ b/chirpstack/src/maccommand/rejoin_param_setup.rs @@ -161,7 +161,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle( diff --git a/chirpstack/src/maccommand/relay_conf.rs b/chirpstack/src/maccommand/relay_conf.rs index 3116efec..bca270e3 100644 --- a/chirpstack/src/maccommand/relay_conf.rs +++ b/chirpstack/src/maccommand/relay_conf.rs @@ -180,7 +180,7 @@ mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle(&mut dev, &tst.relay_conf_ans, tst.relay_conf_req.as_ref()); diff --git a/chirpstack/src/maccommand/reset.rs 
b/chirpstack/src/maccommand/reset.rs index baa54254..e7fcd556 100644 --- a/chirpstack/src/maccommand/reset.rs +++ b/chirpstack/src/maccommand/reset.rs @@ -48,21 +48,24 @@ pub mod test { #[test] fn test_handle() { let mut dev = device::Device { - device_session: Some(internal::DeviceSession { - tx_power_index: 3, - min_supported_tx_power_index: 1, - max_supported_tx_power_index: 5, - extra_uplink_channels: [(3, Default::default())].iter().cloned().collect(), - rx1_delay: 3, - rx1_dr_offset: 1, - rx2_dr: 5, - rx2_frequency: 868900000, - enabled_uplink_channel_indices: vec![0, 1], - class_b_ping_slot_dr: 3, - class_b_ping_slot_freq: 868100000, - nb_trans: 3, - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + tx_power_index: 3, + min_supported_tx_power_index: 1, + max_supported_tx_power_index: 5, + extra_uplink_channels: [(3, Default::default())].iter().cloned().collect(), + rx1_delay: 3, + rx1_dr_offset: 1, + rx2_dr: 5, + rx2_frequency: 868900000, + enabled_uplink_channel_indices: vec![0, 1], + class_b_ping_slot_dr: 3, + class_b_ping_slot_freq: 868100000, + nb_trans: 3, + ..Default::default() + } + .into(), + ), ..Default::default() }; let dp = device_profile::DeviceProfile { diff --git a/chirpstack/src/maccommand/rx_param_setup.rs b/chirpstack/src/maccommand/rx_param_setup.rs index 5620e6d4..eacf81bd 100644 --- a/chirpstack/src/maccommand/rx_param_setup.rs +++ b/chirpstack/src/maccommand/rx_param_setup.rs @@ -184,7 +184,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle( diff --git a/chirpstack/src/maccommand/rx_timing_setup.rs b/chirpstack/src/maccommand/rx_timing_setup.rs index 2b3a2412..e6f1ae6e 100644 --- a/chirpstack/src/maccommand/rx_timing_setup.rs +++ b/chirpstack/src/maccommand/rx_timing_setup.rs @@ -103,7 +103,7 @@ pub mod test { for tst in &tests { let 
mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle( diff --git a/chirpstack/src/maccommand/tx_param_setup.rs b/chirpstack/src/maccommand/tx_param_setup.rs index ec7d3a8d..27affbd7 100644 --- a/chirpstack/src/maccommand/tx_param_setup.rs +++ b/chirpstack/src/maccommand/tx_param_setup.rs @@ -139,7 +139,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; let resp = handle( diff --git a/chirpstack/src/maccommand/update_uplink_list.rs b/chirpstack/src/maccommand/update_uplink_list.rs index 77759b3a..2016f252 100644 --- a/chirpstack/src/maccommand/update_uplink_list.rs +++ b/chirpstack/src/maccommand/update_uplink_list.rs @@ -126,7 +126,7 @@ pub mod test { for tst in &tests { let mut dev = device::Device { - device_session: Some(tst.device_session.clone()), + device_session: Some(tst.device_session.clone().into()), ..Default::default() }; diff --git a/chirpstack/src/storage/api_key.rs b/chirpstack/src/storage/api_key.rs index 51fc68ef..f29ecb45 100644 --- a/chirpstack/src/storage/api_key.rs +++ b/chirpstack/src/storage/api_key.rs @@ -8,16 +8,16 @@ use uuid::Uuid; use super::error::Error; use super::schema::api_key; -use super::{error, get_async_db_conn}; +use super::{error, fields, get_async_db_conn}; #[derive(Queryable, Insertable, PartialEq, Eq, Debug)] #[diesel(table_name = api_key)] pub struct ApiKey { - pub id: Uuid, + pub id: fields::Uuid, pub created_at: DateTime, pub name: String, pub is_admin: bool, - pub tenant_id: Option, + pub tenant_id: Option, } impl ApiKey { @@ -33,7 +33,7 @@ impl ApiKey { impl Default for ApiKey { fn default() -> Self { ApiKey { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), created_at: Utc::now(), name: "".into(), is_admin: false, @@ -61,7 +61,7 @@ pub 
async fn create(ak: ApiKey) -> Result { } pub async fn delete(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(api_key::dsl::api_key.find(&id)) + let ra = diesel::delete(api_key::dsl::api_key.find(fields::Uuid::from(id))) .execute(&mut get_async_db_conn().await?) .await?; if ra == 0 { @@ -78,7 +78,7 @@ pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(api_key::dsl::tenant_id.eq(tenant_id)); + q = q.filter(api_key::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } Ok(q.first(&mut get_async_db_conn().await?).await?) @@ -90,7 +90,7 @@ pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result Result { api_key::dsl::api_key - .find(&id) + .find(fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) .await .map_err(|e| error::Error::from_diesel(e, id.to_string())) @@ -162,7 +162,7 @@ pub mod test { }, FilterTest { filters: Filters { - tenant_id: ak_tenant.tenant_id, + tenant_id: ak_tenant.tenant_id.map(|u| u.into()), is_admin: false, }, keys: vec![&ak_tenant], diff --git a/chirpstack/src/storage/application.rs b/chirpstack/src/storage/application.rs index be911e23..f9a88e66 100644 --- a/chirpstack/src/storage/application.rs +++ b/chirpstack/src/storage/application.rs @@ -4,14 +4,11 @@ use std::str::FromStr; use anyhow::Result; use chrono::{DateTime, Utc}; -use diesel::{ - backend::Backend, - deserialize, dsl, - pg::Pg, - prelude::*, - serialize, - sql_types::{Jsonb, Text}, -}; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; +use diesel::{backend::Backend, deserialize, dsl, prelude::*, serialize, sql_types::Text}; +#[cfg(feature = "postgres")] +use diesel::{pg::Pg, sql_types::Jsonb}; use diesel_async::RunQueryDsl; use serde::{Deserialize, Serialize}; use tracing::info; @@ -24,8 +21,8 @@ use super::{fields, get_async_db_conn}; #[derive(Clone, Queryable, Insertable, PartialEq, Eq, Debug)] #[diesel(table_name = application)] pub struct Application { - 
pub id: Uuid, - pub tenant_id: Uuid, + pub id: fields::Uuid, + pub tenant_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -48,8 +45,8 @@ impl Default for Application { let now = Utc::now(); Application { - id: Uuid::new_v4(), - tenant_id: Uuid::nil(), + id: Uuid::new_v4().into(), + tenant_id: Uuid::nil().into(), created_at: now, updated_at: now, name: "".into(), @@ -68,7 +65,7 @@ pub struct Filters { #[derive(Queryable, PartialEq, Eq, Debug)] pub struct ApplicationListItem { - pub id: Uuid, + pub id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -129,6 +126,7 @@ where } } +#[cfg(feature = "postgres")] impl serialize::ToSql for IntegrationKind where str: serialize::ToSql, @@ -138,8 +136,16 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for IntegrationKind { + fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + #[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow, Serialize, Deserialize)] -#[diesel(sql_type = Jsonb)] +#[diesel(sql_type = fields::sql_types::JsonT)] pub enum IntegrationConfiguration { None, Http(HttpConfiguration), @@ -154,6 +160,7 @@ pub enum IntegrationConfiguration { Ifttt(IftttConfiguration), } +#[cfg(feature = "postgres")] impl deserialize::FromSql for IntegrationConfiguration { fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { let value = >::from_sql(value)?; @@ -161,6 +168,7 @@ impl deserialize::FromSql for IntegrationConfiguration { } } +#[cfg(feature = "postgres")] impl serialize::ToSql for IntegrationConfiguration { fn to_sql(&self, out: &mut serialize::Output<'_, '_, Pg>) -> serialize::Result { let value = serde_json::to_value(self)?; @@ -168,6 +176,23 @@ impl serialize::ToSql for IntegrationConfiguration { } } +#[cfg(feature = "sqlite")] +impl deserialize::FromSql for IntegrationConfiguration { + fn from_sql(value: 
::RawValue<'_>) -> deserialize::Result { + let s = + <*const str as deserialize::FromSql>::from_sql(value)?; + Ok(serde_json::from_str(unsafe { &*s })?) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for IntegrationConfiguration { + fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result { + out.set_value(serde_json::to_string(self)?); + Ok(serialize::IsNull::No) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct HttpConfiguration { pub headers: HashMap, @@ -268,7 +293,7 @@ pub struct IftttConfiguration { #[derive(Clone, Queryable, Insertable, PartialEq, Eq, Debug)] #[diesel(table_name = application_integration)] pub struct Integration { - pub application_id: Uuid, + pub application_id: fields::Uuid, pub kind: IntegrationKind, pub created_at: DateTime, pub updated_at: DateTime, @@ -280,7 +305,7 @@ impl Default for Integration { let now = Utc::now(); Integration { - application_id: Uuid::nil(), + application_id: Uuid::nil().into(), kind: IntegrationKind::Http, created_at: now, updated_at: now, @@ -305,7 +330,7 @@ pub async fn create(a: Application) -> Result { pub async fn get(id: &Uuid) -> Result { let a = application::dsl::application - .find(&id) + .find(fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, id.to_string()))?; @@ -335,11 +360,12 @@ pub async fn update(a: Application) -> Result { } pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result { - let app: Application = diesel::update(application::dsl::application.find(&id)) - .set(application::mqtt_tls_cert.eq(cert)) - .get_result(&mut get_async_db_conn().await?) - .await - .map_err(|e| Error::from_diesel(e, id.to_string()))?; + let app: Application = + diesel::update(application::dsl::application.find(fields::Uuid::from(id))) + .set(application::mqtt_tls_cert.eq(cert)) + .get_result(&mut get_async_db_conn().await?) 
+ .await + .map_err(|e| Error::from_diesel(e, id.to_string()))?; info!( application_id = %id, @@ -350,7 +376,7 @@ pub async fn update_mqtt_cls_cert(id: &Uuid, cert: &[u8]) -> Result Result<(), Error> { - let ra = diesel::delete(application::dsl::application.find(&id)) + let ra = diesel::delete(application::dsl::application.find(fields::Uuid::from(id))) .execute(&mut get_async_db_conn().await?) .await?; if ra == 0 { @@ -371,11 +397,18 @@ pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(application::dsl::tenant_id.eq(tenant_id)); + q = q.filter(application::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } if let Some(search) = &filters.search { - q = q.filter(application::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(application::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(application::dsl::name.like(format!("%{}%", search))); + } } Ok(q.first(&mut get_async_db_conn().await?).await?) 
@@ -397,11 +430,18 @@ pub async fn list( .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(application::dsl::tenant_id.eq(tenant_id)); + q = q.filter(application::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } if let Some(search) = &filters.search { - q = q.filter(application::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(application::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(application::dsl::name.like(format!("%{}%", search))); + } } let items = q @@ -431,7 +471,7 @@ pub async fn get_integration( let mut i: Integration = application_integration::dsl::application_integration .filter( application_integration::dsl::application_id - .eq(application_id) + .eq(fields::Uuid::from(application_id)) .and(application_integration::dsl::kind.eq(kind)), ) .first(&mut get_async_db_conn().await?) @@ -478,7 +518,7 @@ pub async fn delete_integration(application_id: &Uuid, kind: IntegrationKind) -> let ra = diesel::delete( application_integration::dsl::application_integration.filter( application_integration::dsl::application_id - .eq(&application_id) + .eq(fields::Uuid::from(application_id)) .and(application_integration::dsl::kind.eq(&kind)), ), ) @@ -497,20 +537,21 @@ pub async fn get_integrations_for_application( application_id: &Uuid, ) -> Result, Error> { let items: Vec = application_integration::dsl::application_integration - .filter(application_integration::dsl::application_id.eq(&application_id)) + .filter(application_integration::dsl::application_id.eq(fields::Uuid::from(application_id))) .order_by(application_integration::dsl::kind) .load(&mut get_async_db_conn().await?) 
.await?; Ok(items) } -pub async fn get_measurement_keys(application_id: &Uuid) -> Result, Error> { - #[derive(QueryableByName)] - struct Measurement { - #[diesel(sql_type = diesel::sql_types::Text)] - pub key: String, - } +#[derive(QueryableByName)] +struct Measurement { + #[diesel(sql_type = diesel::sql_types::Text)] + pub key: String, +} +#[cfg(feature = "postgres")] +pub async fn get_measurement_keys(application_id: &Uuid) -> Result, Error> { let keys: Vec = diesel::sql_query( r#" select @@ -525,7 +566,28 @@ pub async fn get_measurement_keys(application_id: &Uuid) -> Result, key "#, ) - .bind::(application_id) + .bind::(fields::Uuid::from(application_id)) + .load(&mut get_async_db_conn().await?) + .await + .map_err(|e| Error::from_diesel(e, application_id.to_string()))?; + Ok(keys.iter().map(|k| k.key.clone()).collect()) +} + +#[cfg(feature = "sqlite")] +pub async fn get_measurement_keys(application_id: &Uuid) -> Result, Error> { + let keys: Vec = diesel::sql_query( + r#" + select distinct json_each.key as key + from device_profile dp, json_each(dp.measurements) + inner join device d + on d.device_profile_id = dp.id + where + d.application_id = ? + order by + key + "#, + ) + .bind::(fields::Uuid::from(application_id)) .load(&mut get_async_db_conn().await?) 
.await .map_err(|e| Error::from_diesel(e, application_id.to_string()))?; @@ -548,7 +610,7 @@ pub mod test { pub async fn create_application(tenant_id: Option) -> Application { let tenant_id = match tenant_id { - Some(v) => v, + Some(v) => v.into(), None => { let t = storage::tenant::test::create_tenant().await; t.id @@ -623,7 +685,7 @@ pub mod test { }, FilterTest { filters: Filters { - tenant_id: Some(app.tenant_id), + tenant_id: Some(app.tenant_id.into()), search: None, }, apps: vec![&app], diff --git a/chirpstack/src/storage/device.rs b/chirpstack/src/storage/device.rs index b7729726..9f283301 100644 --- a/chirpstack/src/storage/device.rs +++ b/chirpstack/src/storage/device.rs @@ -1,9 +1,9 @@ use std::collections::HashMap; use std::fmt; +use std::ops::{Deref, DerefMut}; use std::str::FromStr; use anyhow::{Context, Result}; -use bigdecimal::BigDecimal; use chrono::{DateTime, Duration, Utc}; use diesel::{backend::Backend, deserialize, dsl, prelude::*, serialize, sql_types::Text}; use diesel_async::RunQueryDsl; @@ -14,7 +14,7 @@ use chirpstack_api::internal; use lrwn::{DevAddr, EUI64}; use super::schema::{application, device, device_profile, multicast_group_device, tenant}; -use super::{error::Error, fields, get_async_db_conn}; +use super::{db_transaction, error::Error, fields, get_async_db_conn}; use crate::api::helpers::FromProto; use crate::config; @@ -62,6 +62,7 @@ where } } +#[cfg(feature = "postgres")] impl serialize::ToSql for DeviceClass where str: serialize::ToSql, @@ -77,12 +78,23 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for DeviceClass { + fn to_sql( + &self, + out: &mut serialize::Output<'_, '_, diesel::sqlite::Sqlite>, + ) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + #[derive(Queryable, QueryableByName, Insertable, PartialEq, Debug, Clone)] #[diesel(table_name = device)] pub struct Device { pub dev_eui: EUI64, - pub application_id: Uuid, - pub device_profile_id: Uuid, + pub 
application_id: fields::Uuid, + pub device_profile_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub last_seen_at: Option>, @@ -90,7 +102,7 @@ pub struct Device { pub name: String, pub description: String, pub external_power_source: bool, - pub battery_level: Option, + pub battery_level: Option, pub margin: Option, pub dr: Option, pub latitude: Option, @@ -104,7 +116,7 @@ pub struct Device { pub variables: fields::KeyValue, pub join_eui: EUI64, pub secondary_dev_addr: Option, - pub device_session: Option, + pub device_session: Option, } #[derive(AsChangeset, Debug, Clone, Default)] @@ -116,10 +128,10 @@ pub struct DeviceChangeset { pub enabled_class: Option, pub join_eui: Option, pub secondary_dev_addr: Option>, - pub device_session: Option>, + pub device_session: Option>, pub margin: Option, pub external_power_source: Option, - pub battery_level: Option>, + pub battery_level: Option>, pub scheduler_run_after: Option>>, pub is_disabled: Option, } @@ -135,12 +147,14 @@ impl Device { pub fn get_device_session(&self) -> Result<&internal::DeviceSession, Error> { self.device_session .as_ref() + .map(|ds| ds.deref()) .ok_or_else(|| Error::NotFound(self.dev_eui.to_string())) } pub fn get_device_session_mut(&mut self) -> Result<&mut internal::DeviceSession, Error> { self.device_session .as_mut() + .map(|ds| ds.deref_mut()) .ok_or_else(|| Error::NotFound(self.dev_eui.to_string())) } @@ -155,8 +169,8 @@ impl Default for Device { Device { dev_eui: EUI64::default(), - application_id: Uuid::nil(), - device_profile_id: Uuid::nil(), + application_id: Uuid::nil().into(), + device_profile_id: Uuid::nil().into(), created_at: now, updated_at: now, last_seen_at: None, @@ -188,14 +202,14 @@ pub struct DeviceListItem { pub dev_eui: EUI64, pub name: String, pub description: String, - pub device_profile_id: Uuid, + pub device_profile_id: fields::Uuid, pub device_profile_name: String, pub created_at: DateTime, pub updated_at: DateTime, pub last_seen_at: Option>, pub 
margin: Option, pub external_power_source: bool, - pub battery_level: Option, + pub battery_level: Option, } #[derive(Default, Clone)] @@ -223,52 +237,50 @@ pub struct DevicesDataRate { pub async fn create(d: Device) -> Result { let mut c = get_async_db_conn().await?; - let d: Device = c - .build_transaction() - .run::(|c| { - Box::pin(async move { - // use for update to lock the tenant - let t: super::tenant::Tenant = tenant::dsl::tenant - .select(( - tenant::dsl::id, - tenant::dsl::created_at, - tenant::dsl::updated_at, - tenant::dsl::name, - tenant::dsl::description, - tenant::dsl::can_have_gateways, - tenant::dsl::max_device_count, - tenant::dsl::max_gateway_count, - tenant::dsl::private_gateways_up, - tenant::dsl::private_gateways_down, - tenant::dsl::tags, - )) - .inner_join(application::table) - .filter(application::dsl::id.eq(&d.application_id)) - .for_update() - .first(c) - .await?; + let d: Device = db_transaction::(&mut c, |c| { + Box::pin(async move { + let query = tenant::dsl::tenant + .select(( + tenant::dsl::id, + tenant::dsl::created_at, + tenant::dsl::updated_at, + tenant::dsl::name, + tenant::dsl::description, + tenant::dsl::can_have_gateways, + tenant::dsl::max_device_count, + tenant::dsl::max_gateway_count, + tenant::dsl::private_gateways_up, + tenant::dsl::private_gateways_down, + tenant::dsl::tags, + )) + .inner_join(application::table) + .filter(application::dsl::id.eq(&d.application_id)); + // use for update to lock the tenant + #[cfg(feature = "postgres")] + let query = query.for_update(); + let t: super::tenant::Tenant = query.first(c).await?; - let dev_count: i64 = device::dsl::device - .select(dsl::count_star()) - .inner_join(application::table) - .filter(application::dsl::tenant_id.eq(&t.id)) - .first(c) - .await?; + let dev_count: i64 = device::dsl::device + .select(dsl::count_star()) + .inner_join(application::table) + .filter(application::dsl::tenant_id.eq(&t.id)) + .first(c) + .await?; - if t.max_device_count != 0 && dev_count as 
i32 >= t.max_device_count { - return Err(Error::NotAllowed( - "Max number of devices exceeded for tenant".into(), - )); - } + if t.max_device_count != 0 && dev_count as i32 >= t.max_device_count { + return Err(Error::NotAllowed( + "Max number of devices exceeded for tenant".into(), + )); + } - diesel::insert_into(device::table) - .values(&d) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, d.dev_eui.to_string())) - }) + diesel::insert_into(device::table) + .values(&d) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, d.dev_eui.to_string())) }) - .await?; + }) + .await?; info!(dev_eui = %d.dev_eui, "Device created"); Ok(d) } @@ -304,130 +316,129 @@ pub async fn get_for_phypayload_and_incr_f_cnt_up( let mut c = get_async_db_conn().await?; - c.build_transaction() - .run::(|c| { - Box::pin(async move { - let mut devices: Vec = device::dsl::device - .filter( - device::dsl::dev_addr - .eq(&dev_addr) - .or(device::dsl::secondary_dev_addr.eq(&dev_addr)), - ) - .filter(device::dsl::is_disabled.eq(false)) - .for_update() - .load(c) - .await?; + db_transaction::(&mut c, |c| { + Box::pin(async move { + let query = device::dsl::device + .filter( + device::dsl::dev_addr + .eq(&dev_addr) + .or(device::dsl::secondary_dev_addr.eq(&dev_addr)), + ) + .filter(device::dsl::is_disabled.eq(false)); + #[cfg(feature = "postgres")] + let query = query.for_update(); + let mut devices: Vec = query.load(c).await?; - if devices.is_empty() { - return Err(Error::NotFound(dev_addr.to_string())); + if devices.is_empty() { + return Err(Error::NotFound(dev_addr.to_string())); + } + + for d in &mut devices { + let mut sessions = vec![]; + + if let Some(ds) = &d.device_session { + sessions.push(ds.clone()); + if let Some(ds) = &ds.pending_rejoin_device_session { + sessions.push(ds.as_ref().into()); + } } - for d in &mut devices { - let mut sessions = vec![]; - - if let Some(ds) = &d.device_session { - sessions.push(ds.clone()); - if let Some(ds) = 
&ds.pending_rejoin_device_session { - sessions.push(*ds.clone()); - } + for ds in &mut sessions { + if ds.dev_addr != dev_addr.to_vec() { + continue; } - for ds in &mut sessions { - if ds.dev_addr != dev_addr.to_vec() { - continue; + // Get the full 32bit frame-counter. + let full_f_cnt = get_full_f_cnt_up(ds.f_cnt_up, f_cnt_orig); + let f_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.f_nwk_s_int_key)?; + let s_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.s_nwk_s_int_key)?; + + // Check both the full frame-counter and the received frame-counter + // truncated to the 16LSB. + // The latter is needed in case of a frame-counter reset as the + // GetFullFCntUp will think the 16LSB has rolled over and will + // increment the 16MSB bit. + let mut mic_ok = false; + for f_cnt in [full_f_cnt, f_cnt_orig] { + // Set the full f_cnt. + if let lrwn::Payload::MACPayload(pl) = &mut phy.payload { + pl.fhdr.f_cnt = f_cnt; } - // Get the full 32bit frame-counter. - let full_f_cnt = get_full_f_cnt_up(ds.f_cnt_up, f_cnt_orig); - let f_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.f_nwk_s_int_key)?; - let s_nwk_s_int_key = lrwn::AES128Key::from_slice(&ds.s_nwk_s_int_key)?; - - // Check both the full frame-counter and the received frame-counter - // truncated to the 16LSB. - // The latter is needed in case of a frame-counter reset as the - // GetFullFCntUp will think the 16LSB has rolled over and will - // increment the 16MSB bit. - let mut mic_ok = false; - for f_cnt in [full_f_cnt, f_cnt_orig] { - // Set the full f_cnt. 
- if let lrwn::Payload::MACPayload(pl) = &mut phy.payload { - pl.fhdr.f_cnt = f_cnt; - } - - mic_ok = phy - .validate_uplink_data_mic( - ds.mac_version().from_proto(), - ds.conf_f_cnt, - tx_dr, - tx_ch, - &f_nwk_s_int_key, - &s_nwk_s_int_key, - ) - .context("Validate MIC")?; - - if mic_ok { - break; - } - } + mic_ok = phy + .validate_uplink_data_mic( + ds.mac_version().from_proto(), + ds.conf_f_cnt, + tx_dr, + tx_ch, + &f_nwk_s_int_key, + &s_nwk_s_int_key, + ) + .context("Validate MIC")?; if mic_ok { - let full_f_cnt = if let lrwn::Payload::MACPayload(pl) = &phy.payload { - pl.fhdr.f_cnt - } else { - 0 - }; - - if let Some(relay) = &ds.relay { - if !relayed && relay.ed_relay_only { - info!( - dev_eui = %d.dev_eui, - "Only communication through relay is allowed" - ); - return Err(Error::NotFound(dev_addr.to_string())); - } - } - - if full_f_cnt >= ds.f_cnt_up { - // We immediately save the device-session to make sure that concurrent calls for - // the same uplink will fail on the frame-counter validation. - let ds_f_cnt_up = ds.f_cnt_up; - ds.f_cnt_up = full_f_cnt + 1; - - let _ = diesel::update(device::dsl::device.find(d.dev_eui)) - .set(device::device_session.eq(&ds.clone())) - .execute(c) - .await?; - - // We do return the device-session with original frame-counter - ds.f_cnt_up = ds_f_cnt_up; - d.device_session = Some(ds.clone()); - return Ok(ValidationStatus::Ok(full_f_cnt, d.clone())); - } else if ds.skip_f_cnt_check { - // re-transmission or frame-counter reset - ds.f_cnt_up = 0; - d.device_session = Some(ds.clone()); - return Ok(ValidationStatus::Ok(full_f_cnt, d.clone())); - } else if full_f_cnt == (ds.f_cnt_up - 1) { - // re-transmission, the frame-counter did not increment - d.device_session = Some(ds.clone()); - return Ok(ValidationStatus::Retransmission(full_f_cnt, d.clone())); - } else { - d.device_session = Some(ds.clone()); - return Ok(ValidationStatus::Reset(full_f_cnt, d.clone())); - } - } - - // Restore the original f_cnt. 
- if let lrwn::Payload::MACPayload(pl) = &mut phy.payload { - pl.fhdr.f_cnt = f_cnt_orig; + break; } } - } - Err(Error::InvalidMIC) - }) + if mic_ok { + let full_f_cnt = if let lrwn::Payload::MACPayload(pl) = &phy.payload { + pl.fhdr.f_cnt + } else { + 0 + }; + + if let Some(relay) = &ds.relay { + if !relayed && relay.ed_relay_only { + info!( + dev_eui = %d.dev_eui, + "Only communication through relay is allowed" + ); + return Err(Error::NotFound(dev_addr.to_string())); + } + } + + if full_f_cnt >= ds.f_cnt_up { + // We immediately save the device-session to make sure that concurrent calls for + // the same uplink will fail on the frame-counter validation. + let ds_f_cnt_up = ds.f_cnt_up; + ds.f_cnt_up = full_f_cnt + 1; + + let _ = diesel::update(device::dsl::device.find(d.dev_eui)) + .set(device::device_session.eq(&ds.clone())) + .execute(c) + .await?; + + // We do return the device-session with original frame-counter + ds.f_cnt_up = ds_f_cnt_up; + d.device_session = Some(ds.clone()); + return Ok(ValidationStatus::Ok(full_f_cnt, d.clone())); + } else if ds.skip_f_cnt_check { + // re-transmission or frame-counter reset + ds.f_cnt_up = 0; + d.device_session = Some(ds.clone()); + return Ok(ValidationStatus::Ok(full_f_cnt, d.clone())); + } else if full_f_cnt == (ds.f_cnt_up - 1) { + // re-transmission, the frame-counter did not increment + d.device_session = Some(ds.clone()); + return Ok(ValidationStatus::Retransmission(full_f_cnt, d.clone())); + } else { + d.device_session = Some(ds.clone()); + return Ok(ValidationStatus::Reset(full_f_cnt, d.clone())); + } + } + + // Restore the original f_cnt. 
+ if let lrwn::Payload::MACPayload(pl) = &mut phy.payload { + pl.fhdr.f_cnt = f_cnt_orig; + } + } + } + + Err(Error::InvalidMIC) }) - .await + }) + .await } pub async fn get_for_phypayload( @@ -462,7 +473,7 @@ pub async fn get_for_phypayload( if let Some(ds) = &d.device_session { sessions.push(ds.clone()); if let Some(ds) = &ds.pending_rejoin_device_session { - sessions.push(*ds.clone()); + sessions.push(ds.as_ref().into()); } } @@ -559,15 +570,25 @@ pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(application_id) = &filters.application_id { - q = q.filter(device::dsl::application_id.eq(application_id)); + q = q.filter(device::dsl::application_id.eq(fields::Uuid::from(application_id))); } if let Some(search) = &filters.search { - q = q.filter(device::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(device::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(device::dsl::name.like(format!("%{}%", search))); + } } if let Some(multicast_group_id) = &filters.multicast_group_id { - q = q.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id)); + q = q.filter( + multicast_group_device::dsl::multicast_group_id + .eq(fields::Uuid::from(multicast_group_id)), + ); } Ok(q.first(&mut get_async_db_conn().await?).await?) 
@@ -598,15 +619,25 @@ pub async fn list( .into_boxed(); if let Some(application_id) = &filters.application_id { - q = q.filter(device::dsl::application_id.eq(application_id)); + q = q.filter(device::dsl::application_id.eq(fields::Uuid::from(application_id))); } if let Some(search) = &filters.search { - q = q.filter(device::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(device::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(device::dsl::name.like(format!("%{}%", search))); + } } if let Some(multicast_group_id) = &filters.multicast_group_id { - q = q.filter(multicast_group_device::dsl::multicast_group_id.eq(multicast_group_id)); + q = q.filter( + multicast_group_device::dsl::multicast_group_id + .eq(fields::Uuid::from(multicast_group_id)), + ); } q.order_by(device::dsl::name) @@ -617,6 +648,7 @@ pub async fn list( .map_err(|e| Error::from_diesel(e, "".into())) } +#[cfg(feature = "postgres")] pub async fn get_active_inactive(tenant_id: &Option) -> Result { diesel::sql_query(r#" with device_active_inactive as ( @@ -637,11 +669,43 @@ pub async fn get_active_inactive(tenant_id: &Option) -> Result, _>(tenant_id) + .bind::, _>(tenant_id.map(fields::Uuid::from)) .get_result(&mut get_async_db_conn().await?).await .map_err(|e| Error::from_diesel(e, "".into())) } +#[cfg(feature = "sqlite")] +pub async fn get_active_inactive(tenant_id: &Option) -> Result { + diesel::sql_query( + r#" + with device_active_inactive as ( + select + dp.uplink_interval * 1.5 as uplink_interval, + d.last_seen_at as last_seen_at, + (unixepoch('now') - unixepoch(last_seen_at)) as not_seen_duration + from + device d + inner join device_profile dp + on d.device_profile_id = dp.id + where + ?1 is null or dp.tenant_id = ?1 + ) + select + coalesce(sum(case when last_seen_at is null then 1 end), 0) as never_seen_count, + coalesce(sum(case when not_seen_duration > uplink_interval then 1 end), 0) as inactive_count, + 
coalesce(sum(case when not_seen_duration <= uplink_interval then 1 end), 0) as active_count + from + device_active_inactive + "#, + ) + .bind::, _>( + tenant_id.map(fields::Uuid::from), + ) + .get_result(&mut get_async_db_conn().await?) + .await + .map_err(|e| Error::from_diesel(e, "".into())) +} + pub async fn get_data_rates(tenant_id: &Option) -> Result, Error> { let mut q = device::dsl::device .inner_join(device_profile::table) @@ -655,7 +719,7 @@ pub async fn get_data_rates(tenant_id: &Option) -> Result) -> Result Result> { let mut c = get_async_db_conn().await?; - c.build_transaction() - .run::, Error, _>(|c| { - Box::pin(async { - let conf = config::get(); + db_transaction::, Error, _>(&mut c, |c| { + Box::pin(async { + let conf = config::get(); - // This query will: - // * Select the devices for which a Class-B or Class-C downlink can be scheduled. - // * Lock the device records for update with skip locked such that other - // ChirpStack instances are able to do the same for the remaining devices. - // * Update the scheduler_run_after for these devices to now() + 2 * scheduler - // interval to avoid concurrency issues (other ChirpStack instance scheduling - // the same queue items). - // - // This way, we do not have to keep the device records locked until the scheduler - // finishes its batch as the same set of devices will not be returned until after - // the updated scheduler_run_after. Only if the scheduler takes more time than 2x the - // interval (the scheduler is still working on processing the batch after 2 x interval) - // this might cause issues. - // The alternative would be to keep the transaction open for a long time + keep - // the device records locked during this time which could case issues as well. - diesel::sql_query( - r#" + // This query will: + // * Select the devices for which a Class-B or Class-C downlink can be scheduled. 
+ // * Lock the device records for update with skip locked such that other + // ChirpStack instances are able to do the same for the remaining devices. + // * Update the scheduler_run_after for these devices to now() + 2 * scheduler + // interval to avoid concurrency issues (other ChirpStack instance scheduling + // the same queue items). + // + // This way, we do not have to keep the device records locked until the scheduler + // finishes its batch as the same set of devices will not be returned until after + // the updated scheduler_run_after. Only if the scheduler takes more time than 2x the + // interval (the scheduler is still working on processing the batch after 2 x interval) + // this might cause issues. + // The alternative would be to keep the transaction open for a long time + keep + // the device records locked during this time which could cause issues as well. + diesel::sql_query(if cfg!(feature = "sqlite") { + r#" + update + device + set + scheduler_run_after = ?3 + where + dev_eui in ( + select + d.dev_eui + from + device d + where + d.enabled_class in ('B', 'C') + and (d.scheduler_run_after is null or d.scheduler_run_after < ?2) + and d.is_disabled = FALSE + and exists ( + select + 1 + from + device_queue_item dq + where + dq.dev_eui = d.dev_eui + and not ( + -- pending queue-item with timeout_after in the future + (dq.is_pending = true and dq.timeout_after > ?2) + ) + ) + order by d.dev_eui + limit ?1 + ) + returning * + "# + } else { + r#" @@ -718,20 +814,20 @@ pub async fn get_with_class_b_c_queue_items(limit: usize) -> Result> for update skip locked ) returning * - "#, - ) - .bind::(limit as i32) - .bind::(Utc::now()) - .bind::( - Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(), - ) - .load(c) - .await - .map_err(|e| Error::from_diesel(e, "".into())) + "# }) + .bind::(limit as i32) + .bind::(Utc::now()) + .bind::( + Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(), + ) + 
.load(c) + .await + .map_err(|e| Error::from_diesel(e, "".into())) }) - .await - .context("Get with Class B/C queue-items transaction") + }) + .await + .context("Get with Class B/C queue-items transaction") } // GetFullFCntUp returns the full 32bit frame-counter, given the fCntUp which @@ -786,9 +882,10 @@ pub mod test { }; let application_id = match application_id { - Some(v) => v, + Some(v) => v.into(), None => { - let a = storage::application::test::create_application(Some(tenant_id)).await; + let a = + storage::application::test::create_application(Some(tenant_id.into())).await; a.id } }; @@ -797,7 +894,7 @@ pub mod test { name: "test-dev".into(), dev_eui, application_id, - device_profile_id, + device_profile_id: device_profile_id.into(), ..Default::default() }; @@ -808,8 +905,12 @@ pub mod test { async fn test_device() { let _guard = test::prepare().await; let dp = storage::device_profile::test::create_device_profile(None).await; - let mut d = - create_device(EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), dp.id, None).await; + let mut d = create_device( + EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), + dp.id.into(), + None, + ) + .await; // get let d_get = get(&d.dev_eui).await.unwrap(); @@ -858,7 +959,7 @@ pub mod test { }, FilterTest { filters: Filters { - application_id: Some(d.application_id), + application_id: Some(d.application_id.into()), multicast_group_id: None, search: None, }, @@ -906,7 +1007,12 @@ pub mod test { async fn test_get_with_class_b_c_queue_items() { let _guard = test::prepare().await; let dp = storage::device_profile::test::create_device_profile(None).await; - let d = create_device(EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), dp.id, None).await; + let d = create_device( + EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), + dp.id.into(), + None, + ) + .await; // nothing in the queue let res = get_with_class_b_c_queue_items(10).await.unwrap(); @@ -1057,24 +1163,27 @@ pub mod test { name: "0101010101010101".into(), dev_eui: 
EUI64::from_be_bytes([1, 1, 1, 1, 1, 1, 1, 1]), dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])), - device_session: Some(internal::DeviceSession { - dev_addr: vec![0x01, 0x02, 0x03, 0x04], - s_nwk_s_int_key: vec![ - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, - ], - f_nwk_s_int_key: vec![ - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, - ], - nwk_s_enc_key: vec![ - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, - ], - f_cnt_up: 100, - skip_f_cnt_check: true, - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + dev_addr: vec![0x01, 0x02, 0x03, 0x04], + s_nwk_s_int_key: vec![ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, + ], + f_nwk_s_int_key: vec![ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, + ], + nwk_s_enc_key: vec![ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, + ], + f_cnt_up: 100, + skip_f_cnt_check: true, + ..Default::default() + } + .into(), + ), ..Default::default() }, Device { @@ -1083,23 +1192,26 @@ pub mod test { name: "0202020202020202".into(), dev_eui: EUI64::from_be_bytes([2, 2, 2, 2, 2, 2, 2, 2]), dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])), - device_session: Some(internal::DeviceSession { - dev_addr: vec![0x01, 0x02, 0x03, 0x04], - s_nwk_s_int_key: vec![ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, - ], - f_nwk_s_int_key: vec![ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, - ], - nwk_s_enc_key: vec![ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, - ], - f_cnt_up: 200, - ..Default::default() - }), + device_session: Some( + 
internal::DeviceSession { + dev_addr: vec![0x01, 0x02, 0x03, 0x04], + s_nwk_s_int_key: vec![ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, + ], + f_nwk_s_int_key: vec![ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, + ], + nwk_s_enc_key: vec![ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, + ], + f_cnt_up: 200, + ..Default::default() + } + .into(), + ), ..Default::default() }, Device { @@ -1109,40 +1221,43 @@ pub mod test { dev_eui: EUI64::from_be_bytes([3, 3, 3, 3, 3, 3, 3, 3]), dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])), secondary_dev_addr: Some(DevAddr::from_be_bytes([4, 3, 2, 1])), - device_session: Some(internal::DeviceSession { - dev_addr: vec![0x01, 0x02, 0x03, 0x04], - s_nwk_s_int_key: vec![ - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - 0x03, 0x03, 0x03, 0x03, - ], - f_nwk_s_int_key: vec![ - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - 0x03, 0x03, 0x03, 0x03, - ], - nwk_s_enc_key: vec![ - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - 0x03, 0x03, 0x03, 0x03, - ], - f_cnt_up: 300, - pending_rejoin_device_session: Some(Box::new(internal::DeviceSession { - dev_addr: vec![0x04, 0x03, 0x02, 0x01], + device_session: Some( + internal::DeviceSession { + dev_addr: vec![0x01, 0x02, 0x03, 0x04], s_nwk_s_int_key: vec![ - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, - 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, ], f_nwk_s_int_key: vec![ - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, - 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, ], nwk_s_enc_key: vec![ - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, - 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, ], - f_cnt_up: 0, + f_cnt_up: 300, + pending_rejoin_device_session: Some(Box::new(internal::DeviceSession { + dev_addr: vec![0x04, 0x03, 0x02, 0x01], + s_nwk_s_int_key: vec![ + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, + ], + f_nwk_s_int_key: vec![ + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, + ], + nwk_s_enc_key: vec![ + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, + ], + f_cnt_up: 0, + ..Default::default() + })), ..Default::default() - })), - ..Default::default() - }), + } + .into(), + ), ..Default::default() }, Device { @@ -1151,23 +1266,26 @@ pub mod test { name: "0505050505050505".into(), dev_eui: EUI64::from_be_bytes([5, 5, 5, 5, 5, 5, 5, 5]), dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])), - device_session: Some(internal::DeviceSession { - dev_addr: vec![0x01, 0x02, 0x03, 0x04], - s_nwk_s_int_key: vec![ - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, - ], - f_nwk_s_int_key: vec![ - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, - ], - nwk_s_enc_key: vec![ - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, - ], - f_cnt_up: (1 << 16) + 1, - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + dev_addr: vec![0x01, 0x02, 0x03, 0x04], + s_nwk_s_int_key: vec![ + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, + ], + f_nwk_s_int_key: vec![ + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, + ], + nwk_s_enc_key: vec![ + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 
0x05, + 0x05, 0x05, 0x05, 0x05, + ], + f_cnt_up: (1 << 16) + 1, + ..Default::default() + } + .into(), + ), ..Default::default() }, ]; diff --git a/chirpstack/src/storage/device_keys.rs b/chirpstack/src/storage/device_keys.rs index 682c5422..9a506fdd 100644 --- a/chirpstack/src/storage/device_keys.rs +++ b/chirpstack/src/storage/device_keys.rs @@ -7,8 +7,8 @@ use tracing::info; use lrwn::{AES128Key, EUI64}; use super::error::Error; -use super::get_async_db_conn; use super::schema::device_keys; +use super::{db_transaction, fields, get_async_db_conn}; #[derive(Queryable, Insertable, AsChangeset, PartialEq, Eq, Debug, Clone)] #[diesel(table_name = device_keys)] @@ -18,7 +18,7 @@ pub struct DeviceKeys { pub updated_at: DateTime, pub nwk_key: AES128Key, pub app_key: AES128Key, - pub dev_nonces: Vec>, + pub dev_nonces: fields::DevNonces, pub join_nonce: i32, } @@ -38,7 +38,7 @@ impl Default for DeviceKeys { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]), - dev_nonces: Vec::new(), + dev_nonces: Vec::new().into(), join_nonce: 0, } } @@ -94,8 +94,9 @@ pub async fn delete(dev_eui: &EUI64) -> Result<(), Error> { } pub async fn set_dev_nonces(dev_eui: &EUI64, nonces: &[i32]) -> Result { + let nonces: Vec> = nonces.iter().map(|v| Some(*v)).collect(); let dk: DeviceKeys = diesel::update(device_keys::dsl::device_keys.find(dev_eui)) - .set(device_keys::dev_nonces.eq(nonces)) + .set(device_keys::dev_nonces.eq(fields::DevNonces::from(nonces))) .get_result(&mut get_async_db_conn().await?) 
.await .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; @@ -111,36 +112,35 @@ pub async fn validate_incr_join_and_store_dev_nonce( dev_nonce: i32, ) -> Result { let mut c = get_async_db_conn().await?; - let dk: DeviceKeys = c - .build_transaction() - .run::(|c| { - Box::pin(async move { - let mut dk: DeviceKeys = device_keys::dsl::device_keys - .find(&dev_eui) - .for_update() - .first(c) - .await - .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; + let dk: DeviceKeys = db_transaction::(&mut c, |c| { + Box::pin(async move { + let query = device_keys::dsl::device_keys.find(&dev_eui); + #[cfg(feature = "postgres")] + let query = query.for_update(); + let mut dk: DeviceKeys = query + .first(c) + .await + .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; - if dk.dev_nonces.contains(&(Some(dev_nonce))) { - return Err(Error::InvalidDevNonce); - } + if dk.dev_nonces.contains(&(Some(dev_nonce))) { + return Err(Error::InvalidDevNonce); + } - dk.dev_nonces.push(Some(dev_nonce)); - dk.join_nonce += 1; + dk.dev_nonces.push(Some(dev_nonce)); + dk.join_nonce += 1; - diesel::update(device_keys::dsl::device_keys.find(&dev_eui)) - .set(( - device_keys::updated_at.eq(Utc::now()), - device_keys::dev_nonces.eq(&dk.dev_nonces), - device_keys::join_nonce.eq(&dk.join_nonce), - )) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, dev_eui.to_string())) - }) + diesel::update(device_keys::dsl::device_keys.find(&dev_eui)) + .set(( + device_keys::updated_at.eq(Utc::now()), + device_keys::dev_nonces.eq(&dk.dev_nonces), + device_keys::join_nonce.eq(&dk.join_nonce), + )) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, dev_eui.to_string())) }) - .await?; + }) + .await?; info!(dev_eui = %dev_eui, dev_nonce = dev_nonce, "Device-nonce validated, join-nonce incremented and stored"); Ok(dk) @@ -155,7 +155,7 @@ pub mod test { pub async fn reset_nonces(dev_eui: &EUI64) -> Result { let dk: DeviceKeys = 
diesel::update(device_keys::dsl::device_keys.find(&dev_eui)) .set(( - device_keys::dev_nonces.eq::>(Vec::new()), + device_keys::dev_nonces.eq(fields::DevNonces::from(Vec::new())), device_keys::join_nonce.eq(0), )) .get_result(&mut get_async_db_conn().await?) diff --git a/chirpstack/src/storage/device_profile.rs b/chirpstack/src/storage/device_profile.rs index a4792b29..dc91d3ba 100644 --- a/chirpstack/src/storage/device_profile.rs +++ b/chirpstack/src/storage/device_profile.rs @@ -19,8 +19,8 @@ use chirpstack_api::internal; #[derive(Clone, Queryable, Insertable, Debug, PartialEq, Eq)] #[diesel(table_name = device_profile)] pub struct DeviceProfile { - pub id: Uuid, - pub tenant_id: Uuid, + pub id: fields::Uuid, + pub tenant_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -95,8 +95,8 @@ impl Default for DeviceProfile { let now = Utc::now(); DeviceProfile { - id: Uuid::new_v4(), - tenant_id: Uuid::nil(), + id: Uuid::new_v4().into(), + tenant_id: Uuid::nil().into(), created_at: now, updated_at: now, name: "".into(), @@ -185,7 +185,7 @@ impl DeviceProfile { #[derive(Queryable, PartialEq, Eq, Debug)] pub struct DeviceProfileListItem { - pub id: Uuid, + pub id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -217,7 +217,7 @@ pub async fn create(dp: DeviceProfile) -> Result { pub async fn get(id: &Uuid) -> Result { let dp = device_profile::dsl::device_profile - .find(&id) + .find(&fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) .await .map_err(|e| error::Error::from_diesel(e, id.to_string()))?; @@ -297,17 +297,18 @@ pub async fn update(dp: DeviceProfile) -> Result { } pub async fn set_measurements(id: Uuid, m: &fields::Measurements) -> Result { - let dp: DeviceProfile = diesel::update(device_profile::dsl::device_profile.find(&id)) - .set(device_profile::measurements.eq(m)) - .get_result(&mut get_async_db_conn().await?) 
- .await - .map_err(|e| Error::from_diesel(e, id.to_string()))?; + let dp: DeviceProfile = + diesel::update(device_profile::dsl::device_profile.find(&fields::Uuid::from(id))) + .set(device_profile::measurements.eq(m)) + .get_result(&mut get_async_db_conn().await?) + .await + .map_err(|e| Error::from_diesel(e, id.to_string()))?; info!(id = %id, "Device-profile measurements updated"); Ok(dp) } pub async fn delete(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(device_profile::dsl::device_profile.find(&id)) + let ra = diesel::delete(device_profile::dsl::device_profile.find(&fields::Uuid::from(id))) .execute(&mut get_async_db_conn().await?) .await?; if ra == 0 { @@ -323,11 +324,18 @@ pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(device_profile::dsl::tenant_id.eq(tenant_id)); + q = q.filter(device_profile::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } if let Some(search) = &filters.search { - q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(device_profile::dsl::name.like(format!("%{}%", search))); + } } Ok(q.first(&mut get_async_db_conn().await?).await?) 
@@ -354,11 +362,18 @@ pub async fn list( .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(device_profile::dsl::tenant_id.eq(tenant_id)); + q = q.filter(device_profile::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } if let Some(search) = &filters.search { - q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(device_profile::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(device_profile::dsl::name.like(format!("%{}%", search))); + } } let items = q @@ -386,7 +401,7 @@ pub mod test { pub async fn create_device_profile(tenant_id: Option) -> DeviceProfile { let tenant_id = match tenant_id { - Some(v) => v, + Some(v) => v.into(), None => { let t = storage::tenant::test::create_tenant().await; t.id @@ -462,7 +477,7 @@ pub mod test { }, FilterTest { filters: Filters { - tenant_id: Some(dp.tenant_id), + tenant_id: Some(dp.tenant_id.into()), search: None, }, dps: vec![&dp], diff --git a/chirpstack/src/storage/device_queue.rs b/chirpstack/src/storage/device_queue.rs index 20e64741..aa0aa959 100644 --- a/chirpstack/src/storage/device_queue.rs +++ b/chirpstack/src/storage/device_queue.rs @@ -5,15 +5,14 @@ use diesel_async::RunQueryDsl; use tracing::info; use uuid::Uuid; -use super::error::Error; -use super::get_async_db_conn; use super::schema::device_queue_item; +use super::{error::Error, fields, get_async_db_conn}; use lrwn::EUI64; #[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)] #[diesel(table_name = device_queue_item)] pub struct DeviceQueueItem { - pub id: Uuid, + pub id: fields::Uuid, pub dev_eui: EUI64, pub created_at: DateTime, pub f_port: i16, @@ -48,7 +47,7 @@ impl Default for DeviceQueueItem { let now = Utc::now(); DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui: EUI64::from_be_bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), created_at: now, f_port: 0, @@ -76,7 +75,7 @@ 
pub async fn enqueue_item(qi: DeviceQueueItem) -> Result pub async fn get_item(id: &Uuid) -> Result { let qi = device_queue_item::dsl::device_queue_item - .find(id) + .find(&fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, id.to_string()))?; @@ -99,9 +98,10 @@ pub async fn update_item(qi: DeviceQueueItem) -> Result } pub async fn delete_item(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(device_queue_item::dsl::device_queue_item.find(&id)) - .execute(&mut get_async_db_conn().await?) - .await?; + let ra = + diesel::delete(device_queue_item::dsl::device_queue_item.find(&fields::Uuid::from(id))) + .execute(&mut get_async_db_conn().await?) + .await?; if ra == 0 { return Err(Error::NotFound(id.to_string())); } @@ -192,7 +192,7 @@ pub mod test { let dp = storage::device_profile::test::create_device_profile(None).await; let d = storage::device::test::create_device( EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), - dp.id, + dp.id.into(), None, ) .await; @@ -253,7 +253,7 @@ pub mod test { let dp = storage::device_profile::test::create_device_profile(None).await; let d = storage::device::test::create_device( EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), - dp.id, + dp.id.into(), None, ) .await; @@ -278,7 +278,7 @@ pub mod test { let dp = storage::device_profile::test::create_device_profile(None).await; let d = storage::device::test::create_device( EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), - dp.id, + dp.id.into(), None, ) .await; diff --git a/chirpstack/src/storage/fields.rs b/chirpstack/src/storage/fields.rs deleted file mode 100644 index 0f808db0..00000000 --- a/chirpstack/src/storage/fields.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::collections::HashMap; -use std::fmt; -use std::ops::{Deref, DerefMut}; -use std::str::FromStr; - -use diesel::backend::Backend; -use diesel::pg::Pg; -use diesel::sql_types::{Jsonb, Text}; -use diesel::{deserialize, serialize}; -use serde::{Deserialize, Serialize}; - 
-#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)] -#[diesel(sql_type = Jsonb)] -pub struct KeyValue(HashMap); - -impl KeyValue { - pub fn new(m: HashMap) -> Self { - KeyValue(m) - } - - #[allow(clippy::wrong_self_convention)] - pub fn into_hashmap(&self) -> HashMap { - self.0.clone() - } -} - -impl Deref for KeyValue { - type Target = HashMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for KeyValue { - fn deref_mut(&mut self) -> &mut HashMap { - &mut self.0 - } -} - -impl deserialize::FromSql for KeyValue { - fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { - let value = >::from_sql(value)?; - let kv: HashMap = serde_json::from_value(value)?; - Ok(KeyValue(kv)) - } -} - -impl serialize::ToSql for KeyValue { - fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result { - let value = serde_json::to_value(&self.0)?; - >::to_sql(&value, &mut out.reborrow()) - } -} - -#[derive(Debug, Clone, AsExpression, FromSqlRow, PartialEq, Eq)] -#[diesel(sql_type = Jsonb)] -pub struct Measurements(HashMap); - -impl Measurements { - pub fn new(m: HashMap) -> Self { - Measurements(m) - } - - #[allow(clippy::wrong_self_convention)] - pub fn into_hashmap(&self) -> HashMap { - self.0.clone() - } -} - -impl Deref for Measurements { - type Target = HashMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Measurements { - fn deref_mut(&mut self) -> &mut HashMap { - &mut self.0 - } -} - -impl deserialize::FromSql for Measurements { - fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { - let value = >::from_sql(value)?; - let kv: HashMap = serde_json::from_value(value)?; - Ok(Measurements::new(kv)) - } -} - -impl serialize::ToSql for Measurements { - fn to_sql(&self, out: &mut serialize::Output<'_, '_, Pg>) -> serialize::Result { - let value = serde_json::to_value(&self.0)?; - >::to_sql(&value, &mut out.reborrow()) - } -} - -#[derive(Debug, Serialize, Deserialize, Clone, 
PartialEq, Eq)] -pub struct Measurement { - pub name: String, - pub kind: MeasurementKind, -} - -#[allow(clippy::upper_case_acronyms)] -#[allow(non_camel_case_types)] -#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)] -pub enum MeasurementKind { - // Unknown. - UNKNOWN, - // Incrementing counters which are not reset on each reporting. - COUNTER, - // Counters that do get reset upon reading. - ABSOLUTE, - // E.g. a temperature value. - GAUGE, - // E.g. a firmware version, true / false value. - STRING, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, AsExpression, FromSqlRow)] -#[allow(clippy::upper_case_acronyms)] -#[allow(non_camel_case_types)] -#[diesel(sql_type = diesel::sql_types::Text)] -pub enum MulticastGroupSchedulingType { - // Delay. - DELAY, - // GPS time. - GPS_TIME, -} - -impl fmt::Display for MulticastGroupSchedulingType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl deserialize::FromSql for MulticastGroupSchedulingType -where - DB: Backend, - *const str: deserialize::FromSql, -{ - fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { - let string = <*const str>::from_sql(value)?; - Ok(Self::from_str(unsafe { &*string })?) 
- } -} - -impl serialize::ToSql for MulticastGroupSchedulingType -where - str: serialize::ToSql, -{ - fn to_sql<'b>( - &'b self, - out: &mut serialize::Output<'b, '_, diesel::pg::Pg>, - ) -> serialize::Result { - >::to_sql( - &self.to_string(), - &mut out.reborrow(), - ) - } -} - -impl FromStr for MulticastGroupSchedulingType { - type Err = anyhow::Error; - - fn from_str(s: &str) -> std::result::Result { - Ok(match s { - "DELAY" => MulticastGroupSchedulingType::DELAY, - "GPS_TIME" => MulticastGroupSchedulingType::GPS_TIME, - _ => { - return Err(anyhow!("Unexpected MulticastGroupSchedulingType: {}", s)); - } - }) - } -} diff --git a/chirpstack/src/storage/fields/big_decimal.rs b/chirpstack/src/storage/fields/big_decimal.rs new file mode 100644 index 00000000..987d5d85 --- /dev/null +++ b/chirpstack/src/storage/fields/big_decimal.rs @@ -0,0 +1,91 @@ +use diesel::{ + backend::Backend, + {deserialize, serialize}, +}; +#[cfg(feature = "postgres")] +use diesel::{pg::Pg, sql_types::Numeric}; +#[cfg(feature = "sqlite")] +use diesel::{sql_types::Double, sqlite::Sqlite}; + +#[derive(Clone, Debug, Eq, PartialEq, AsExpression, FromSqlRow)] +#[cfg_attr(feature="postgres", diesel(sql_type = Numeric))] +#[cfg_attr(feature="sqlite", diesel(sql_type = Double))] +pub struct BigDecimal(bigdecimal::BigDecimal); + +impl std::convert::AsRef for BigDecimal { + fn as_ref(&self) -> &bigdecimal::BigDecimal { + &self.0 + } +} + +impl std::convert::From for BigDecimal { + fn from(value: bigdecimal::BigDecimal) -> Self { + Self(value) + } +} + +impl std::convert::TryFrom for BigDecimal { + type Error = >::Error; + fn try_from(value: f32) -> Result { + bigdecimal::BigDecimal::try_from(value).map(|bd| bd.into()) + } +} + +impl std::ops::Deref for BigDecimal { + type Target = bigdecimal::BigDecimal; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for BigDecimal { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "postgres")] 
+impl deserialize::FromSql for BigDecimal { + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let u = ::from_sql(value)?; + Ok(BigDecimal(u)) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for BigDecimal { + fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result { + >::to_sql( + &self.0, + &mut out.reborrow(), + ) + } +} + +#[cfg(feature = "sqlite")] +impl deserialize::FromSql for BigDecimal +where + f64: deserialize::FromSql, +{ + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + use bigdecimal::FromPrimitive; + let bd_val = + >::from_sql(value)?; + let bd = bigdecimal::BigDecimal::from_f64(bd_val) + .ok_or_else(|| format!("Unrepresentable BigDecimal from f64 value"))?; + Ok(BigDecimal(bd)) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for BigDecimal { + fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + use bigdecimal::ToPrimitive; + let value = self + .0 + .to_f64() + .ok_or_else(|| format!("Unrepresentable f64 value as BigDecimal"))?; + out.set_value(value); + Ok(serialize::IsNull::No) + } +} diff --git a/chirpstack/src/storage/fields/dev_nonces.rs b/chirpstack/src/storage/fields/dev_nonces.rs new file mode 100644 index 00000000..43ed4d3e --- /dev/null +++ b/chirpstack/src/storage/fields/dev_nonces.rs @@ -0,0 +1,92 @@ +use diesel::backend::Backend; +use diesel::{deserialize, serialize}; +#[cfg(feature = "postgres")] +use diesel::{ + pg::Pg, + sql_types::{Array, Int4, Nullable}, +}; +#[cfg(feature = "sqlite")] +use diesel::{sql_types::Text, sqlite::Sqlite}; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "postgres")] +type DevNoncesPgType = Array>; + +// Sqlite has no native array type so use text +#[derive(Deserialize, Serialize, Clone, Debug, Eq, PartialEq, AsExpression, FromSqlRow)] +#[serde(transparent)] +#[cfg_attr(feature = "postgres", diesel(sql_type = DevNoncesPgType))] +#[cfg_attr(feature = "sqlite", diesel(sql_type = 
Text))] +pub struct DevNonces(DevNoncesInner); + +pub type DevNoncesInner = Vec>; + +impl std::default::Default for DevNonces { + fn default() -> Self { + Self(Vec::new()) + } +} + +impl std::convert::AsRef for DevNonces { + fn as_ref(&self) -> &DevNoncesInner { + &self.0 + } +} + +impl std::convert::From for DevNonces { + fn from(value: DevNoncesInner) -> Self { + Self(value) + } +} + +impl std::ops::Deref for DevNonces { + type Target = DevNoncesInner; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for DevNonces { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "postgres")] +impl deserialize::FromSql for DevNonces { + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let sql_val = ::from_sql(value)?; + Ok(DevNonces(sql_val)) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for DevNonces { + fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result { + >::to_sql( + &self.0, + &mut out.reborrow(), + ) + } +} + +#[cfg(feature = "sqlite")] +impl deserialize::FromSql for DevNonces +where + *const str: deserialize::FromSql, +{ + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let s = + <*const str as deserialize::FromSql>::from_sql(value)?; + let nonces = serde_json::from_str::(unsafe { &*s })?; + Ok(nonces) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for DevNonces { + fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(serde_json::to_string(self)?); + Ok(serialize::IsNull::No) + } +} diff --git a/chirpstack/src/storage/fields/device_session.rs b/chirpstack/src/storage/fields/device_session.rs new file mode 100644 index 00000000..1d6c3123 --- /dev/null +++ b/chirpstack/src/storage/fields/device_session.rs @@ -0,0 +1,83 @@ +use std::io::Cursor; +use std::ops::{Deref, DerefMut}; + +use diesel::backend::Backend; +#[cfg(feature = "postgres")] +use diesel::pg::Pg; +use 
diesel::sql_types::Binary; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; +use diesel::{deserialize, serialize}; +use prost::Message; + +use chirpstack_api::internal; + +#[derive(Debug, Clone, PartialEq, AsExpression, FromSqlRow)] +#[diesel(sql_type = diesel::sql_types::Binary)] +pub struct DeviceSession(internal::DeviceSession); + +impl DeviceSession { + pub fn new(m: internal::DeviceSession) -> Self { + DeviceSession(m) + } +} + +impl std::convert::From for DeviceSession { + fn from(u: internal::DeviceSession) -> Self { + Self(u) + } +} + +impl std::convert::From<&internal::DeviceSession> for DeviceSession { + fn from(u: &internal::DeviceSession) -> Self { + Self::from(u.clone()) + } +} + +impl std::convert::Into for DeviceSession { + fn into(self) -> internal::DeviceSession { + self.0 + } +} + +impl Deref for DeviceSession { + type Target = internal::DeviceSession; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for DeviceSession { + fn deref_mut(&mut self) -> &mut internal::DeviceSession { + &mut self.0 + } +} + +impl deserialize::FromSql for DeviceSession +where + DB: Backend, + *const [u8]: deserialize::FromSql, +{ + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let bindata = <*const [u8] as deserialize::FromSql>::from_sql(value)?; + let ds = internal::DeviceSession::decode(&mut Cursor::new(unsafe { &*bindata }))?; + Ok(DeviceSession(ds)) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for DeviceSession { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result { + let encoded = self.encode_to_vec(); + as serialize::ToSql>::to_sql(&encoded, &mut out.reborrow()) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for DeviceSession { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(self.encode_to_vec()); + Ok(serialize::IsNull::No) + } +} diff --git a/chirpstack/src/storage/fields/key_value.rs 
b/chirpstack/src/storage/fields/key_value.rs new file mode 100644 index 00000000..ae19e88b --- /dev/null +++ b/chirpstack/src/storage/fields/key_value.rs @@ -0,0 +1,78 @@ +use std::collections::HashMap; +use std::ops::{Deref, DerefMut}; + +use diesel::backend::Backend; + +use diesel::{deserialize, serialize}; +#[cfg(feature = "postgres")] +use diesel::{pg::Pg, sql_types::Jsonb}; +#[cfg(feature = "sqlite")] +use diesel::{sql_types::Text, sqlite::Sqlite}; + +#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)] +#[cfg_attr(feature = "postgres", diesel(sql_type = Jsonb))] +#[cfg_attr(feature = "sqlite", diesel(sql_type = Text))] +pub struct KeyValue(HashMap); + +impl KeyValue { + pub fn new(m: HashMap) -> Self { + KeyValue(m) + } + + #[allow(clippy::wrong_self_convention)] + pub fn into_hashmap(&self) -> HashMap { + self.0.clone() + } +} + +impl Deref for KeyValue { + type Target = HashMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for KeyValue { + fn deref_mut(&mut self) -> &mut HashMap { + &mut self.0 + } +} + +#[cfg(feature = "postgres")] +impl deserialize::FromSql for KeyValue { + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let value = >::from_sql(value)?; + let kv: HashMap = serde_json::from_value(value)?; + Ok(KeyValue(kv)) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for KeyValue { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result { + let value = serde_json::to_value(&self.0)?; + >::to_sql(&value, &mut out.reborrow()) + } +} + +#[cfg(feature = "sqlite")] +impl deserialize::FromSql for KeyValue +where + *const str: deserialize::FromSql, +{ + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let s = + <*const str as deserialize::FromSql>::from_sql(value)?; + let kv: HashMap = serde_json::from_str(unsafe { &*s })?; + Ok(KeyValue(kv)) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for KeyValue { + fn to_sql<'b>(&'b self, out: 
&mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(serde_json::to_string(&self.0)?); + Ok(serialize::IsNull::No) + } +} diff --git a/chirpstack/src/storage/fields/measurements.rs b/chirpstack/src/storage/fields/measurements.rs new file mode 100644 index 00000000..12c9241f --- /dev/null +++ b/chirpstack/src/storage/fields/measurements.rs @@ -0,0 +1,101 @@ +use std::collections::HashMap; +use std::ops::{Deref, DerefMut}; + +use diesel::backend::Backend; +use diesel::{deserialize, serialize}; +#[cfg(feature = "postgres")] +use diesel::{pg::Pg, sql_types::Jsonb}; +#[cfg(feature = "sqlite")] +use diesel::{sql_types::Text, sqlite::Sqlite}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct Measurement { + pub name: String, + pub kind: MeasurementKind, +} + +#[allow(clippy::upper_case_acronyms)] +#[allow(non_camel_case_types)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)] +pub enum MeasurementKind { + // Unknown. + UNKNOWN, + // Incrementing counters which are not reset on each reporting. + COUNTER, + // Counters that do get reset upon reading. + ABSOLUTE, + // E.g. a temperature value. + GAUGE, + // E.g. a firmware version, true / false value. 
+ STRING, +} + +#[derive(Debug, Clone, AsExpression, FromSqlRow, PartialEq, Eq)] +#[cfg_attr(feature = "postgres", diesel(sql_type = Jsonb))] +#[cfg_attr(feature = "sqlite", diesel(sql_type = Text))] +pub struct Measurements(HashMap); + +impl Measurements { + pub fn new(m: HashMap) -> Self { + Measurements(m) + } + + #[allow(clippy::wrong_self_convention)] + pub fn into_hashmap(&self) -> HashMap { + self.0.clone() + } +} + +impl Deref for Measurements { + type Target = HashMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Measurements { + fn deref_mut(&mut self) -> &mut HashMap { + &mut self.0 + } +} + +#[cfg(feature = "postgres")] +impl deserialize::FromSql for Measurements { + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let value = >::from_sql(value)?; + let kv: HashMap = serde_json::from_value(value)?; + Ok(Measurements::new(kv)) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for Measurements { + fn to_sql(&self, out: &mut serialize::Output<'_, '_, Pg>) -> serialize::Result { + let value = serde_json::to_value(&self.0)?; + >::to_sql(&value, &mut out.reborrow()) + } +} + +#[cfg(feature = "sqlite")] +impl deserialize::FromSql for Measurements +where + *const str: deserialize::FromSql, +{ + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let s = + <*const str as deserialize::FromSql>::from_sql(value)?; + let kv: HashMap = serde_json::from_str(unsafe { &*s })?; + Ok(Measurements::new(kv)) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for Measurements { + fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result { + let value = serde_json::to_string(&self.0)?; + out.set_value(value); + Ok(serialize::IsNull::No) + } +} diff --git a/chirpstack/src/storage/fields/mod.rs b/chirpstack/src/storage/fields/mod.rs new file mode 100644 index 00000000..57fc53dd --- /dev/null +++ b/chirpstack/src/storage/fields/mod.rs @@ -0,0 +1,37 @@ +mod big_decimal; +mod 
dev_nonces; +mod device_session; +mod key_value; +mod measurements; +mod multicast_group_scheduling_type; +mod uuid; + +pub use big_decimal::BigDecimal; +pub use dev_nonces::*; +pub use device_session::DeviceSession; +pub use key_value::KeyValue; +pub use measurements::*; +pub use multicast_group_scheduling_type::MulticastGroupSchedulingType; +pub use uuid::Uuid; + +#[cfg(feature = "postgres")] +pub mod sql_types { + pub type Timestamptz = diesel::sql_types::Timestamptz; + + pub type JsonT = diesel::sql_types::Jsonb; + + pub type Uuid = diesel::sql_types::Uuid; +} + +#[cfg(feature = "sqlite")] +pub mod sql_types { + pub type Timestamptz = diesel::sql_types::TimestamptzSqlite; + + // TODO: sqlite is adding "jsonb" support, different from postgres + // So we may switch the column to blob? + // see https://sqlite.org/draft/jsonb.html + pub type JsonT = diesel::sql_types::Text; + + // Sqlite has no native json type so use text + pub type Uuid = diesel::sql_types::Text; +} diff --git a/chirpstack/src/storage/fields/multicast_group_scheduling_type.rs b/chirpstack/src/storage/fields/multicast_group_scheduling_type.rs new file mode 100644 index 00000000..7212a2e2 --- /dev/null +++ b/chirpstack/src/storage/fields/multicast_group_scheduling_type.rs @@ -0,0 +1,75 @@ +use std::fmt; +use std::str::FromStr; + +use diesel::backend::Backend; +use diesel::sql_types::Text; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; +use diesel::{deserialize, serialize}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, AsExpression, FromSqlRow)] +#[allow(clippy::upper_case_acronyms)] +#[allow(non_camel_case_types)] +#[diesel(sql_type = diesel::sql_types::Text)] +pub enum MulticastGroupSchedulingType { + // Delay. + DELAY, + // GPS time. 
+ GPS_TIME, +} + +impl fmt::Display for MulticastGroupSchedulingType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl deserialize::FromSql for MulticastGroupSchedulingType +where + DB: Backend, + *const str: deserialize::FromSql, +{ + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let string = <*const str>::from_sql(value)?; + Ok(Self::from_str(unsafe { &*string })?) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for MulticastGroupSchedulingType +where + str: serialize::ToSql, +{ + fn to_sql<'b>( + &'b self, + out: &mut serialize::Output<'b, '_, diesel::pg::Pg>, + ) -> serialize::Result { + >::to_sql( + &self.to_string(), + &mut out.reborrow(), + ) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for MulticastGroupSchedulingType { + fn to_sql(&self, out: &mut serialize::Output<'_, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + +impl FromStr for MulticastGroupSchedulingType { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { + Ok(match s { + "DELAY" => MulticastGroupSchedulingType::DELAY, + "GPS_TIME" => MulticastGroupSchedulingType::GPS_TIME, + _ => { + return Err(anyhow!("Unexpected MulticastGroupSchedulingType: {}", s)); + } + }) + } +} diff --git a/chirpstack/src/storage/fields/uuid.rs b/chirpstack/src/storage/fields/uuid.rs new file mode 100644 index 00000000..a2be1a78 --- /dev/null +++ b/chirpstack/src/storage/fields/uuid.rs @@ -0,0 +1,87 @@ +use diesel::backend::Backend; +#[cfg(feature = "postgres")] +use diesel::pg::Pg; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; +use diesel::{deserialize, serialize}; + +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Serialize, Copy, Clone, Debug, Eq, PartialEq, AsExpression, FromSqlRow)] +#[serde(transparent)] +#[cfg_attr(feature = "postgres", diesel(sql_type = diesel::sql_types::Uuid))] +#[cfg_attr(feature = "sqlite", 
diesel(sql_type = diesel::sql_types::Text))] +pub struct Uuid(uuid::Uuid); + +impl std::convert::From for Uuid { + fn from(u: uuid::Uuid) -> Self { + Self(u) + } +} + +impl std::convert::From<&uuid::Uuid> for Uuid { + fn from(u: &uuid::Uuid) -> Self { + Self::from(u.clone()) + } +} + +impl std::convert::Into for Uuid { + fn into(self) -> uuid::Uuid { + self.0 + } +} + +impl std::ops::Deref for Uuid { + type Target = uuid::Uuid; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for Uuid { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl std::fmt::Display for Uuid { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", &self.0) + } +} + +#[cfg(feature = "postgres")] +impl deserialize::FromSql for Uuid { + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let u = ::from_sql(value)?; + Ok(Uuid(u)) + } +} + +#[cfg(feature = "postgres")] +impl serialize::ToSql for Uuid { + fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result { + >::to_sql( + &self.0, + &mut out.reborrow(), + ) + } +} + +#[cfg(feature = "sqlite")] +impl deserialize::FromSql for Uuid { + fn from_sql(value: ::RawValue<'_>) -> deserialize::Result { + let s = + <*const str as deserialize::FromSql>::from_sql(value)?; + let u = uuid::Uuid::try_parse(unsafe { &*s })?; + Ok(Uuid(u)) + } +} + +#[cfg(feature = "sqlite")] +impl serialize::ToSql for Uuid { + fn to_sql<'b>(&self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(self.0.to_string()); + Ok(serialize::IsNull::No) + } +} diff --git a/chirpstack/src/storage/gateway.rs b/chirpstack/src/storage/gateway.rs index 8f4f9bd2..1bbf8c07 100644 --- a/chirpstack/src/storage/gateway.rs +++ b/chirpstack/src/storage/gateway.rs @@ -10,7 +10,7 @@ use uuid::Uuid; use lrwn::{DevAddr, EUI64}; use super::schema::{gateway, multicast_group_gateway, relay_gateway, tenant}; -use super::{error::Error, fields, 
get_async_db_conn}; +use super::{db_transaction, error::Error, fields, get_async_db_conn}; pub type RelayId = DevAddr; @@ -18,7 +18,7 @@ pub type RelayId = DevAddr; #[diesel(table_name = gateway)] pub struct Gateway { pub gateway_id: EUI64, - pub tenant_id: Uuid, + pub tenant_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub last_seen_at: Option>, @@ -48,7 +48,7 @@ impl Default for Gateway { Gateway { gateway_id: EUI64::from_be_bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - tenant_id: Uuid::nil(), + tenant_id: Uuid::nil().into(), created_at: now, updated_at: now, last_seen_at: None, @@ -78,7 +78,7 @@ pub struct GatewayChangeset { #[derive(Queryable, PartialEq, Debug)] pub struct GatewayListItem { - pub tenant_id: Uuid, + pub tenant_id: fields::Uuid, pub gateway_id: EUI64, pub name: String, pub description: String, @@ -95,7 +95,7 @@ pub struct GatewayListItem { #[derive(Queryable, PartialEq, Debug)] pub struct GatewayMeta { pub gateway_id: EUI64, - pub tenant_id: Uuid, + pub tenant_id: fields::Uuid, pub latitude: f64, pub longitude: f64, pub altitude: f32, @@ -123,7 +123,7 @@ pub struct GatewayCountsByState { #[derive(Queryable, Insertable, PartialEq, Debug)] #[diesel(table_name = relay_gateway)] pub struct RelayGateway { - pub tenant_id: Uuid, + pub tenant_id: fields::Uuid, pub relay_id: RelayId, pub created_at: DateTime, pub updated_at: DateTime, @@ -140,7 +140,7 @@ impl Default for RelayGateway { RelayGateway { relay_id: RelayId::from_be_bytes([1, 2, 3, 4]), - tenant_id: Uuid::nil(), + tenant_id: Uuid::nil().into(), created_at: now, updated_at: now, last_seen_at: None, @@ -160,7 +160,7 @@ pub struct RelayGatewayFilters { #[derive(Queryable, PartialEq, Debug)] pub struct RelayGatewayListItem { pub relay_id: RelayId, - pub tenant_id: Uuid, + pub tenant_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub last_seen_at: Option>, @@ -173,42 +173,41 @@ pub struct RelayGatewayListItem { pub async fn create(gw: 
Gateway) -> Result { gw.validate()?; let mut c = get_async_db_conn().await?; - let gw: Gateway = c - .build_transaction() - .run::(|c| { - Box::pin(async move { - // use for_update to lock the tenant. - let t: super::tenant::Tenant = tenant::dsl::tenant - .find(&gw.tenant_id) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, gw.tenant_id.to_string()))?; + let gw: Gateway = db_transaction::(&mut c, |c| { + Box::pin(async move { + let query = tenant::dsl::tenant.find(&gw.tenant_id); + // use for_update to lock the tenant. + #[cfg(feature = "postgres")] + let query = query.for_update(); + let t: super::tenant::Tenant = query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, gw.tenant_id.to_string()))?; - if !t.can_have_gateways { - return Err(Error::NotAllowed("Tenant can not have gateways".into())); - } + if !t.can_have_gateways { + return Err(Error::NotAllowed("Tenant can not have gateways".into())); + } - let gw_count: i64 = gateway::dsl::gateway - .select(dsl::count_star()) - .filter(gateway::dsl::tenant_id.eq(&gw.tenant_id)) - .first(c) - .await?; + let gw_count: i64 = gateway::dsl::gateway + .select(dsl::count_star()) + .filter(gateway::dsl::tenant_id.eq(&gw.tenant_id)) + .first(c) + .await?; - if t.max_gateway_count != 0 && gw_count as i32 >= t.max_gateway_count { - return Err(Error::NotAllowed( - "Max number of gateways exceeded for tenant".into(), - )); - } + if t.max_gateway_count != 0 && gw_count as i32 >= t.max_gateway_count { + return Err(Error::NotAllowed( + "Max number of gateways exceeded for tenant".into(), + )); + } - diesel::insert_into(gateway::table) - .values(&gw) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, gw.gateway_id.to_string())) - }) + diesel::insert_into(gateway::table) + .values(&gw) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, gw.gateway_id.to_string())) }) - .await?; + }) + .await?; info!( gateway_id = %gw.gateway_id, "Gateway created" @@ -282,15 +281,25 @@ 
pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(gateway::dsl::tenant_id.eq(tenant_id)); + q = q.filter(gateway::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } if let Some(multicast_group_id) = &filters.multicast_group_id { - q = q.filter(multicast_group_gateway::dsl::multicast_group_id.eq(multicast_group_id)); + q = q.filter( + multicast_group_gateway::dsl::multicast_group_id + .eq(fields::Uuid::from(multicast_group_id)), + ); } if let Some(search) = &filters.search { - q = q.filter(gateway::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(gateway::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(gateway::dsl::name.like(format!("%{}%", search))); + } } Ok(q.first(&mut get_async_db_conn().await?).await?) @@ -321,15 +330,25 @@ pub async fn list( .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(gateway::dsl::tenant_id.eq(tenant_id)); + q = q.filter(gateway::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } if let Some(search) = &filters.search { - q = q.filter(gateway::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(gateway::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(gateway::dsl::name.like(format!("%{}%", search))); + } } if let Some(multicast_group_id) = &filters.multicast_group_id { - q = q.filter(multicast_group_gateway::dsl::multicast_group_id.eq(multicast_group_id)); + q = q.filter( + multicast_group_gateway::dsl::multicast_group_id + .eq(fields::Uuid::from(multicast_group_id)), + ); } let items = q @@ -360,6 +379,7 @@ pub async fn get_meta(gateway_id: &EUI64) -> Result { Ok(meta) } +#[cfg(feature = "postgres")] pub async fn get_counts_by_state(tenant_id: &Option) -> Result { let counts: GatewayCountsByState = diesel::sql_query(r#" select @@ -370,7 +390,22 @@ pub 
async fn get_counts_by_state(tenant_id: &Option) -> Result, _>(tenant_id).get_result(&mut get_async_db_conn().await?).await?; + "#).bind::, _>(tenant_id.map(|u| fields::Uuid::from(u))).get_result(&mut get_async_db_conn().await?).await?; + Ok(counts) +} + +#[cfg(feature = "sqlite")] +pub async fn get_counts_by_state(tenant_id: &Option) -> Result { + let counts: GatewayCountsByState = diesel::sql_query(r#" + select + coalesce(sum(case when last_seen_at is null then 1 end), 0) as never_seen_count, + coalesce(sum(case when (unixepoch('now') - unixepoch(last_seen_at)) > (stats_interval_secs * 2) then 1 end), 0) as offline_count, + coalesce(sum(case when (unixepoch('now') - unixepoch(last_seen_at)) <= (stats_interval_secs * 2) then 1 end), 0) as online_count + from + gateway + where + ?1 is null or tenant_id = ?1 + "#).bind::, _>(tenant_id.map(|u| fields::Uuid::from(u))).get_result(&mut get_async_db_conn().await?).await?; Ok(counts) } @@ -388,7 +423,7 @@ pub async fn create_relay_gateway(relay: RelayGateway) -> Result Result { let relay = relay_gateway::dsl::relay_gateway - .find((&tenant_id, &relay_id)) + .find((fields::Uuid::from(tenant_id), &relay_id)) .first(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, relay_id.to_string()))?; @@ -421,16 +456,18 @@ pub async fn get_relay_gateway_count(filters: &RelayGatewayFilters) -> Result Result<(), Error> { - let ra = diesel::delete(relay_gateway::dsl::relay_gateway.find((&tenant_id, &relay_id))) - .execute(&mut get_async_db_conn().await?) - .await?; + let ra = diesel::delete( + relay_gateway::dsl::relay_gateway.find((fields::Uuid::from(tenant_id), &relay_id)), + ) + .execute(&mut get_async_db_conn().await?) 
+ .await?; if ra == 0 { return Err(Error::NotFound(relay_id.to_string())); } @@ -460,7 +497,7 @@ pub async fn list_relay_gateways( .into_boxed(); if let Some(tenant_id) = &filters.tenant_id { - q = q.filter(relay_gateway::dsl::tenant_id.eq(tenant_id)); + q = q.filter(relay_gateway::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))); } let items = q @@ -540,7 +577,7 @@ pub mod test { .await .unwrap(); - storage::multicast::add_gateway(&mg.id, &gw.gateway_id) + storage::multicast::add_gateway(&mg.id.into(), &gw.gateway_id) .await .unwrap(); @@ -591,7 +628,7 @@ pub mod test { }, FilterTest { filters: Filters { - tenant_id: Some(gw.tenant_id), + tenant_id: Some(gw.tenant_id.into()), multicast_group_id: None, search: None, }, @@ -614,7 +651,7 @@ pub mod test { FilterTest { filters: Filters { tenant_id: None, - multicast_group_id: Some(mg.id), + multicast_group_id: Some(mg.id.into()), search: None, }, gws: vec![&gw], @@ -675,7 +712,7 @@ pub mod test { .unwrap(); // get - let relay_get = get_relay_gateway(relay.tenant_id, relay.relay_id) + let relay_get = get_relay_gateway(relay.tenant_id.into(), relay.relay_id) .await .unwrap(); assert_eq!(relay, relay_get); @@ -684,7 +721,7 @@ pub mod test { relay.name = "updated-relay".into(); relay.region_config_id = "us915_0".into(); relay = update_relay_gateway(relay).await.unwrap(); - let relay_get = get_relay_gateway(relay.tenant_id, relay.relay_id) + let relay_get = get_relay_gateway(relay.tenant_id.into(), relay.relay_id) .await .unwrap(); assert_eq!(relay, relay_get); @@ -700,7 +737,7 @@ pub mod test { }, RelayGatewayFilterTest { filters: RelayGatewayFilters { - tenant_id: Some(gw.tenant_id), + tenant_id: Some(gw.tenant_id.into()), }, relay_gateways: vec![&relay], count: 1, @@ -709,7 +746,7 @@ pub mod test { }, RelayGatewayFilterTest { filters: RelayGatewayFilters { - tenant_id: Some(gw.tenant_id), + tenant_id: Some(gw.tenant_id.into()), }, relay_gateways: vec![&relay], count: 1, @@ -738,10 +775,10 @@ pub mod test { } // delete 
- delete_relay_gateway(relay.tenant_id, relay.relay_id) + delete_relay_gateway(relay.tenant_id.into(), relay.relay_id) .await .unwrap(); - assert!(delete_relay_gateway(relay.tenant_id, relay.relay_id) + assert!(delete_relay_gateway(relay.tenant_id.into(), relay.relay_id) .await .is_err()); } diff --git a/chirpstack/src/storage/mod.rs b/chirpstack/src/storage/mod.rs index 3531bd97..7468fc3b 100644 --- a/chirpstack/src/storage/mod.rs +++ b/chirpstack/src/storage/mod.rs @@ -2,14 +2,8 @@ use std::sync::RwLock; use std::time::Instant; use anyhow::Result; -use diesel::{ConnectionError, ConnectionResult}; use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; -use diesel_async::pooled_connection::deadpool::{Object as DeadpoolObject, Pool as DeadpoolPool}; -use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig}; -use diesel_async::AsyncPgConnection; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; -use futures_util::future::BoxFuture; -use futures_util::FutureExt; use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; use redis::aio::ConnectionLike; use tokio::sync::RwLock as TokioRwLock; @@ -36,20 +30,23 @@ pub mod mac_command; pub mod metrics; pub mod multicast; pub mod passive_roaming; +#[cfg(feature = "postgres")] +mod postgres; pub mod relay; pub mod schema; +#[cfg(feature = "postgres")] +mod schema_postgres; +#[cfg(feature = "sqlite")] +mod schema_sqlite; pub mod search; +#[cfg(feature = "sqlite")] +mod sqlite; pub mod tenant; pub mod user; -use crate::helpers::tls::get_root_certs; use crate::monitoring::prometheus; -pub type AsyncPgPool = DeadpoolPool; -pub type AsyncPgPoolConnection = DeadpoolObject; - lazy_static! 
{ - static ref ASYNC_PG_POOL: RwLock> = RwLock::new(None); static ref ASYNC_REDIS_POOL: TokioRwLock> = TokioRwLock::new(None); static ref REDIS_PREFIX: RwLock = RwLock::new("".to_string()); static ref STORAGE_REDIS_CONN_GET: Histogram = { @@ -61,18 +58,21 @@ lazy_static! { ); histogram }; - static ref STORAGE_PG_CONN_GET: Histogram = { - let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12)); - prometheus::register( - "storage_pg_conn_get_duration_seconds", - "Time between requesting a PostgreSQL connection and the connection-pool returning it", - histogram.clone(), - ); - histogram - }; } -pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations"); +#[cfg(feature = "postgres")] +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations_postgres"); +#[cfg(feature = "sqlite")] +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations_sqlite"); + +#[cfg(feature = "postgres")] +pub use postgres::{ + db_transaction, get_async_db_conn, AsyncPgPoolConnection as AsyncDbPoolConnection, +}; +#[cfg(feature = "sqlite")] +pub use sqlite::{ + db_transaction, get_async_db_conn, AsyncSqlitePoolConnection as AsyncDbPoolConnection, +}; #[derive(Clone)] pub enum AsyncRedisPool { @@ -117,18 +117,14 @@ impl ConnectionLike for AsyncRedisPoolConnection { pub async fn setup() -> Result<()> { let conf = config::get(); - info!("Setting up PostgreSQL connection pool"); - let mut config = ManagerConfig::default(); - config.custom_setup = Box::new(pg_establish_connection); - - let mgr = AsyncDieselConnectionManager::::new_with_config( - &conf.postgresql.dsn, - config, - ); - let pool = DeadpoolPool::builder(mgr) - .max_size(conf.postgresql.max_open_connections as usize) - .build()?; - set_async_db_pool(pool); + #[cfg(feature = "postgres")] + { + postgres::setup(&conf.postgresql)?; + } + #[cfg(feature = "sqlite")] + { + sqlite::setup(&conf.sqlite)?; + } run_db_migrations().await?; info!("Setting up Redis client"); @@ -157,55 
+153,6 @@ pub async fn setup() -> Result<()> { Ok(()) } -// Source: -// https://github.com/weiznich/diesel_async/blob/main/examples/postgres/pooled-with-rustls/src/main.rs -fn pg_establish_connection(config: &str) -> BoxFuture> { - let fut = async { - let conf = config::get(); - - let root_certs = get_root_certs(if conf.postgresql.ca_cert.is_empty() { - None - } else { - Some(conf.postgresql.ca_cert.clone()) - }) - .map_err(|e| ConnectionError::BadConnection(e.to_string()))?; - let rustls_config = rustls::ClientConfig::builder() - .with_root_certificates(root_certs) - .with_no_client_auth(); - let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config); - let (client, conn) = tokio_postgres::connect(config, tls) - .await - .map_err(|e| ConnectionError::BadConnection(e.to_string()))?; - tokio::spawn(async move { - if let Err(e) = conn.await { - error!(error = %e, "PostgreSQL connection error"); - } - }); - AsyncPgConnection::try_from(client).await - }; - fut.boxed() -} - -pub fn get_async_db_pool() -> Result { - let pool_r = ASYNC_PG_POOL.read().unwrap(); - let pool: AsyncPgPool = pool_r - .as_ref() - .ok_or_else(|| anyhow!("PostgreSQL connection pool is not initialized"))? 
- .clone(); - Ok(pool) -} - -pub async fn get_async_db_conn() -> Result { - let pool = get_async_db_pool()?; - - let start = Instant::now(); - let res = pool.get().await?; - - STORAGE_PG_CONN_GET.observe(start.elapsed().as_secs_f64()); - - Ok(res) -} - async fn get_async_redis_pool() -> Result { let pool_r = ASYNC_REDIS_POOL.read().await; let pool: AsyncRedisPool = pool_r @@ -231,16 +178,11 @@ pub async fn get_async_redis_conn() -> Result { Ok(res) } -pub fn set_async_db_pool(p: AsyncPgPool) { - let mut pool_w = ASYNC_PG_POOL.write().unwrap(); - *pool_w = Some(p); -} - pub async fn run_db_migrations() -> Result<()> { info!("Applying schema migrations"); let c = get_async_db_conn().await?; - let mut c_wrapped: AsyncConnectionWrapper = + let mut c_wrapped: AsyncConnectionWrapper = AsyncConnectionWrapper::from(c); task::spawn_blocking(move || -> Result<()> { @@ -266,16 +208,16 @@ pub fn redis_key(s: String) -> String { #[cfg(test)] pub async fn reset_db() -> Result<()> { let c = get_async_db_conn().await?; - let mut c_wrapped: AsyncConnectionWrapper = + let mut c_wrapped: AsyncConnectionWrapper = AsyncConnectionWrapper::from(c); tokio::task::spawn_blocking(move || -> Result<()> { c_wrapped .revert_all_migrations(MIGRATIONS) - .map_err(|e| anyhow!("{}", e))?; + .map_err(|e| anyhow!("During revert: {}", e))?; c_wrapped .run_pending_migrations(MIGRATIONS) - .map_err(|e| anyhow!("{}", e))?; + .map_err(|e| anyhow!("During run: {}", e))?; Ok(()) }) diff --git a/chirpstack/src/storage/multicast.rs b/chirpstack/src/storage/multicast.rs index 07525828..f2000924 100644 --- a/chirpstack/src/storage/multicast.rs +++ b/chirpstack/src/storage/multicast.rs @@ -13,15 +13,15 @@ use super::schema::{ application, device, gateway, multicast_group, multicast_group_device, multicast_group_gateway, multicast_group_queue_item, }; -use super::{fields, get_async_db_conn}; +use super::{db_transaction, fields, get_async_db_conn}; use crate::downlink::classb; use crate::{config, 
gpstime::ToDateTime, gpstime::ToGpsTime}; #[derive(Clone, Queryable, Insertable, Debug, PartialEq, Eq)] #[diesel(table_name = multicast_group)] pub struct MulticastGroup { - pub id: Uuid, - pub application_id: Uuid, + pub id: fields::Uuid, + pub application_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -51,8 +51,8 @@ impl Default for MulticastGroup { let now = Utc::now(); MulticastGroup { - id: Uuid::new_v4(), - application_id: Uuid::nil(), + id: Uuid::new_v4().into(), + application_id: Uuid::nil().into(), created_at: now, updated_at: now, name: "".into(), @@ -72,7 +72,7 @@ impl Default for MulticastGroup { #[derive(Queryable, PartialEq, Eq, Debug)] pub struct MulticastGroupListItem { - pub id: Uuid, + pub id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -89,10 +89,10 @@ pub struct Filters { #[derive(Clone, Queryable, QueryableByName, Insertable, AsChangeset, Debug, PartialEq, Eq)] #[diesel(table_name = multicast_group_queue_item)] pub struct MulticastGroupQueueItem { - pub id: Uuid, + pub id: fields::Uuid, pub created_at: DateTime, pub scheduler_run_after: DateTime, - pub multicast_group_id: Uuid, + pub multicast_group_id: fields::Uuid, pub gateway_id: EUI64, pub f_cnt: i64, pub f_port: i16, @@ -117,10 +117,10 @@ impl Default for MulticastGroupQueueItem { let now = Utc::now(); MulticastGroupQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), created_at: now, scheduler_run_after: now, - multicast_group_id: Uuid::nil(), + multicast_group_id: Uuid::nil().into(), gateway_id: Default::default(), f_cnt: 0, f_port: 0, @@ -144,7 +144,7 @@ pub async fn create(mg: MulticastGroup) -> Result { pub async fn get(id: &Uuid) -> Result { multicast_group::dsl::multicast_group - .find(&id) + .find(&fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) 
.await .map_err(|e| Error::from_diesel(e, id.to_string())) @@ -176,7 +176,7 @@ pub async fn update(mg: MulticastGroup) -> Result { } pub async fn delete(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(multicast_group::dsl::multicast_group.find(&id)) + let ra = diesel::delete(multicast_group::dsl::multicast_group.find(&fields::Uuid::from(id))) .execute(&mut get_async_db_conn().await?) .await?; if ra == 0 { @@ -192,11 +192,18 @@ pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(application_id) = &filters.application_id { - q = q.filter(multicast_group::dsl::application_id.eq(application_id)); + q = q.filter(multicast_group::dsl::application_id.eq(fields::Uuid::from(application_id))); } if let Some(search) = &filters.search { - q = q.filter(multicast_group::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(multicast_group::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(multicast_group::dsl::name.like(format!("%{}%", search))); + } } q.first(&mut get_async_db_conn().await?) 
@@ -221,11 +228,18 @@ pub async fn list( .into_boxed(); if let Some(application_id) = &filters.application_id { - q = q.filter(multicast_group::dsl::application_id.eq(application_id)); + q = q.filter(multicast_group::dsl::application_id.eq(fields::Uuid::from(application_id))); } if let Some(search) = &filters.search { - q = q.filter(multicast_group::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(multicast_group::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(multicast_group::dsl::name.like(format!("%{}%", search))); + } } q.order_by(multicast_group::dsl::name) @@ -238,41 +252,45 @@ pub async fn list( pub async fn add_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error> { let mut c = get_async_db_conn().await?; - c.build_transaction() - .run::<(), Error, _>(|c| { - Box::pin(async move { - let d: super::device::Device = device::dsl::device - .find(&dev_eui) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; + db_transaction::<(), Error, _>(&mut c, |c| { + Box::pin(async move { + let device_query = device::dsl::device.find(&dev_eui); + #[cfg(feature = "postgres")] + let device_query = device_query.for_update(); + let d: super::device::Device = device_query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, dev_eui.to_string()))?; - let mg: MulticastGroup = multicast_group::dsl::multicast_group - .find(&group_id) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, group_id.to_string()))?; + let fields_group_id = fields::Uuid::from(group_id); - if d.application_id != mg.application_id { - // Device not found within the same application. 
- return Err(Error::NotFound(dev_eui.to_string())); - } + let multicast_group_query = + multicast_group::dsl::multicast_group.find(&fields_group_id); + #[cfg(feature = "postgres")] + let multicast_group_query = multicast_group_query.for_update(); + let mg: MulticastGroup = multicast_group_query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, group_id.to_string()))?; - let _ = diesel::insert_into(multicast_group_device::table) - .values(( - multicast_group_device::multicast_group_id.eq(&group_id), - multicast_group_device::dev_eui.eq(&dev_eui), - multicast_group_device::created_at.eq(Utc::now()), - )) - .execute(c) - .await - .map_err(|e| Error::from_diesel(e, "".into()))?; - Ok(()) - }) + if d.application_id != mg.application_id { + // Device not found within the same application. + return Err(Error::NotFound(dev_eui.to_string())); + } + + let _ = diesel::insert_into(multicast_group_device::table) + .values(( + multicast_group_device::multicast_group_id.eq(&fields_group_id), + multicast_group_device::dev_eui.eq(&dev_eui), + multicast_group_device::created_at.eq(Utc::now()), + )) + .execute(c) + .await + .map_err(|e| Error::from_diesel(e, "".into()))?; + Ok(()) }) - .await?; + }) + .await?; info!(multicast_group_id = %group_id, dev_eui = %dev_eui, "Device added to multicast-group"); Ok(()) } @@ -280,7 +298,7 @@ pub async fn add_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error> { pub async fn remove_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error> { let ra = diesel::delete( multicast_group_device::dsl::multicast_group_device - .filter(multicast_group_device::multicast_group_id.eq(&group_id)) + .filter(multicast_group_device::multicast_group_id.eq(&fields::Uuid::from(group_id))) .filter(multicast_group_device::dev_eui.eq(&dev_eui)), ) .execute(&mut get_async_db_conn().await?) 
@@ -297,48 +315,53 @@ pub async fn remove_device(group_id: &Uuid, dev_eui: &EUI64) -> Result<(), Error pub async fn add_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Error> { let mut c = get_async_db_conn().await?; - c.build_transaction() - .run::<(), Error, _>(|c| { - Box::pin(async move { - let gw: super::gateway::Gateway = gateway::dsl::gateway - .find(&gateway_id) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?; + db_transaction::<(), Error, _>(&mut c, |c| { + Box::pin(async move { + let gateway_query = gateway::dsl::gateway.find(&gateway_id); + #[cfg(feature = "postgres")] + let gateway_query = gateway_query.for_update(); + let gw: super::gateway::Gateway = gateway_query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, gateway_id.to_string()))?; - let mg: MulticastGroup = multicast_group::dsl::multicast_group - .find(&group_id) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, group_id.to_string()))?; + let fields_group_id = fields::Uuid::from(group_id); - let a: super::application::Application = application::dsl::application - .find(&mg.application_id) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, mg.application_id.to_string()))?; + let multicast_group_query = + multicast_group::dsl::multicast_group.find(&fields_group_id); + #[cfg(feature = "postgres")] + let multicast_group_query = multicast_group_query.for_update(); + let mg: MulticastGroup = multicast_group_query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, group_id.to_string()))?; - if a.tenant_id != gw.tenant_id { - // Gateway and multicast-group are not under same tenant. 
- return Err(Error::NotFound(gateway_id.to_string())); - } + let application_query = application::dsl::application.find(&mg.application_id); + #[cfg(feature = "postgres")] + let application_query = application_query.for_update(); + let a: super::application::Application = application_query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, mg.application_id.to_string()))?; - let _ = diesel::insert_into(multicast_group_gateway::table) - .values(( - multicast_group_gateway::multicast_group_id.eq(&group_id), - multicast_group_gateway::gateway_id.eq(&gateway_id), - multicast_group_gateway::created_at.eq(Utc::now()), - )) - .execute(c) - .await - .map_err(|e| Error::from_diesel(e, "".into()))?; - Ok(()) - }) + if a.tenant_id != gw.tenant_id { + // Gateway and multicast-group are not under same tenant. + return Err(Error::NotFound(gateway_id.to_string())); + } + + let _ = diesel::insert_into(multicast_group_gateway::table) + .values(( + multicast_group_gateway::multicast_group_id.eq(&fields_group_id), + multicast_group_gateway::gateway_id.eq(&gateway_id), + multicast_group_gateway::created_at.eq(Utc::now()), + )) + .execute(c) + .await + .map_err(|e| Error::from_diesel(e, "".into()))?; + Ok(()) }) - .await?; + }) + .await?; info!(multicast_group_id = %group_id, gateway_id = %gateway_id, "Gateway added to multicast-group"); Ok(()) } @@ -346,7 +369,7 @@ pub async fn add_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Erro pub async fn remove_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), Error> { let ra = diesel::delete( multicast_group_gateway::dsl::multicast_group_gateway - .filter(multicast_group_gateway::multicast_group_id.eq(&group_id)) + .filter(multicast_group_gateway::multicast_group_id.eq(&fields::Uuid::from(group_id))) .filter(multicast_group_gateway::gateway_id.eq(&gateway_id)), ) .execute(&mut get_async_db_conn().await?) 
@@ -364,7 +387,7 @@ pub async fn remove_gateway(group_id: &Uuid, gateway_id: &EUI64) -> Result<(), E pub async fn get_dev_euis(group_id: &Uuid) -> Result, Error> { multicast_group_device::dsl::multicast_group_device .select(multicast_group_device::dev_eui) - .filter(multicast_group_device::dsl::multicast_group_id.eq(&group_id)) + .filter(multicast_group_device::dsl::multicast_group_id.eq(&fields::Uuid::from(group_id))) .load(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, group_id.to_string())) @@ -373,7 +396,7 @@ pub async fn get_dev_euis(group_id: &Uuid) -> Result, Error> { pub async fn get_gateway_ids(group_id: &Uuid) -> Result, Error> { multicast_group_gateway::dsl::multicast_group_gateway .select(multicast_group_gateway::gateway_id) - .filter(multicast_group_gateway::dsl::multicast_group_id.eq(&group_id)) + .filter(multicast_group_gateway::dsl::multicast_group_id.eq(&fields::Uuid::from(group_id))) .load(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, group_id.to_string())) @@ -390,174 +413,185 @@ pub async fn enqueue( qi.validate()?; let mut c = get_async_db_conn().await?; let conf = config::get(); - let (ids, f_cnt) = c - .build_transaction() - .run::<(Vec, u32), Error, _>(|c| { - Box::pin(async move { - let mut ids: Vec = Vec::new(); - let mg: MulticastGroup = multicast_group::dsl::multicast_group - .find(&qi.multicast_group_id) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?; + let (ids, f_cnt) = db_transaction::<(Vec, u32), Error, _>(&mut c, |c| { + Box::pin(async move { + let mut ids: Vec = Vec::new(); + let query = multicast_group::dsl::multicast_group.find(&qi.multicast_group_id); + #[cfg(feature = "postgres")] + let query = query.for_update(); + let mg: MulticastGroup = query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?; - match mg.group_type.as_ref() { - "B" => { - // get ping 
nb - let ping_nb = 1 << mg.class_b_ping_slot_nb_k as usize; + match mg.group_type.as_ref() { + "B" => { + // get ping nb + let ping_nb = 1 << mg.class_b_ping_slot_nb_k as usize; - // get max. gps epoch time. - let res: Option = - multicast_group_queue_item::dsl::multicast_group_queue_item - .select(dsl::max( - multicast_group_queue_item::dsl::emit_at_time_since_gps_epoch, - )) - .filter( - multicast_group_queue_item::dsl::multicast_group_id - .eq(&qi.multicast_group_id), - ) - .first(c) - .await?; + // get max. gps epoch time. + let res: Option = + multicast_group_queue_item::dsl::multicast_group_queue_item + .select(dsl::max( + multicast_group_queue_item::dsl::emit_at_time_since_gps_epoch, + )) + .filter( + multicast_group_queue_item::dsl::multicast_group_id + .eq(&qi.multicast_group_id), + ) + .first(c) + .await?; - // Get timestamp after which we must generate the next ping-slot. - let ping_slot_after_gps_time = match res { - Some(v) => Duration::try_milliseconds(v).unwrap_or_default(), - None => (Utc::now() - + Duration::from_std( - conf.network.scheduler.multicast_class_b_margin, - ) + // Get timestamp after which we must generate the next ping-slot. 
+ let ping_slot_after_gps_time = match res { + Some(v) => Duration::try_milliseconds(v).unwrap_or_default(), + None => (Utc::now() + + Duration::from_std(conf.network.scheduler.multicast_class_b_margin) .unwrap()) - .to_gps_time(), + .to_gps_time(), + }; + + let emit_at_time_since_gps_epoch = classb::get_next_ping_slot_after( + ping_slot_after_gps_time, + &mg.mc_addr, + ping_nb, + )?; + + let scheduler_run_after_ts = emit_at_time_since_gps_epoch.to_date_time() + - Duration::from_std(2 * conf.network.scheduler.interval).unwrap(); + + for gateway_id in gateway_ids { + let qi = MulticastGroupQueueItem { + scheduler_run_after: scheduler_run_after_ts, + multicast_group_id: mg.id.into(), + gateway_id: *gateway_id, + f_cnt: mg.f_cnt, + f_port: qi.f_port, + data: qi.data.clone(), + emit_at_time_since_gps_epoch: Some( + emit_at_time_since_gps_epoch.num_milliseconds(), + ), + ..Default::default() }; - let emit_at_time_since_gps_epoch = classb::get_next_ping_slot_after( - ping_slot_after_gps_time, - &mg.mc_addr, - ping_nb, - )?; - - let scheduler_run_after_ts = emit_at_time_since_gps_epoch.to_date_time() - - Duration::from_std(2 * conf.network.scheduler.interval).unwrap(); - - for gateway_id in gateway_ids { - let qi = MulticastGroupQueueItem { - scheduler_run_after: scheduler_run_after_ts, - multicast_group_id: mg.id, - gateway_id: *gateway_id, - f_cnt: mg.f_cnt, - f_port: qi.f_port, - data: qi.data.clone(), - emit_at_time_since_gps_epoch: Some( - emit_at_time_since_gps_epoch.num_milliseconds(), - ), - ..Default::default() - }; - - let qi: MulticastGroupQueueItem = - diesel::insert_into(multicast_group_queue_item::table) - .values(&qi) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, mg.id.to_string()))?; - ids.push(qi.id); - } + let qi: MulticastGroupQueueItem = + diesel::insert_into(multicast_group_queue_item::table) + .values(&qi) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, mg.id.to_string()))?; + ids.push(qi.id.into()); } - "C" => { - // 
Get max. scheduler_run_after timestamp. - let res: Option> = - multicast_group_queue_item::dsl::multicast_group_queue_item - .select(dsl::max( - multicast_group_queue_item::dsl::scheduler_run_after, - )) - .filter( - multicast_group_queue_item::dsl::multicast_group_id - .eq(&qi.multicast_group_id), - ) - .first(c) - .await?; + } + "C" => { + // Get max. scheduler_run_after timestamp. - let mut scheduler_run_after_ts = match res { - Some(v) => { - v + Duration::from_std( - conf.network.scheduler.multicast_class_c_margin, - ) + #[cfg(feature = "postgres")] + let res: Option> = + multicast_group_queue_item::dsl::multicast_group_queue_item + .select(dsl::max( + multicast_group_queue_item::dsl::scheduler_run_after, + )) + .filter( + multicast_group_queue_item::dsl::multicast_group_id + .eq(&qi.multicast_group_id), + ) + .first(c) + .await?; + + #[cfg(feature = "sqlite")] + let res: Option> = + multicast_group_queue_item::dsl::multicast_group_queue_item + .select(multicast_group_queue_item::dsl::scheduler_run_after) + .filter( + multicast_group_queue_item::dsl::multicast_group_id + .eq(&qi.multicast_group_id), + ) + .get_results(c) + .await? + .into_iter() + // fallback on code max instead of DB builtin + .max(); + + let mut scheduler_run_after_ts = match res { + Some(v) => { + v + Duration::from_std(conf.network.scheduler.multicast_class_c_margin) .unwrap() - } - None => Utc::now(), + } + None => Utc::now(), + }; + + let emit_at_time_since_gps_epoch = if mg.class_c_scheduling_type + == fields::MulticastGroupSchedulingType::GPS_TIME + { + // Increment with margin as requesting the gateway to send the + // downlink 'now' will result in a too late error from the gateway. 
+ scheduler_run_after_ts += + Duration::from_std(conf.network.scheduler.multicast_class_c_margin) + .unwrap(); + Some(scheduler_run_after_ts.to_gps_time().num_milliseconds()) + } else { + None + }; + + for gateway_id in gateway_ids { + let qi = MulticastGroupQueueItem { + scheduler_run_after: scheduler_run_after_ts, + multicast_group_id: mg.id.into(), + gateway_id: *gateway_id, + f_cnt: mg.f_cnt, + f_port: qi.f_port, + data: qi.data.clone(), + emit_at_time_since_gps_epoch, + ..Default::default() }; - let emit_at_time_since_gps_epoch = if mg.class_c_scheduling_type - == fields::MulticastGroupSchedulingType::GPS_TIME + let qi: MulticastGroupQueueItem = + diesel::insert_into(multicast_group_queue_item::table) + .values(&qi) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, mg.id.to_string()))?; + ids.push(qi.id.into()); + + if mg.class_c_scheduling_type == fields::MulticastGroupSchedulingType::DELAY { - // Increment with margin as requesting the gateway to send the - // downlink 'now' will result in a too late error from the gateway. + // Increment timing for each gateway to avoid colissions. scheduler_run_after_ts += Duration::from_std(conf.network.scheduler.multicast_class_c_margin) .unwrap(); - Some(scheduler_run_after_ts.to_gps_time().num_milliseconds()) - } else { - None - }; - - for gateway_id in gateway_ids { - let qi = MulticastGroupQueueItem { - scheduler_run_after: scheduler_run_after_ts, - multicast_group_id: mg.id, - gateway_id: *gateway_id, - f_cnt: mg.f_cnt, - f_port: qi.f_port, - data: qi.data.clone(), - emit_at_time_since_gps_epoch, - ..Default::default() - }; - - let qi: MulticastGroupQueueItem = - diesel::insert_into(multicast_group_queue_item::table) - .values(&qi) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, mg.id.to_string()))?; - ids.push(qi.id); - - if mg.class_c_scheduling_type - == fields::MulticastGroupSchedulingType::DELAY - { - // Increment timing for each gateway to avoid colissions. 
- scheduler_run_after_ts += Duration::from_std( - conf.network.scheduler.multicast_class_c_margin, - ) - .unwrap(); - } } } - _ => { - return Err(Error::Anyhow(anyhow!( - "Invalid multicast group_type: {}", - mg.group_type - ))); - } } + _ => { + return Err(Error::Anyhow(anyhow!( + "Invalid multicast group_type: {}", + mg.group_type + ))); + } + } - diesel::update(multicast_group::dsl::multicast_group.find(&qi.multicast_group_id)) - .set(multicast_group::f_cnt.eq(mg.f_cnt + 1)) - .execute(c) - .await - .map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?; + diesel::update(multicast_group::dsl::multicast_group.find(&qi.multicast_group_id)) + .set(multicast_group::f_cnt.eq(mg.f_cnt + 1)) + .execute(c) + .await + .map_err(|e| Error::from_diesel(e, qi.multicast_group_id.to_string()))?; - // Return value before it was incremented - Ok((ids, mg.f_cnt as u32)) - }) + // Return value before it was incremented + Ok((ids, mg.f_cnt as u32)) }) - .await?; + }) + .await?; info!(multicast_group_id = %qi.multicast_group_id, f_cnt = f_cnt, "Multicast-group queue item created"); Ok((ids, f_cnt)) } pub async fn delete_queue_item(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(multicast_group_queue_item::dsl::multicast_group_queue_item.find(&id)) - .execute(&mut get_async_db_conn().await?) - .await?; + let ra = diesel::delete( + multicast_group_queue_item::dsl::multicast_group_queue_item.find(&fields::Uuid::from(id)), + ) + .execute(&mut get_async_db_conn().await?) 
+ .await?; if ra == 0 { return Err(Error::NotFound(id.to_string())); } @@ -567,8 +601,10 @@ pub async fn delete_queue_item(id: &Uuid) -> Result<(), Error> { pub async fn flush_queue(multicast_group_id: &Uuid) -> Result<(), Error> { let _ = diesel::delete( - multicast_group_queue_item::dsl::multicast_group_queue_item - .filter(multicast_group_queue_item::multicast_group_id.eq(&multicast_group_id)), + multicast_group_queue_item::dsl::multicast_group_queue_item.filter( + multicast_group_queue_item::multicast_group_id + .eq(&fields::Uuid::from(multicast_group_id)), + ), ) .execute(&mut get_async_db_conn().await?) .await @@ -579,7 +615,10 @@ pub async fn flush_queue(multicast_group_id: &Uuid) -> Result<(), Error> { pub async fn get_queue(multicast_group_id: &Uuid) -> Result, Error> { multicast_group_queue_item::dsl::multicast_group_queue_item - .filter(multicast_group_queue_item::dsl::multicast_group_id.eq(&multicast_group_id)) + .filter( + multicast_group_queue_item::dsl::multicast_group_id + .eq(&fields::Uuid::from(multicast_group_id)), + ) .order_by(multicast_group_queue_item::created_at) .load(&mut get_async_db_conn().await?) 
.await @@ -588,11 +627,30 @@ pub async fn get_queue(multicast_group_id: &Uuid) -> Result Result> { let mut c = get_async_db_conn().await?; - c.build_transaction() - .run::, Error, _>(|c| { + db_transaction::, Error, _>(&mut c, |c| { Box::pin(async move { let conf = config::get(); - diesel::sql_query( + diesel::sql_query(if cfg!(feature = "sqlite") { + r#" + update + multicast_group_queue_item + set + scheduler_run_after = ?3 + where + id in ( + select + id + from + multicast_group_queue_item + where + scheduler_run_after <= ?2 + order by + created_at + limit ?1 + ) + returning * + "# + } else { r#" update multicast_group_queue_item @@ -614,11 +672,11 @@ pub async fn get_schedulable_queue_items(limit: usize) -> Result(limit as i32) - .bind::(Utc::now()) - .bind::( + .bind::(Utc::now()) + .bind::( Utc::now() + Duration::from_std(2 * conf.network.scheduler.interval).unwrap(), ) .load(c) @@ -638,7 +696,7 @@ pub mod test { pub async fn get_queue_item(id: &Uuid) -> Result { multicast_group_queue_item::dsl::multicast_group_queue_item - .find(&id) + .find(&fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) 
.await .map_err(|e| Error::from_diesel(e, id.to_string())) @@ -690,7 +748,7 @@ pub mod test { .unwrap(); // get - let mg_get = get(&mg.id).await.unwrap(); + let mg_get = get(&mg.id.into()).await.unwrap(); assert_eq!(mg, mg_get); // update @@ -698,7 +756,7 @@ pub mod test { mg.group_type = "B".into(); mg.class_b_ping_slot_nb_k = 4; mg = update(mg).await.unwrap(); - let mg_get = get(&mg.id).await.unwrap(); + let mg_get = get(&mg.id.into()).await.unwrap(); assert_eq!(mg, mg_get); // get count and list @@ -735,7 +793,7 @@ pub mod test { }, FilterTest { filters: Filters { - application_id: Some(app.id), + application_id: Some(app.id.into()), search: None, }, groups: vec![&mg], @@ -770,8 +828,8 @@ pub mod test { } // delete - delete(&mg.id).await.unwrap(); - assert!(delete(&mg.id).await.is_err()); + delete(&mg.id.into()).await.unwrap(); + assert!(delete(&mg.id.into()).await.is_err()); } #[tokio::test] @@ -828,15 +886,15 @@ pub mod test { .unwrap(); // add device - add_device(&mg.id, &d.dev_eui).await.unwrap(); + add_device(&mg.id.into(), &d.dev_eui).await.unwrap(); // get group deveuis - let dev_euis = get_dev_euis(&mg.id).await.unwrap(); + let dev_euis = get_dev_euis(&mg.id.into()).await.unwrap(); assert_eq!(vec![d.dev_eui], dev_euis); // remove device - remove_device(&mg.id, &d.dev_eui).await.unwrap(); - let dev_euis = get_dev_euis(&mg.id).await.unwrap(); + remove_device(&mg.id.into(), &d.dev_eui).await.unwrap(); + let dev_euis = get_dev_euis(&mg.id.into()).await.unwrap(); assert!(dev_euis.is_empty()); } @@ -886,15 +944,15 @@ pub mod test { .unwrap(); // add gateway - add_gateway(&mg.id, &gw.gateway_id).await.unwrap(); + add_gateway(&mg.id.into(), &gw.gateway_id).await.unwrap(); // get gateway ids - let gw_ids = get_gateway_ids(&mg.id).await.unwrap(); + let gw_ids = get_gateway_ids(&mg.id.into()).await.unwrap(); assert_eq!(vec![gw.gateway_id], gw_ids); // remove gateway - remove_gateway(&mg.id, &gw.gateway_id).await.unwrap(); - let gw_ids = 
get_gateway_ids(&mg.id).await.unwrap(); + remove_gateway(&mg.id.into(), &gw.gateway_id).await.unwrap(); + let gw_ids = get_gateway_ids(&mg.id.into()).await.unwrap(); assert!(gw_ids.is_empty()); } @@ -949,7 +1007,7 @@ pub mod test { // invalid f_port assert!(enqueue( MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), gateway_id: gw.gateway_id, f_cnt: 1, f_port: 0, @@ -963,7 +1021,7 @@ pub mod test { assert!(enqueue( MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), gateway_id: gw.gateway_id, f_cnt: 1, f_port: 256, @@ -978,7 +1036,7 @@ pub mod test { // Enqueue (Class-C) (delay) let (ids, f_cnt) = enqueue( MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), gateway_id: gw.gateway_id, f_cnt: 1, f_port: 2, @@ -1008,7 +1066,7 @@ pub mod test { let mut mg = update(mg).await.unwrap(); let (ids, f_cnt) = enqueue( MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), gateway_id: gw.gateway_id, f_cnt: 1, f_port: 2, @@ -1035,7 +1093,7 @@ pub mod test { let mg = update(mg).await.unwrap(); let (ids, f_cnt) = enqueue( MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), gateway_id: gw.gateway_id, f_cnt: 1, f_port: 2, @@ -1057,7 +1115,7 @@ pub mod test { assert_eq!(vec![3, 2, 1], qi_get.data); // flush queue - flush_queue(&mg.id).await.unwrap(); + flush_queue(&mg.id.into()).await.unwrap(); assert!(delete_queue_item(&ids[0]).await.is_err()); } } diff --git a/chirpstack/src/storage/postgres.rs b/chirpstack/src/storage/postgres.rs new file mode 100644 index 00000000..90e6c43e --- /dev/null +++ b/chirpstack/src/storage/postgres.rs @@ -0,0 +1,115 @@ +use std::sync::RwLock; +use std::time::Instant; + +use anyhow::Result; +use tracing::{error, info}; + +use crate::monitoring::prometheus; +use diesel::{ConnectionError, ConnectionResult}; +use diesel_async::pooled_connection::deadpool::{Object as 
DeadpoolObject, Pool as DeadpoolPool}; +use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig}; +use diesel_async::{AsyncConnection, AsyncPgConnection}; +use futures::{future::BoxFuture, FutureExt}; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use scoped_futures::ScopedBoxFuture; + +use crate::config; + +use crate::helpers::tls::get_root_certs; + +pub type AsyncPgPool = DeadpoolPool; +pub type AsyncPgPoolConnection = DeadpoolObject; + +lazy_static! { + static ref ASYNC_PG_POOL: RwLock> = RwLock::new(None); + static ref STORAGE_PG_CONN_GET: Histogram = { + let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12)); + prometheus::register( + "storage_pg_conn_get_duration_seconds", + "Time between requesting a PostgreSQL connection and the connection-pool returning it", + histogram.clone(), + ); + histogram + }; +} + +pub fn setup(conf: &config::Postgresql) -> Result<()> { + info!("Setting up PostgreSQL connection pool"); + let mut config = ManagerConfig::default(); + config.custom_setup = Box::new(pg_establish_connection); + let mgr = AsyncDieselConnectionManager::::new_with_config(&conf.dsn, config); + let pool = DeadpoolPool::builder(mgr) + .max_size(conf.max_open_connections as usize) + .build()?; + set_async_db_pool(pool); + + Ok(()) +} + +// Source: +// https://github.com/weiznich/diesel_async/blob/main/examples/postgres/pooled-with-rustls/src/main.rs +fn pg_establish_connection(config: &str) -> BoxFuture> { + let fut = async { + let conf = config::get(); + + let root_certs = get_root_certs(if conf.postgresql.ca_cert.is_empty() { + None + } else { + Some(conf.postgresql.ca_cert.clone()) + }) + .map_err(|e| ConnectionError::BadConnection(e.to_string()))?; + let rustls_config = rustls::ClientConfig::builder() + .with_root_certificates(root_certs) + .with_no_client_auth(); + let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config); + let (client, conn) = 
tokio_postgres::connect(config, tls) + .await + .map_err(|e| ConnectionError::BadConnection(e.to_string()))?; + tokio::spawn(async move { + if let Err(e) = conn.await { + error!(error = %e, "PostgreSQL connection error"); + } + }); + AsyncPgConnection::try_from(client).await + }; + fut.boxed() +} + +fn get_async_db_pool() -> Result { + let pool_r = ASYNC_PG_POOL.read().unwrap(); + let pool: AsyncPgPool = pool_r + .as_ref() + .ok_or_else(|| anyhow!("PostgreSQL connection pool is not initialized"))? + .clone(); + Ok(pool) +} + +pub async fn get_async_db_conn() -> Result { + let pool = get_async_db_pool()?; + + let start = Instant::now(); + let res = pool.get().await?; + + STORAGE_PG_CONN_GET.observe(start.elapsed().as_secs_f64()); + + Ok(res) +} + +pub async fn db_transaction<'a, R, E, F>( + conn: &mut AsyncPgPoolConnection, + callback: F, +) -> Result +where + F: for<'r> FnOnce(&'r mut AsyncPgPoolConnection) -> ScopedBoxFuture<'a, 'r, Result> + + Send + + 'a, + E: From + Send + 'a, + R: Send + 'a, +{ + conn.transaction(callback).await +} + +fn set_async_db_pool(p: AsyncPgPool) { + let mut pool_w = ASYNC_PG_POOL.write().unwrap(); + *pool_w = Some(p); +} diff --git a/chirpstack/src/storage/relay.rs b/chirpstack/src/storage/relay.rs index c858bb6a..b82553d7 100644 --- a/chirpstack/src/storage/relay.rs +++ b/chirpstack/src/storage/relay.rs @@ -8,7 +8,7 @@ use uuid::Uuid; use lrwn::{DevAddr, EUI64}; use super::schema::{device, device_profile, relay_device}; -use super::{device::Device, error::Error, get_async_db_conn}; +use super::{db_transaction, device::Device, error::Error, fields, get_async_db_conn}; // This is set to 15, because the FilterList must contain a "catch-all" record to filter all // uplinks that do not match the remaining records. 
This means that we can use 16 - 1 FilterList @@ -50,7 +50,7 @@ pub async fn get_relay_count(filters: &RelayFilters) -> Result { .into_boxed(); if let Some(application_id) = &filters.application_id { - q = q.filter(device::dsl::application_id.eq(application_id)); + q = q.filter(device::dsl::application_id.eq(fields::Uuid::from(application_id))); } Ok(q.first(&mut get_async_db_conn().await?).await?) @@ -68,7 +68,7 @@ pub async fn list_relays( .into_boxed(); if let Some(application_id) = &filters.application_id { - q = q.filter(device::dsl::application_id.eq(application_id)); + q = q.filter(device::dsl::application_id.eq(fields::Uuid::from(application_id))); } q.order_by(device::dsl::name) @@ -128,86 +128,86 @@ pub async fn list_devices( pub async fn add_device(relay_dev_eui: EUI64, device_dev_eui: EUI64) -> Result<(), Error> { let mut c = get_async_db_conn().await?; - c.build_transaction() - .run::<(), Error, _>(|c| { - Box::pin(async move { - // We lock the relay device to avoid race-conditions in the validation. - let rd: Device = device::dsl::device - .find(&relay_dev_eui) - .for_update() - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, relay_dev_eui.to_string()))?; + db_transaction::<(), Error, _>(&mut c, |c| { + Box::pin(async move { + let query = device::dsl::device.find(&relay_dev_eui); + // We lock the relay device to avoid race-conditions in the validation. + #[cfg(feature = "postgres")] + let query = query.for_update(); + let rd: Device = query + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, relay_dev_eui.to_string()))?; - // Is the given relay_dev_eui a Relay? - let rdp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile - .find(&rd.device_profile_id) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, rd.device_profile_id.to_string()))?; - if !rdp.is_relay { - return Err(Error::Validation("Device is not a relay".to_string())); - } + // Is the given relay_dev_eui a Relay? 
+ let rdp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile + .find(&rd.device_profile_id) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, rd.device_profile_id.to_string()))?; + if !rdp.is_relay { + return Err(Error::Validation("Device is not a relay".to_string())); + } - // Validate that relay and device are under the same application. - let d: Device = device::dsl::device - .find(&device_dev_eui) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, device_dev_eui.to_string()))?; + // Validate that relay and device are under the same application. + let d: Device = device::dsl::device + .find(&device_dev_eui) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, device_dev_eui.to_string()))?; - if rd.application_id != d.application_id { - return Err(Error::Validation( - "Relay and device must be under the same application".into(), - )); - } + if rd.application_id != d.application_id { + return Err(Error::Validation( + "Relay and device must be under the same application".into(), + )); + } - // Validate that the relay and device are under the same region. - let dp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile - .find(&d.device_profile_id) - .get_result(c) - .await - .map_err(|e| Error::from_diesel(e, d.device_profile_id.to_string()))?; - if rdp.region != dp.region { - return Err(Error::Validation( - "Relay and device must be under the same region".into(), - )); - } + // Validate that the relay and device are under the same region. + let dp: super::device_profile::DeviceProfile = device_profile::dsl::device_profile + .find(&d.device_profile_id) + .get_result(c) + .await + .map_err(|e| Error::from_diesel(e, d.device_profile_id.to_string()))?; + if rdp.region != dp.region { + return Err(Error::Validation( + "Relay and device must be under the same region".into(), + )); + } - // Validate that the device is not a relay. 
- if dp.is_relay { - return Err(Error::Validation("Can not add relay to a relay".into())); - } + // Validate that the device is not a relay. + if dp.is_relay { + return Err(Error::Validation("Can not add relay to a relay".into())); + } - // Validate max. number of devices. - let count: i64 = relay_device::dsl::relay_device - .select(dsl::count_star()) - .filter(relay_device::dsl::relay_dev_eui.eq(&relay_dev_eui)) - .first(c) - .await - .map_err(|e| Error::from_diesel(e, "".into()))?; + // Validate max. number of devices. + let count: i64 = relay_device::dsl::relay_device + .select(dsl::count_star()) + .filter(relay_device::dsl::relay_dev_eui.eq(&relay_dev_eui)) + .first(c) + .await + .map_err(|e| Error::from_diesel(e, "".into()))?; - if count > RELAY_MAX_DEVICES { - return Err(Error::Validation(format!( - "Max number of devices that can be added to a relay is {}", - RELAY_MAX_DEVICES - ))); - } + if count > RELAY_MAX_DEVICES { + return Err(Error::Validation(format!( + "Max number of devices that can be added to a relay is {}", + RELAY_MAX_DEVICES + ))); + } - let _ = diesel::insert_into(relay_device::table) - .values(( - relay_device::relay_dev_eui.eq(&relay_dev_eui), - relay_device::dev_eui.eq(&device_dev_eui), - relay_device::created_at.eq(Utc::now()), - )) - .execute(c) - .await - .map_err(|e| Error::from_diesel(e, "".into()))?; + let _ = diesel::insert_into(relay_device::table) + .values(( + relay_device::relay_dev_eui.eq(&relay_dev_eui), + relay_device::dev_eui.eq(&device_dev_eui), + relay_device::created_at.eq(Utc::now()), + )) + .execute(c) + .await + .map_err(|e| Error::from_diesel(e, "".into()))?; - Ok(()) - }) + Ok(()) }) - .await?; + }) + .await?; info!(relay_dev_eui = %relay_dev_eui, device_dev_eui = %device_dev_eui, "Device added to relay"); @@ -256,35 +256,35 @@ pub mod test { let d = storage::device::test::create_device( EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]), - dp.id, + dp.id.into(), None, ) .await; let d_relay = 
storage::device::test::create_device( EUI64::from_be_bytes([2, 2, 3, 4, 5, 6, 7, 8]), - dp_relay.id, - Some(d.application_id), + dp_relay.id.into(), + Some(d.application_id.into()), ) .await; let d_other_app = storage::device::test::create_device( EUI64::from_be_bytes([3, 2, 3, 4, 5, 6, 7, 8]), - dp.id, + dp.id.into(), None, ) .await; let d_other_same_app = storage::device::test::create_device( EUI64::from_be_bytes([4, 2, 3, 4, 5, 6, 7, 8]), - dp.id, - Some(d.application_id), + dp.id.into(), + Some(d.application_id.into()), ) .await; // relay count let relay_count = get_relay_count(&RelayFilters { - application_id: Some(d_relay.application_id), + application_id: Some(d_relay.application_id.into()), }) .await .unwrap(); @@ -295,7 +295,7 @@ pub mod test { 10, 0, &RelayFilters { - application_id: Some(d_relay.application_id), + application_id: Some(d_relay.application_id.into()), }, ) .await diff --git a/chirpstack/src/storage/schema.rs b/chirpstack/src/storage/schema.rs index 22c514dd..4b1b0efa 100644 --- a/chirpstack/src/storage/schema.rs +++ b/chirpstack/src/storage/schema.rs @@ -1,391 +1,4 @@ -// @generated automatically by Diesel CLI. - -diesel::table! { - api_key (id) { - id -> Uuid, - created_at -> Timestamptz, - #[max_length = 100] - name -> Varchar, - is_admin -> Bool, - tenant_id -> Nullable, - } -} - -diesel::table! { - application (id) { - id -> Uuid, - tenant_id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - #[max_length = 100] - name -> Varchar, - description -> Text, - mqtt_tls_cert -> Nullable, - tags -> Jsonb, - } -} - -diesel::table! { - application_integration (application_id, kind) { - application_id -> Uuid, - #[max_length = 20] - kind -> Varchar, - created_at -> Timestamptz, - updated_at -> Timestamptz, - configuration -> Jsonb, - } -} - -diesel::table! 
{ - device (dev_eui) { - dev_eui -> Bytea, - application_id -> Uuid, - device_profile_id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - last_seen_at -> Nullable, - scheduler_run_after -> Nullable, - #[max_length = 100] - name -> Varchar, - description -> Text, - external_power_source -> Bool, - battery_level -> Nullable, - margin -> Nullable, - dr -> Nullable, - latitude -> Nullable, - longitude -> Nullable, - altitude -> Nullable, - dev_addr -> Nullable, - #[max_length = 1] - enabled_class -> Bpchar, - skip_fcnt_check -> Bool, - is_disabled -> Bool, - tags -> Jsonb, - variables -> Jsonb, - join_eui -> Bytea, - secondary_dev_addr -> Nullable, - device_session -> Nullable, - } -} - -diesel::table! { - device_keys (dev_eui) { - dev_eui -> Bytea, - created_at -> Timestamptz, - updated_at -> Timestamptz, - nwk_key -> Bytea, - app_key -> Bytea, - dev_nonces -> Array>, - join_nonce -> Int4, - } -} - -diesel::table! { - device_profile (id) { - id -> Uuid, - tenant_id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - #[max_length = 100] - name -> Varchar, - #[max_length = 10] - region -> Varchar, - #[max_length = 10] - mac_version -> Varchar, - #[max_length = 20] - reg_params_revision -> Varchar, - #[max_length = 100] - adr_algorithm_id -> Varchar, - #[max_length = 20] - payload_codec_runtime -> Varchar, - uplink_interval -> Int4, - device_status_req_interval -> Int4, - supports_otaa -> Bool, - supports_class_b -> Bool, - supports_class_c -> Bool, - class_b_timeout -> Int4, - class_b_ping_slot_nb_k -> Int4, - class_b_ping_slot_dr -> Int2, - class_b_ping_slot_freq -> Int8, - class_c_timeout -> Int4, - abp_rx1_delay -> Int2, - abp_rx1_dr_offset -> Int2, - abp_rx2_dr -> Int2, - abp_rx2_freq -> Int8, - tags -> Jsonb, - payload_codec_script -> Text, - flush_queue_on_activate -> Bool, - description -> Text, - measurements -> Jsonb, - auto_detect_measurements -> Bool, - #[max_length = 100] - region_config_id -> Nullable, - is_relay -> 
Bool, - is_relay_ed -> Bool, - relay_ed_relay_only -> Bool, - relay_enabled -> Bool, - relay_cad_periodicity -> Int2, - relay_default_channel_index -> Int2, - relay_second_channel_freq -> Int8, - relay_second_channel_dr -> Int2, - relay_second_channel_ack_offset -> Int2, - relay_ed_activation_mode -> Int2, - relay_ed_smart_enable_level -> Int2, - relay_ed_back_off -> Int2, - relay_ed_uplink_limit_bucket_size -> Int2, - relay_ed_uplink_limit_reload_rate -> Int2, - relay_join_req_limit_reload_rate -> Int2, - relay_notify_limit_reload_rate -> Int2, - relay_global_uplink_limit_reload_rate -> Int2, - relay_overall_limit_reload_rate -> Int2, - relay_join_req_limit_bucket_size -> Int2, - relay_notify_limit_bucket_size -> Int2, - relay_global_uplink_limit_bucket_size -> Int2, - relay_overall_limit_bucket_size -> Int2, - allow_roaming -> Bool, - rx1_delay -> Int2, - } -} - -diesel::table! { - device_profile_template (id) { - id -> Text, - created_at -> Timestamptz, - updated_at -> Timestamptz, - #[max_length = 100] - name -> Varchar, - description -> Text, - #[max_length = 100] - vendor -> Varchar, - #[max_length = 100] - firmware -> Varchar, - #[max_length = 10] - region -> Varchar, - #[max_length = 10] - mac_version -> Varchar, - #[max_length = 20] - reg_params_revision -> Varchar, - #[max_length = 100] - adr_algorithm_id -> Varchar, - #[max_length = 20] - payload_codec_runtime -> Varchar, - payload_codec_script -> Text, - uplink_interval -> Int4, - device_status_req_interval -> Int4, - flush_queue_on_activate -> Bool, - supports_otaa -> Bool, - supports_class_b -> Bool, - supports_class_c -> Bool, - class_b_timeout -> Int4, - class_b_ping_slot_nb_k -> Int4, - class_b_ping_slot_dr -> Int2, - class_b_ping_slot_freq -> Int8, - class_c_timeout -> Int4, - abp_rx1_delay -> Int2, - abp_rx1_dr_offset -> Int2, - abp_rx2_dr -> Int2, - abp_rx2_freq -> Int8, - tags -> Jsonb, - measurements -> Jsonb, - auto_detect_measurements -> Bool, - } -} - -diesel::table! 
{ - device_queue_item (id) { - id -> Uuid, - dev_eui -> Bytea, - created_at -> Timestamptz, - f_port -> Int2, - confirmed -> Bool, - data -> Bytea, - is_pending -> Bool, - f_cnt_down -> Nullable, - timeout_after -> Nullable, - is_encrypted -> Bool, - } -} - -diesel::table! { - gateway (gateway_id) { - gateway_id -> Bytea, - tenant_id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - last_seen_at -> Nullable, - #[max_length = 100] - name -> Varchar, - description -> Text, - latitude -> Float8, - longitude -> Float8, - altitude -> Float4, - stats_interval_secs -> Int4, - tls_certificate -> Nullable, - tags -> Jsonb, - properties -> Jsonb, - } -} - -diesel::table! { - multicast_group (id) { - id -> Uuid, - application_id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - #[max_length = 100] - name -> Varchar, - #[max_length = 10] - region -> Varchar, - mc_addr -> Bytea, - mc_nwk_s_key -> Bytea, - mc_app_s_key -> Bytea, - f_cnt -> Int8, - #[max_length = 1] - group_type -> Bpchar, - dr -> Int2, - frequency -> Int8, - class_b_ping_slot_nb_k -> Int2, - #[max_length = 20] - class_c_scheduling_type -> Varchar, - } -} - -diesel::table! { - multicast_group_device (multicast_group_id, dev_eui) { - multicast_group_id -> Uuid, - dev_eui -> Bytea, - created_at -> Timestamptz, - } -} - -diesel::table! { - multicast_group_gateway (multicast_group_id, gateway_id) { - multicast_group_id -> Uuid, - gateway_id -> Bytea, - created_at -> Timestamptz, - } -} - -diesel::table! { - multicast_group_queue_item (id) { - id -> Uuid, - created_at -> Timestamptz, - scheduler_run_after -> Timestamptz, - multicast_group_id -> Uuid, - gateway_id -> Bytea, - f_cnt -> Int8, - f_port -> Int2, - data -> Bytea, - emit_at_time_since_gps_epoch -> Nullable, - } -} - -diesel::table! { - relay_device (relay_dev_eui, dev_eui) { - relay_dev_eui -> Bytea, - dev_eui -> Bytea, - created_at -> Timestamptz, - } -} - -diesel::table! 
{ - relay_gateway (tenant_id, relay_id) { - tenant_id -> Uuid, - relay_id -> Bytea, - created_at -> Timestamptz, - updated_at -> Timestamptz, - last_seen_at -> Nullable, - #[max_length = 100] - name -> Varchar, - description -> Text, - stats_interval_secs -> Int4, - #[max_length = 100] - region_config_id -> Varchar, - } -} - -diesel::table! { - tenant (id) { - id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - #[max_length = 100] - name -> Varchar, - description -> Text, - can_have_gateways -> Bool, - max_device_count -> Int4, - max_gateway_count -> Int4, - private_gateways_up -> Bool, - private_gateways_down -> Bool, - tags -> Jsonb, - } -} - -diesel::table! { - tenant_user (tenant_id, user_id) { - tenant_id -> Uuid, - user_id -> Uuid, - created_at -> Timestamptz, - updated_at -> Timestamptz, - is_admin -> Bool, - is_device_admin -> Bool, - is_gateway_admin -> Bool, - } -} - -diesel::table! { - user (id) { - id -> Uuid, - external_id -> Nullable, - created_at -> Timestamptz, - updated_at -> Timestamptz, - is_admin -> Bool, - is_active -> Bool, - email -> Text, - email_verified -> Bool, - #[max_length = 200] - password_hash -> Varchar, - note -> Text, - } -} - -diesel::joinable!(api_key -> tenant (tenant_id)); -diesel::joinable!(application -> tenant (tenant_id)); -diesel::joinable!(application_integration -> application (application_id)); -diesel::joinable!(device -> application (application_id)); -diesel::joinable!(device -> device_profile (device_profile_id)); -diesel::joinable!(device_keys -> device (dev_eui)); -diesel::joinable!(device_profile -> tenant (tenant_id)); -diesel::joinable!(device_queue_item -> device (dev_eui)); -diesel::joinable!(gateway -> tenant (tenant_id)); -diesel::joinable!(multicast_group -> application (application_id)); -diesel::joinable!(multicast_group_device -> device (dev_eui)); -diesel::joinable!(multicast_group_device -> multicast_group (multicast_group_id)); -diesel::joinable!(multicast_group_gateway -> 
gateway (gateway_id)); -diesel::joinable!(multicast_group_gateway -> multicast_group (multicast_group_id)); -diesel::joinable!(multicast_group_queue_item -> gateway (gateway_id)); -diesel::joinable!(multicast_group_queue_item -> multicast_group (multicast_group_id)); -diesel::joinable!(relay_gateway -> tenant (tenant_id)); -diesel::joinable!(tenant_user -> tenant (tenant_id)); -diesel::joinable!(tenant_user -> user (user_id)); - -diesel::allow_tables_to_appear_in_same_query!( - api_key, - application, - application_integration, - device, - device_keys, - device_profile, - device_profile_template, - device_queue_item, - gateway, - multicast_group, - multicast_group_device, - multicast_group_gateway, - multicast_group_queue_item, - relay_device, - relay_gateway, - tenant, - tenant_user, - user, -); +#[cfg(feature = "postgres")] +pub use super::schema_postgres::*; +#[cfg(feature = "sqlite")] +pub use super::schema_sqlite::*; diff --git a/chirpstack/src/storage/schema_postgres.rs b/chirpstack/src/storage/schema_postgres.rs new file mode 100644 index 00000000..22c514dd --- /dev/null +++ b/chirpstack/src/storage/schema_postgres.rs @@ -0,0 +1,391 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + api_key (id) { + id -> Uuid, + created_at -> Timestamptz, + #[max_length = 100] + name -> Varchar, + is_admin -> Bool, + tenant_id -> Nullable, + } +} + +diesel::table! { + application (id) { + id -> Uuid, + tenant_id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + #[max_length = 100] + name -> Varchar, + description -> Text, + mqtt_tls_cert -> Nullable, + tags -> Jsonb, + } +} + +diesel::table! { + application_integration (application_id, kind) { + application_id -> Uuid, + #[max_length = 20] + kind -> Varchar, + created_at -> Timestamptz, + updated_at -> Timestamptz, + configuration -> Jsonb, + } +} + +diesel::table! 
{ + device (dev_eui) { + dev_eui -> Bytea, + application_id -> Uuid, + device_profile_id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + last_seen_at -> Nullable, + scheduler_run_after -> Nullable, + #[max_length = 100] + name -> Varchar, + description -> Text, + external_power_source -> Bool, + battery_level -> Nullable, + margin -> Nullable, + dr -> Nullable, + latitude -> Nullable, + longitude -> Nullable, + altitude -> Nullable, + dev_addr -> Nullable, + #[max_length = 1] + enabled_class -> Bpchar, + skip_fcnt_check -> Bool, + is_disabled -> Bool, + tags -> Jsonb, + variables -> Jsonb, + join_eui -> Bytea, + secondary_dev_addr -> Nullable, + device_session -> Nullable, + } +} + +diesel::table! { + device_keys (dev_eui) { + dev_eui -> Bytea, + created_at -> Timestamptz, + updated_at -> Timestamptz, + nwk_key -> Bytea, + app_key -> Bytea, + dev_nonces -> Array>, + join_nonce -> Int4, + } +} + +diesel::table! { + device_profile (id) { + id -> Uuid, + tenant_id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + #[max_length = 100] + name -> Varchar, + #[max_length = 10] + region -> Varchar, + #[max_length = 10] + mac_version -> Varchar, + #[max_length = 20] + reg_params_revision -> Varchar, + #[max_length = 100] + adr_algorithm_id -> Varchar, + #[max_length = 20] + payload_codec_runtime -> Varchar, + uplink_interval -> Int4, + device_status_req_interval -> Int4, + supports_otaa -> Bool, + supports_class_b -> Bool, + supports_class_c -> Bool, + class_b_timeout -> Int4, + class_b_ping_slot_nb_k -> Int4, + class_b_ping_slot_dr -> Int2, + class_b_ping_slot_freq -> Int8, + class_c_timeout -> Int4, + abp_rx1_delay -> Int2, + abp_rx1_dr_offset -> Int2, + abp_rx2_dr -> Int2, + abp_rx2_freq -> Int8, + tags -> Jsonb, + payload_codec_script -> Text, + flush_queue_on_activate -> Bool, + description -> Text, + measurements -> Jsonb, + auto_detect_measurements -> Bool, + #[max_length = 100] + region_config_id -> Nullable, + is_relay -> 
Bool, + is_relay_ed -> Bool, + relay_ed_relay_only -> Bool, + relay_enabled -> Bool, + relay_cad_periodicity -> Int2, + relay_default_channel_index -> Int2, + relay_second_channel_freq -> Int8, + relay_second_channel_dr -> Int2, + relay_second_channel_ack_offset -> Int2, + relay_ed_activation_mode -> Int2, + relay_ed_smart_enable_level -> Int2, + relay_ed_back_off -> Int2, + relay_ed_uplink_limit_bucket_size -> Int2, + relay_ed_uplink_limit_reload_rate -> Int2, + relay_join_req_limit_reload_rate -> Int2, + relay_notify_limit_reload_rate -> Int2, + relay_global_uplink_limit_reload_rate -> Int2, + relay_overall_limit_reload_rate -> Int2, + relay_join_req_limit_bucket_size -> Int2, + relay_notify_limit_bucket_size -> Int2, + relay_global_uplink_limit_bucket_size -> Int2, + relay_overall_limit_bucket_size -> Int2, + allow_roaming -> Bool, + rx1_delay -> Int2, + } +} + +diesel::table! { + device_profile_template (id) { + id -> Text, + created_at -> Timestamptz, + updated_at -> Timestamptz, + #[max_length = 100] + name -> Varchar, + description -> Text, + #[max_length = 100] + vendor -> Varchar, + #[max_length = 100] + firmware -> Varchar, + #[max_length = 10] + region -> Varchar, + #[max_length = 10] + mac_version -> Varchar, + #[max_length = 20] + reg_params_revision -> Varchar, + #[max_length = 100] + adr_algorithm_id -> Varchar, + #[max_length = 20] + payload_codec_runtime -> Varchar, + payload_codec_script -> Text, + uplink_interval -> Int4, + device_status_req_interval -> Int4, + flush_queue_on_activate -> Bool, + supports_otaa -> Bool, + supports_class_b -> Bool, + supports_class_c -> Bool, + class_b_timeout -> Int4, + class_b_ping_slot_nb_k -> Int4, + class_b_ping_slot_dr -> Int2, + class_b_ping_slot_freq -> Int8, + class_c_timeout -> Int4, + abp_rx1_delay -> Int2, + abp_rx1_dr_offset -> Int2, + abp_rx2_dr -> Int2, + abp_rx2_freq -> Int8, + tags -> Jsonb, + measurements -> Jsonb, + auto_detect_measurements -> Bool, + } +} + +diesel::table! 
{ + device_queue_item (id) { + id -> Uuid, + dev_eui -> Bytea, + created_at -> Timestamptz, + f_port -> Int2, + confirmed -> Bool, + data -> Bytea, + is_pending -> Bool, + f_cnt_down -> Nullable, + timeout_after -> Nullable, + is_encrypted -> Bool, + } +} + +diesel::table! { + gateway (gateway_id) { + gateway_id -> Bytea, + tenant_id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + last_seen_at -> Nullable, + #[max_length = 100] + name -> Varchar, + description -> Text, + latitude -> Float8, + longitude -> Float8, + altitude -> Float4, + stats_interval_secs -> Int4, + tls_certificate -> Nullable, + tags -> Jsonb, + properties -> Jsonb, + } +} + +diesel::table! { + multicast_group (id) { + id -> Uuid, + application_id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + #[max_length = 100] + name -> Varchar, + #[max_length = 10] + region -> Varchar, + mc_addr -> Bytea, + mc_nwk_s_key -> Bytea, + mc_app_s_key -> Bytea, + f_cnt -> Int8, + #[max_length = 1] + group_type -> Bpchar, + dr -> Int2, + frequency -> Int8, + class_b_ping_slot_nb_k -> Int2, + #[max_length = 20] + class_c_scheduling_type -> Varchar, + } +} + +diesel::table! { + multicast_group_device (multicast_group_id, dev_eui) { + multicast_group_id -> Uuid, + dev_eui -> Bytea, + created_at -> Timestamptz, + } +} + +diesel::table! { + multicast_group_gateway (multicast_group_id, gateway_id) { + multicast_group_id -> Uuid, + gateway_id -> Bytea, + created_at -> Timestamptz, + } +} + +diesel::table! { + multicast_group_queue_item (id) { + id -> Uuid, + created_at -> Timestamptz, + scheduler_run_after -> Timestamptz, + multicast_group_id -> Uuid, + gateway_id -> Bytea, + f_cnt -> Int8, + f_port -> Int2, + data -> Bytea, + emit_at_time_since_gps_epoch -> Nullable, + } +} + +diesel::table! { + relay_device (relay_dev_eui, dev_eui) { + relay_dev_eui -> Bytea, + dev_eui -> Bytea, + created_at -> Timestamptz, + } +} + +diesel::table! 
{ + relay_gateway (tenant_id, relay_id) { + tenant_id -> Uuid, + relay_id -> Bytea, + created_at -> Timestamptz, + updated_at -> Timestamptz, + last_seen_at -> Nullable, + #[max_length = 100] + name -> Varchar, + description -> Text, + stats_interval_secs -> Int4, + #[max_length = 100] + region_config_id -> Varchar, + } +} + +diesel::table! { + tenant (id) { + id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + #[max_length = 100] + name -> Varchar, + description -> Text, + can_have_gateways -> Bool, + max_device_count -> Int4, + max_gateway_count -> Int4, + private_gateways_up -> Bool, + private_gateways_down -> Bool, + tags -> Jsonb, + } +} + +diesel::table! { + tenant_user (tenant_id, user_id) { + tenant_id -> Uuid, + user_id -> Uuid, + created_at -> Timestamptz, + updated_at -> Timestamptz, + is_admin -> Bool, + is_device_admin -> Bool, + is_gateway_admin -> Bool, + } +} + +diesel::table! { + user (id) { + id -> Uuid, + external_id -> Nullable, + created_at -> Timestamptz, + updated_at -> Timestamptz, + is_admin -> Bool, + is_active -> Bool, + email -> Text, + email_verified -> Bool, + #[max_length = 200] + password_hash -> Varchar, + note -> Text, + } +} + +diesel::joinable!(api_key -> tenant (tenant_id)); +diesel::joinable!(application -> tenant (tenant_id)); +diesel::joinable!(application_integration -> application (application_id)); +diesel::joinable!(device -> application (application_id)); +diesel::joinable!(device -> device_profile (device_profile_id)); +diesel::joinable!(device_keys -> device (dev_eui)); +diesel::joinable!(device_profile -> tenant (tenant_id)); +diesel::joinable!(device_queue_item -> device (dev_eui)); +diesel::joinable!(gateway -> tenant (tenant_id)); +diesel::joinable!(multicast_group -> application (application_id)); +diesel::joinable!(multicast_group_device -> device (dev_eui)); +diesel::joinable!(multicast_group_device -> multicast_group (multicast_group_id)); +diesel::joinable!(multicast_group_gateway -> 
gateway (gateway_id)); +diesel::joinable!(multicast_group_gateway -> multicast_group (multicast_group_id)); +diesel::joinable!(multicast_group_queue_item -> gateway (gateway_id)); +diesel::joinable!(multicast_group_queue_item -> multicast_group (multicast_group_id)); +diesel::joinable!(relay_gateway -> tenant (tenant_id)); +diesel::joinable!(tenant_user -> tenant (tenant_id)); +diesel::joinable!(tenant_user -> user (user_id)); + +diesel::allow_tables_to_appear_in_same_query!( + api_key, + application, + application_integration, + device, + device_keys, + device_profile, + device_profile_template, + device_queue_item, + gateway, + multicast_group, + multicast_group_device, + multicast_group_gateway, + multicast_group_queue_item, + relay_device, + relay_gateway, + tenant, + tenant_user, + user, +); diff --git a/chirpstack/src/storage/schema_sqlite.rs b/chirpstack/src/storage/schema_sqlite.rs new file mode 100644 index 00000000..047c5852 --- /dev/null +++ b/chirpstack/src/storage/schema_sqlite.rs @@ -0,0 +1,362 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + api_key (id) { + id -> Text, + created_at -> TimestamptzSqlite, + name -> Text, + is_admin -> Bool, + tenant_id -> Nullable, + } +} + +diesel::table! { + application (id) { + id -> Text, + tenant_id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + name -> Text, + description -> Text, + mqtt_tls_cert -> Nullable, + tags -> Text, + } +} + +diesel::table! { + application_integration (application_id, kind) { + application_id -> Text, + kind -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + configuration -> Text, + } +} + +diesel::table! 
{ + device (dev_eui) { + dev_eui -> Binary, + application_id -> Text, + device_profile_id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + last_seen_at -> Nullable, + scheduler_run_after -> Nullable, + name -> Text, + description -> Text, + external_power_source -> Bool, + battery_level -> Nullable, + margin -> Nullable, + dr -> Nullable, + latitude -> Nullable, + longitude -> Nullable, + altitude -> Nullable, + dev_addr -> Nullable, + enabled_class -> Text, + skip_fcnt_check -> Bool, + is_disabled -> Bool, + tags -> Text, + variables -> Text, + join_eui -> Binary, + secondary_dev_addr -> Nullable, + device_session -> Nullable, + } +} + +diesel::table! { + device_keys (dev_eui) { + dev_eui -> Binary, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + nwk_key -> Binary, + app_key -> Binary, + dev_nonces -> Text, + join_nonce -> Integer, + } +} + +diesel::table! { + device_profile (id) { + id -> Text, + tenant_id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + name -> Text, + region -> Text, + mac_version -> Text, + reg_params_revision -> Text, + adr_algorithm_id -> Text, + payload_codec_runtime -> Text, + uplink_interval -> Integer, + device_status_req_interval -> Integer, + supports_otaa -> Bool, + supports_class_b -> Bool, + supports_class_c -> Bool, + class_b_timeout -> Integer, + class_b_ping_slot_nb_k -> Integer, + class_b_ping_slot_dr -> SmallInt, + class_b_ping_slot_freq -> BigInt, + class_c_timeout -> Integer, + abp_rx1_delay -> SmallInt, + abp_rx1_dr_offset -> SmallInt, + abp_rx2_dr -> SmallInt, + abp_rx2_freq -> BigInt, + tags -> Text, + payload_codec_script -> Text, + flush_queue_on_activate -> Bool, + description -> Text, + measurements -> Text, + auto_detect_measurements -> Bool, + region_config_id -> Nullable, + is_relay -> Bool, + is_relay_ed -> Bool, + relay_ed_relay_only -> Bool, + relay_enabled -> Bool, + relay_cad_periodicity -> SmallInt, + 
relay_default_channel_index -> SmallInt, + relay_second_channel_freq -> BigInt, + relay_second_channel_dr -> SmallInt, + relay_second_channel_ack_offset -> SmallInt, + relay_ed_activation_mode -> SmallInt, + relay_ed_smart_enable_level -> SmallInt, + relay_ed_back_off -> SmallInt, + relay_ed_uplink_limit_bucket_size -> SmallInt, + relay_ed_uplink_limit_reload_rate -> SmallInt, + relay_join_req_limit_reload_rate -> SmallInt, + relay_notify_limit_reload_rate -> SmallInt, + relay_global_uplink_limit_reload_rate -> SmallInt, + relay_overall_limit_reload_rate -> SmallInt, + relay_join_req_limit_bucket_size -> SmallInt, + relay_notify_limit_bucket_size -> SmallInt, + relay_global_uplink_limit_bucket_size -> SmallInt, + relay_overall_limit_bucket_size -> SmallInt, + allow_roaming -> Bool, + rx1_delay -> SmallInt, + } +} + +diesel::table! { + device_profile_template (id) { + id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + name -> Text, + description -> Text, + vendor -> Text, + firmware -> Text, + region -> Text, + mac_version -> Text, + reg_params_revision -> Text, + adr_algorithm_id -> Text, + payload_codec_runtime -> Text, + payload_codec_script -> Text, + uplink_interval -> Integer, + device_status_req_interval -> Integer, + flush_queue_on_activate -> Bool, + supports_otaa -> Bool, + supports_class_b -> Bool, + supports_class_c -> Bool, + class_b_timeout -> Integer, + class_b_ping_slot_nb_k -> Integer, + class_b_ping_slot_dr -> SmallInt, + class_b_ping_slot_freq -> BigInt, + class_c_timeout -> Integer, + abp_rx1_delay -> SmallInt, + abp_rx1_dr_offset -> SmallInt, + abp_rx2_dr -> SmallInt, + abp_rx2_freq -> BigInt, + tags -> Text, + measurements -> Text, + auto_detect_measurements -> Bool, + } +} + +diesel::table! 
{ + device_queue_item (id) { + id -> Text, + dev_eui -> Binary, + created_at -> TimestamptzSqlite, + f_port -> SmallInt, + confirmed -> Bool, + data -> Binary, + is_pending -> Bool, + f_cnt_down -> Nullable, + timeout_after -> Nullable, + is_encrypted -> Bool, + } +} + +diesel::table! { + gateway (gateway_id) { + gateway_id -> Binary, + tenant_id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + last_seen_at -> Nullable, + name -> Text, + description -> Text, + latitude -> Double, + longitude -> Double, + altitude -> Float, + stats_interval_secs -> Integer, + tls_certificate -> Nullable, + tags -> Text, + properties -> Text, + } +} + +diesel::table! { + multicast_group (id) { + id -> Text, + application_id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + name -> Text, + region -> Text, + mc_addr -> Binary, + mc_nwk_s_key -> Binary, + mc_app_s_key -> Binary, + f_cnt -> BigInt, + group_type -> Text, + dr -> SmallInt, + frequency -> BigInt, + class_b_ping_slot_nb_k -> SmallInt, + class_c_scheduling_type -> Text, + } +} + +diesel::table! { + multicast_group_device (multicast_group_id, dev_eui) { + multicast_group_id -> Text, + dev_eui -> Binary, + created_at -> TimestamptzSqlite, + } +} + +diesel::table! { + multicast_group_gateway (multicast_group_id, gateway_id) { + multicast_group_id -> Text, + gateway_id -> Binary, + created_at -> TimestamptzSqlite, + } +} + +diesel::table! { + multicast_group_queue_item (id) { + id -> Text, + created_at -> TimestamptzSqlite, + scheduler_run_after -> TimestamptzSqlite, + multicast_group_id -> Text, + gateway_id -> Binary, + f_cnt -> BigInt, + f_port -> SmallInt, + data -> Binary, + emit_at_time_since_gps_epoch -> Nullable, + } +} + +diesel::table! { + relay_device (relay_dev_eui, dev_eui) { + relay_dev_eui -> Binary, + dev_eui -> Binary, + created_at -> TimestamptzSqlite, + } +} + +diesel::table! 
{ + relay_gateway (tenant_id, relay_id) { + tenant_id -> Text, + relay_id -> Binary, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + last_seen_at -> Nullable, + name -> Text, + description -> Text, + stats_interval_secs -> Integer, + region_config_id -> Text, + } +} + +diesel::table! { + tenant (id) { + id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + name -> Text, + description -> Text, + can_have_gateways -> Bool, + max_device_count -> Integer, + max_gateway_count -> Integer, + private_gateways_up -> Bool, + private_gateways_down -> Bool, + tags -> Text, + } +} + +diesel::table! { + tenant_user (tenant_id, user_id) { + tenant_id -> Text, + user_id -> Text, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + is_admin -> Bool, + is_device_admin -> Bool, + is_gateway_admin -> Bool, + } +} + +diesel::table! { + user (id) { + id -> Text, + external_id -> Nullable, + created_at -> TimestamptzSqlite, + updated_at -> TimestamptzSqlite, + is_admin -> Bool, + is_active -> Bool, + email -> Text, + email_verified -> Bool, + password_hash -> Text, + note -> Text, + } +} + +diesel::joinable!(api_key -> tenant (tenant_id)); +diesel::joinable!(application -> tenant (tenant_id)); +diesel::joinable!(application_integration -> application (application_id)); +diesel::joinable!(device -> application (application_id)); +diesel::joinable!(device -> device_profile (device_profile_id)); +diesel::joinable!(device_keys -> device (dev_eui)); +diesel::joinable!(device_profile -> tenant (tenant_id)); +diesel::joinable!(device_queue_item -> device (dev_eui)); +diesel::joinable!(gateway -> tenant (tenant_id)); +diesel::joinable!(multicast_group -> application (application_id)); +diesel::joinable!(multicast_group_device -> device (dev_eui)); +diesel::joinable!(multicast_group_device -> multicast_group (multicast_group_id)); +diesel::joinable!(multicast_group_gateway -> gateway (gateway_id)); 
+diesel::joinable!(multicast_group_gateway -> multicast_group (multicast_group_id)); +diesel::joinable!(multicast_group_queue_item -> gateway (gateway_id)); +diesel::joinable!(multicast_group_queue_item -> multicast_group (multicast_group_id)); +diesel::joinable!(relay_gateway -> tenant (tenant_id)); +diesel::joinable!(tenant_user -> tenant (tenant_id)); +diesel::joinable!(tenant_user -> user (user_id)); + +diesel::allow_tables_to_appear_in_same_query!( + api_key, + application, + application_integration, + device, + device_keys, + device_profile, + device_profile_template, + device_queue_item, + gateway, + multicast_group, + multicast_group_device, + multicast_group_gateway, + multicast_group_queue_item, + relay_device, + relay_gateway, + tenant, + tenant_user, + user, +); diff --git a/chirpstack/src/storage/search.rs b/chirpstack/src/storage/search.rs index 948f0baa..5ecb0a49 100644 --- a/chirpstack/src/storage/search.rs +++ b/chirpstack/src/storage/search.rs @@ -5,8 +5,7 @@ use diesel_async::RunQueryDsl; use regex::Regex; use uuid::Uuid; -use super::error::Error; -use super::get_async_db_conn; +use super::{error::Error, fields, get_async_db_conn}; use lrwn::EUI64; lazy_static! 
{ @@ -19,12 +18,12 @@ pub struct SearchResult { pub kind: String, #[diesel(sql_type = diesel::sql_types::Float)] pub score: f32, - #[diesel(sql_type = diesel::sql_types::Nullable)] - pub tenant_id: Option, + #[diesel(sql_type = diesel::sql_types::Nullable)] + pub tenant_id: Option, #[diesel(sql_type = diesel::sql_types::Nullable)] pub tenant_name: Option, - #[diesel(sql_type = diesel::sql_types::Nullable)] - pub application_id: Option, + #[diesel(sql_type = diesel::sql_types::Nullable)] + pub application_id: Option, #[diesel(sql_type = diesel::sql_types::Nullable)] pub application_name: Option, #[diesel(sql_type = diesel::sql_types::Nullable)] @@ -37,6 +36,7 @@ pub struct SearchResult { pub gateway_name: Option, } +#[cfg(feature = "postgres")] pub async fn global_search( user_id: &Uuid, global_admin: bool, @@ -152,15 +152,175 @@ pub async fn global_search( .bind::(&search) .bind::(&query) .bind::(global_admin) - .bind::(&user_id) + .bind::(&fields::Uuid::from(user_id)) .bind::(limit as i64) .bind::(offset as i64) - .bind::(tags) + .bind::(tags) .load(&mut get_async_db_conn().await?).await?; Ok(res) } +#[cfg(feature = "sqlite")] +pub async fn global_search( + user_id: &Uuid, + global_admin: bool, + search: &str, + limit: usize, + offset: usize, +) -> Result, Error> { + let (query, tags) = parse_search_query(search); + let query = format!("%{}%", query); + let tags = serde_json::to_string(&tags).context("To serde_json string")?; + + let res: Vec = diesel::sql_query( + r#" + -- device + select + 'device' as kind, + 0.0 as score, + t.id as tenant_id, + t.name as tenant_name, + a.id as application_id, + a.name as application_name, + d.dev_eui as device_dev_eui, + d.name as device_name, + null as gateway_id, + null as gateway_name + from device d + inner join application a + on a.id = d.application_id + inner join tenant t + on t.id = a.tenant_id + left join tenant_user tu + on tu.tenant_id = t.id + left join "user" u + on u.id = tu.user_id + where + (?3 = true or u.id = 
?4) + and ( + d.name like ?2 or hex(d.dev_eui) like ?2 or hex(d.dev_addr) like ?2 + or ( + ?7 != '{}' + and 0 = ( + -- this makes sure tags are present + -- by counting number of different top level json values + select + count(*) + from json_each(?7) search_tag + left join json_each(d.tags) item_tag + on search_tag.key = item_tag.key + where + -- `is not` is like `!=` but handles null + search_tag.value is not item_tag.value + ) + ) + ) + -- gateway + union + select + 'gateway' as kind, + 0.0 as score, + t.id as tenant_id, + t.name as tenant_name, + null as application_id, + null as application_name, + null as device_dev_eui, + null as device_name, + g.gateway_id as gateway_id, + g.name as gateway_name + from + gateway g + inner join tenant t + on t.id = g.tenant_id + left join tenant_user tu + on tu.tenant_id = t.id + left join "user" u + on u.id = tu.user_id + where + (?3 = true or u.id = ?4) + and ( + g.name like ?2 or hex(g.gateway_id) like ?2 + or ( + ?7 != '{}' + and 0 = ( + -- this makes sure tags are present + -- by counting number of different top level json values + select + count(*) + from json_each(?7) search_tag + left join json_each(g.tags) item_tag + on search_tag.key = item_tag.key + where + -- `is not` is like `!=` but handles null + search_tag.value is not item_tag.value + ) + ) + ) + -- tenant + union + select + 'tenant' as kind, + 0.0 as score, + t.id as tenant_id, + t.name as tenant_name, + null as application_id, + null as application_name, + null as device_dev_eui, + null as device_name, + null as gateway_id, + null as gateway_name + from + tenant t + left join tenant_user tu + on tu.tenant_id = t.id + left join "user" u + on u.id = tu.user_id + where + (?3 = true or u.id = ?4) + and t.name like ?2 + -- application + union + select + 'application' as kind, + 0.0 as score, + t.id as tenant_id, + t.name as tenant_name, + a.id as application_id, + a.name as application_name, + null as device_dev_eui, + null as device_name, + null as gateway_id, 
+ null as gateway_name + from + application a + inner join tenant t + on t.id = a.tenant_id + left join tenant_user tu + on tu.tenant_id = t.id + left join "user" u + on u.id = tu.user_id + where + (?3 = true or u.id = ?4) + and a.name like ?2 + limit ?5 + offset ?6 + "#, + ) + // first argument is unused but kept to facilitate diffing with postgres query + .bind::(&search) + .bind::(&query) + .bind::(global_admin) + .bind::(&fields::Uuid::from(user_id)) + .bind::(limit as i64) + .bind::(offset as i64) + .bind::(tags) + .load(&mut get_async_db_conn().await?) + .await?; + + Ok(res) +} + fn parse_search_query(q: &str) -> (String, HashMap) { let mut tags: HashMap = HashMap::new(); @@ -233,6 +393,14 @@ pub mod test { } } + fn build_tags(tags: &[(&str, &str)]) -> fields::KeyValue { + fields::KeyValue::new( + tags.iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(), + ) + } + #[tokio::test] async fn test_global_search() { let _guard = test::prepare().await; @@ -272,6 +440,7 @@ pub mod test { gateway_id: EUI64::from_str("0102030405060708").unwrap(), name: "test-gateway".into(), tenant_id: t.id, + tags: build_tags(&[("common_tag", "value"), ("mytag", "gw_value")]), ..Default::default() }) .await @@ -280,8 +449,20 @@ pub mod test { let _d = device::create(device::Device { dev_eui: EUI64::from_str("0203040506070809").unwrap(), name: "test-device".into(), + application_id: a.id.clone(), + device_profile_id: dp.id.clone(), + tags: build_tags(&[("common_tag", "value"), ("mytag", "dev_value")]), + ..Default::default() + }) + .await + .unwrap(); + + let _d = device::create(device::Device { + dev_eui: EUI64::from_str("030405060708090A").unwrap(), + name: "sensor".into(), application_id: a.id, device_profile_id: dp.id, + tags: build_tags(&[("a", "1"), ("b", "2")]), ..Default::default() }) .await @@ -295,8 +476,12 @@ pub mod test { "010203".into(), "020304".into(), "device".into(), + "other mytag:gw_value".into(), + "other mytag:dev_value".into(), + "other 
common_tag:value".into(), ]; for q in &queries { + println!("{q}"); let res = global_search(&u.id, false, q, 10, 0).await.unwrap(); assert_eq!(0, res.len()); } @@ -311,12 +496,17 @@ pub mod test { ("device".into(), 1), ("dev".into(), 1), ("gatew".into(), 1), + ("other mytag:gw_value".into(), 1), + ("other mytag:dev_value".into(), 1), + ("other common_tag:value".into(), 2), + ("other a:1 b:2".into(), 1), ] .iter() .cloned() .collect(); for (k, v) in &queries { let res = global_search(&u.id, true, k, 10, 0).await.unwrap(); + println!("{res:#?}"); assert_eq!(*v, res.len(), "query: {}", k); } @@ -338,6 +528,10 @@ pub mod test { ("device".into(), 1), ("dev".into(), 1), ("gatew".into(), 1), + ("other mytag:gw_value".into(), 1), + ("other mytag:dev_value".into(), 1), + ("other common_tag:value".into(), 2), + ("other a:1 b:2".into(), 1), ] .iter() .cloned() diff --git a/chirpstack/src/storage/sqlite.rs b/chirpstack/src/storage/sqlite.rs new file mode 100644 index 00000000..75d93d94 --- /dev/null +++ b/chirpstack/src/storage/sqlite.rs @@ -0,0 +1,116 @@ +use std::sync::RwLock; +use std::time::Instant; + +use anyhow::Result; +use tracing::info; + +use crate::monitoring::prometheus; +use diesel::sqlite::SqliteConnection; +use diesel::{Connection, ConnectionError, ConnectionResult}; +use diesel_async::pooled_connection::deadpool::{Object as DeadpoolObject, Pool as DeadpoolPool}; +use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig}; +use diesel_async::sync_connection_wrapper::SyncConnectionWrapper; +use futures::future::{BoxFuture, FutureExt, TryFutureExt}; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use scoped_futures::ScopedBoxFuture; + +use crate::config; + +pub type AsyncSqlitePool = DeadpoolPool>; +pub type AsyncSqlitePoolConnection = DeadpoolObject>; + +lazy_static! 
{ + static ref ASYNC_SQLITE_POOL: RwLock> = RwLock::new(None); + static ref STORAGE_SQLITE_CONN_GET: Histogram = { + let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12)); + prometheus::register( + "storage_sqlite_conn_get_duration_seconds", + "Time between requesting a SQLite connection and the connection-pool returning it", + histogram.clone(), + ); + histogram + }; +} + +pub fn setup(conf: &config::Sqlite) -> Result<()> { + info!("Setting up SQLite connection pool"); + let mut config = ManagerConfig::default(); + config.custom_setup = Box::new(sqlite_establish_connection); + let mgr = + AsyncDieselConnectionManager::>::new_with_config( + &conf.path, config, + ); + let pool = DeadpoolPool::builder(mgr) + .max_size(conf.max_open_connections as usize) + .build()?; + set_async_db_pool(pool); + + Ok(()) +} + +fn sqlite_establish_connection( + url: &str, +) -> BoxFuture>> { + let url = url.to_string(); + tokio::task::spawn_blocking( + move || -> ConnectionResult> { + let mut conn = SqliteConnection::establish(&url)?; + + use diesel::connection::SimpleConnection; + let conf = config::get(); + let pragmas = &conf + .sqlite + .pragmas + .iter() + .map(|p| format!("PRAGMA {};", p)) + .collect::>() + .join(""); + conn.batch_execute(&pragmas) + .map_err(|err| ConnectionError::BadConnection(err.to_string()))?; + Ok(SyncConnectionWrapper::new(conn)) + }, + ) + .unwrap_or_else(|err| Err(ConnectionError::BadConnection(err.to_string()))) + .boxed() +} + +fn get_async_db_pool() -> Result { + let pool_r = ASYNC_SQLITE_POOL.read().unwrap(); + let pool: AsyncSqlitePool = pool_r + .as_ref() + .ok_or_else(|| anyhow!("SQLite connection pool is not initialized"))? 
+ .clone(); + Ok(pool) +} + +pub async fn get_async_db_conn() -> Result { + let pool = get_async_db_pool()?; + + let start = Instant::now(); + let res = pool.get().await?; + + STORAGE_SQLITE_CONN_GET.observe(start.elapsed().as_secs_f64()); + + Ok(res) +} + +pub async fn db_transaction<'a, R, E, F>( + conn: &mut AsyncSqlitePoolConnection, + callback: F, +) -> Result +where + F: for<'r> FnOnce( + &'r mut SyncConnectionWrapper, + ) -> ScopedBoxFuture<'a, 'r, Result> + + Send + + 'a, + E: From + Send + 'a, + R: Send + 'a, +{ + conn.immediate_transaction(callback).await +} + +fn set_async_db_pool(p: AsyncSqlitePool) { + let mut pool_w = ASYNC_SQLITE_POOL.write().unwrap(); + *pool_w = Some(p); +} diff --git a/chirpstack/src/storage/tenant.rs b/chirpstack/src/storage/tenant.rs index 37e4d0ef..220b02bf 100644 --- a/chirpstack/src/storage/tenant.rs +++ b/chirpstack/src/storage/tenant.rs @@ -14,7 +14,7 @@ use super::{fields, get_async_db_conn}; #[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)] #[diesel(table_name = tenant)] pub struct Tenant { - pub id: Uuid, + pub id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub name: String, @@ -41,7 +41,7 @@ impl Default for Tenant { let now = Utc::now(); Tenant { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), created_at: now, updated_at: now, name: "".into(), @@ -59,8 +59,8 @@ impl Default for Tenant { #[derive(Queryable, Insertable, AsChangeset, PartialEq, Eq, Debug)] #[diesel(table_name = tenant_user)] pub struct TenantUser { - pub tenant_id: Uuid, - pub user_id: Uuid, + pub tenant_id: fields::Uuid, + pub user_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub is_admin: bool, @@ -73,8 +73,8 @@ impl Default for TenantUser { let now = Utc::now(); TenantUser { - tenant_id: Uuid::nil(), - user_id: Uuid::nil(), + tenant_id: Uuid::nil().into(), + user_id: Uuid::nil().into(), created_at: now, updated_at: now, is_admin: false, @@ -86,8 +86,8 @@ impl Default for TenantUser { 
#[derive(Queryable, PartialEq, Eq, Debug)] pub struct TenantUserListItem { - pub tenant_id: Uuid, - pub user_id: Uuid, + pub tenant_id: fields::Uuid, + pub user_id: fields::Uuid, pub created_at: DateTime, pub updated_at: DateTime, pub email: String, @@ -116,7 +116,7 @@ pub async fn create(t: Tenant) -> Result { pub async fn get(id: &Uuid) -> Result { let t = tenant::dsl::tenant - .find(&id) + .find(&fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, id.to_string()))?; @@ -146,7 +146,7 @@ pub async fn update(t: Tenant) -> Result { } pub async fn delete(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(tenant::dsl::tenant.find(&id)) + let ra = diesel::delete(tenant::dsl::tenant.find(&fields::Uuid::from(id))) .execute(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, id.to_string()))?; @@ -164,11 +164,18 @@ pub async fn get_count(filters: &Filters) -> Result { .into_boxed(); if let Some(user_id) = &filters.user_id { - q = q.filter(tenant_user::dsl::user_id.eq(user_id)); + q = q.filter(tenant_user::dsl::user_id.eq(fields::Uuid::from(user_id))); } if let Some(search) = &filters.search { - q = q.filter(tenant::dsl::name.ilike(format!("%{}%", search))); + #[cfg(feature = "postgres")] + { + q = q.filter(tenant::dsl::name.ilike(format!("%{}%", search))); + } + #[cfg(feature = "sqlite")] + { + q = q.filter(tenant::dsl::name.like(format!("%{}%", search))); + } } Ok( @@ -189,11 +196,18 @@ pub async fn list(limit: i64, offset: i64, filters: &Filters) -> Result Result { pub async fn get_user(tenant_id: &Uuid, user_id: &Uuid) -> Result { let tu: TenantUser = tenant_user::dsl::tenant_user - .filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) - .filter(tenant_user::dsl::user_id.eq(&user_id)) + .filter(tenant_user::dsl::tenant_id.eq(&fields::Uuid::from(tenant_id))) + .filter(tenant_user::dsl::user_id.eq(&fields::Uuid::from(user_id))) .first(&mut get_async_db_conn().await?) 
.await .map_err(|e| Error::from_diesel(e, user_id.to_string()))?; @@ -245,7 +259,7 @@ pub async fn get_user(tenant_id: &Uuid, user_id: &Uuid) -> Result Result { let count = tenant_user::dsl::tenant_user .select(dsl::count_star()) - .filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) + .filter(tenant_user::dsl::tenant_id.eq(fields::Uuid::from(tenant_id))) .first(&mut get_async_db_conn().await?) .await?; Ok(count) @@ -268,7 +282,7 @@ pub async fn get_users( tenant_user::dsl::is_device_admin, tenant_user::dsl::is_gateway_admin, )) - .filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) + .filter(tenant_user::dsl::tenant_id.eq(&fields::Uuid::from(tenant_id))) .order_by(user::dsl::email) .limit(limit) .offset(offset) @@ -281,8 +295,8 @@ pub async fn get_users( pub async fn delete_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<(), Error> { let ra = diesel::delete( tenant_user::dsl::tenant_user - .filter(tenant_user::dsl::tenant_id.eq(&tenant_id)) - .filter(tenant_user::dsl::user_id.eq(&user_id)), + .filter(tenant_user::dsl::tenant_id.eq(&fields::Uuid::from(tenant_id))) + .filter(tenant_user::dsl::user_id.eq(&fields::Uuid::from(user_id))), ) .execute(&mut get_async_db_conn().await?) .await?; @@ -299,7 +313,7 @@ pub async fn delete_user(tenant_id: &Uuid, user_id: &Uuid) -> Result<(), Error> pub async fn get_tenant_users_for_user(user_id: &Uuid) -> Result, Error> { let items = tenant_user::dsl::tenant_user - .filter(tenant_user::dsl::user_id.eq(&user_id)) + .filter(tenant_user::dsl::user_id.eq(&fields::Uuid::from(user_id))) .load(&mut get_async_db_conn().await?) 
.await?; Ok(items) @@ -324,7 +338,7 @@ pub mod test { pub async fn create_tenant() -> Tenant { let t = Tenant { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), created_at: Utc::now().round_subsecs(1), updated_at: Utc::now().round_subsecs(1), name: "test t".into(), @@ -365,7 +379,7 @@ pub mod test { let tu = TenantUser { tenant_id: t.id, - user_id: user.id, + user_id: user.id.into(), is_admin: true, ..Default::default() }; @@ -426,7 +440,7 @@ pub mod test { }, FilterTest { filter: Filters { - user_id: Some(user.id), + user_id: Some(user.id.into()), search: None, }, ts: vec![&t], @@ -466,7 +480,7 @@ pub mod test { let tu = TenantUser { tenant_id: t.id, - user_id: user.id, + user_id: user.id.into(), is_admin: true, ..Default::default() }; diff --git a/chirpstack/src/storage/user.rs b/chirpstack/src/storage/user.rs index b25b367c..66d0ce2b 100644 --- a/chirpstack/src/storage/user.rs +++ b/chirpstack/src/storage/user.rs @@ -12,13 +12,13 @@ use tracing::info; use uuid::Uuid; use super::error::Error; -use super::get_async_db_conn; use super::schema::user; +use super::{fields, get_async_db_conn}; #[derive(Queryable, Insertable, PartialEq, Eq, Debug, Clone)] #[diesel(table_name = user)] pub struct User { - pub id: Uuid, + pub id: fields::Uuid, pub external_id: Option, pub created_at: DateTime, pub updated_at: DateTime, @@ -35,7 +35,7 @@ impl Default for User { let now = Utc::now(); User { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), external_id: None, created_at: now, updated_at: now, @@ -78,7 +78,7 @@ pub async fn create(u: User) -> Result { pub async fn get(id: &Uuid) -> Result { let u = user::dsl::user - .find(&id) + .find(&fields::Uuid::from(id)) .first(&mut get_async_db_conn().await?) 
.await .map_err(|e| Error::from_diesel(e, id.to_string()))?; @@ -147,7 +147,7 @@ pub async fn update(u: User) -> Result { } pub async fn set_password_hash(id: &Uuid, hash: &str) -> Result { - let u: User = diesel::update(user::dsl::user.find(&id)) + let u: User = diesel::update(user::dsl::user.find(&fields::Uuid::from(id))) .set(user::password_hash.eq(&hash)) .get_result(&mut get_async_db_conn().await?) .await @@ -157,7 +157,7 @@ pub async fn set_password_hash(id: &Uuid, hash: &str) -> Result { } pub async fn delete(id: &Uuid) -> Result<(), Error> { - let ra = diesel::delete(user::dsl::user.find(&id)) + let ra = diesel::delete(user::dsl::user.find(&fields::Uuid::from(id))) .execute(&mut get_async_db_conn().await?) .await .map_err(|e| Error::from_diesel(e, id.to_string()))?; diff --git a/chirpstack/src/test/class_a_pr_test.rs b/chirpstack/src/test/class_a_pr_test.rs index bfcc5b14..c60f8a2e 100644 --- a/chirpstack/src/test/class_a_pr_test.rs +++ b/chirpstack/src/test/class_a_pr_test.rs @@ -230,24 +230,27 @@ async fn test_sns_uplink() { dev_eui: EUI64::from_be_bytes([2, 2, 3, 4, 5, 6, 7, 8]), enabled_class: DeviceClass::B, dev_addr: Some(dev_addr), - device_session: Some(internal::DeviceSession { - mac_version: common::MacVersion::Lorawan104.into(), - dev_addr: dev_addr.to_vec(), - f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - app_s_key: Some(common::KeyEnvelope { - kek_label: "".into(), - aes_key: vec![16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], - }), - f_cnt_up: 8, - n_f_cnt_down: 5, - enabled_uplink_channel_indices: vec![0, 1, 2], - rx1_delay: 1, - rx2_frequency: 869525000, - region_config_id: "eu868".into(), - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + mac_version: common::MacVersion::Lorawan104.into(), + dev_addr: 
dev_addr.to_vec(), + f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + app_s_key: Some(common::KeyEnvelope { + kek_label: "".into(), + aes_key: vec![16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], + }), + f_cnt_up: 8, + n_f_cnt_down: 5, + enabled_uplink_channel_indices: vec![0, 1, 2], + rx1_delay: 1, + rx2_frequency: 869525000, + region_config_id: "eu868".into(), + ..Default::default() + } + .into(), + ), ..Default::default() }) .await @@ -478,24 +481,27 @@ async fn test_sns_roaming_not_allowed() { dev_eui: EUI64::from_be_bytes([2, 2, 3, 4, 5, 6, 7, 8]), enabled_class: DeviceClass::B, dev_addr: Some(dev_addr), - device_session: Some(internal::DeviceSession { - mac_version: common::MacVersion::Lorawan104.into(), - dev_addr: dev_addr.to_vec(), - f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - app_s_key: Some(common::KeyEnvelope { - kek_label: "".into(), - aes_key: vec![16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], - }), - f_cnt_up: 8, - n_f_cnt_down: 5, - enabled_uplink_channel_indices: vec![0, 1, 2], - rx1_delay: 1, - rx2_frequency: 869525000, - region_config_id: "eu868".into(), - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + mac_version: common::MacVersion::Lorawan104.into(), + dev_addr: dev_addr.to_vec(), + f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + app_s_key: Some(common::KeyEnvelope { + kek_label: "".into(), + aes_key: vec![16, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1], + }), + f_cnt_up: 8, + n_f_cnt_down: 5, + enabled_uplink_channel_indices: vec![0, 1, 2], + rx1_delay: 1, + rx2_frequency: 869525000, + region_config_id: "eu868".into(), + ..Default::default() + } + .into(), + ), ..Default::default() }) .await diff --git a/chirpstack/src/test/class_a_test.rs b/chirpstack/src/test/class_a_test.rs index e6109a5b..b338186c 100644 --- a/chirpstack/src/test/class_a_test.rs +++ b/chirpstack/src/test/class_a_test.rs @@ -8,7 +8,7 @@ use super::assert; use crate::storage::{ application, device::{self, DeviceClass}, - device_profile, device_queue, gateway, mac_command, reset_redis, tenant, + device_profile, device_queue, fields, gateway, mac_command, reset_redis, tenant, }; use crate::{config, gateway::backend as gateway_backend, integration, region, test, uplink}; use chirpstack_api::{common, gw, integration as integration_pb, internal, stream}; @@ -95,20 +95,23 @@ async fn test_gateway_filtering() { dev_eui: EUI64::from_be_bytes([2, 2, 3, 4, 5, 6, 7, 8]), enabled_class: DeviceClass::B, dev_addr: Some(DevAddr::from_be_bytes([1, 2, 3, 4])), - device_session: Some(internal::DeviceSession { - mac_version: common::MacVersion::Lorawan102.into(), - dev_addr: vec![1, 2, 3, 4], - f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - f_cnt_up: 7, - n_f_cnt_down: 5, - enabled_uplink_channel_indices: vec![0, 1, 2], - rx1_delay: 1, - rx2_frequency: 869525000, - region_config_id: "eu868".into(), - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + mac_version: common::MacVersion::Lorawan102.into(), + dev_addr: vec![1, 2, 3, 4], + f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + nwk_s_enc_key: vec![1, 2, 
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + f_cnt_up: 7, + n_f_cnt_down: 5, + enabled_uplink_channel_indices: vec![0, 1, 2], + rx1_delay: 1, + rx2_frequency: 869525000, + region_config_id: "eu868".into(), + ..Default::default() + } + .into(), + ), ..Default::default() }) .await @@ -1266,7 +1269,7 @@ async fn test_lorawan_10_uplink() { name: "unconfirmed uplink with payload + ACK".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, f_cnt_down: Some(4), @@ -1896,7 +1899,7 @@ async fn test_lorawan_10_end_to_end_enc() { name: "end-to-end encryption using AppSkey + encrypted downlink".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, data: vec![1, 2, 3, 4], @@ -2160,7 +2163,7 @@ async fn test_lorawan_11_uplink() { name: "unconfirmed uplink with payload + ACK".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, f_cnt_down: Some(4), @@ -2885,7 +2888,7 @@ async fn test_lorawan_10_mac_commands() { .into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, data: vec![1, 2, 3, 4], @@ -3351,7 +3354,7 @@ async fn test_lorawan_10_device_queue() { name: "unconfirmed uplink + one unconfirmed downlink payload in queue".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3, 4], @@ -3430,14 +3433,14 @@ async fn test_lorawan_10_device_queue() { dev_eui: dev.dev_eui, device_queue_items: vec![ device_queue::DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui: dev.dev_eui, f_port: 10, 
data: vec![1, 2, 3, 4], ..Default::default() }, device_queue::DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![2, 2, 3, 4], @@ -3520,7 +3523,7 @@ async fn test_lorawan_10_device_queue() { name: "unconfirmed uplink + one confirmed downlink payload in queue".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3, 4], @@ -3599,7 +3602,7 @@ async fn test_lorawan_10_device_queue() { name: "unconfirmed uplink data + downlink payload which exceeds the max payload size (for dr 0)".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![0; 52], @@ -3636,7 +3639,7 @@ async fn test_lorawan_10_device_queue() { name: "unconfirmed uplink data + one unconfirmed downlink payload in queue (exactly max size for dr 0) + one mac command".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![0; 51], @@ -3829,7 +3832,7 @@ async fn test_lorawan_11_device_queue() { name: "unconfirmed uplink + one unconfirmed downlink payload in queue".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3, 4], @@ -3908,14 +3911,14 @@ async fn test_lorawan_11_device_queue() { dev_eui: dev.dev_eui, device_queue_items: vec![ device_queue::DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3, 4], ..Default::default() }, device_queue::DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![2, 2, 3, 4], @@ -3998,7 +4001,7 @@ 
async fn test_lorawan_11_device_queue() { name: "unconfirmed uplink + one confirmed downlink payload in queue".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3, 4], @@ -4077,7 +4080,7 @@ async fn test_lorawan_11_device_queue() { name: "unconfirmed uplink data + downlink payload which exceeds the max payload size (for dr 0)".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![0; 52], @@ -4114,7 +4117,7 @@ async fn test_lorawan_11_device_queue() { name: "unconfirmed uplink data + one unconfirmed downlink payload in queue (exactly max size for dr 0) + one mac command".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![0; 51], @@ -5427,7 +5430,7 @@ async fn test_lorawan_11_receive_window_selection() { name: "unconfirmed uplink with payload (rx1)".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, data: vec![1], @@ -5497,7 +5500,7 @@ async fn test_lorawan_11_receive_window_selection() { name: "unconfirmed uplink with payload (rx2)".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, data: vec![1], @@ -5567,7 +5570,7 @@ async fn test_lorawan_11_receive_window_selection() { name: "unconfirmed uplink with payload (rx1 + rx2)".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, data: vec![1], @@ -5666,7 +5669,7 @@ async fn 
test_lorawan_11_receive_window_selection() { name: "unconfirmed uplink with payload (rx1, payload exceeds rx2 limit)".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 1, data: vec![0; 100], @@ -5783,7 +5786,12 @@ async fn run_test(t: &Test) { device::partial_update( t.dev_eui, &device::DeviceChangeset { - device_session: Some(t.device_session.clone()), + device_session: Some( + t.device_session + .as_ref() + .map(fields::DeviceSession::from) + .clone(), + ), ..Default::default() }, ) diff --git a/chirpstack/src/test/class_b_test.rs b/chirpstack/src/test/class_b_test.rs index f5f3ff5c..b247ff4d 100644 --- a/chirpstack/src/test/class_b_test.rs +++ b/chirpstack/src/test/class_b_test.rs @@ -5,7 +5,7 @@ use crate::gpstime::ToGpsTime; use crate::storage::{ application, device::{self, DeviceClass}, - device_gateway, device_profile, device_queue, gateway, reset_redis, tenant, + device_gateway, device_profile, device_queue, fields, gateway, reset_redis, tenant, }; use crate::{ config, downlink, downlink::classb, gateway::backend as gateway_backend, integration, test, @@ -295,7 +295,7 @@ async fn test_downlink_scheduler() { name: "class-b downlink".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], @@ -347,7 +347,7 @@ async fn test_downlink_scheduler() { name: "scheduler_run_after has not yet expired".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], @@ -375,14 +375,14 @@ async fn test_downlink_scheduler() { dev_eui: dev.dev_eui, device_queue_items: vec![ device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], 
..Default::default() }, device_queue::DeviceQueueItem { - id: Uuid::new_v4(), + id: Uuid::new_v4().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3, 4], @@ -449,7 +449,12 @@ async fn run_uplink_test(t: &UplinkTest) { device::partial_update( t.dev_eui, &device::DeviceChangeset { - device_session: Some(t.device_session.clone()), + device_session: Some( + t.device_session + .as_ref() + .map(fields::DeviceSession::from) + .clone(), + ), ..Default::default() }, ) @@ -490,7 +495,12 @@ async fn run_scheduler_test(t: &DownlinkTest) { device::partial_update( t.dev_eui, &device::DeviceChangeset { - device_session: Some(t.device_session.clone()), + device_session: Some( + t.device_session + .as_ref() + .map(fields::DeviceSession::from) + .clone(), + ), ..Default::default() }, ) diff --git a/chirpstack/src/test/class_c_test.rs b/chirpstack/src/test/class_c_test.rs index bc4b818e..a4f07cbf 100644 --- a/chirpstack/src/test/class_c_test.rs +++ b/chirpstack/src/test/class_c_test.rs @@ -4,7 +4,7 @@ use super::assert; use crate::storage::{ application, device::{self, DeviceClass}, - device_gateway, device_profile, device_queue, gateway, reset_redis, tenant, + device_gateway, device_profile, device_queue, fields, gateway, reset_redis, tenant, }; use crate::{downlink, gateway::backend as gateway_backend, integration, test}; use chirpstack_api::{common, gw, internal}; @@ -114,7 +114,7 @@ async fn test_downlink_scheduler() { name: "device has not yet sent an uplink".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], @@ -141,7 +141,7 @@ async fn test_downlink_scheduler() { name: "unconfirmed data".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], @@ -188,7 +188,7 @@ async fn test_downlink_scheduler() { name: 
"scheduler_run_after has not yet expired".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], @@ -215,7 +215,7 @@ async fn test_downlink_scheduler() { name: "unconfirmed data".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![1, 2, 3], @@ -276,7 +276,7 @@ async fn test_downlink_scheduler() { name: "unconfirmed data".into(), dev_eui: dev.dev_eui, device_queue_items: vec![device_queue::DeviceQueueItem { - id: Uuid::nil(), + id: Uuid::nil().into(), dev_eui: dev.dev_eui, f_port: 10, data: vec![0; 300], @@ -303,7 +303,12 @@ async fn run_scheduler_test(t: &DownlinkTest) { device::partial_update( t.dev_eui, &device::DeviceChangeset { - device_session: Some(t.device_session.clone()), + device_session: Some( + t.device_session + .as_ref() + .map(fields::DeviceSession::from) + .clone(), + ), ..Default::default() }, ) diff --git a/chirpstack/src/test/mod.rs b/chirpstack/src/test/mod.rs index 10bae5e7..15e06d58 100644 --- a/chirpstack/src/test/mod.rs +++ b/chirpstack/src/test/mod.rs @@ -39,6 +39,7 @@ pub async fn prepare<'a>() -> std::sync::MutexGuard<'a, ()> { let mut conf: config::Configuration = Default::default(); conf.postgresql.dsn = env::var("TEST_POSTGRESQL_DSN").unwrap(); conf.redis.servers = vec![env::var("TEST_REDIS_URL").unwrap()]; + conf.sqlite.path = ":memory:".to_string(); conf.network.enabled_regions = vec!["eu868".to_string()]; conf.regions = vec![config::Region { id: "eu868".to_string(), diff --git a/chirpstack/src/test/multicast_test.rs b/chirpstack/src/test/multicast_test.rs index c539abd4..5645abbf 100644 --- a/chirpstack/src/test/multicast_test.rs +++ b/chirpstack/src/test/multicast_test.rs @@ -91,7 +91,9 @@ async fn test_multicast() { }) .await .unwrap(); - multicast::add_device(&mg.id, 
&d.dev_eui).await.unwrap(); + multicast::add_device(&mg.id.into(), &d.dev_eui) + .await + .unwrap(); // device <> gateway device_gateway::save_rx_info(&internal::DeviceGatewayRxInfo { @@ -116,7 +118,7 @@ async fn test_multicast() { name: "one item in queue".into(), multicast_group: mg.clone(), multicast_group_queue_items: vec![multicast::MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), f_port: 5, data: vec![1, 2, 3], ..Default::default() @@ -158,13 +160,13 @@ async fn test_multicast() { multicast_group: mg.clone(), multicast_group_queue_items: vec![ multicast::MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), f_port: 5, data: vec![1, 2, 3], ..Default::default() }, multicast::MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), f_port: 6, data: vec![1, 2, 3], ..Default::default() @@ -207,13 +209,13 @@ async fn test_multicast() { multicast_group: mg.clone(), multicast_group_queue_items: vec![ multicast::MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), f_port: 5, data: vec![2; 300], ..Default::default() }, multicast::MulticastGroupQueueItem { - multicast_group_id: mg.id, + multicast_group_id: mg.id.into(), f_port: 6, data: vec![1, 2, 3], ..Default::default() diff --git a/chirpstack/src/test/otaa_pr_test.rs b/chirpstack/src/test/otaa_pr_test.rs index 539acfbb..4a79e027 100644 --- a/chirpstack/src/test/otaa_pr_test.rs +++ b/chirpstack/src/test/otaa_pr_test.rs @@ -314,7 +314,7 @@ async fn test_sns() { let dk = device_keys::create(device_keys::DeviceKeys { dev_eui: dev.dev_eui, nwk_key: AES128Key::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), - dev_nonces: vec![], + dev_nonces: vec![].into(), ..Default::default() }) .await @@ -499,7 +499,7 @@ async fn test_sns_roaming_not_allowed() { let dk = device_keys::create(device_keys::DeviceKeys { dev_eui: dev.dev_eui, nwk_key: 
AES128Key::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), - dev_nonces: vec![], + dev_nonces: vec![].into(), ..Default::default() }) .await diff --git a/chirpstack/src/test/otaa_test.rs b/chirpstack/src/test/otaa_test.rs index d6f5bb69..91bed7e1 100644 --- a/chirpstack/src/test/otaa_test.rs +++ b/chirpstack/src/test/otaa_test.rs @@ -101,7 +101,7 @@ async fn test_gateway_filtering() { let dk = device_keys::create(device_keys::DeviceKeys { dev_eui: dev.dev_eui, nwk_key: AES128Key::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), - dev_nonces: vec![Some(258)], + dev_nonces: vec![Some(258)].into(), ..Default::default() }) .await @@ -273,7 +273,7 @@ async fn test_lorawan_10() { let dk = device_keys::create(device_keys::DeviceKeys { dev_eui: dev.dev_eui, nwk_key: AES128Key::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), - dev_nonces: vec![Some(258)], + dev_nonces: vec![Some(258)].into(), ..Default::default() }) .await @@ -929,7 +929,7 @@ async fn test_lorawan_11() { dev_eui: dev.dev_eui, nwk_key: AES128Key::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), app_key: AES128Key::from_bytes([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), - dev_nonces: vec![Some(258)], + dev_nonces: vec![Some(258)].into(), ..Default::default() }) .await diff --git a/chirpstack/src/test/relay_class_a_test.rs b/chirpstack/src/test/relay_class_a_test.rs index c2b9255a..73517a5d 100644 --- a/chirpstack/src/test/relay_class_a_test.rs +++ b/chirpstack/src/test/relay_class_a_test.rs @@ -6,7 +6,7 @@ use super::assert; use crate::storage::{ application, device::{self, DeviceClass}, - device_profile, device_queue, gateway, reset_redis, tenant, + device_profile, device_queue, fields, gateway, reset_redis, tenant, }; use crate::{gateway::backend as gateway_backend, integration, test, uplink}; use chirpstack_api::{common, gw, integration as integration_pb, internal}; @@ -782,7 +782,12 @@ async fn run_test(t: 
&Test) { device::partial_update( t.dev_eui_relay, &device::DeviceChangeset { - device_session: Some(t.device_session_relay.clone()), + device_session: Some( + t.device_session_relay + .as_ref() + .map(fields::DeviceSession::from) + .clone(), + ), ..Default::default() }, ) @@ -791,7 +796,12 @@ async fn run_test(t: &Test) { device::partial_update( t.dev_eui_relay_ed, &device::DeviceChangeset { - device_session: Some(t.device_session_relay_ed.clone()), + device_session: Some( + t.device_session_relay_ed + .as_ref() + .map(fields::DeviceSession::from) + .clone(), + ), ..Default::default() }, ) diff --git a/chirpstack/src/test/relay_otaa_test.rs b/chirpstack/src/test/relay_otaa_test.rs index fe6121e4..faf21196 100644 --- a/chirpstack/src/test/relay_otaa_test.rs +++ b/chirpstack/src/test/relay_otaa_test.rs @@ -97,19 +97,22 @@ async fn test_lorawan_10() { dev_eui: EUI64::from_be_bytes([1, 1, 1, 1, 1, 1, 1, 2]), enabled_class: DeviceClass::A, dev_addr: Some(DevAddr::from_be_bytes([4, 3, 2, 1])), - device_session: Some(internal::DeviceSession { - mac_version: common::MacVersion::Lorawan102.into(), - dev_addr: vec![4, 3, 2, 1], - f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - f_cnt_up: 10, - n_f_cnt_down: 5, - rx1_delay: 1, - rx2_frequency: 869525000, - region_config_id: "eu868".into(), - ..Default::default() - }), + device_session: Some( + internal::DeviceSession { + mac_version: common::MacVersion::Lorawan102.into(), + dev_addr: vec![4, 3, 2, 1], + f_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + s_nwk_s_int_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + nwk_s_enc_key: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + f_cnt_up: 10, + n_f_cnt_down: 5, + rx1_delay: 1, + rx2_frequency: 869525000, + region_config_id: 
"eu868".into(), + ..Default::default() + } + .into(), + ), ..Default::default() }) .await @@ -291,7 +294,8 @@ async fn test_lorawan_10() { region_config_id: "eu868".to_string(), class_b_ping_slot_nb: 1, ..Default::default() - }, + } + .into(), ), assert::downlink_frame(gw::DownlinkFrame { items: vec![ diff --git a/chirpstack/src/uplink/data.rs b/chirpstack/src/uplink/data.rs index eaa9ebe0..e7515621 100644 --- a/chirpstack/src/uplink/data.rs +++ b/chirpstack/src/uplink/data.rs @@ -491,7 +491,7 @@ impl Data { .cloned() .collect(), }; - integration::log_event(app.id, &dev.variables, &pl).await; + integration::log_event(app.id.into(), &dev.variables, &pl).await; } if self.reset { @@ -509,7 +509,7 @@ impl Data { .cloned() .collect(), }; - integration::log_event(app.id, &dev.variables, &pl).await; + integration::log_event(app.id.into(), &dev.variables, &pl).await; } Err(Error::Abort) @@ -549,7 +549,7 @@ impl Data { trace!("Filtering rx_info by tenant_id"); match filter_rx_info_by_tenant_id( - self.application.as_ref().unwrap().tenant_id, + self.application.as_ref().unwrap().tenant_id.into(), &mut self.uplink_frame_set, ) { Ok(_) => Ok(()), @@ -974,7 +974,7 @@ impl Data { Ok(v) => v, Err(e) => { integration::log_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::LogEvent { time: Some(Utc::now().into()), @@ -997,7 +997,7 @@ impl Data { }; } - integration::uplink_event(app.id, &dev.variables, &pl).await; + integration::uplink_event(app.id.into(), &dev.variables, &pl).await; self.uplink_event = Some(pl); @@ -1080,7 +1080,7 @@ impl Data { if update_dp_measurements { self.device_profile = - Some(device_profile::set_measurements(dp.id, &measurements).await?); + Some(device_profile::set_measurements(dp.id.into(), &measurements).await?); } Ok(()) @@ -1153,7 +1153,7 @@ impl Data { tags.extend((*dev.tags).clone()); integration::ack_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::AckEvent { deduplication_id: 
self.uplink_frame_set.uplink_set_id.to_string(), diff --git a/chirpstack/src/uplink/join.rs b/chirpstack/src/uplink/join.rs index 8254e1be..29b111dd 100644 --- a/chirpstack/src/uplink/join.rs +++ b/chirpstack/src/uplink/join.rs @@ -341,7 +341,7 @@ impl JoinRequest { trace!("Filtering rx_info by tenant_id"); filter_rx_info_by_tenant_id( - self.application.as_ref().unwrap().tenant_id, + self.application.as_ref().unwrap().tenant_id.into(), &mut self.uplink_frame_set, )?; Ok(()) @@ -412,7 +412,7 @@ impl JoinRequest { let dev = self.device.as_ref().unwrap(); integration::log_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::LogEvent { time: Some(Utc::now().into()), @@ -465,7 +465,7 @@ impl JoinRequest { Err(v) => match v { StorageError::InvalidDevNonce => { integration::log_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::LogEvent { time: Some(Utc::now().into()), @@ -841,7 +841,7 @@ impl JoinRequest { None => {} } - device.device_session = Some(ds); + device.device_session = Some(ds.into()); Ok(()) } @@ -956,7 +956,7 @@ impl JoinRequest { }, }; - integration::join_event(app.id, &dev.variables, &pl).await; + integration::join_event(app.id.into(), &dev.variables, &pl).await; Ok(()) } } diff --git a/chirpstack/src/uplink/join_sns.rs b/chirpstack/src/uplink/join_sns.rs index 98244a27..6736c533 100644 --- a/chirpstack/src/uplink/join_sns.rs +++ b/chirpstack/src/uplink/join_sns.rs @@ -309,7 +309,7 @@ impl JoinRequest { let dev = self.device.as_ref().unwrap(); integration::log_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::LogEvent { time: Some(Utc::now().into()), @@ -362,7 +362,7 @@ impl JoinRequest { Err(v) => match v { StorageError::InvalidDevNonce => { integration::log_event( - app.id, + app.id.into(), &dev.variables, &integration_pb::LogEvent { time: Some(Utc::now().into()), @@ -623,7 +623,7 @@ impl JoinRequest { } } - device.device_session = Some(ds); + device.device_session = Some(ds.into()); Ok(()) } @@ -649,7 
+649,7 @@ impl JoinRequest { device::partial_update( self.device.as_ref().unwrap().dev_eui, &device::DeviceChangeset { - device_session: Some(Some(ds.clone())), + device_session: Some(Some(ds.into())), join_eui: Some(self.join_request.as_ref().unwrap().join_eui), dev_addr: Some(Some(self.dev_addr.unwrap())), secondary_dev_addr: Some(None), @@ -706,7 +706,7 @@ impl JoinRequest { }, }; - integration::join_event(app.id, &dev.variables, &pl).await; + integration::join_event(app.id.into(), &dev.variables, &pl).await; Ok(()) } diff --git a/chirpstack/src/uplink/mesh.rs b/chirpstack/src/uplink/mesh.rs index 5e41852e..e9a2fb98 100644 --- a/chirpstack/src/uplink/mesh.rs +++ b/chirpstack/src/uplink/mesh.rs @@ -84,7 +84,7 @@ impl MeshHeartbeat { } }; - match gateway::get_relay_gateway(border_gw.tenant_id, self.relay_id).await { + match gateway::get_relay_gateway(border_gw.tenant_id.into(), self.relay_id).await { Ok(mut v) => { if let Some(last_seen_at) = v.last_seen_at { if last_seen_at > ts { diff --git a/chirpstack/src/uplink/mod.rs b/chirpstack/src/uplink/mod.rs index 5621285b..a8f603bd 100644 --- a/chirpstack/src/uplink/mod.rs +++ b/chirpstack/src/uplink/mod.rs @@ -401,7 +401,8 @@ async fn update_gateway_metadata(ufs: &mut UplinkFrameSet) -> Result<()> { .insert(gw_id, gw_meta.is_private_up); ufs.gateway_private_down_map .insert(gw_id, gw_meta.is_private_down); - ufs.gateway_tenant_id_map.insert(gw_id, gw_meta.tenant_id); + ufs.gateway_tenant_id_map + .insert(gw_id, gw_meta.tenant_id.into()); } Ok(()) diff --git a/cross/Dockerfile.aarch64-unknown-linux-musl b/cross/Dockerfile.aarch64-unknown-linux-musl index 15e89df4..034a273b 100644 --- a/cross/Dockerfile.aarch64-unknown-linux-musl +++ b/cross/Dockerfile.aarch64-unknown-linux-musl @@ -4,3 +4,16 @@ RUN apt-get update && \ apt-get --assume-yes install \ protobuf-compiler \ libprotobuf-dev + +ENV MUSL_PREFIX=aarch64-linux-musl +ENV SQLITE_VERSION=3460000 + +RUN echo "Building SQLite" && \ + cd /tmp && \ + curl -fLO 
"https://sqlite.org/2024/sqlite-autoconf-$SQLITE_VERSION.tar.gz" && \ + tar xvzf "sqlite-autoconf-$SQLITE_VERSION.tar.gz" && cd "sqlite-autoconf-$SQLITE_VERSION" && \ + env CC=$MUSL_PREFIX-gcc ./configure --host=aarch64-linux --enable-static --prefix=/usr/local/$MUSL_PREFIX-target && \ + make && make install && \ + rm -r /tmp/* + +ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig diff --git a/cross/Dockerfile.armv7-unknown-linux-musleabihf b/cross/Dockerfile.armv7-unknown-linux-musleabihf index 4ba3dcec..def2be6b 100644 --- a/cross/Dockerfile.armv7-unknown-linux-musleabihf +++ b/cross/Dockerfile.armv7-unknown-linux-musleabihf @@ -4,3 +4,16 @@ RUN apt-get update && \ apt-get --assume-yes install \ protobuf-compiler \ libprotobuf-dev + +ENV MUSL_PREFIX=arm-linux-musleabihf +ENV SQLITE_VERSION=3460000 + +RUN echo "Building SQLite" && \ + cd /tmp && \ + curl -fLO "https://sqlite.org/2024/sqlite-autoconf-$SQLITE_VERSION.tar.gz" && \ + tar xvzf "sqlite-autoconf-$SQLITE_VERSION.tar.gz" && cd "sqlite-autoconf-$SQLITE_VERSION" && \ + env CC=$MUSL_PREFIX-gcc ./configure --host=arm-linux --enable-static --prefix=/usr/local/$MUSL_PREFIX-target && \ + make && make install && \ + rm -r /tmp/* + +ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig diff --git a/cross/Dockerfile.x86_64-unknown-linux-musl b/cross/Dockerfile.x86_64-unknown-linux-musl index a138532d..0c1094ac 100644 --- a/cross/Dockerfile.x86_64-unknown-linux-musl +++ b/cross/Dockerfile.x86_64-unknown-linux-musl @@ -4,3 +4,16 @@ RUN apt-get update && \ apt-get --assume-yes install \ protobuf-compiler \ libprotobuf-dev + +ENV MUSL_PREFIX=x86_64-linux-musl +ENV SQLITE_VERSION=3460000 + +RUN echo "Building SQLite" && \ + cd /tmp && \ + curl -fLO "https://sqlite.org/2024/sqlite-autoconf-$SQLITE_VERSION.tar.gz" && \ + tar xvzf "sqlite-autoconf-$SQLITE_VERSION.tar.gz" && cd "sqlite-autoconf-$SQLITE_VERSION" && \ + env CC=$MUSL_PREFIX-gcc ./configure --enable-static 
--prefix=/usr/local/$MUSL_PREFIX-target && \ + make && make install && \ + rm -r /tmp/* + +ENV PKG_CONFIG_PATH=/usr/local/$MUSL_PREFIX-target/lib/pkgconfig diff --git a/lrwn/Cargo.toml b/lrwn/Cargo.toml index 8594cac8..55d378f4 100644 --- a/lrwn/Cargo.toml +++ b/lrwn/Cargo.toml @@ -16,7 +16,7 @@ cmac = { version = "0.7", optional = true } aes = { version = "0.8", optional = true } serde = { version = "1.0", features = ["derive"], optional = true } - diesel = { version = "2.2", features = ["postgres_backend"], optional = true } + diesel = { version = "2.2", optional = true } # Error handling thiserror = "1.0" @@ -28,6 +28,8 @@ [features] default = [] diesel = ["dep:diesel", "serde"] + postgres = ["diesel", "diesel/postgres_backend"] + sqlite = ["diesel", "diesel/sqlite"] serde = ["dep:serde"] crypto = ["dep:cmac", "dep:aes"] regions = [] diff --git a/lrwn/src/aes128.rs b/lrwn/src/aes128.rs index 200bdb0d..9ae77870 100644 --- a/lrwn/src/aes128.rs +++ b/lrwn/src/aes128.rs @@ -2,6 +2,8 @@ use std::fmt; use std::str::FromStr; use anyhow::Result; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; #[cfg(feature = "diesel")] use diesel::{backend::Backend, deserialize, serialize, sql_types::Binary}; #[cfg(feature = "serde")] @@ -127,7 +129,7 @@ where } } -#[cfg(feature = "diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for AES128Key where [u8]: serialize::ToSql, @@ -140,6 +142,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for AES128Key { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(Vec::from(self.to_bytes().as_slice())); + Ok(serialize::IsNull::No) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/lrwn/src/devaddr.rs b/lrwn/src/devaddr.rs index 1393d080..1e0eec4c 100644 --- a/lrwn/src/devaddr.rs +++ b/lrwn/src/devaddr.rs @@ -2,6 +2,8 @@ use std::fmt; use std::str::FromStr; use anyhow::Result; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; 
#[cfg(feature = "diesel")] use diesel::{backend::Backend, deserialize, serialize, sql_types::Binary}; #[cfg(feature = "serde")] @@ -270,7 +272,7 @@ where } } -#[cfg(feature = "diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for DevAddr where [u8]: serialize::ToSql, @@ -283,6 +285,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for DevAddr { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(Vec::from(self.to_be_bytes().as_slice())); + Ok(serialize::IsNull::No) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/lrwn/src/eui64.rs b/lrwn/src/eui64.rs index cdb510a0..ed7734e1 100644 --- a/lrwn/src/eui64.rs +++ b/lrwn/src/eui64.rs @@ -2,6 +2,8 @@ use std::fmt; use std::str::FromStr; use anyhow::{Context, Result}; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; #[cfg(feature = "diesel")] use diesel::{backend::Backend, deserialize, serialize, sql_types::Binary}; #[cfg(feature = "serde")] @@ -135,7 +137,7 @@ where } } -#[cfg(feature = "diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for EUI64 where [u8]: serialize::ToSql, @@ -148,6 +150,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for EUI64 { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(Vec::from(self.to_be_bytes().as_slice())); + Ok(serialize::IsNull::No) + } +} + #[cfg(feature = "diesel")] impl diesel::sql_types::SqlType for EUI64 { type IsNull = diesel::sql_types::is_nullable::NotNull; diff --git a/lrwn/src/maccommand.rs b/lrwn/src/maccommand.rs index 00eb829a..5f527d04 100644 --- a/lrwn/src/maccommand.rs +++ b/lrwn/src/maccommand.rs @@ -4,6 +4,8 @@ use std::ops::{Deref, DerefMut}; use std::time::Duration; use anyhow::Result; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; #[cfg(feature = "diesel")] use diesel::{backend::Backend, deserialize, serialize, sql_types::SmallInt}; #[cfg(feature = "serde")] @@ 
-1886,7 +1888,7 @@ where } } -#[cfg(feature = "diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for RelayModeActivation where i16: serialize::ToSql, @@ -1897,6 +1899,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for RelayModeActivation { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_u8() as i32); + Ok(serialize::IsNull::No) + } +} + #[derive(Debug, PartialEq, Eq, Clone)] #[cfg_attr(feature = "serde", derive(Serialize))] pub struct ActivationRelayMode { diff --git a/lrwn/src/region/mod.rs b/lrwn/src/region/mod.rs index 0b564ce6..8d88768b 100644 --- a/lrwn/src/region/mod.rs +++ b/lrwn/src/region/mod.rs @@ -4,6 +4,8 @@ use std::str::FromStr; use std::time::Duration; use anyhow::{Context, Result}; +#[cfg(feature = "sqlite")] +use diesel::sqlite::Sqlite; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -69,7 +71,7 @@ where } } -#[cfg(feature = "diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for CommonName where str: serialize::ToSql, @@ -85,6 +87,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for CommonName { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + impl FromStr for CommonName { type Err = anyhow::Error; @@ -183,7 +193,7 @@ where } } -#[cfg(feature = "diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for Revision where str: serialize::ToSql, @@ -199,6 +209,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for Revision { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + #[allow(non_camel_case_types)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "diesel", derive(AsExpression, FromSqlRow))] @@ -268,7 +286,7 @@ where } } -#[cfg(feature = 
"diesel")] +#[cfg(feature = "postgres")] impl serialize::ToSql for MacVersion where str: serialize::ToSql, @@ -284,6 +302,14 @@ where } } +#[cfg(feature = "sqlite")] +impl serialize::ToSql for MacVersion { + fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result { + out.set_value(self.to_string()); + Ok(serialize::IsNull::No) + } +} + #[derive(Clone)] pub struct DataRate { pub uplink: bool, diff --git a/shell.nix b/shell.nix index 8b9a5070..f1cf032d 100644 --- a/shell.nix +++ b/shell.nix @@ -18,6 +18,8 @@ pkgs.mkShell { pkgs.protoc-gen-grpc-web # grpc-web api pkgs.protoc-gen-go # go api pkgs.protoc-gen-go-grpc + pkgs.openssl + pkgs.sqlite ]; LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; BINDGEN_EXTRA_CLANG_ARGS = "-I${pkgs.llvmPackages.libclang.lib}/lib/clang/${pkgs.llvmPackages.libclang.version}/include";