Compare commits

..

76 Commits

Author SHA1 Message Date
92b2439377 Bump version to 4.12.0-test.4 2025-04-21 15:23:08 +01:00
6d084b3b21 Add option to pass auth. header to join-server. 2025-04-16 10:55:08 +01:00
75e9106bbb Remove lazy_static dependency. 2025-04-16 09:51:49 +01:00
4ce4828a78 lrwn: Remove lazy_static dependency. 2025-04-15 16:06:03 +01:00
9ecf4fef1b ui: Update vite to v6.2.6. 2025-04-15 15:54:17 +01:00
8f9316af2c Bump golang.org/x/net from 0.33.0 to 0.36.0 in /examples/frame_log/go (#651)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.33.0 to 0.36.0.
- [Commits](https://github.com/golang/net/compare/v0.33.0...v0.36.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.36.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-15 15:28:32 +01:00
1f2a7d390a Bump vite from 5.4.12 to 5.4.18 in /ui (#652)
Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.12 to 5.4.18.
- [Release notes](https://github.com/vitejs/vite/releases)
- [Changelog](https://github.com/vitejs/vite/blob/v5.4.18/packages/vite/CHANGELOG.md)
- [Commits](https://github.com/vitejs/vite/commits/v5.4.18/packages/vite)

---
updated-dependencies:
- dependency-name: vite
  dependency-version: 5.4.18
  dependency-type: direct:development
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-15 15:28:19 +01:00
990bf57da5 Update dependencies. 2025-04-15 15:07:10 +01:00
b336690a65 Update tonic to 0.13. 2025-04-15 15:00:23 +01:00
7597bcaabf Replace tenant UUID by gen_random_uuid() in pg migration.
This should have been part of 5fd57de6ce.
2025-04-15 13:51:53 +01:00
0ae1294a63 Show origin in case of parse error.
Fixes #632.
2025-04-15 13:32:01 +01:00
8e0a29ed55 Update PostgreSQL to v13 in tests.
This is the oldest version that is still supported.
2025-04-15 12:41:49 +01:00
5fd57de6ce Replace static UUID by gen_random_uuid() in pg migration.
Fixes #634.
2025-04-15 12:28:44 +01:00
d91fb77617 Install cargo-deb through shell.nix. 2025-04-15 11:29:01 +01:00
fa63c306fd Add default ADR example in JS. 2025-04-15 10:16:17 +01:00
7d1e85e575 Bump golang.org/x/net from 0.33.0 to 0.36.0 in /api/go (#639)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.33.0 to 0.36.0.
- [Commits](https://github.com/golang/net/compare/v0.33.0...v0.36.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-26 11:40:20 +00:00
e392f52444 Update prometheus-client dependency. 2025-03-26 11:34:22 +00:00
e30a2e0e77 Bump version to 4.12.0-test.3 2025-03-25 13:43:56 +00:00
b2adac5a49 Fix formatting (cargo fmt). 2025-03-25 13:13:41 +00:00
ca7b0a2e61 Fix sqlite migration typo. 2025-03-25 11:31:12 +00:00
849d27f148 Fix cargo clippy feedback. 2025-03-25 11:31:07 +00:00
5ce35eef5e ui: Disable start fuota deployment button once started. 2025-03-24 11:38:07 +00:00
236b468aa4 Store and increase TS004 session_cnt per device.
For TS004 v2.0.0, the session_cnt must be incremented for every
fragmentation-session.
2025-03-24 11:33:16 +00:00
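A minimal sketch of the per-device counter idea, assuming an in-memory map keyed by DevEUI (struct and function names are hypothetical; in practice the counter would be persisted with the device):

use std::collections::HashMap;

// Hypothetical per-device app-layer state.
#[derive(Default)]
struct DeviceAppLayerState {
    ts004_session_cnt: u16,
}

// Returns the session_cnt to use for the next fragmentation-session and
// increments the stored counter, so every session gets a fresh value.
fn next_ts004_session_cnt(
    states: &mut HashMap<String, DeviceAppLayerState>,
    dev_eui: &str,
) -> u16 {
    let state = states.entry(dev_eui.to_string()).or_default();
    let cnt = state.ts004_session_cnt;
    state.ts004_session_cnt = state.ts004_session_cnt.wrapping_add(1);
    cnt
}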
c130be9dd0 Bump redis from 0.29.1 to 0.29.2 (#636)
Bumps [redis](https://github.com/redis-rs/redis-rs) from 0.29.1 to 0.29.2.
- [Release notes](https://github.com/redis-rs/redis-rs/releases)
- [Commits](https://github.com/redis-rs/redis-rs/compare/redis-0.29.1...redis-0.29.2)

---
updated-dependencies:
- dependency-name: redis
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-24 09:53:11 +00:00
4b77fa441d Bump version to 4.12.0-test.2 2025-03-20 12:25:23 +00:00
c137136d4d Update chirpstack configfile template.
See also chirpstack/chirpstack-docs#25
2025-03-20 11:49:07 +00:00
27689d172f Bump vite from 5.3.6 to 5.4.12 in /ui (#604)
Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.6 to 5.4.12.
- [Release notes](https://github.com/vitejs/vite/releases)
- [Changelog](https://github.com/vitejs/vite/blob/v5.4.12/packages/vite/CHANGELOG.md)
- [Commits](https://github.com/vitejs/vite/commits/v5.4.12/packages/vite)

---
updated-dependencies:
- dependency-name: vite
  dependency-type: direct:development
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-20 11:26:39 +00:00
730ed09840 Update Rust toolchain version. 2025-03-20 10:58:30 +00:00
105ea2806a Remove rand_core and import re-export. 2025-03-20 10:53:14 +00:00
8f34ea2ca5 Bump golang.org/x/net from 0.23.0 to 0.33.0 in /api/go (#599)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.23.0 to 0.33.0.
- [Commits](https://github.com/golang/net/compare/v0.23.0...v0.33.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-20 10:41:12 +00:00
447df411df Bump golang.org/x/net from 0.23.0 to 0.36.0 in /examples/request_log/go (#627)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.23.0 to 0.36.0.
- [Commits](https://github.com/golang/net/compare/v0.23.0...v0.36.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-20 10:40:58 +00:00
e228125031 Bump @babel/runtime from 7.24.7 to 7.26.10 in /ui (#633)
Bumps [@babel/runtime](https://github.com/babel/babel/tree/HEAD/packages/babel-runtime) from 7.24.7 to 7.26.10.
- [Release notes](https://github.com/babel/babel/releases)
- [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md)
- [Commits](https://github.com/babel/babel/commits/v7.26.10/packages/babel-runtime)

---
updated-dependencies:
- dependency-name: "@babel/runtime"
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-20 10:40:43 +00:00
7c134a549d Update dependencies. 2025-03-20 10:39:52 +00:00
f97af991be Generalize auto-conversion of SEC1 EC keys to PKCS#8. (#618)
Get the key curve from the parameters of the SEC1 key instead of
assuming that it is P256.
2025-03-20 10:10:57 +00:00
293cfe2664 api: Update Go generated code. 2025-03-20 09:03:47 +00:00
5bbd71ab3a Add warnings to fuota deployment job + UI.
In case some devices do not complete a job, this makes it possible
to show a warning in the UI with the number of devices that did
not complete the job.
2025-03-19 14:47:47 +00:00
f02256245c lrwn: Align v2 fragmentation fec with LBM stack.
This aligns the forward-error-correction code with the LoRa Basics
Modem stack, which seems to differ from the TS004 MATLAB example
code in that it only calls the matrix_line function for the redundancy
frames, and thus the n argument ranges from 1 up to (and including) the
number of redundancy frames.

The TS004 MATLAB example calls the matrix_line function for every
fragment, so the n argument ranges from 1 up to (and including) m +
the number of redundancy frames. While n <= m, it returns early.
2025-03-18 13:32:14 +00:00
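As a rough illustration of the difference described above (matrix_line is a hypothetical stand-in for the TS004 parity-line generator, m is the number of data fragments):

// Hypothetical stand-in for the TS004 parity-matrix line generator.
fn matrix_line(n: usize, m: usize) -> Vec<u8> {
    let _ = (n, m);
    unimplemented!("returns the parity line for index n over m data fragments")
}

// LBM-style encoding: matrix_line is only called for the redundancy frames,
// so n runs from 1 up to and including the number of redundancy frames.
fn redundancy_lines_lbm(m: usize, nb_redundancy: usize) -> Vec<Vec<u8>> {
    (1..=nb_redundancy).map(|n| matrix_line(n, m)).collect()
}

// TS004 MATLAB example style: matrix_line is called for every fragment
// (n = 1..=m + nb_redundancy) and returns early while n <= m, so the
// redundancy frames effectively see n = m + 1..=m + nb_redundancy.
fn redundancy_lines_matlab(m: usize, nb_redundancy: usize) -> Vec<Vec<u8>> {
    ((m + 1)..=(m + nb_redundancy))
        .map(|n| matrix_line(n, m))
        .collect()
}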
a0f07b5303 Initial FUOTA v2 implementation.
This implements selecting the v2.0.0 app-layer package in the
device-profile and handling these payloads in the FUOTA flow.
2025-03-18 12:44:15 +00:00
60547ff973 Fix fuota device timeout filter.
We should filter on error_msg = "" instead of != NULL, as we only want
to update the error_msg for devices that do not yet have an error set.
Otherwise we would overwrite an earlier error message.
2025-03-13 15:09:03 +00:00
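A minimal sketch of the corrected predicate (struct and field names are assumptions; error_msg is a NOT NULL text column, so "no error yet" means the empty string):

use chrono::{DateTime, Utc};

// Hypothetical in-memory view of a FUOTA deployment device row.
struct FuotaDeploymentDevice {
    completed_at: Option<DateTime<Utc>>,
    error_msg: String,
}

// Only devices that have not completed the step and have no earlier error
// recorded get the timeout error; matching on error_msg != NULL would match
// every row and overwrite earlier error messages.
fn should_set_timeout_error(d: &FuotaDeploymentDevice) -> bool {
    d.completed_at.is_none() && d.error_msg.is_empty()
}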
351406c363 lrwn: Implement v2 app layer fragmentation structs + encoding. 2025-03-13 15:09:03 +00:00
8b59136942 lrwn: Implement v2 app layer multicast setup structs. 2025-03-13 15:09:03 +00:00
b5e562aa64 lrwn: Implement v2 app layer clock sync structs.
These are the same as the v1 structs, but re-exporting them would make the
documentation confusing and would become inconsistent with other app-layer
packages that do provide different struct implementations.
2025-03-13 15:09:03 +00:00
5a7694a3a4 Bump version to 4.12.0-test.1 2025-03-13 15:09:03 +00:00
98ba2f3198 Set device tags after FUOTA complete. 2025-03-13 15:09:03 +00:00
bbdf2dd781 Error if there are no fuota devices + cleanup mc group.
In case there are no fuota devices (e.g. all devices failed the previous
step), this will log a warning and the flow will continue with multicast
cleanup and completion steps.
2025-03-13 15:09:03 +00:00
71cc1aca74 Set FUOTA deployment completed_at. 2025-03-13 15:09:03 +00:00
27f6d2cf03 Implement full FUOTA flow + UI components. 2025-03-13 15:09:03 +00:00
b8ab0182de ui: Make app-layer params configurable. 2025-03-13 15:09:03 +00:00
b1e6c97942 Add get_ and update_device to fuota storage + add return_msg.
The return_msg (text) field can be used to capture errors, e.g. when the
end-device failed to set up the multicast-group.
2025-03-13 15:09:03 +00:00
e75b62f335 lrwn: Add function for encrypting McKey. 2025-03-13 15:09:03 +00:00
cac682c245 Implement handling AppTimeReq / AppTimeAns. 2025-03-13 15:09:03 +00:00
b61a684739 Update fuota + device-keys structs / storage.
This adds the gen_app_key to the device-keys, which is needed for FUOTA,
and adds a random multicast address + key to the fuota deployment. To
the FUOTA job structure, this adds a return msg so that errors can
be captured in the database.
2025-03-13 15:09:03 +00:00
439a6b0542 lrwn: Fix clocksync time_correction type.
The correct type is i32 instead of u32, as the value can be negative.
2025-03-13 15:09:03 +00:00
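A small sketch of why the type matters: the correction is the difference between the network's GPS time and the time reported by the device, which is negative when the device clock runs ahead (function name is illustrative):

// TimeCorrection = server GPS time - device time, in seconds. Wrapping
// subtraction on u32 followed by a cast to i32 yields the correct signed
// value even when the device clock is ahead of the server clock.
fn time_correction(server_gps_secs: u32, device_gps_secs: u32) -> i32 {
    server_gps_secs.wrapping_sub(device_gps_secs) as i32
}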
f9efed2317 Rename ts00x_port to _f_port.
This is consistent with the naming in the lrwn package.
2025-03-13 15:09:03 +00:00
4984e8556d ui: First part of FUOTA UI implementation.
Currently this allows creating FUOTA deployments and adding / removing
devices and gateways. In its current state, it does not show the status
of the FUOTA deployment.
2025-03-13 15:09:03 +00:00
43753958ef api: List devices by device-profile + expose tags. 2025-03-13 15:09:03 +00:00
1d76fabdb0 Add APIs + functions to get app. device-profiles and tags.
Given an application id, these API methods can be used to retrieve
the list of used device-profiles and device tags.
2025-03-13 15:09:03 +00:00
de7e0c619d Update fuota API. Add options for auto-calculation of params.
This adds options to auto-calculate the fragment size (based on max.
payload size available for the given data-rate) and multicast
timeout (based on server settings).
2025-03-13 15:09:03 +00:00
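A hedged sketch of the auto-calculation idea (names, header overhead, and the power-of-two timeout encoding are assumptions):

// Fragment size: the max payload size for the multicast data-rate minus the
// app-layer fragment header overhead.
fn auto_fragment_size(max_payload_size: usize, frag_header_size: usize) -> usize {
    max_payload_size.saturating_sub(frag_header_size)
}

// Multicast timeout: assuming the field encodes 2^timeout seconds, pick the
// smallest exponent (0..=15) that covers the expected session duration.
fn auto_multicast_timeout(expected_session_secs: u32) -> u8 {
    (0u8..=15)
        .find(|n| (1u32 << *n) >= expected_session_secs)
        .unwrap_or(15)
}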
38386b23f2 Add start job + get schedulable jobs functions + API. 2025-03-13 15:09:03 +00:00
a3e27d8b65 Add tests + add fuota jobs functions. 2025-03-13 15:09:03 +00:00
9b735d6521 Add first fuota storage functions / API. 2025-03-13 15:09:03 +00:00
d000cd3385 Add option to filter devices by tags. 2025-03-13 15:09:03 +00:00
ac52cce7ee api: Extend 'limit' field documentation. 2025-03-13 15:09:03 +00:00
bbce25efbf Add app-layer params field to device-profile API. 2025-03-13 15:09:03 +00:00
4e7ab31714 Add app-layer params field to device-profile schema. 2025-03-13 15:09:03 +00:00
3c3c1f125d Refactor device-profile relay fields. 2025-03-13 15:09:03 +00:00
909eaed1ba Refactor device-profile class-c fields. 2025-03-13 15:09:03 +00:00
b8c02b943c Refactor device-profile class-b fields. 2025-03-13 15:09:03 +00:00
82ed66cf09 Refactor device-profile abp fields.
This puts the ABP parameters into a single JSON(B) field, to reduce
the number of device-profile columns that currently exist. The same work
will be done for the Class-B/C and Relay parameters. Once completed, this
means we can drop the diesel '64-column-tables' feature, which will
reduce compile time.
2025-03-13 15:09:03 +00:00
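A sketch of the pattern, assuming a serde struct serialized into the single JSON(B) column (the field names follow the device-profile API mapping in the diff below; the exact layout is an assumption):

use serde::{Deserialize, Serialize};

// All ABP settings collapse into one value stored in a single JSON(B) column,
// replacing the individual abp_* columns on the device-profile table.
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct AbpParams {
    pub rx1_delay: u8,
    pub rx1_dr_offset: u8,
    pub rx2_dr: u8,
    pub rx2_freq: u32,
}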
f3d3262006 lrwn: Implement v1 applayer multicastsetup key functions. 2025-03-13 15:09:03 +00:00
ffe01d387c lrwn: Implement applayer v1 fragmentation encoding func. 2025-03-13 15:09:03 +00:00
d1f4f42a79 lrwn: Implement v1 applayer fragmentation structs. 2025-03-13 15:09:03 +00:00
bf21297a42 lrwn: Replace Duration with u32 in applayer timesync. 2025-03-13 15:09:03 +00:00
bcb8aaad4f lrwn: Implement v1 applayer multicast setup structs. 2025-03-13 15:09:03 +00:00
f43c9154bc lrwn: Implement v1 applayer clock sync structs. 2025-03-13 15:09:03 +00:00
3e7f09db62 Add Yandex ID OAuth provider support. (#622) 2025-03-12 13:03:35 +00:00
133 changed files with 6816 additions and 2353 deletions

Cargo.lock generated

File diff suppressed because it is too large.


@ -6,9 +6,7 @@ dist:
cd chirpstack && make dist
# Install dev dependencies
# TODO: test latest cargo-deb and move it to shell.nix.
dev-dependencies:
cargo install cargo-deb --version 1.43.1 --locked
cargo install cargo-generate-rpm --version 0.12.1 --locked
# Set the versions


@ -328,6 +328,8 @@ const (
Ts003Version_TS003_NOT_IMPLEMENTED Ts003Version = 0
// v1.0.0.
Ts003Version_TS003_V100 Ts003Version = 1
// v2.0.0
Ts003Version_TS003_v200 Ts003Version = 2
)
// Enum value maps for Ts003Version.
@ -335,10 +337,12 @@ var (
Ts003Version_name = map[int32]string{
0: "TS003_NOT_IMPLEMENTED",
1: "TS003_V100",
2: "TS003_v200",
}
Ts003Version_value = map[string]int32{
"TS003_NOT_IMPLEMENTED": 0,
"TS003_V100": 1,
"TS003_v200": 2,
}
)
@ -376,6 +380,8 @@ const (
Ts004Version_TS004_NOT_IMPLEMENTED Ts004Version = 0
// v1.0.0.
Ts004Version_TS004_V100 Ts004Version = 1
// v2.0.0
Ts004Version_TS004_V200 Ts004Version = 2
)
// Enum value maps for Ts004Version.
@ -383,10 +389,12 @@ var (
Ts004Version_name = map[int32]string{
0: "TS004_NOT_IMPLEMENTED",
1: "TS004_V100",
2: "TS004_V200",
}
Ts004Version_value = map[string]int32{
"TS004_NOT_IMPLEMENTED": 0,
"TS004_V100": 1,
"TS004_V200": 2,
}
)
@ -424,6 +432,8 @@ const (
Ts005Version_TS005_NOT_IMPLEMENTED Ts005Version = 0
// v1.0.0.
Ts005Version_TS005_V100 Ts005Version = 1
// v2.0.0
Ts005Version_TS005_V200 Ts005Version = 2
)
// Enum value maps for Ts005Version.
@ -431,10 +441,12 @@ var (
Ts005Version_name = map[int32]string{
0: "TS005_NOT_IMPLEMENTED",
1: "TS005_V100",
2: "TS005_V200",
}
Ts005Version_value = map[string]int32{
"TS005_NOT_IMPLEMENTED": 0,
"TS005_V100": 1,
"TS005_V200": 2,
}
)
@ -2251,18 +2263,21 @@ var file_api_device_profile_proto_rawDesc = []byte{
0x4c, 0x45, 0x5f, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x10, 0x01, 0x12,
0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15,
0x45, 0x4e, 0x44, 0x5f, 0x44, 0x45, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52,
0x4f, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x0c, 0x54, 0x73, 0x30, 0x30, 0x33,
0x4f, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x2a, 0x49, 0x0a, 0x0c, 0x54, 0x73, 0x30, 0x30, 0x33,
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x53, 0x30, 0x30, 0x33,
0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44,
0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x53, 0x30, 0x30, 0x33, 0x5f, 0x56, 0x31, 0x30, 0x30,
0x10, 0x01, 0x2a, 0x39, 0x0a, 0x0c, 0x54, 0x73, 0x30, 0x30, 0x34, 0x56, 0x65, 0x72, 0x73, 0x69,
0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x53, 0x30, 0x30, 0x33, 0x5f, 0x76, 0x32, 0x30, 0x30,
0x10, 0x02, 0x2a, 0x49, 0x0a, 0x0c, 0x54, 0x73, 0x30, 0x30, 0x34, 0x56, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x53, 0x30, 0x30, 0x34, 0x5f, 0x4e, 0x4f, 0x54, 0x5f,
0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a,
0x0a, 0x54, 0x53, 0x30, 0x30, 0x34, 0x5f, 0x56, 0x31, 0x30, 0x30, 0x10, 0x01, 0x2a, 0x39, 0x0a,
0x0a, 0x54, 0x53, 0x30, 0x30, 0x34, 0x5f, 0x56, 0x31, 0x30, 0x30, 0x10, 0x01, 0x12, 0x0e, 0x0a,
0x0a, 0x54, 0x53, 0x30, 0x30, 0x34, 0x5f, 0x56, 0x32, 0x30, 0x30, 0x10, 0x02, 0x2a, 0x49, 0x0a,
0x0c, 0x54, 0x73, 0x30, 0x30, 0x35, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a,
0x15, 0x54, 0x53, 0x30, 0x30, 0x35, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45,
0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x53, 0x30, 0x30,
0x35, 0x5f, 0x56, 0x31, 0x30, 0x30, 0x10, 0x01, 0x32, 0xb8, 0x05, 0x0a, 0x14, 0x44, 0x65, 0x76,
0x35, 0x5f, 0x56, 0x31, 0x30, 0x30, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x53, 0x30, 0x30,
0x35, 0x5f, 0x56, 0x32, 0x30, 0x30, 0x10, 0x02, 0x32, 0xb8, 0x05, 0x0a, 0x14, 0x44, 0x65, 0x76,
0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x12, 0x6c, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72,

api/go/go.mod vendored

@ -1,6 +1,7 @@
module github.com/chirpstack/chirpstack/api/go/v4
go 1.21
toolchain go1.24.1
require (
google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa
@ -10,8 +11,8 @@ require (
require (
github.com/golang/protobuf v1.5.4 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/net v0.36.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa // indirect
)

api/go/go.sum vendored

@ -2,12 +2,12 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa h1:Jt1XW5PaLXF1/ePZrznsh/aAUvI7Adfc3LY1dAKlzRs=
google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:K4kfzHtI0kqWA79gecJarFtDn/Mls+GxQcg3Zox91Ac=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa h1:RBgMaUMP+6soRkik4VoN8ojR2nex2TqZwjSSogic+eo=


@ -1,6 +1,6 @@
{
"name": "@chirpstack/chirpstack-api-grpc-web",
"version": "4.12.0-test.1",
"version": "4.12.0-test.4",
"description": "Chirpstack gRPC-web API",
"license": "MIT",
"devDependencies": {


@ -8,7 +8,7 @@ plugins {
}
group = "io.chirpstack"
version = "4.12.0-test.1"
version = "4.12.0-test.4"
repositories {
mavenCentral()

api/js/package.json vendored

@ -1,6 +1,6 @@
{
"name": "@chirpstack/chirpstack-api",
"version": "4.12.0-test.1",
"version": "4.12.0-test.4",
"description": "Chirpstack JS and TS API",
"license": "MIT",
"devDependencies": {


@ -9,7 +9,7 @@ plugins {
}
group = "io.chirpstack"
version = "4.12.0-test.1"
version = "4.12.0-test.4"
repositories {
mavenCentral()


@ -3,7 +3,7 @@
"description": "Chirpstack PHP API",
"license": "MIT",
"type": "library",
"version": "4.12.0-test.1",
"version": "4.12.0-test.4",
"require": {
"php": ">=7.0.0",
"grpc/grpc": "^v1.57.0",


@ -104,6 +104,9 @@ enum Ts003Version {
// v1.0.0.
TS003_V100 = 1;
// v2.0.0
TS003_v200 = 2;
}
enum Ts004Version {
@ -112,6 +115,9 @@ enum Ts004Version {
// v1.0.0.
TS004_V100 = 1;
// v2.0.0
TS004_V200 = 2;
}
enum Ts005Version {
@ -120,6 +126,9 @@ enum Ts005Version {
// v1.0.0.
TS005_V100 = 1;
// v2.0.0
TS005_V200 = 2;
}
// DeviceProfileService is the service providing API methods for managing


@ -393,6 +393,9 @@ message FuotaDeploymentJob {
// Scheduler run after.
google.protobuf.Timestamp scheduler_run_after = 6;
// Warning message.
string warning_msg = 7;
// Error message.
string error_msg = 7;
string error_msg = 8;
}


@ -18,7 +18,7 @@ CLASSIFIERS = [
setup(
name='chirpstack-api',
version = "4.12.0-test.1",
version = "4.12.0-test.4",
url='https://github.com/brocaar/chirpstack-api',
author='Orne Brocaar',
author_email='info@brocaar.com',

api/rust/Cargo.toml vendored

@ -1,7 +1,7 @@
[package]
name = "chirpstack_api"
description = "ChirpStack Protobuf / gRPC API definitions."
version = "4.12.0-test.1"
version = "4.12.0-test.4"
authors = ["Orne Brocaar <info@brocaar.com>"]
license = "MIT"
homepage = "https://www.chirpstack.io"
@ -18,19 +18,18 @@
prost = "0.13"
prost-types = "0.13"
hex = "0.4"
rand = "0.8"
tonic = { version = "0.12", features = [
rand = "0.9"
tonic = { version = "0.13", features = [
"codegen",
"prost",
], default-features = false, optional = true }
tokio = { version = "1.41", features = ["macros"], optional = true }
tokio = { version = "1.44", features = ["macros"], optional = true }
pbjson = { version = "0.7", optional = true }
pbjson-types = { version = "0.7", optional = true }
serde = { version = "1.0", optional = true }
[build-dependencies]
tonic-build = { version = "0.12", features = [
tonic-build = { version = "0.13", features = [
"prost",
], default-features = false }
pbjson-build = "0.7"


@ -104,6 +104,9 @@ enum Ts003Version {
// v1.0.0.
TS003_V100 = 1;
// v2.0.0
TS003_v200 = 2;
}
enum Ts004Version {
@ -112,6 +115,9 @@ enum Ts004Version {
// v1.0.0.
TS004_V100 = 1;
// v2.0.0
TS004_V200 = 2;
}
enum Ts005Version {
@ -120,6 +126,9 @@ enum Ts005Version {
// v1.0.0.
TS005_V100 = 1;
// v2.0.0
TS005_V200 = 2;
}
// DeviceProfileService is the service providing API methods for managing


@ -393,6 +393,9 @@ message FuotaDeploymentJob {
// Scheduler run after.
google.protobuf.Timestamp scheduler_run_after = 6;
// Warning message.
string warning_msg = 7;
// Error message.
string error_msg = 7;
string error_msg = 8;
}

api/rust/src/gw.rs vendored

@ -115,11 +115,11 @@ impl UplinkFrame {
if let Some(rx_info) = &self.rx_info_legacy {
if self.rx_info.is_none() {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
self.rx_info = Some(UplinkRxInfo {
gateway_id: hex::encode(&rx_info.gateway_id),
uplink_id: rng.gen::<u32>(),
uplink_id: rng.random::<u32>(),
gw_time: rx_info.time,
ns_time: None,
time_since_gps_epoch: rx_info.time_since_gps_epoch,


@ -1,6 +1,6 @@
[package]
name = "backend"
version = "4.12.0-test.1"
version = "4.12.0-test.4"
authors = ["Orne Brocaar <info@brocaar.com>"]
edition = "2018"
publish = false
@ -12,14 +12,14 @@
anyhow = "1.0"
tracing = "0.1"
hex = "0.4"
rand = "0.8"
rand = "0.9"
aes-kw = "0.2"
reqwest = { version = "0.12", features = [
"json",
"rustls-tls",
], default-features = false }
chrono = { version = "0.4", features = ["serde"] }
tokio = { version = "1.42", features = ["macros"] }
tokio = { version = "1.44", features = ["macros"] }
chirpstack_api = { path = "../api/rust", default-features = false, features = [
"json",
] }


@ -3,14 +3,14 @@
description = "Library for building external ChirpStack integrations"
homepage = "https://www.chirpstack.io/"
license = "MIT"
version = "4.12.0-test.1"
version = "4.12.0-test.4"
authors = ["Orne Brocaar <info@brocaar.com>"]
edition = "2021"
repository = "https://github.com/chirpstack/chirpstack"
[dependencies]
chirpstack_api = { path = "../api/rust", version = "4.12.0-test.1" }
redis = { version = "0.27", features = [
chirpstack_api = { path = "../api/rust", version = "4.12.0-test.4" }
redis = { version = "0.29", features = [
"cluster-async",
"tokio-rustls-comp",
] }
@ -23,7 +23,6 @@
], default-features = true }
async-trait = "0.1"
serde = { version = "1.0", features = ["derive"] }
tokio = { version = "1.42", features = ["macros", "rt-multi-thread"] }
lazy_static = "1.5"
tokio = { version = "1.44", features = ["macros", "rt-multi-thread"] }
serde_json = "1.0"
toml = "0.8"


@ -1,8 +1,6 @@
#[macro_use]
extern crate lazy_static;
use std::io::Cursor;
use std::str::FromStr;
use std::sync::LazyLock;
use anyhow::Result;
use async_trait::async_trait;
@ -13,10 +11,8 @@ use tracing_subscriber::{filter, prelude::*};
use chirpstack_api::{integration as integration_pb, prost::Message};
lazy_static! {
static ref INTEGRATION: RwLock<Option<Box<dyn IntegrationTrait + Sync + Send>>> =
RwLock::new(None);
}
static INTEGRATION: LazyLock<RwLock<Option<Box<dyn IntegrationTrait + Sync + Send>>>> =
LazyLock::new(|| RwLock::new(None));
#[derive(Default, Deserialize, Clone)]
#[serde(default)]
@ -203,7 +199,7 @@ impl Integration {
for stream_key in &srr.keys {
for stream_id in &stream_key.ids {
redis::cmd("XACK")
let _: () = redis::cmd("XACK")
.arg(&key)
.arg(&self.consumer_group)
.arg(&stream_id.id)


@ -3,7 +3,7 @@
description = "ChirpStack is an open-source LoRaWAN(TM) Network Server"
repository = "https://github.com/chirpstack/chirpstack"
homepage = "https://www.chirpstack.io/"
version = "4.12.0-test.1"
version = "4.12.0-test.4"
authors = ["Orne Brocaar <info@brocaar.com>"]
edition = "2021"
publish = false
@ -20,7 +20,7 @@
serde_urlencoded = "0.7"
humantime-serde = "1.1"
toml = "0.8"
handlebars = "6.2"
handlebars = "6.3"
validator = { version = "0.20", features = ["derive"] }
# Database
@ -34,8 +34,8 @@
tokio-postgres = { version = "0.7", optional = true }
tokio-postgres-rustls = { version = "0.13", optional = true }
bigdecimal = "0.4"
redis = { version = "0.27", features = ["tls-rustls", "tokio-rustls-comp"] }
deadpool-redis = { version = "0.18", features = ["cluster", "serde"] }
redis = { version = "0.29", features = ["tls-rustls", "tokio-rustls-comp"] }
deadpool-redis = { version = "0.20", features = ["cluster", "serde"] }
# Logging
tracing = "0.1"
@ -78,24 +78,24 @@
] }
# gRPC and Protobuf
tonic = "0.12"
tonic-web = "0.12"
tonic-reflection = "0.12"
tokio = { version = "1.42", features = ["macros", "rt-multi-thread"] }
tonic = "0.13"
tonic-web = "0.13"
tonic-reflection = "0.13"
tokio = { version = "1.44", features = ["macros", "rt-multi-thread"] }
tokio-stream = "0.1"
prost-types = "0.13"
prost = "0.13"
pbjson-types = "0.7"
# gRPC and HTTP multiplexing
axum = "0.7"
axum-server = { version = "0.7.1", features = ["tls-rustls-no-provider"] }
axum = "0.8"
axum-server = { version = "0.7", features = ["tls-rustls-no-provider"] }
tower = { version = "0.5", features = ["util"] }
futures = "0.3"
futures-util = "0.3"
http = "1.1"
http = "1.3"
http-body = "1.0"
rust-embed = "8.5"
rust-embed = "8.7"
mime_guess = "2.0"
tower-http = { version = "0.6", features = ["trace", "auth"] }
@ -105,7 +105,6 @@
# Authentication
pbkdf2 = { version = "0.12", features = ["simple"] }
rand_core = { version = "0.6", features = ["std"] }
jsonwebtoken = "9.3"
rustls = { version = "0.23", default-features = false, features = [
"logging",
@ -116,13 +115,12 @@
rustls-native-certs = "0.8"
rustls-pemfile = "2.2"
pem = "3.0"
x509-parser = "0.16"
x509-parser = "0.17"
rsa = "0.9"
elliptic-curve = { version = "0.13", features = ["pem"] }
p256 = "0.13"
sec1 = { version = "0.7.3", features = ["alloc", "pem", "pkcs8"] }
rcgen = { version = "0.13.1", features = ["x509-parser"] }
oauth2 = "5.0.0-alpha.4"
openidconnect = { version = "4.0.0-alpha.2", features = [
oauth2 = "5.0.0"
openidconnect = { version = "4.0.0", features = [
"accept-rfc3339-timestamps",
] }
@ -131,7 +129,7 @@
hex = "0.4"
# Codecs
rquickjs = { version = "0.8", features = [
rquickjs = { version = "0.9", features = [
"bindgen",
"loader",
"array-buffer",
@ -139,17 +137,16 @@
] }
# Misc
lazy_static = "1.5"
uuid = { version = "1.11", features = ["v4", "serde"] }
uuid = { version = "1.16", features = ["v4", "serde"] }
chrono = "0.4"
async-trait = "0.1"
aes = "0.8"
rand = "0.8"
rand = "0.9"
base64 = "0.22"
async-recursion = "1.1"
regex = "1.11"
petgraph = "0.6"
prometheus-client = "0.22"
petgraph = "0.7"
prometheus-client = "0.23"
pin-project = "1.1"
scoped-futures = { version = "0.1", features = ["std"] }
signal-hook = "0.3"
@ -158,7 +155,7 @@
# Development and testing
[dev-dependencies]
httpmock = "0.7.0"
bytes = "1.8"
bytes = "1.10"
dotenv = "0.15"
[features]


@ -47,12 +47,12 @@ dist:
test:
cargo fmt --check
cargo clippy --no-deps --no-default-features --features="$(DATABASE)"
TZ=UTC cargo test --no-default-features --features="$(DATABASE)"
RUST_MIN_STACK=8388608 TZ=UTC cargo test --no-default-features --features="$(DATABASE)"
test-all:
cargo fmt --check
cargo clippy --no-deps --no-default-features --features="$(DATABASE)"
TZ=UTC cargo test --no-default-features --features="$(DATABASE),test-all-integrations"
RUST_MIN_STACK=8388608 TZ=UTC cargo test --no-default-features --features="$(DATABASE),test-all-integrations"
migration-generate:
ifeq ($(NAME),)


@ -26,7 +26,7 @@ insert into "user" (
password_hash,
note
) values (
'05244f12-6daf-4e1f-8315-c66783a0ab56',
gen_random_uuid(),
now(),
now(),
true,
@ -63,7 +63,7 @@ insert into "tenant" (
max_gateway_count,
private_gateways
) values (
'52f14cd4-c6f1-4fbd-8f87-4025e1d49242',
gen_random_uuid(),
now(),
now(),
'ChirpStack',


@ -1,3 +1,6 @@
alter table device
drop column app_layer_params;
alter table device_keys
drop column gen_app_key;


@ -15,6 +15,8 @@ create table fuota_deployment (
multicast_class_b_ping_slot_nb_k smallint not null,
multicast_frequency bigint not null,
multicast_timeout smallint not null,
multicast_session_start timestamp with time zone null,
multicast_session_end timestamp with time zone null,
unicast_max_retry_count smallint not null,
fragmentation_fragment_size smallint not null,
fragmentation_redundancy_percentage smallint not null,
@ -57,6 +59,7 @@ create table fuota_deployment_job (
max_retry_count smallint not null,
attempt_count smallint not null,
scheduler_run_after timestamp with time zone not null,
warning_msg text not null,
error_msg text not null,
primary key (fuota_deployment_id, job)
@ -70,3 +73,9 @@ alter table device_keys
alter table device_keys
alter column gen_app_key drop default;
alter table device
add column app_layer_params jsonb not null default '{}';
alter table device
alter column app_layer_params drop default;


@ -1,3 +1,6 @@
alter table device
drop column app_layer_params;
alter table device_keys
drop column gen_app_key;


@ -15,6 +15,8 @@ create table fuota_deployment (
multicast_class_b_ping_slot_nb_k smallint not null,
multicast_frequency bigint not null,
multicast_timeout smallint not null,
multicast_session_start datetime null,
multicast_session_end datetime null,
unicast_max_retry_count smallint not null,
fragmentation_fragment_size smallint not null,
fragmentation_redundancy_percentage smallint not null,
@ -57,6 +59,7 @@ create table fuota_deployment_job (
max_retry_count smallint not null,
attempt_count smallint not null,
scheduler_run_after datetime not null,
warning_msg text not null,
error_msg text not null,
primary key (fuota_deployment_id, job)
@ -67,3 +70,6 @@ create index idx_fuota_deployment_job_scheduler_run_after on fuota_deployment_jo
alter table device_keys
add column gen_app_key blob not null default x'00000000000000000000000000000000';
alter table device
add column app_layer_params text not null default '{}';


@ -1,6 +1,6 @@
use anyhow::{Context, Result};
use async_trait::async_trait;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;
use super::{Handler, Request, Response};
use crate::region;
@ -134,7 +134,7 @@ impl Handler for Algorithm {
// In case there are multiple with the same coding-rate, we take
// a random one.
resp.dr = drs
.choose(&mut rand::thread_rng())
.choose(&mut rand::rng())
.cloned()
.ok_or_else(|| anyhow!("Random returned None"))?;
resp.nb_trans = 1; // 1 is the recommeded value


@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::sync::LazyLock;
use anyhow::Result;
use async_trait::async_trait;
@ -14,10 +15,8 @@ pub mod lora_lr_fhss;
pub mod lr_fhss;
pub mod plugin;
lazy_static! {
static ref ADR_ALGORITHMS: RwLock<HashMap<String, Box<dyn Handler + Sync + Send>>> =
RwLock::new(HashMap::new());
}
static ADR_ALGORITHMS: LazyLock<RwLock<HashMap<String, Box<dyn Handler + Sync + Send>>>> =
LazyLock::new(|| RwLock::new(HashMap::new()));
pub async fn setup() -> Result<()> {
info!("Setting up adr algorithms");


@ -3,7 +3,7 @@ use rand::RngCore;
use lrwn::AES128Key;
pub fn get_random_aes_key() -> AES128Key {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
let mut key: [u8; 16] = [0; 16];
rng.fill_bytes(&mut key);
AES128Key::from_bytes(key)


@ -2010,7 +2010,7 @@ pub mod test {
let mut create_req = Request::new(create_req);
create_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let create_resp = service.create(create_req).await.unwrap();
let create_resp = create_resp.get_ref();
@ -2021,7 +2021,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::Application {
@ -2045,7 +2045,7 @@ pub mod test {
let mut up_req = Request::new(up_req);
up_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.update(up_req).await.unwrap();
//get
@ -2055,7 +2055,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::Application {
@ -2077,7 +2077,7 @@ pub mod test {
let mut list_req = Request::new(list_req);
list_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let list_resp = service.list(list_req).await.unwrap();
assert_eq!(1, list_resp.get_ref().total_count);
assert_eq!(1, list_resp.get_ref().result.len());
@ -2089,7 +2089,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.delete(del_req).await.unwrap();
let del_req = api::DeleteApplicationRequest {
@ -2098,7 +2098,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let del_resp = service.delete(del_req).await;
assert!(del_resp.is_err());
}


@ -2483,7 +2483,7 @@ pub mod test {
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_user.id.into(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -2491,7 +2491,7 @@ pub mod test {
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_admin.id.into(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
@ -2727,7 +2727,7 @@ pub mod test {
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_admin.id.into(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
@ -2735,21 +2735,21 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_user.id.into(),
user_id: tenant_user.id,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_user.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_user.id,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_user_other.id.into(),
user_id: tenant_user_other.id,
..Default::default()
})
.await
@ -3091,7 +3091,7 @@ pub mod test {
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_admin.id.into(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
@ -3099,7 +3099,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_device_admin.id.into(),
user_id: tenant_device_admin.id,
is_device_admin: true,
..Default::default()
})
@ -3107,7 +3107,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_gateway_admin.id.into(),
user_id: tenant_gateway_admin.id,
is_gateway_admin: true,
..Default::default()
})
@ -3115,7 +3115,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_user.id.into(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -3538,7 +3538,7 @@ pub mod test {
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_admin.id.into(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
@ -3546,7 +3546,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_device_admin.id.into(),
user_id: tenant_device_admin.id,
is_device_admin: true,
..Default::default()
})
@ -3554,7 +3554,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_gateway_admin.id.into(),
user_id: tenant_gateway_admin.id,
is_gateway_admin: true,
..Default::default()
})
@ -3562,7 +3562,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_user.id.into(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -3840,32 +3840,32 @@ pub mod test {
.await;
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_admin.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_device_admin.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_device_admin.id,
is_device_admin: true,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_gateway_admin.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_gateway_admin.id,
is_gateway_admin: true,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_user.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -4093,8 +4093,8 @@ pub mod test {
.await;
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_user.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -4237,7 +4237,7 @@ pub mod test {
let gw_api_key_tenant = gateway::create(gateway::Gateway {
name: "test-gw-tenant".into(),
gateway_id: EUI64::from_str("0202030405060708").unwrap(),
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
..Default::default()
})
.await
@ -4245,7 +4245,7 @@ pub mod test {
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_admin.id.into(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
@ -4253,7 +4253,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_gateway_admin.id.into(),
user_id: tenant_gateway_admin.id,
is_gateway_admin: true,
..Default::default()
})
@ -4261,7 +4261,7 @@ pub mod test {
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: tenant_a.id,
user_id: tenant_user.id.into(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -4513,32 +4513,32 @@ pub mod test {
.await;
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_admin.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_admin.id,
is_admin: true,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_device_admin.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_device_admin.id,
is_device_admin: true,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_gateway_admin.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_gateway_admin.id,
is_gateway_admin: true,
..Default::default()
})
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
user_id: tenant_user.id.into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_user.id,
..Default::default()
})
.await
@ -4901,7 +4901,7 @@ pub mod test {
.await
.unwrap();
tenant::add_user(tenant::TenantUser {
tenant_id: api_key_tenant.tenant_id.unwrap().into(),
tenant_id: api_key_tenant.tenant_id.unwrap(),
user_id: tenant_device_admin.id,
is_device_admin: true,
..Default::default()


@ -88,7 +88,7 @@ impl DeviceProfileService for DeviceProfile {
rx1_delay: req_dp.abp_rx1_delay as u8,
rx1_dr_offset: req_dp.abp_rx1_dr_offset as u8,
rx2_dr: req_dp.abp_rx2_dr as u8,
rx2_freq: req_dp.abp_rx2_freq as u32,
rx2_freq: req_dp.abp_rx2_freq,
})
},
class_b_params: if req_dp.supports_class_b {
@ -96,7 +96,7 @@ impl DeviceProfileService for DeviceProfile {
timeout: req_dp.class_b_timeout as u16,
ping_slot_nb_k: req_dp.class_b_ping_slot_nb_k as u8,
ping_slot_dr: req_dp.class_b_ping_slot_dr as u8,
ping_slot_freq: req_dp.class_b_ping_slot_freq as u32,
ping_slot_freq: req_dp.class_b_ping_slot_freq,
})
} else {
None
@ -116,7 +116,7 @@ impl DeviceProfileService for DeviceProfile {
relay_enabled: req_dp.relay_enabled,
relay_cad_periodicity: req_dp.relay_cad_periodicity as u8,
default_channel_index: req_dp.relay_default_channel_index as u8,
second_channel_freq: req_dp.relay_second_channel_freq as u32,
second_channel_freq: req_dp.relay_second_channel_freq,
second_channel_dr: req_dp.relay_second_channel_dr as u8,
second_channel_ack_offset: req_dp.relay_second_channel_ack_offset as u8,
ed_activation_mode: req_dp.relay_ed_activation_mode().from_proto(),
@ -344,7 +344,7 @@ impl DeviceProfileService for DeviceProfile {
rx1_delay: req_dp.abp_rx1_delay as u8,
rx1_dr_offset: req_dp.abp_rx1_dr_offset as u8,
rx2_dr: req_dp.abp_rx2_dr as u8,
rx2_freq: req_dp.abp_rx2_freq as u32,
rx2_freq: req_dp.abp_rx2_freq,
})
},
class_b_params: if req_dp.supports_class_b {
@ -352,7 +352,7 @@ impl DeviceProfileService for DeviceProfile {
timeout: req_dp.class_b_timeout as u16,
ping_slot_nb_k: req_dp.class_b_ping_slot_nb_k as u8,
ping_slot_dr: req_dp.class_b_ping_slot_dr as u8,
ping_slot_freq: req_dp.class_b_ping_slot_freq as u32,
ping_slot_freq: req_dp.class_b_ping_slot_freq,
})
} else {
None
@ -372,7 +372,7 @@ impl DeviceProfileService for DeviceProfile {
relay_enabled: req_dp.relay_enabled,
relay_cad_periodicity: req_dp.relay_cad_periodicity as u8,
default_channel_index: req_dp.relay_default_channel_index as u8,
second_channel_freq: req_dp.relay_second_channel_freq as u32,
second_channel_freq: req_dp.relay_second_channel_freq,
second_channel_dr: req_dp.relay_second_channel_dr as u8,
second_channel_ack_offset: req_dp.relay_second_channel_ack_offset as u8,
ed_activation_mode: req_dp.relay_ed_activation_mode().from_proto(),
@ -406,7 +406,6 @@ impl DeviceProfileService for DeviceProfile {
ts004_f_port: app_layer_params.ts004_f_port as u8,
ts005_version: app_layer_params.ts005_version().from_proto(),
ts005_f_port: app_layer_params.ts005_f_port as u8,
..Default::default()
}
},
..Default::default()


@ -50,9 +50,9 @@ impl ToStatus for storage::error::Error {
storage::error::Error::ValidatorValidate(_) => {
Status::new(Code::InvalidArgument, format!("{:#}", self))
}
storage::error::Error::MultiError(errors) => {
storage::error::Error::Multi(errors) => {
let errors = errors
.into_iter()
.iter()
.map(|e| e.to_string())
.collect::<Vec<String>>()
.join(", ");


@ -346,11 +346,11 @@ impl FuotaService for Fuota {
started_at: d
.started_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
completed_at: d
.completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
name: d.name.clone(),
})
.collect(),
@ -462,23 +462,23 @@ impl FuotaService for Fuota {
completed_at: d
.completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
mc_group_setup_completed_at: d
.mc_group_setup_completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
mc_session_completed_at: d
.mc_session_completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
frag_session_setup_completed_at: d
.frag_session_setup_completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
frag_status_completed_at: d
.frag_status_completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
error_msg: d.error_msg.clone(),
})
.collect(),
@ -624,12 +624,13 @@ impl FuotaService for Fuota {
completed_at: j
.completed_at
.as_ref()
.map(|ts| helpers::datetime_to_prost_timestamp(ts)),
.map(helpers::datetime_to_prost_timestamp),
max_retry_count: j.max_retry_count as u32,
attempt_count: j.attempt_count as u32,
scheduler_run_after: Some(helpers::datetime_to_prost_timestamp(
&j.scheduler_run_after,
)),
warning_msg: j.warning_msg.clone(),
error_msg: j.error_msg.clone(),
})
.collect(),


@ -1036,7 +1036,7 @@ pub mod test {
let mut create_req = Request::new(create_req);
create_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.create(create_req).await.unwrap();
// get
@ -1046,7 +1046,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::Gateway {
@ -1082,7 +1082,7 @@ pub mod test {
let mut up_req = Request::new(up_req);
up_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.update(up_req).await.unwrap();
// get
@ -1092,7 +1092,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::Gateway {
@ -1121,7 +1121,7 @@ pub mod test {
let mut list_req = Request::new(list_req);
list_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let list_resp = service.list(list_req).await.unwrap();
assert_eq!(1, list_resp.get_ref().total_count);
assert_eq!(1, list_resp.get_ref().result.len());
@ -1133,7 +1133,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.delete(del_req).await.unwrap();
let del_req = api::DeleteGatewayRequest {
@ -1142,7 +1142,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let del_resp = service.delete(del_req).await;
assert!(del_resp.is_err());
}
@ -1220,7 +1220,7 @@ pub mod test {
let mut stats_req = Request::new(stats_req);
stats_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let stats_resp = service.get_metrics(stats_req).await.unwrap();
let stats_resp = stats_resp.get_ref();
assert_eq!(


@ -292,6 +292,7 @@ impl ToProto<api::Ts003Version> for Option<fields::device_profile::Ts003Version>
match self {
None => api::Ts003Version::Ts003NotImplemented,
Some(fields::device_profile::Ts003Version::V100) => api::Ts003Version::Ts003V100,
Some(fields::device_profile::Ts003Version::V200) => api::Ts003Version::Ts003V200,
}
}
}
@ -301,6 +302,7 @@ impl FromProto<Option<fields::device_profile::Ts003Version>> for api::Ts003Versi
match self {
api::Ts003Version::Ts003NotImplemented => None,
api::Ts003Version::Ts003V100 => Some(fields::device_profile::Ts003Version::V100),
api::Ts003Version::Ts003V200 => Some(fields::device_profile::Ts003Version::V200),
}
}
}
@ -310,6 +312,7 @@ impl ToProto<api::Ts004Version> for Option<fields::device_profile::Ts004Version>
match self {
None => api::Ts004Version::Ts004NotImplemented,
Some(fields::device_profile::Ts004Version::V100) => api::Ts004Version::Ts004V100,
Some(fields::device_profile::Ts004Version::V200) => api::Ts004Version::Ts004V200,
}
}
}
@ -319,6 +322,7 @@ impl FromProto<Option<fields::device_profile::Ts004Version>> for api::Ts004Versi
match self {
api::Ts004Version::Ts004NotImplemented => None,
api::Ts004Version::Ts004V100 => Some(fields::device_profile::Ts004Version::V100),
api::Ts004Version::Ts004V200 => Some(fields::device_profile::Ts004Version::V200),
}
}
}
@ -328,6 +332,7 @@ impl ToProto<api::Ts005Version> for Option<fields::device_profile::Ts005Version>
match self {
None => api::Ts005Version::Ts005NotImplemented,
Some(fields::device_profile::Ts005Version::V100) => api::Ts005Version::Ts005V100,
Some(fields::device_profile::Ts005Version::V200) => api::Ts005Version::Ts005V200,
}
}
}
@ -337,6 +342,7 @@ impl FromProto<Option<fields::device_profile::Ts005Version>> for api::Ts005Versi
match self {
api::Ts005Version::Ts005NotImplemented => None,
api::Ts005Version::Ts005V100 => Some(fields::device_profile::Ts005Version::V100),
api::Ts005Version::Ts005V200 => Some(fields::device_profile::Ts005Version::V200),
}
}
}


@ -1,3 +1,4 @@
use std::sync::LazyLock;
use std::time::{Duration, Instant};
use std::{
future::Future,
@ -5,7 +6,7 @@ use std::{
task::{Context, Poll},
};
use anyhow::Result;
use anyhow::{Context as AnyhowContext, Result};
use axum::{response::IntoResponse, routing::get, Router};
use http::{
header::{self, HeaderMap, HeaderValue},
@ -67,33 +68,31 @@ pub mod relay;
pub mod tenant;
pub mod user;
lazy_static! {
static ref GRPC_COUNTER: Family<GrpcLabels, Counter> = {
let counter = Family::<GrpcLabels, Counter>::default();
prometheus::register(
"api_requests_handled",
"Number of API requests handled by service, method and status code",
counter.clone(),
);
counter
};
static ref GRPC_HISTOGRAM: Family<GrpcLabels, Histogram> = {
let histogram = Family::<GrpcLabels, Histogram>::new_with_constructor(|| {
Histogram::new(
[
0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
]
.into_iter(),
)
});
prometheus::register(
"api_requests_handled_seconds",
"Duration of API requests handled by service, method and status code",
histogram.clone(),
);
histogram
};
}
static GRPC_COUNTER: LazyLock<Family<GrpcLabels, Counter>> = LazyLock::new(|| {
let counter = Family::<GrpcLabels, Counter>::default();
prometheus::register(
"api_requests_handled",
"Number of API requests handled by service, method and status code",
counter.clone(),
);
counter
});
static GRPC_HISTOGRAM: LazyLock<Family<GrpcLabels, Histogram>> = LazyLock::new(|| {
let histogram = Family::<GrpcLabels, Histogram>::new_with_constructor(|| {
Histogram::new(
[
0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
]
.into_iter(),
)
});
prometheus::register(
"api_requests_handled_seconds",
"Duration of API requests handled by service, method and status code",
histogram.clone(),
);
histogram
});
#[derive(RustEmbed)]
#[folder = "../ui/build"]
@ -103,7 +102,7 @@ type BoxError = Box<dyn std::error::Error + Send + Sync>;
pub async fn setup() -> Result<()> {
let conf = config::get();
let bind = conf.api.bind.parse()?;
let bind = conf.api.bind.parse().context("Parse api.bind config")?;
info!(bind = %bind, "Setting up API interface");
@ -114,7 +113,7 @@ pub async fn setup() -> Result<()> {
.route("/auth/oauth2/callback", get(oauth2::callback_handler))
.fallback(service_static_handler)
.into_service()
.map_response(|r| r.map(tonic::body::boxed));
.map_response(|r| r.map(tonic::body::Body::new));
let grpc = TonicServer::builder()
.accept_http1(true)


@ -28,6 +28,12 @@ struct ClerkUserinfo {
pub user_id: String,
}
#[derive(Deserialize)]
struct YandexUserinfo {
pub default_email: String,
pub id: String,
}
#[derive(Deserialize)]
pub struct CallbackArgs {
pub code: String,
@ -129,9 +135,11 @@ pub async fn get_user(code: &str, state: &str) -> Result<User> {
let conf = config::get();
let provider = conf.user_authentication.oauth2.provider.clone();
let userinfo_url = conf.user_authentication.oauth2.userinfo_url.clone();
let assume_email_verified = conf.user_authentication.oauth2.assume_email_verified;
match provider.as_ref() {
"clerk" => get_clerk_user(access_token, &userinfo_url).await,
"yandex" => get_yandex_user(access_token, &userinfo_url, assume_email_verified).await,
_ => Err(anyhow!("Unsupported OAuth2 provider: {}", provider)),
}
}
@ -155,6 +163,25 @@ async fn get_clerk_user(token: &str, url: &str) -> Result<User> {
})
}
async fn get_yandex_user(token: &str, url: &str, assume_email_verified: bool) -> Result<User> {
let client = reqwest::Client::new();
let auth_header = format!("Bearer {}", token);
let resp: YandexUserinfo = client
.get(url)
.header(AUTHORIZATION, auth_header)
.send()
.await?
.json()
.await?;
Ok(User {
email: resp.default_email,
email_verified: assume_email_verified,
external_id: resp.id,
})
}
async fn store_verifier(
token: &oauth2::CsrfToken,
verifier: &oauth2::PkceCodeVerifier,


@ -484,7 +484,7 @@ pub mod test {
let mut create_req = Request::new(create_req);
create_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let create_resp = service.create(create_req).await.unwrap();
// get
@ -494,7 +494,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::Tenant {
@ -524,7 +524,7 @@ pub mod test {
let mut up_req = Request::new(up_req);
up_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.update(up_req).await.unwrap();
// get
@ -534,7 +534,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::Tenant {
@ -559,7 +559,7 @@ pub mod test {
let mut list_req = Request::new(list_req);
list_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let list_resp = service.list(list_req).await.unwrap();
assert_eq!(1, list_resp.get_ref().total_count);
assert_eq!(1, list_resp.get_ref().result.len());
@ -571,7 +571,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.delete(del_req).await.unwrap();
let del_req = api::DeleteTenantRequest {
@ -580,7 +580,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let del_resp = service.delete(del_req).await;
assert!(del_resp.is_err());
}


@ -294,7 +294,7 @@ pub mod test {
let mut create_req = Request::new(create_req);
create_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let create_resp = service.create(create_req).await.unwrap();
// get
@ -304,7 +304,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::User {
@ -332,7 +332,7 @@ pub mod test {
let mut up_req = Request::new(up_req);
up_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.update(up_req).await.unwrap();
// get
@ -342,7 +342,7 @@ pub mod test {
let mut get_req = Request::new(get_req);
get_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let get_resp = service.get(get_req).await.unwrap();
assert_eq!(
Some(api::User {
@ -364,7 +364,7 @@ pub mod test {
let mut up_req = Request::new(up_req);
up_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.update_password(up_req).await.unwrap();
// list
@ -375,7 +375,7 @@ pub mod test {
let mut list_req = Request::new(list_req);
list_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let list_resp = service.list(list_req).await.unwrap();
// * Admin from migrations
// * User that we created for auth
@ -390,7 +390,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let _ = service.delete(del_req).await.unwrap();
let del_req = api::DeleteUserRequest {
@ -399,7 +399,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let del_resp = service.delete(del_req).await;
assert!(del_resp.is_err());
@ -409,7 +409,7 @@ pub mod test {
let mut del_req = Request::new(del_req);
del_req
.extensions_mut()
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id).clone()));
.insert(AuthID::User(Into::<uuid::Uuid>::into(u.id)));
let del_resp = service.delete(del_req).await;
assert!(del_resp.is_err());
}


@ -21,6 +21,7 @@ pub async fn handle_uplink(
match version {
Ts003Version::V100 => handle_uplink_v100(dev, dp, rx_info, data).await,
Ts003Version::V200 => handle_uplink_v200(dev, dp, rx_info, data).await,
}
}
@ -32,11 +33,23 @@ async fn handle_uplink_v100(
) -> Result<()> {
let pl = clocksync::v1::Payload::from_slice(true, data)?;
match pl {
clocksync::v1::Payload::AppTimeReq(pl) => {
handle_v1_app_time_req(dev, dp, rx_info, pl).await?
}
_ => {}
if let clocksync::v1::Payload::AppTimeReq(pl) = pl {
handle_v1_app_time_req(dev, dp, rx_info, pl).await?
}
Ok(())
}
async fn handle_uplink_v200(
dev: &device::Device,
dp: &device_profile::DeviceProfile,
rx_info: &[gw::UplinkRxInfo],
data: &[u8],
) -> Result<()> {
let pl = clocksync::v2::Payload::from_slice(true, data)?;
if let clocksync::v2::Payload::AppTimeReq(pl) = pl {
handle_v2_app_time_req(dev, dp, rx_info, pl).await?
}
Ok(())
@ -91,6 +104,55 @@ async fn handle_v1_app_time_req(
Ok(())
}
async fn handle_v2_app_time_req(
dev: &device::Device,
dp: &device_profile::DeviceProfile,
rx_info: &[gw::UplinkRxInfo],
pl: clocksync::v2::AppTimeReqPayload,
) -> Result<()> {
info!("Handling AppTimeReq");
let now_time_since_gps = if let Some(t) = helpers::get_time_since_gps_epoch(rx_info) {
chrono::Duration::from_std(t)?
} else {
helpers::get_rx_timestamp_chrono(rx_info).to_gps_time()
};
let dev_time_since_gps = chrono::Duration::seconds(pl.device_time.into());
let time_diff = (now_time_since_gps - dev_time_since_gps).num_seconds();
let time_correction: i32 = if time_diff < 0 {
time_diff.try_into().unwrap_or(i32::MIN)
} else {
time_diff.try_into().unwrap_or(i32::MAX)
};
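// The answer carries a signed 32-bit correction, so the (network - device) difference in
// seconds is clamped into i32 range rather than allowed to overflow. For example, a
// network time of 1234 s and a device time of 1200 s since the GPS epoch yields a
// time_correction of +34 s; swapping the two yields -34 s (see the tests below).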
if time_diff == 0 && !pl.param.ans_required {
return Ok(());
}
info!(
time_correction = time_correction,
"Responding with AppTimeAns"
);
let ans = clocksync::v2::Payload::AppTimeAns(clocksync::v2::AppTimeAnsPayload {
time_correction,
param: clocksync::v2::AppTimeAnsPayloadParam {
token_ans: pl.param.token_req,
},
});
device_queue::enqueue_item(device_queue::DeviceQueueItem {
dev_eui: dev.dev_eui,
f_port: dp.app_layer_params.ts003_f_port.into(),
data: ans.to_vec()?,
..Default::default()
})
.await?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
@ -113,7 +175,7 @@ mod test {
Test {
name: "device synced".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1234).try_into().unwrap()),
time_since_gps_epoch: Some(Duration::from_secs(1234).into()),
..Default::default()
},
req: clocksync::v1::AppTimeReqPayload {
@ -128,7 +190,7 @@ mod test {
Test {
name: "device synced - ans required".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1234).try_into().unwrap()),
time_since_gps_epoch: Some(Duration::from_secs(1234).into()),
..Default::default()
},
req: clocksync::v1::AppTimeReqPayload {
@ -146,7 +208,7 @@ mod test {
Test {
name: "device not synced (positive correction)".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1234).try_into().unwrap()),
time_since_gps_epoch: Some(Duration::from_secs(1234).into()),
..Default::default()
},
req: clocksync::v1::AppTimeReqPayload {
@ -164,7 +226,7 @@ mod test {
Test {
name: "device not synced (negative correction)".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1200).try_into().unwrap()),
time_since_gps_epoch: Some(Duration::from_secs(1200).into()),
..Default::default()
},
req: clocksync::v1::AppTimeReqPayload {
@ -248,4 +310,153 @@ mod test {
}
}
}
#[tokio::test]
async fn test_handle_v2_app_time_req() {
struct Test {
name: String,
rx_info: gw::UplinkRxInfo,
req: clocksync::v2::AppTimeReqPayload,
expected: Option<clocksync::v2::AppTimeAnsPayload>,
}
let tests = vec![
Test {
name: "device synced".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1234).into()),
..Default::default()
},
req: clocksync::v2::AppTimeReqPayload {
device_time: 1234,
param: clocksync::v2::AppTimeReqPayloadParam {
token_req: 8,
ans_required: false,
},
},
expected: None,
},
Test {
name: "device synced - ans required".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1234).into()),
..Default::default()
},
req: clocksync::v2::AppTimeReqPayload {
device_time: 1234,
param: clocksync::v2::AppTimeReqPayloadParam {
token_req: 8,
ans_required: true,
},
},
expected: Some(clocksync::v2::AppTimeAnsPayload {
time_correction: 0,
param: clocksync::v2::AppTimeAnsPayloadParam { token_ans: 8 },
}),
},
Test {
name: "device not synced (positive correction)".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1234).into()),
..Default::default()
},
req: clocksync::v2::AppTimeReqPayload {
device_time: 1200,
param: clocksync::v2::AppTimeReqPayloadParam {
token_req: 8,
ans_required: false,
},
},
expected: Some(clocksync::v2::AppTimeAnsPayload {
time_correction: 34,
param: clocksync::v2::AppTimeAnsPayloadParam { token_ans: 8 },
}),
},
Test {
name: "device not synced (negative correction)".into(),
rx_info: gw::UplinkRxInfo {
time_since_gps_epoch: Some(Duration::from_secs(1200).into()),
..Default::default()
},
req: clocksync::v2::AppTimeReqPayload {
device_time: 1234,
param: clocksync::v2::AppTimeReqPayloadParam {
token_req: 8,
ans_required: false,
},
},
expected: Some(clocksync::v2::AppTimeAnsPayload {
time_correction: -34,
param: clocksync::v2::AppTimeAnsPayloadParam { token_ans: 8 },
}),
},
];
let _guard = test::prepare().await;
let t = tenant::create(tenant::Tenant {
name: "test-tenant".into(),
..Default::default()
})
.await
.unwrap();
let app = application::create(application::Application {
name: "test-app".into(),
tenant_id: t.id,
..Default::default()
})
.await
.unwrap();
let dp = device_profile::create(device_profile::DeviceProfile {
name: "test-dp".into(),
tenant_id: t.id,
app_layer_params: fields::AppLayerParams {
ts003_version: Some(Ts003Version::V200),
..Default::default()
},
..Default::default()
})
.await
.unwrap();
let d = device::create(device::Device {
name: "test-dev".into(),
dev_eui: EUI64::from_be_bytes([1, 2, 3, 4, 5, 6, 7, 8]),
application_id: app.id,
device_profile_id: dp.id,
..Default::default()
})
.await
.unwrap();
for tst in &tests {
println!("> {}", tst.name);
device_queue::flush_for_dev_eui(&d.dev_eui).await.unwrap();
let pl = clocksync::v2::Payload::AppTimeReq(tst.req.clone());
handle_uplink(
&d,
&dp,
&[tst.rx_info.clone()],
dp.app_layer_params.ts003_f_port,
&pl.to_vec().unwrap(),
)
.await;
let queue_items = device_queue::get_for_dev_eui(&d.dev_eui).await.unwrap();
if let Some(expected_pl) = &tst.expected {
assert_eq!(1, queue_items.len());
let qi = queue_items.first().unwrap();
assert_eq!(dp.app_layer_params.ts003_f_port as i16, qi.f_port);
let qi_pl = clocksync::v2::Payload::from_slice(false, &qi.data).unwrap();
let expected_pl = clocksync::v2::Payload::AppTimeAns(expected_pl.clone());
assert_eq!(expected_pl, qi_pl);
} else {
assert!(queue_items.is_empty());
}
}
}
}


@ -18,6 +18,7 @@ pub async fn handle_uplink(
match version {
Ts004Version::V100 => handle_uplink_v100(dev, data).await,
Ts004Version::V200 => handle_uplink_v200(dev, data).await,
}
}
@ -37,6 +38,22 @@ async fn handle_uplink_v100(dev: &device::Device, data: &[u8]) -> Result<()> {
Ok(())
}
async fn handle_uplink_v200(dev: &device::Device, data: &[u8]) -> Result<()> {
let pl = fragmentation::v2::Payload::from_slice(true, data)?;
match pl {
fragmentation::v2::Payload::FragSessionSetupAns(pl) => {
handle_v2_frag_session_setup_ans(dev, pl).await?
}
fragmentation::v2::Payload::FragSessionStatusAns(pl) => {
handle_v2_frag_session_status_ans(dev, pl).await?
}
_ => {}
}
Ok(())
}
async fn handle_v1_frag_session_setup_ans(
dev: &device::Device,
pl: fragmentation::v1::FragSessionSetupAnsPayload,
@ -68,6 +85,39 @@ async fn handle_v1_frag_session_setup_ans(
Ok(())
}
async fn handle_v2_frag_session_setup_ans(
dev: &device::Device,
pl: fragmentation::v2::FragSessionSetupAnsPayload,
) -> Result<()> {
info!("Handling FragSessionSetupAns");
let mut fuota_dev = fuota::get_latest_device_by_dev_eui(dev.dev_eui).await?;
if pl.frag_algo_unsupported
| pl.not_enough_memory
| pl.frag_index_unsupported
| pl.wrong_descriptor
| pl.session_cnt_replay
{
warn!(
frag_index = pl.frag_index,
frag_algo_unsupported = pl.frag_algo_unsupported,
not_enough_memory = pl.not_enough_memory,
frag_index_unsupported = pl.frag_index_unsupported,
wrong_descriptor = pl.wrong_descriptor,
session_cnt_replay = pl.session_cnt_replay,
"FragSessionAns contains errors"
);
fuota_dev.error_msg = format!("Error: FragSessionAns response frag_algo_unsupported={}, not_enough_memory={}, frag_index_unsupported={}, wrong_descriptor={}, session_cnt_replay={}", pl.frag_algo_unsupported, pl.not_enough_memory, pl.frag_index_unsupported, pl.wrong_descriptor, pl.session_cnt_replay);
} else {
fuota_dev.frag_session_setup_completed_at = Some(Utc::now());
}
let _ = fuota::update_device(fuota_dev).await?;
Ok(())
}
async fn handle_v1_frag_session_status_ans(
dev: &device::Device,
pl: fragmentation::v1::FragSessionStatusAnsPayload,
@ -94,3 +144,36 @@ async fn handle_v1_frag_session_status_ans(
Ok(())
}
async fn handle_v2_frag_session_status_ans(
dev: &device::Device,
pl: fragmentation::v2::FragSessionStatusAnsPayload,
) -> Result<()> {
info!("Handling FragSessionStatusAnsPayload");
let mut fuota_dev = fuota::get_latest_device_by_dev_eui(dev.dev_eui).await?;
if pl.missing_frag != 0
|| pl.status.memory_error
|| pl.status.mic_error
|| pl.status.session_does_not_exist
{
warn!(
frag_index = pl.received_and_index.frag_index,
nb_frag_received = pl.received_and_index.nb_frag_received,
missing_frag = pl.missing_frag,
memory_error = pl.status.memory_error,
mic_error = pl.status.mic_error,
session_does_not_exist = pl.status.session_does_not_exist,
"FragSessionStatusAns contains errors"
);
fuota_dev.error_msg = format!("Error: FragSessionStatusAns response nb_frag_received={}, missing_frag={}, memory_error={}, mic_error={}, session_does_not_exist={}", pl.received_and_index.nb_frag_received, pl.missing_frag, pl.status.memory_error, pl.status.mic_error, pl.status.session_does_not_exist);
} else {
fuota_dev.frag_status_completed_at = Some(Utc::now());
}
let _ = fuota::update_device(fuota_dev).await?;
Ok(())
}


@ -11,7 +11,10 @@ use lrwn::region::MacVersion;
use crate::config;
use crate::downlink;
use crate::gpstime::ToGpsTime;
use crate::storage::fields::{FuotaJob, RequestFragmentationSessionStatus};
use crate::storage::fields::{
device_profile::Ts004Version, device_profile::Ts005Version, FuotaJob,
RequestFragmentationSessionStatus,
};
use crate::storage::{device, device_keys, device_profile, device_queue, fuota, multicast};
pub struct Flow {
@ -109,14 +112,30 @@ impl Flow {
self.job.attempt_count += 1;
// Get McAppSKey + McNwkSKey.
let mc_app_s_key = multicastsetup::v1::get_mc_app_s_key(
self.fuota_deployment.multicast_key,
self.fuota_deployment.multicast_addr,
)?;
let mc_nwk_s_key = multicastsetup::v1::get_mc_net_s_key(
self.fuota_deployment.multicast_key,
self.fuota_deployment.multicast_addr,
)?;
let (mc_app_s_key, mc_nwk_s_key) = match self.device_profile.app_layer_params.ts005_version
{
Some(Ts005Version::V100) => (
multicastsetup::v1::get_mc_app_s_key(
self.fuota_deployment.multicast_key,
self.fuota_deployment.multicast_addr,
)?,
multicastsetup::v1::get_mc_net_s_key(
self.fuota_deployment.multicast_key,
self.fuota_deployment.multicast_addr,
)?,
),
Some(Ts005Version::V200) => (
multicastsetup::v2::get_mc_app_s_key(
self.fuota_deployment.multicast_key,
self.fuota_deployment.multicast_addr,
)?,
multicastsetup::v2::get_mc_net_s_key(
self.fuota_deployment.multicast_key,
self.fuota_deployment.multicast_addr,
)?,
),
None => return Err(anyhow!("Device-profile does not support TS005")),
};
let _ = multicast::create(multicast::MulticastGroup {
id: self.fuota_deployment.id,
@ -178,61 +197,119 @@ impl Flow {
}
async fn multicast_group_setup(&mut self) -> Result<Option<(FuotaJob, DateTime<Utc>)>> {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
return Ok(Some((FuotaJob::FragSessionSetup, Utc::now())));
}
info!("Sending McGroupSetupReq commands to devices");
self.job.attempt_count += 1;
let fuota_devices = fuota::get_devices(self.job.fuota_deployment_id.into(), -1, 0).await?;
// Filter on devices that have not completed the McGroupSetup.
let fuota_devices: Vec<fuota::FuotaDeploymentDevice> = fuota_devices
.into_iter()
.filter(|d| d.mc_group_setup_completed_at.is_none())
.collect();
if fuota_devices.is_empty() {
self.job.error_msg = "There are no devices available to complete this step".into();
return Ok(None);
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
info!("Set timeout error to devices that did not respond to McGroupSetupReq");
fuota::set_device_timeout_error(
self.fuota_deployment.id.into(),
true,
false,
false,
false,
)
.await?;
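// Judging by the other call sites in this flow, the four boolean flags appear to mark
// which step timed out, in the order (mc_group_setup, mc_session, frag_session_setup,
// frag_status); here only the McGroupSetup timeout is flagged.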
if !fuota_devices.is_empty() {
self.job.warning_msg = format!(
"{} devices did not complete the multicast group setup",
fuota_devices.len()
);
}
return Ok(Some((FuotaJob::FragSessionSetup, Utc::now())));
}
info!("Sending McGroupSetupReq commands to devices");
self.job.attempt_count += 1;
for fuota_dev in &fuota_devices {
let dev_keys = device_keys::get(&fuota_dev.dev_eui).await?;
let mc_root_key = match self.device_profile.mac_version {
MacVersion::LORAWAN_1_0_0
| MacVersion::LORAWAN_1_0_1
| MacVersion::LORAWAN_1_0_2
| MacVersion::LORAWAN_1_0_3
| MacVersion::LORAWAN_1_0_4 => {
multicastsetup::v1::get_mc_root_key_for_gen_app_key(dev_keys.gen_app_key)?
let pl = match self.device_profile.app_layer_params.ts005_version {
Some(Ts005Version::V100) => {
let mc_root_key = match self.device_profile.mac_version {
MacVersion::LORAWAN_1_0_0
| MacVersion::LORAWAN_1_0_1
| MacVersion::LORAWAN_1_0_2
| MacVersion::LORAWAN_1_0_3
| MacVersion::LORAWAN_1_0_4 => {
multicastsetup::v1::get_mc_root_key_for_gen_app_key(
dev_keys.gen_app_key,
)?
}
MacVersion::LORAWAN_1_1_0 | MacVersion::Latest => {
multicastsetup::v1::get_mc_root_key_for_app_key(dev_keys.app_key)?
}
};
let mc_ke_key = multicastsetup::v1::get_mc_ke_key(mc_root_key)?;
let mc_key_encrypted = multicastsetup::v1::encrypt_mc_key(
mc_ke_key,
self.fuota_deployment.multicast_key,
);
multicastsetup::v1::Payload::McGroupSetupReq(
multicastsetup::v1::McGroupSetupReqPayload {
mc_group_id_header:
multicastsetup::v1::McGroupSetupReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
mc_addr: self.fuota_deployment.multicast_addr,
mc_key_encrypted,
min_mc_f_count: 0,
max_mc_f_count: u32::MAX,
},
)
.to_vec()?
}
MacVersion::LORAWAN_1_1_0 | MacVersion::Latest => {
multicastsetup::v1::get_mc_root_key_for_app_key(dev_keys.app_key)?
Some(Ts005Version::V200) => {
let mc_root_key = match self.device_profile.mac_version {
MacVersion::LORAWAN_1_0_0
| MacVersion::LORAWAN_1_0_1
| MacVersion::LORAWAN_1_0_2
| MacVersion::LORAWAN_1_0_3
| MacVersion::LORAWAN_1_0_4 => {
multicastsetup::v2::get_mc_root_key_for_gen_app_key(
dev_keys.gen_app_key,
)?
}
MacVersion::LORAWAN_1_1_0 | MacVersion::Latest => {
multicastsetup::v2::get_mc_root_key_for_app_key(dev_keys.app_key)?
}
};
let mc_ke_key = multicastsetup::v2::get_mc_ke_key(mc_root_key)?;
let mc_key_encrypted = multicastsetup::v2::encrypt_mc_key(
mc_ke_key,
self.fuota_deployment.multicast_key,
);
multicastsetup::v2::Payload::McGroupSetupReq(
multicastsetup::v2::McGroupSetupReqPayload {
mc_group_id_header:
multicastsetup::v2::McGroupSetupReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
mc_addr: self.fuota_deployment.multicast_addr,
mc_key_encrypted,
min_mc_f_count: 0,
max_mc_f_count: u32::MAX,
},
)
.to_vec()?
}
None => return Err(anyhow!("Device-profile does not support TS005")),
};
let mc_ke_key = multicastsetup::v1::get_mc_ke_key(mc_root_key)?;
let mc_key_encrypted =
multicastsetup::v1::encrypt_mc_key(mc_ke_key, self.fuota_deployment.multicast_key);
let pl = multicastsetup::v1::Payload::McGroupSetupReq(
multicastsetup::v1::McGroupSetupReqPayload {
mc_group_id_header: multicastsetup::v1::McGroupSetupReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
mc_addr: self.fuota_deployment.multicast_addr,
mc_key_encrypted,
min_mc_f_count: 0,
max_mc_f_count: u32::MAX,
},
);
device_queue::enqueue_item(device_queue::DeviceQueueItem {
let _ = device_queue::enqueue_item(device_queue::DeviceQueueItem {
dev_eui: fuota_dev.dev_eui,
f_port: self.device_profile.app_layer_params.ts005_f_port.into(),
data: pl.to_vec()?,
data: pl,
..Default::default()
})
.await?;
@ -250,25 +327,11 @@ impl Flow {
}
async fn fragmentation_session_setup(&mut self) -> Result<Option<(FuotaJob, DateTime<Utc>)>> {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
return Ok(Some((FuotaJob::McSession, Utc::now())));
}
info!("Set timeout error to devices that did not respond to McGroupSetupReq");
fuota::set_device_timeout_error(self.fuota_deployment.id.into(), true, false, false, false)
.await?;
info!("Sending FragSessionSetupReq commands to devices");
self.job.attempt_count += 1;
let fragment_size = self.fuota_deployment.fragmentation_fragment_size as usize;
let fragments =
(self.fuota_deployment.payload.len() as f32 / fragment_size as f32).ceil() as usize;
let padding =
(fragment_size - (self.fuota_deployment.payload.len() % fragment_size)) % fragment_size;
let fuota_devices = fuota::get_devices(self.job.fuota_deployment_id.into(), -1, 0).await?;
let fuota_devices_completed_mc_group_setup_count = fuota_devices
.iter()
.filter(|d| d.mc_group_setup_completed_at.is_some())
.count();
// Filter on devices that have completed the previous step, but not yet the FragSessionSetup.
let fuota_devices: Vec<fuota::FuotaDeploymentDevice> = fuota_devices
@ -279,33 +342,124 @@ impl Flow {
})
.collect();
if fuota_devices.is_empty() {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
info!("Set timeout error to devices that did not respond to FragSessionSetupReq");
fuota::set_device_timeout_error(
self.fuota_deployment.id.into(),
false,
false,
true,
false,
)
.await?;
if !fuota_devices.is_empty() {
self.job.warning_msg = format!(
"{} devices did not complete the fragmentation session setup",
fuota_devices.len()
);
}
return Ok(Some((FuotaJob::McSession, Utc::now())));
}
info!("Sending FragSessionSetupReq commands to devices");
self.job.attempt_count += 1;
if fuota_devices_completed_mc_group_setup_count == 0 {
self.job.error_msg = "There are no devices available to complete this step".into();
return Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())));
}
for fuota_dev in &fuota_devices {
let pl = fragmentation::v1::Payload::FragSessionSetupReq(
fragmentation::v1::FragSessionSetupReqPayload {
frag_session: fragmentation::v1::FragSessionSetuReqPayloadFragSession {
mc_group_bit_mask: [true, false, false, false],
frag_index: 0,
},
nb_frag: fragments as u16,
frag_size: fragment_size as u8,
padding: padding as u8,
control: fragmentation::v1::FragSessionSetuReqPayloadControl {
block_ack_delay: 0,
fragmentation_matrix: 0,
},
descriptor: [0, 0, 0, 0],
},
);
let fragment_size = self.fuota_deployment.fragmentation_fragment_size as usize;
let fragments =
(self.fuota_deployment.payload.len() as f32 / fragment_size as f32).ceil() as usize;
let padding =
(fragment_size - (self.fuota_deployment.payload.len() % fragment_size)) % fragment_size;
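// Worked example: a 1000-byte payload with fragmentation_fragment_size = 48 gives
// ceil(1000 / 48) = 21 fragments and (48 - 1000 % 48) % 48 = 8 bytes of padding, so the
// padded payload fills the last fragment exactly.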
device_queue::enqueue_item(device_queue::DeviceQueueItem {
for fuota_dev in &fuota_devices {
let pl = match self.device_profile.app_layer_params.ts004_version {
Some(Ts004Version::V100) => fragmentation::v1::Payload::FragSessionSetupReq(
fragmentation::v1::FragSessionSetupReqPayload {
frag_session: fragmentation::v1::FragSessionSetuReqPayloadFragSession {
mc_group_bit_mask: [true, false, false, false],
frag_index: 0,
},
nb_frag: fragments as u16,
frag_size: fragment_size as u8,
padding: padding as u8,
control: fragmentation::v1::FragSessionSetuReqPayloadControl {
block_ack_delay: 0,
fragmentation_matrix: 0,
},
descriptor: [0, 0, 0, 0],
},
)
.to_vec()?,
Some(Ts004Version::V200) => {
let dev = device::get(&fuota_dev.dev_eui).await?;
let session_cnt = dev.app_layer_params.ts004_session_cnt[0];
let mut app_layer_params = dev.app_layer_params.clone();
app_layer_params.ts004_session_cnt[0] += 1;
device::partial_update(
fuota_dev.dev_eui,
&device::DeviceChangeset {
app_layer_params: Some(app_layer_params),
..Default::default()
},
)
.await?;
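// The per-device TS004 v2 session counter is read and then persisted with an increment
// before the request is enqueued: the current value is used for this fragmentation
// session (both in the MIC below and in the FragSessionSetupReq), and the next
// deployment will use a fresh value, so the device's session_cnt_replay check does not
// reject it.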
let dev_keys = device_keys::get(&fuota_dev.dev_eui).await?;
let data_block_int_key = match self.device_profile.mac_version {
MacVersion::LORAWAN_1_0_0
| MacVersion::LORAWAN_1_0_1
| MacVersion::LORAWAN_1_0_2
| MacVersion::LORAWAN_1_0_3
| MacVersion::LORAWAN_1_0_4 => {
fragmentation::v2::get_data_block_int_key(dev_keys.gen_app_key)?
}
MacVersion::LORAWAN_1_1_0 | MacVersion::Latest => {
fragmentation::v2::get_data_block_int_key(dev_keys.app_key)?
}
};
let mic = fragmentation::v2::calculate_mic(
data_block_int_key,
session_cnt,
0,
[0, 0, 0, 0],
&self.fuota_deployment.payload,
)?;
fragmentation::v2::Payload::FragSessionSetupReq(
fragmentation::v2::FragSessionSetupReqPayload {
frag_session: fragmentation::v2::FragSessionSetuReqPayloadFragSession {
mc_group_bit_mask: [true, false, false, false],
frag_index: 0,
},
nb_frag: fragments as u16,
frag_size: fragment_size as u8,
padding: padding as u8,
control: fragmentation::v2::FragSessionSetuReqPayloadControl {
block_ack_delay: 0,
frag_algo: 0,
ack_reception: false,
},
descriptor: [0, 0, 0, 0],
mic,
session_cnt,
},
)
.to_vec()?
}
None => return Err(anyhow!("Device-profile does not support TS004")),
};
let _ = device_queue::enqueue_item(device_queue::DeviceQueueItem {
dev_eui: fuota_dev.dev_eui,
f_port: self.device_profile.app_layer_params.ts004_f_port.into(),
data: pl.to_vec()?,
data: pl,
..Default::default()
})
.await?;
@ -323,19 +477,11 @@ impl Flow {
}
async fn multicast_session_setup(&mut self) -> Result<Option<(FuotaJob, DateTime<Utc>)>> {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
return Ok(Some((FuotaJob::Enqueue, Utc::now())));
}
info!("Set timeout error to devices that did not respond to FragSessionSetupReq");
fuota::set_device_timeout_error(self.fuota_deployment.id.into(), false, false, true, false)
.await?;
info!("Sending McClassB/McClassCSessionReq commands to devices");
self.job.attempt_count += 1;
let fuota_devices = fuota::get_devices(self.job.fuota_deployment_id.into(), -1, 0).await?;
let fuota_devices_completed_frag_session_setup_count = fuota_devices
.iter()
.filter(|d| d.frag_session_setup_completed_at.is_some())
.count();
// Filter on devices that have completed the previous step, but not yet the McSession.
let fuota_devices: Vec<fuota::FuotaDeploymentDevice> = fuota_devices
@ -345,79 +491,189 @@ impl Flow {
})
.collect();
if fuota_devices.is_empty() {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
info!("Set timeout error to devices that did not respond to McSessionReq");
fuota::set_device_timeout_error(
self.fuota_deployment.id.into(),
false,
true,
false,
false,
)
.await?;
if !fuota_devices.is_empty() {
self.job.warning_msg = format!(
"{} devices did not complete the multicast session setup",
fuota_devices.len()
);
}
return Ok(Some((
FuotaJob::Enqueue,
self.fuota_deployment
.multicast_session_start
.unwrap_or_else(Utc::now),
)));
}
info!("Sending McClassB/McClassCSessionReq commands to devices");
self.job.attempt_count += 1;
if fuota_devices_completed_frag_session_setup_count == 0 {
self.job.error_msg = "There are no devices available to complete this step".into();
return Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())));
}
for fuota_dev in &fuota_devices {
// Calculate the session start and end dates the first time this job is executed.
if self.fuota_deployment.multicast_session_start.is_none()
&& self.fuota_deployment.multicast_session_end.is_none()
{
// We want to start the session (retry_count + 1) x the uplink_interval from now.
// Note that retry_count=0 means only one attempt.
let session_start = (Utc::now()
let session_start = Utc::now()
+ TimeDelta::seconds(
(self.job.max_retry_count as i64 + 1)
* self.device_profile.uplink_interval as i64,
))
);
let session_end = {
let timeout = match self.fuota_deployment.multicast_group_type.as_ref() {
"B" => Duration::from_secs(
128 * (1 << self.fuota_deployment.multicast_timeout as u64),
),
"C" => Duration::from_secs(1 << self.fuota_deployment.multicast_timeout as u64),
_ => return Err(anyhow!("Invalid multicast-group type")),
};
session_start + timeout
};
self.fuota_deployment.multicast_session_start = Some(session_start);
self.fuota_deployment.multicast_session_end = Some(session_end);
self.fuota_deployment = fuota::update_deployment(self.fuota_deployment.clone()).await?;
}
let session_start = self
.fuota_deployment
.multicast_session_start
.ok_or_else(|| anyhow!("multicast_session_start is None"))?
.to_gps_time()
.num_seconds()
% (1 << 32);
% (1 << 32);
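// session_start is expressed as seconds since the GPS epoch, truncated to 32 bits
// because session_time in the McClassB/McClassCSessionReq payloads is a u32. For the
// Class B case below it is additionally floored to a 128-second beacon boundary
// (session_start - session_start % 128). Worked example for the window above: with
// max_retry_count = 2 and uplink_interval = 300 s the session starts 900 s from now;
// with multicast_timeout = 8 it ends 2^8 = 256 s later for Class C, or 128 * 2^8 s
// later for Class B.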
let pl = match self.fuota_deployment.multicast_group_type.as_ref() {
"B" => multicastsetup::v1::Payload::McClassBSessionReq(
multicastsetup::v1::McClassBSessionReqPayload {
mc_group_id_header:
multicastsetup::v1::McClassBSessionReqPayloadMcGroupIdHeader {
mc_group_id: 0,
for fuota_dev in &fuota_devices {
let pl = match self.device_profile.app_layer_params.ts005_version {
Some(Ts005Version::V100) => {
match self.fuota_deployment.multicast_group_type.as_ref() {
"B" => multicastsetup::v1::Payload::McClassBSessionReq(
multicastsetup::v1::McClassBSessionReqPayload {
mc_group_id_header:
multicastsetup::v1::McClassBSessionReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
session_time: (session_start - (session_start % 128)) as u32,
time_out_periodicity:
multicastsetup::v1::McClassBSessionReqPayloadTimeOutPeriodicity {
time_out: self.fuota_deployment.multicast_timeout as u8,
periodicity: self.fuota_deployment.multicast_class_b_ping_slot_nb_k
as u8,
},
dl_frequ: self.fuota_deployment.multicast_frequency as u32,
dr: self.fuota_deployment.multicast_dr as u8,
},
session_time: (session_start - (session_start % 128)) as u32,
time_out_periodicity:
multicastsetup::v1::McClassBSessionReqPayloadTimeOutPeriodicity {
time_out: self.fuota_deployment.multicast_timeout as u8,
periodicity: self.fuota_deployment.multicast_class_b_ping_slot_nb_k
as u8,
).to_vec()?,
"C" => multicastsetup::v1::Payload::McClassCSessionReq(
multicastsetup::v1::McClassCSessionReqPayload {
mc_group_id_header:
multicastsetup::v1::McClassCSessionReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
session_time: session_start as u32,
session_time_out:
multicastsetup::v1::McClassCSessionReqPayloadSessionTimeOut {
time_out: self.fuota_deployment.multicast_timeout as u8,
},
dl_frequ: self.fuota_deployment.multicast_frequency as u32,
dr: self.fuota_deployment.multicast_dr as u8,
},
dl_frequ: self.fuota_deployment.multicast_frequency as u32,
dr: self.fuota_deployment.multicast_dr as u8,
},
),
"C" => multicastsetup::v1::Payload::McClassCSessionReq(
multicastsetup::v1::McClassCSessionReqPayload {
mc_group_id_header:
multicastsetup::v1::McClassCSessionReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
session_time: session_start as u32,
session_time_out:
multicastsetup::v1::McClassCSessionReqPayloadSessionTimeOut {
time_out: self.fuota_deployment.multicast_timeout as u8,
},
dl_frequ: self.fuota_deployment.multicast_frequency as u32,
dr: self.fuota_deployment.multicast_dr as u8,
},
),
_ => {
return Err(anyhow!(
"Unsupported group-type: {}",
self.fuota_deployment.multicast_group_type
))
).to_vec()?,
_ => {
return Err(anyhow!(
"Unsupported group-type: {}",
self.fuota_deployment.multicast_group_type
))
}
}
}
Some(Ts005Version::V200) => {
match self.fuota_deployment.multicast_group_type.as_ref() {
"B" => multicastsetup::v2::Payload::McClassBSessionReq(
multicastsetup::v2::McClassBSessionReqPayload {
mc_group_id_header:
multicastsetup::v2::McClassBSessionReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
session_time: (session_start - (session_start % 128)) as u32,
time_out_periodicity:
multicastsetup::v2::McClassBSessionReqPayloadTimeOutPeriodicity {
time_out: self.fuota_deployment.multicast_timeout as u8,
periodicity: self.fuota_deployment.multicast_class_b_ping_slot_nb_k
as u8,
},
dl_frequ: self.fuota_deployment.multicast_frequency as u32,
dr: self.fuota_deployment.multicast_dr as u8,
},
).to_vec()?,
"C" => multicastsetup::v2::Payload::McClassCSessionReq(
multicastsetup::v2::McClassCSessionReqPayload {
mc_group_id_header:
multicastsetup::v2::McClassCSessionReqPayloadMcGroupIdHeader {
mc_group_id: 0,
},
session_time: session_start as u32,
session_time_out:
multicastsetup::v2::McClassCSessionReqPayloadSessionTimeOut {
time_out: self.fuota_deployment.multicast_timeout as u8,
},
dl_frequ: self.fuota_deployment.multicast_frequency as u32,
dr: self.fuota_deployment.multicast_dr as u8,
},
).to_vec()?,
_ => {
return Err(anyhow!(
"Unsupported group-type: {}",
self.fuota_deployment.multicast_group_type
))
}
}
}
None => return Err(anyhow!("Device-profile does not support TS005")),
};
device_queue::enqueue_item(device_queue::DeviceQueueItem {
dev_eui: fuota_dev.dev_eui,
f_port: self.device_profile.app_layer_params.ts005_f_port.into(),
data: pl.to_vec()?,
data: pl,
..Default::default()
})
.await?;
}
// In this case we need to try exactly the max. number of attempts, because that is what
// the session-start time calculation is based on. If we continued with enqueueing too
// early, the multicast-session would not have started yet.
let scheduler_run_after =
Utc::now() + TimeDelta::seconds(self.device_profile.uplink_interval as i64);
Ok(Some((FuotaJob::McSession, scheduler_run_after)))
if !fuota_devices.is_empty() {
// There are devices pending setup, we need to re-run this job.
let scheduler_run_after =
Utc::now() + TimeDelta::seconds(self.device_profile.uplink_interval as i64);
Ok(Some((FuotaJob::McSession, scheduler_run_after)))
} else {
Ok(Some((
FuotaJob::Enqueue,
self.fuota_deployment
.multicast_session_start
.unwrap_or_else(Utc::now),
)))
}
}
async fn enqueue(&mut self) -> Result<Option<(FuotaJob, DateTime<Utc>)>> {
@ -426,16 +682,12 @@ impl Flow {
return Ok(Some((FuotaJob::FragStatus, Utc::now())));
}
info!("Set timeout error to devices that did not respond to McSessionReq");
fuota::set_device_timeout_error(self.fuota_deployment.id.into(), false, true, false, false)
.await?;
info!("Enqueueing fragmented payload to multicast group");
self.job.attempt_count += 1;
let fuota_devices = fuota::get_devices(self.job.fuota_deployment_id.into(), -1, 0).await?;
// Filter on devices that have completed the previous step, but not yet the McSession.
// Filter on devices that have completed the previous step.
let fuota_devices: Vec<fuota::FuotaDeploymentDevice> = fuota_devices
.into_iter()
.filter(|d| d.mc_session_completed_at.is_some())
@ -459,57 +711,85 @@ impl Flow {
let mut payload = self.fuota_deployment.payload.clone();
payload.extend_from_slice(&vec![0; padding]);
let encoded_fragments = fragmentation::v1::encode(&payload, fragment_size, redundancy)?;
for (i, frag) in encoded_fragments.iter().enumerate() {
let pl =
fragmentation::v1::Payload::DataFragment(fragmentation::v1::DataFragmentPayload {
index_and_n: fragmentation::v1::DataFragmentPayloadIndexAndN {
frag_index: 0,
n: (i + 1) as u16,
},
data: frag.clone(),
});
let payloads = match self.device_profile.app_layer_params.ts004_version {
Some(Ts004Version::V100) => {
let mut payloads = Vec::new();
let encoded_fragments =
fragmentation::v1::encode(&payload, fragment_size, redundancy)?;
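// The DataFragment numbering below is 1-based (n = i + 1) and frag_index is 0, matching
// the single fragmentation session configured by FragSessionSetupReq earlier in the flow.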
for (i, frag) in encoded_fragments.iter().enumerate() {
payloads.push(
fragmentation::v1::Payload::DataFragment(
fragmentation::v1::DataFragmentPayload {
index_and_n: fragmentation::v1::DataFragmentPayloadIndexAndN {
frag_index: 0,
n: (i + 1) as u16,
},
data: frag.clone(),
},
)
.to_vec()?,
);
}
payloads
}
Some(Ts004Version::V200) => {
let mut payloads = Vec::new();
let encoded_fragments =
fragmentation::v2::encode(&payload, fragment_size, redundancy)?;
for (i, frag) in encoded_fragments.iter().enumerate() {
payloads.push(
fragmentation::v2::Payload::DataFragment(
fragmentation::v2::DataFragmentPayload {
index_and_n: fragmentation::v2::DataFragmentPayloadIndexAndN {
frag_index: 0,
n: (i + 1) as u16,
},
data: frag.clone(),
},
)
.to_vec()?,
);
}
payloads
}
None => return Err(anyhow!("Device-profile does not support TS004")),
};
for pl in payloads {
let _ = downlink::multicast::enqueue(multicast::MulticastGroupQueueItem {
multicast_group_id: self.fuota_deployment.id,
f_port: self.device_profile.app_layer_params.ts004_f_port as i16,
data: pl.to_vec()?,
data: pl,
..Default::default()
})
.await?;
}
match self.fuota_deployment.request_fragmentation_session_status {
RequestFragmentationSessionStatus::NoRequest => {
Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())))
}
RequestFragmentationSessionStatus::NoRequest => Ok(Some((
FuotaJob::DeleteMcGroup,
self.fuota_deployment
.multicast_session_end
.unwrap_or_else(Utc::now),
))),
RequestFragmentationSessionStatus::AfterFragEnqueue => {
Ok(Some((FuotaJob::FragStatus, Utc::now())))
}
RequestFragmentationSessionStatus::AfterSessTimeout => {
let timeout = match self.fuota_deployment.multicast_group_type.as_ref() {
"B" => Duration::from_secs(
128 * (1 << self.fuota_deployment.multicast_timeout as u64),
),
"C" => Duration::from_secs(1 << self.fuota_deployment.multicast_timeout as u64),
_ => return Err(anyhow!("Invalid multicast-group type")),
};
Ok(Some((FuotaJob::FragStatus, Utc::now() + timeout)))
}
RequestFragmentationSessionStatus::AfterSessTimeout => Ok(Some((
FuotaJob::FragStatus,
self.fuota_deployment
.multicast_session_end
.unwrap_or_else(Utc::now),
))),
}
}
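// Taken together, the return values above drive the FUOTA job sequence: multicast group
// setup -> FuotaJob::FragSessionSetup -> FuotaJob::McSession -> FuotaJob::Enqueue ->
// optionally FuotaJob::FragStatus (depending on request_fragmentation_session_status) ->
// FuotaJob::DeleteMcGroup, with each step re-scheduling itself until its devices have
// answered or max_retry_count is exceeded.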
async fn fragmentation_status(&mut self) -> Result<Option<(FuotaJob, DateTime<Utc>)>> {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
return Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())));
}
info!("Enqueue FragSessionStatusReq");
self.job.attempt_count += 1;
let fuota_devices = fuota::get_devices(self.job.fuota_deployment_id.into(), -1, 0).await?;
let fuota_devices_completed_mc_session_count = fuota_devices
.iter()
.filter(|d| d.mc_session_completed_at.is_some())
.count();
// Filter on devices that have completed the multicast-session setup but
// not yet responded to the FragSessionStatusReq.
@ -518,23 +798,59 @@ impl Flow {
.filter(|d| d.mc_session_completed_at.is_some() && d.frag_status_completed_at.is_none())
.collect();
if fuota_devices.is_empty() {
// Proceed with next step after reaching the max attempts.
if self.job.attempt_count > self.job.max_retry_count {
info!("Set timeout error to devices that did not respond to FragSessionStatusReq");
fuota::set_device_timeout_error(
self.fuota_deployment.id.into(),
false,
false,
false,
true,
)
.await?;
if !fuota_devices.is_empty() {
self.job.warning_msg = format!(
"{} devices did not complete the fragmentation status",
fuota_devices.len()
);
}
return Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())));
}
info!("Enqueue FragSessionStatusReq");
self.job.attempt_count += 1;
if fuota_devices_completed_mc_session_count == 0 {
self.job.error_msg = "There are no devices available to complete this step".into();
return Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())));
}
for fuota_dev in &fuota_devices {
let pl = fragmentation::v1::Payload::FragSessionStatusReq(
fragmentation::v1::FragSessionStatusReqPayload {
participants: true,
frag_index: 0,
},
);
let pl = match self.device_profile.app_layer_params.ts004_version {
Some(Ts004Version::V100) => fragmentation::v1::Payload::FragSessionStatusReq(
fragmentation::v1::FragSessionStatusReqPayload {
participants: true,
frag_index: 0,
},
)
.to_vec()?,
Some(Ts004Version::V200) => fragmentation::v2::Payload::FragSessionStatusReq(
fragmentation::v2::FragSessionStatusReqPayload {
participants: true,
frag_index: 0,
},
)
.to_vec()?,
None => return Err(anyhow!("Device-profile does not support TS004")),
};
device_queue::enqueue_item(device_queue::DeviceQueueItem {
dev_eui: fuota_dev.dev_eui,
f_port: self.device_profile.app_layer_params.ts004_f_port.into(),
data: pl.to_vec()?,
data: pl,
..Default::default()
})
.await?;
@ -546,7 +862,12 @@ impl Flow {
Utc::now() + TimeDelta::seconds(self.device_profile.uplink_interval as i64);
Ok(Some((FuotaJob::FragStatus, scheduler_run_after)))
} else {
Ok(Some((FuotaJob::DeleteMcGroup, Utc::now())))
Ok(Some((
FuotaJob::DeleteMcGroup,
self.fuota_deployment
.multicast_session_end
.unwrap_or_else(Utc::now),
)))
}
}
@ -581,21 +902,15 @@ impl Flow {
} else {
fuota::set_device_completed(self.fuota_deployment.id.into(), true, true, true, true)
.await?;
fuota::set_device_timeout_error(
self.fuota_deployment.id.into(),
false,
false,
false,
true,
)
.await?;
}
let fuota_devices = fuota::get_devices(self.job.fuota_deployment_id.into(), -1, 0).await?;
let fuota_devices_count = fuota_devices.len();
let fuota_devices: Vec<fuota::FuotaDeploymentDevice> = fuota_devices
.into_iter()
.filter(|d| d.completed_at.is_some() && d.error_msg.is_empty())
.collect();
let fuota_devices_completed_count = fuota_devices.len();
for fuota_device in &fuota_devices {
let mut d = device::get(&fuota_device.dev_eui).await?;
@ -605,9 +920,15 @@ impl Flow {
let _ = device::update(d).await?;
}
let mut d = self.fuota_deployment.clone();
d.completed_at = Some(Utc::now());
let _ = fuota::update_deployment(d).await?;
if fuota_devices_count != fuota_devices_completed_count {
self.job.warning_msg = format!(
"{} devices did not complete the FUOTA deployment",
fuota_devices_count - fuota_devices_completed_count
);
}
self.fuota_deployment.completed_at = Some(Utc::now());
self.fuota_deployment = fuota::update_deployment(self.fuota_deployment.clone()).await?;
Ok(None)
}


@ -18,6 +18,7 @@ pub async fn handle_uplink(
match version {
Ts005Version::V100 => handle_uplink_v100(dev, data).await,
Ts005Version::V200 => handle_uplink_v200(dev, data).await,
}
}
@ -40,6 +41,25 @@ async fn handle_uplink_v100(dev: &device::Device, data: &[u8]) -> Result<()> {
Ok(())
}
async fn handle_uplink_v200(dev: &device::Device, data: &[u8]) -> Result<()> {
let pl = multicastsetup::v2::Payload::from_slice(true, data)?;
match pl {
multicastsetup::v2::Payload::McGroupSetupAns(pl) => {
handle_v2_mc_group_setup_ans(dev, pl).await?
}
multicastsetup::v2::Payload::McClassBSessionAns(pl) => {
handle_v2_mc_class_b_session_ans(dev, pl).await?
}
multicastsetup::v2::Payload::McClassCSessionAns(pl) => {
handle_v2_mc_class_c_session_ans(dev, pl).await?
}
_ => {}
}
Ok(())
}
async fn handle_v1_mc_group_setup_ans(
dev: &device::Device,
pl: multicastsetup::v1::McGroupSetupAnsPayload,
@ -64,6 +84,30 @@ async fn handle_v1_mc_group_setup_ans(
Ok(())
}
async fn handle_v2_mc_group_setup_ans(
dev: &device::Device,
pl: multicastsetup::v2::McGroupSetupAnsPayload,
) -> Result<()> {
info!("Handling McGroupSetupAns");
let mut fuota_dev = fuota::get_latest_device_by_dev_eui(dev.dev_eui).await?;
if pl.mc_group_id_header.id_error {
warn!(
mc_group_id = pl.mc_group_id_header.mc_group_id,
id_error = true,
"McGroupSetupAns contains errors"
);
fuota_dev.error_msg = "Error: McGroupSetupAns response id_error=true".into();
} else {
fuota_dev.mc_group_setup_completed_at = Some(Utc::now());
}
let _ = fuota::update_device(fuota_dev).await?;
Ok(())
}
async fn handle_v1_mc_class_b_session_ans(
dev: &device::Device,
pl: multicastsetup::v1::McClassBSessionAnsPayload,
@ -101,6 +145,46 @@ async fn handle_v1_mc_class_b_session_ans(
Ok(())
}
async fn handle_v2_mc_class_b_session_ans(
dev: &device::Device,
pl: multicastsetup::v2::McClassBSessionAnsPayload,
) -> Result<()> {
info!("Handling McClassBSessionAns");
let mut fuota_dev = fuota::get_latest_device_by_dev_eui(dev.dev_eui).await?;
if pl.status_and_mc_group_id.dr_error
| pl.status_and_mc_group_id.freq_error
| pl.status_and_mc_group_id.mc_group_undefined
| pl.status_and_mc_group_id.start_missed
{
warn!(
dr_error = pl.status_and_mc_group_id.dr_error,
freq_error = pl.status_and_mc_group_id.freq_error,
mc_group_undefined = pl.status_and_mc_group_id.mc_group_undefined,
start_missed = pl.status_and_mc_group_id.start_missed,
"McClassBSessionAns contains errors"
);
fuota_dev.error_msg = format!("Error: McClassBSessionAns response dr_error: {}, freq_error: {}, mc_group_undefined: {}, start_missed: {}",
pl.status_and_mc_group_id.dr_error,
pl.status_and_mc_group_id.freq_error,
pl.status_and_mc_group_id.mc_group_undefined,
pl.status_and_mc_group_id.start_missed,
);
} else {
info!(
time_to_start = pl.time_to_start.unwrap_or_default(),
"McClassBSessionAns OK"
);
fuota_dev.mc_session_completed_at = Some(Utc::now());
}
let _ = fuota::update_device(fuota_dev).await?;
Ok(())
}
async fn handle_v1_mc_class_c_session_ans(
dev: &device::Device,
pl: multicastsetup::v1::McClassCSessionAnsPayload,
@ -137,3 +221,43 @@ async fn handle_v1_mc_class_c_session_ans(
Ok(())
}
async fn handle_v2_mc_class_c_session_ans(
dev: &device::Device,
pl: multicastsetup::v2::McClassCSessionAnsPayload,
) -> Result<()> {
info!("Handling McClassCSessionAns");
let mut fuota_dev = fuota::get_latest_device_by_dev_eui(dev.dev_eui).await?;
if pl.status_and_mc_group_id.dr_error
| pl.status_and_mc_group_id.freq_error
| pl.status_and_mc_group_id.mc_group_undefined
| pl.status_and_mc_group_id.start_missed
{
warn!(
dr_error = pl.status_and_mc_group_id.dr_error,
freq_error = pl.status_and_mc_group_id.freq_error,
mc_group_undefined = pl.status_and_mc_group_id.mc_group_undefined,
start_missed = pl.status_and_mc_group_id.start_missed,
"McClassCSessionAns contains errors"
);
fuota_dev.error_msg = format!("Error: McClassCSessionAns response dr_error: {}, freq_error: {}, mc_group_undefined: {}, start_missed: {}",
pl.status_and_mc_group_id.dr_error,
pl.status_and_mc_group_id.freq_error,
pl.status_and_mc_group_id.mc_group_undefined,
pl.status_and_mc_group_id.start_missed,
);
} else {
info!(
time_to_start = pl.time_to_start.unwrap_or_default(),
"McClassCSessionAns OK"
);
fuota_dev.mc_session_completed_at = Some(Utc::now());
}
let _ = fuota::update_device(fuota_dev).await?;
Ok(())
}


@ -1,4 +1,4 @@
use std::sync::Arc;
use std::sync::{Arc, LazyLock};
use anyhow::Result;
use tokio::sync::RwLock;
@ -8,9 +8,8 @@ use crate::{config, stream};
use backend::{Client, ClientConfig};
use lrwn::{EUI64Prefix, EUI64};
lazy_static! {
static ref CLIENTS: RwLock<Vec<(EUI64Prefix, Arc<Client>)>> = RwLock::new(vec![]);
}
static CLIENTS: LazyLock<RwLock<Vec<(EUI64Prefix, Arc<Client>)>>> =
LazyLock::new(|| RwLock::new(vec![]));
pub async fn setup() -> Result<()> {
info!("Setting up Join Server clients");
@ -30,6 +29,11 @@ pub async fn setup() -> Result<()> {
tls_key: js.tls_key.clone(),
async_timeout: js.async_timeout,
request_log_sender: stream::backend_interfaces::get_log_sender().await,
authorization: if js.authorization_header.is_empty() {
None
} else {
Some(js.authorization_header.clone())
},
..Default::default()
})?;


@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::io::Cursor;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::{Arc, LazyLock};
use anyhow::Result;
use chrono::{Duration, DurationRound};
@ -15,9 +15,8 @@ use backend::{Client, ClientConfig, GWInfoElement, ULMetaData};
use chirpstack_api::{common, gw};
use lrwn::{region, DevAddr, NetID, EUI64};
lazy_static! {
static ref CLIENTS: RwLock<HashMap<NetID, Arc<Client>>> = RwLock::new(HashMap::new());
}
static CLIENTS: LazyLock<RwLock<HashMap<NetID, Arc<Client>>>> =
LazyLock::new(|| RwLock::new(HashMap::new()));
pub async fn setup() -> Result<()> {
info!("Setting up roaming clients");


@ -3,9 +3,7 @@ use handlebars::Handlebars;
use super::super::config;
pub fn run() {
#[allow(clippy::useless_vec)]
let template = vec![
r#"
let template = r#"
# Logging configuration
[logging]
@ -21,10 +19,11 @@ r#"
# Log as JSON.
json={{ logging.json }}
"#,
#[cfg(feature = "postgres")]
r#"
# PostgreSQL configuration.
#
# Note: this option is only available when ChirpStack is compiled with PostgreSQL support (default).
[postgresql]
# PostgreSQL DSN.
@ -49,16 +48,20 @@ r#"
# the server-certificate is not signed by a CA in the platform certificate
# store.
ca_cert="{{ postgresql.ca_cert }}"
"#,
#[cfg(feature = "sqlite")]
r#"
# SQLite configuration.
#
# Note: this option is only available when ChirpStack is compiled with SQLite support.
[sqlite]
# Sqlite DB path.
#
# Format example: sqlite:///<DATABASE>.
# Make sure the path exists and that the ChirpStack process has read-write
# access to it. If the database file does not exist, it will be created the
# first time ChirpStack starts.
#
# Format example: sqlite:///<DATABASE>.
path="{{ sqlite.path }}"
# Max open connections.
@ -77,8 +80,8 @@ r#"
"{{this}}",
{{/each}}
]
"#,
r#"
# Redis configuration.
[redis]
@ -794,6 +797,11 @@ r#"
# #
# # Set this to enable client-certificate authentication with the join-server.
# tls_key="/path/to/tls_key.pem"
# # Authorization header.
# #
# # Optional value of the Authorization header, e.g. token or password.
# authorization_header="Bearer sometoken"
{{#each join_server.servers}}
[[join_server.servers]]
@ -804,6 +812,7 @@ r#"
ca_cert="{{ this.ca_cert }}"
tls_cert="{{ this.tls_cert }}"
tls_key="{{ this.tls_key }}"
authorization_header="{{ this.authorization_header }}"
{{/each}}
@ -990,14 +999,14 @@ r#"
# default tileserver_url (OSM). If you configure a different tile-server, you
# might need to update the map_attribution.
map_attribution="{{ui.map_attribution}}"
"#].join("\n");
"#;
let mut reg = Handlebars::new();
reg.register_escape_fn(|s| s.to_string().replace('"', r#"\""#));
let conf = config::get();
println!(
"{}",
reg.render_template(&template, &conf)
reg.render_template(template, &conf)
.expect("render configfile error")
);
}


@ -227,7 +227,7 @@ pub mod test {
let out = decode(Utc::now(), 10, &vars, &decoder, &[0x01, 0x02, 0x03]).await;
assert_eq!(
"JS error: Error: foo is not defined\n at decodeUplink (eval_script:3:1)\n at <eval> (eval_script:8:9)\n",
"JS error: Error: foo is not defined\n at decodeUplink (eval_script:3:1)\n at <eval> (eval_script:8:22)\n",
out.err().unwrap().to_string()
);
}
@ -368,7 +368,7 @@ pub mod test {
};
let out = encode(10, &vars, &encoder, &input).await;
assert_eq!("JS error: Error: foo is not defined\n at encodeDownlink (eval_script:3:1)\n at <eval> (eval_script:8:9)\n", out.err().unwrap().to_string());
assert_eq!("JS error: Error: foo is not defined\n at encodeDownlink (eval_script:3:1)\n at <eval> (eval_script:8:24)\n", out.err().unwrap().to_string());
}
#[tokio::test]


@ -1,5 +1,5 @@
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::sync::{Arc, LazyLock, Mutex};
use std::time::Duration;
use std::{env, fs};
@ -9,9 +9,8 @@ use serde::{Deserialize, Serialize};
use lrwn::region::CommonName;
use lrwn::{AES128Key, DevAddrPrefix, EUI64Prefix, NetID};
lazy_static! {
static ref CONFIG: Mutex<Arc<Configuration>> = Mutex::new(Arc::new(Default::default()));
}
static CONFIG: LazyLock<Mutex<Arc<Configuration>>> =
LazyLock::new(|| Mutex::new(Arc::new(Default::default())));
#[derive(Default, Serialize, Deserialize, Clone)]
#[serde(default)]
@ -525,6 +524,7 @@ pub struct JoinServerServer {
pub ca_cert: String,
pub tls_cert: String,
pub tls_key: String,
pub authorization_header: String,
}
#[derive(Serialize, Deserialize, Default, Clone)]


@ -1,12 +1,12 @@
use rand::seq::SliceRandom;
use rand::RngCore;
use crate::config;
use lrwn::DevAddr;
use rand::seq::IndexedRandom;
pub fn get_random_dev_addr() -> DevAddr {
let conf = config::get();
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
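// rand 0.9 API: rand::thread_rng() is replaced by rand::rng(), gen() by random(), and
// choose() is now provided by rand::seq::IndexedRandom instead of SliceRandom, which is
// why the imports above changed.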
// Get configured DevAddr prefixes.
let prefixes = if conf.network.dev_addr_prefixes.is_empty() {


@ -1,3 +1,5 @@
use std::sync::LazyLock;
use aes::cipher::generic_array::GenericArray;
use aes::cipher::{BlockEncrypt, KeyInit};
use aes::{Aes128, Block};
@ -7,14 +9,11 @@ use tracing::debug;
use lrwn::DevAddr;
lazy_static! {
static ref BEACON_PERIOD: Duration = Duration::try_seconds(128).unwrap();
static ref BEACON_RESERVED: Duration = Duration::try_milliseconds(2120).unwrap();
static ref BEACON_GUARD: Duration = Duration::try_seconds(3).unwrap();
static ref BEACON_WINDOW: Duration = Duration::try_milliseconds(122880).unwrap();
static ref PING_PERIOD_BASE: usize = 1 << 12;
static ref SLOT_LEN: Duration = Duration::try_milliseconds(30).unwrap();
}
static BEACON_PERIOD: LazyLock<Duration> = LazyLock::new(|| Duration::try_seconds(128).unwrap());
static BEACON_RESERVED: LazyLock<Duration> =
LazyLock::new(|| Duration::try_milliseconds(2120).unwrap());
static PING_PERIOD_BASE: usize = 1 << 12;
static SLOT_LEN: LazyLock<Duration> = LazyLock::new(|| Duration::try_milliseconds(30).unwrap());
pub fn get_beacon_start(ts: Duration) -> Duration {
Duration::try_seconds(ts.num_seconds() - (ts.num_seconds() % BEACON_PERIOD.num_seconds()))
@ -26,7 +25,7 @@ pub fn get_ping_offset(beacon_ts: Duration, dev_addr: &DevAddr, ping_nb: usize)
return Err(anyhow!("ping_nb must be > 0"));
}
let ping_period = *PING_PERIOD_BASE / ping_nb;
let ping_period = PING_PERIOD_BASE / ping_nb;
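// PING_PERIOD_BASE is 2^12 = 4096, the number of 30 ms ping slots in the 122.88 s beacon
// window, so ping_period is the slot spacing between pings: e.g. ping_nb = 2 gives a
// ping_period of 2048 slots, i.e. one ping slot roughly every 61.44 s.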
let beacon_time = (beacon_ts.num_seconds() % (1 << 32)) as u32;
let key_bytes: [u8; 16] = [0x00; 16];
@ -54,7 +53,7 @@ pub fn get_next_ping_slot_after(
}
let mut beacon_start_ts = get_beacon_start(after_gps_epoch_ts);
let ping_period = *PING_PERIOD_BASE / ping_nb;
let ping_period = PING_PERIOD_BASE / ping_nb;
loop {
let ping_offset = get_ping_offset(beacon_start_ts, dev_addr, ping_nb)?;
@ -122,7 +121,7 @@ pub mod test {
for k in 0..8 {
let mut beacon_ts = Duration::zero();
let ping_nb: usize = 1 << k;
let ping_period = *PING_PERIOD_BASE / ping_nb;
let ping_period = PING_PERIOD_BASE / ping_nb;
let dev_addr = DevAddr::from_be_bytes([0, 0, 0, 0]);
for _ in 0..100000 {


@ -65,7 +65,7 @@ impl Data {
must_ack: bool,
mac_commands: Vec<lrwn::MACCommandSet>,
) -> Result<()> {
let downlink_id: u32 = rand::thread_rng().gen();
let downlink_id: u32 = rand::rng().random();
let span = span!(Level::INFO, "data_down", downlink_id = downlink_id);
match Data::_handle_response(
@ -107,7 +107,7 @@ impl Data {
must_ack: bool,
mac_commands: Vec<lrwn::MACCommandSet>,
) -> Result<()> {
let downlink_id: u32 = rand::thread_rng().gen();
let downlink_id: u32 = rand::rng().random();
let span = span!(Level::INFO, "data_down", downlink_id = downlink_id);
match Data::_handle_response_relayed(
@ -138,7 +138,7 @@ impl Data {
}
pub async fn handle_schedule_next_queue_item(device: device::Device) -> Result<()> {
let downlink_id: u32 = rand::thread_rng().gen();
let downlink_id: u32 = rand::rng().random();
let span =
span!(Level::INFO, "schedule", dev_eui = %device.dev_eui, downlink_id = downlink_id);
@ -1827,20 +1827,19 @@ impl Data {
let set = lrwn::MACCommandSet::new(vec![lrwn::MACCommand::ConfigureFwdLimitReq(
lrwn::ConfigureFwdLimitReqPayload {
reload_rate: lrwn::FwdLimitReloadRatePL {
overall_reload_rate: relay_params.relay_overall_limit_reload_rate as u8,
overall_reload_rate: relay_params.relay_overall_limit_reload_rate,
global_uplink_reload_rate: relay_params
.relay_global_uplink_limit_reload_rate
as u8,
notify_reload_rate: relay_params.relay_notify_limit_reload_rate as u8,
join_req_reload_rate: relay_params.relay_join_req_limit_reload_rate as u8,
.relay_global_uplink_limit_reload_rate,
notify_reload_rate: relay_params.relay_notify_limit_reload_rate,
join_req_reload_rate: relay_params.relay_join_req_limit_reload_rate,
reset_limit_counter: lrwn::ResetLimitCounter::NoChange,
},
load_capacity: lrwn::FwdLimitLoadCapacityPL {
overall_limit_size: relay_params.relay_overall_limit_bucket_size as u8,
global_uplink_limit_size: relay_params.relay_global_uplink_limit_bucket_size
as u8,
notify_limit_size: relay_params.relay_notify_limit_bucket_size as u8,
join_req_limit_size: relay_params.relay_join_req_limit_bucket_size as u8,
overall_limit_size: relay_params.relay_overall_limit_bucket_size,
global_uplink_limit_size: relay_params
.relay_global_uplink_limit_bucket_size,
notify_limit_size: relay_params.relay_notify_limit_bucket_size,
join_req_limit_size: relay_params.relay_join_req_limit_bucket_size,
},
},
)]);
@ -2052,7 +2051,7 @@ impl Data {
if relay.enabled != relay_params.relay_enabled
|| relay.cad_periodicity != relay_params.relay_cad_periodicity as u32
|| relay.default_channel_index != relay_params.default_channel_index as u32
|| relay.second_channel_freq != relay_params.second_channel_freq as u32
|| relay.second_channel_freq != relay_params.second_channel_freq
|| relay.second_channel_dr != relay_params.second_channel_dr as u32
|| relay.second_channel_ack_offset != relay_params.second_channel_ack_offset as u32
{
@ -2063,17 +2062,17 @@ impl Data {
true => 1,
false => 0,
},
cad_periodicity: relay_params.relay_cad_periodicity as u8,
default_ch_idx: relay_params.default_channel_index as u8,
cad_periodicity: relay_params.relay_cad_periodicity,
default_ch_idx: relay_params.default_channel_index,
second_ch_idx: if relay_params.second_channel_freq > 0 {
1
} else {
0
},
second_ch_dr: relay_params.second_channel_dr as u8,
second_ch_ack_offset: relay_params.second_channel_ack_offset as u8,
second_ch_dr: relay_params.second_channel_dr,
second_ch_ack_offset: relay_params.second_channel_ack_offset,
},
second_ch_freq: relay_params.second_channel_freq as u32,
second_ch_freq: relay_params.second_channel_freq,
},
)]);
mac_command::set_pending(&dev_eui, lrwn::CID::RelayConfReq, &set).await?;
@ -2102,7 +2101,7 @@ impl Data {
if relay.ed_activation_mode != relay_params.ed_activation_mode.to_u8() as u32
|| relay.ed_smart_enable_level != relay_params.ed_smart_enable_level as u32
|| relay.ed_back_off != relay_params.ed_back_off as u32
|| relay.second_channel_freq != relay_params.second_channel_freq as u32
|| relay.second_channel_freq != relay_params.second_channel_freq
|| relay.second_channel_dr != relay_params.second_channel_dr as u32
|| relay.second_channel_ack_offset != relay_params.second_channel_ack_offset as u32
{
@ -2110,19 +2109,19 @@ impl Data {
lrwn::EndDeviceConfReqPayload {
activation_relay_mode: lrwn::ActivationRelayMode {
relay_mode_activation: relay_params.ed_activation_mode,
smart_enable_level: relay_params.ed_smart_enable_level as u8,
smart_enable_level: relay_params.ed_smart_enable_level,
},
channel_settings_ed: lrwn::ChannelSettingsED {
second_ch_ack_offset: relay_params.second_channel_ack_offset as u8,
second_ch_dr: relay_params.second_channel_dr as u8,
second_ch_ack_offset: relay_params.second_channel_ack_offset,
second_ch_dr: relay_params.second_channel_dr,
second_ch_idx: if relay_params.second_channel_freq > 0 {
1
} else {
0
},
backoff: relay_params.ed_back_off as u8,
backoff: relay_params.ed_back_off,
},
second_ch_freq: relay_params.second_channel_freq as u32,
second_ch_freq: relay_params.second_channel_freq,
},
)]);
mac_command::set_pending(&dev_eui, lrwn::CID::EndDeviceConfReq, &set).await?;


@ -54,7 +54,7 @@ impl Data {
xmit_data_req: pl,
dl_meta_data: dl_meta,
downlink_frame: gw::DownlinkFrame {
downlink_id: rand::thread_rng().gen(),
downlink_id: rand::rng().random(),
..Default::default()
},
};


@ -1,7 +1,7 @@
use std::str::FromStr;
use anyhow::Result;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;
use uuid::Uuid;
use chirpstack_api::{gw, internal};
@ -74,7 +74,7 @@ pub fn select_downlink_gateway(
// Return a random item from the new_items slice (filtered by min_snr_margin).
// If new_items is empty, then choose will return None and we return the first item from
// rx_info.item.
Ok(match new_items.choose(&mut rand::thread_rng()) {
Ok(match new_items.choose(&mut rand::rng()) {
Some(v) => v.clone(),
None => rx_info.items[0].clone(),
})
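Note: the `SliceRandom` → `IndexedRandom` import change above belongs to the same rand 0.9 migration; `choose` on slices is now provided by `rand::seq::IndexedRandom`. A small sketch, assuming rand 0.9:

```rust
use rand::seq::IndexedRandom; // rand 0.9: slice `choose` lives on this trait

fn pick_one(items: &[u32]) -> Option<&u32> {
    // Returns None for an empty slice, which is why the caller above
    // falls back to rx_info.items[0].
    items.choose(&mut rand::rng())
}
```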


@ -37,7 +37,7 @@ impl JoinAccept<'_> {
device: &device::Device,
join_accept: &PhyPayload,
) -> Result<()> {
let downlink_id: u32 = rand::thread_rng().gen();
let downlink_id: u32 = rand::rng().random();
let span = span!(Level::INFO, "join_accept", downlink_id = downlink_id);
let fut = JoinAccept::_handle(downlink_id, ufs, tenant, device, join_accept);
@ -51,7 +51,7 @@ impl JoinAccept<'_> {
device: &device::Device,
join_accept: &PhyPayload,
) -> Result<()> {
let downlink_id: u32 = rand::thread_rng().gen();
let downlink_id: u32 = rand::rng().random();
let span = span!(
Level::INFO,
"join_accept_relayed",


@ -53,7 +53,7 @@ impl Multicast {
let mut ctx = Multicast {
downlink_frame: gw::DownlinkFrame {
downlink_id: rand::thread_rng().gen(),
downlink_id: rand::rng().random(),
gateway_id: qi.gateway_id.to_string(),
..Default::default()
},


@ -43,7 +43,7 @@ impl PassiveRoamingDownlink {
network_conf,
region_conf,
downlink_frame: gw::DownlinkFrame {
downlink_id: rand::thread_rng().gen(),
downlink_id: rand::rng().random(),
..Default::default()
},
downlink_gateway: None,


@ -1,3 +1,5 @@
use std::sync::LazyLock;
use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::RwLock;
@ -6,11 +8,10 @@ use chirpstack_api::gw;
use super::GatewayBackend;
lazy_static! {
static ref DOWNLINK_FRAMES: RwLock<Vec<gw::DownlinkFrame>> = RwLock::new(Vec::new());
static ref GATEWAY_CONFIGURATIONS: RwLock<Vec<gw::GatewayConfiguration>> =
RwLock::new(Vec::new());
}
static DOWNLINK_FRAMES: LazyLock<RwLock<Vec<gw::DownlinkFrame>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static GATEWAY_CONFIGURATIONS: LazyLock<RwLock<Vec<gw::GatewayConfiguration>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
pub async fn reset() {
DOWNLINK_FRAMES.write().await.drain(..);
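Note: this and the following hunks replace the `lazy_static!` macro with `std::sync::LazyLock`, which has been available on stable Rust since 1.80. A minimal sketch of the pattern, using a hypothetical static rather than one from the ChirpStack sources:

```rust
use std::sync::{LazyLock, RwLock};

// Hypothetical example of the same pattern: the static is initialized
// lazily on first access, without the lazy_static macro or dependency.
static VALUES: LazyLock<RwLock<Vec<u32>>> = LazyLock::new(|| RwLock::new(Vec::new()));

fn record(v: u32) {
    VALUES.write().unwrap().push(v);
}
```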


@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::sync::LazyLock;
use anyhow::{Context, Result};
use async_trait::async_trait;
@ -11,10 +12,8 @@ use crate::config;
pub mod mock;
mod mqtt;
lazy_static! {
static ref BACKENDS: RwLock<HashMap<String, Box<dyn GatewayBackend + Sync + Send>>> =
RwLock::new(HashMap::new());
}
static BACKENDS: LazyLock<RwLock<HashMap<String, Box<dyn GatewayBackend + Sync + Send>>>> =
LazyLock::new(|| RwLock::new(HashMap::new()));
#[async_trait]
pub trait GatewayBackend {


@ -1,6 +1,6 @@
use std::collections::HashMap;
use std::io::Cursor;
use std::sync::RwLock;
use std::sync::{LazyLock, RwLock};
use std::time::Duration;
use anyhow::Result;
@ -38,27 +38,26 @@ struct CommandLabels {
command: String,
}
lazy_static! {
static ref EVENT_COUNTER: Family<EventLabels, Counter> = {
let counter = Family::<EventLabels, Counter>::default();
prometheus::register(
"gateway_backend_mqtt_events",
"Number of events received",
counter.clone(),
);
counter
};
static ref COMMAND_COUNTER: Family<CommandLabels, Counter> = {
let counter = Family::<CommandLabels, Counter>::default();
prometheus::register(
"gateway_backend_mqtt_commands",
"Number of commands sent",
counter.clone(),
);
counter
};
static ref GATEWAY_JSON: RwLock<HashMap<String, bool>> = RwLock::new(HashMap::new());
}
static EVENT_COUNTER: LazyLock<Family<EventLabels, Counter>> = LazyLock::new(|| {
let counter = Family::<EventLabels, Counter>::default();
prometheus::register(
"gateway_backend_mqtt_events",
"Number of events received",
counter.clone(),
);
counter
});
static COMMAND_COUNTER: LazyLock<Family<CommandLabels, Counter>> = LazyLock::new(|| {
let counter = Family::<CommandLabels, Counter>::default();
prometheus::register(
"gateway_backend_mqtt_commands",
"Number of commands sent",
counter.clone(),
);
counter
});
static GATEWAY_JSON: LazyLock<RwLock<HashMap<String, bool>>> =
LazyLock::new(|| RwLock::new(HashMap::new()));
pub struct MqttBackend<'a> {
client: AsyncClient,
@ -99,8 +98,8 @@ impl<'a> MqttBackend<'a> {
// get client id, this will generate a random client_id when no client_id has been
// configured.
let client_id = if conf.client_id.is_empty() {
let mut rnd = rand::thread_rng();
let client_id: u64 = rnd.gen();
let mut rnd = rand::rng();
let client_id: u64 = rnd.random();
format!("{:x}", client_id)
} else {
conf.client_id.clone()


@ -1,82 +1,85 @@
use std::sync::LazyLock;
use chrono::{DateTime, Duration, TimeZone, Utc};
lazy_static! {
static ref GPS_EPOCH_TIME: DateTime<Utc> = Utc.with_ymd_and_hms(1980, 1, 6, 0, 0, 0).unwrap();
static ref LEAP_SECONDS_TABLE: Vec<(DateTime<Utc>, Duration)> = vec![
static GPS_EPOCH_TIME: LazyLock<DateTime<Utc>> =
LazyLock::new(|| Utc.with_ymd_and_hms(1980, 1, 6, 0, 0, 0).unwrap());
static LEAP_SECONDS_TABLE: LazyLock<Vec<(DateTime<Utc>, Duration)>> = LazyLock::new(|| {
vec![
(
Utc.with_ymd_and_hms(1981, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1982, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1983, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1985, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1987, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1989, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1990, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1992, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1993, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1994, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1995, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1997, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(1998, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(2005, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(2008, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(2012, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(2015, 6, 30, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
(
Utc.with_ymd_and_hms(2016, 12, 31, 23, 59, 59).unwrap(),
Duration::try_seconds(1).unwrap()
Duration::try_seconds(1).unwrap(),
),
];
}
]
});
pub trait ToGpsTime {
fn to_gps_time(&self) -> Duration;


@ -68,17 +68,34 @@ pub fn private_key_to_pkcs8(pem: &str) -> Result<String> {
let pkcs8_pem = pkey.to_pkcs8_pem(LineEnding::default())?;
Ok(pkcs8_pem.as_str().to_owned())
} else if pem.contains("EC PRIVATE KEY") {
use elliptic_curve::{
pkcs8::{EncodePrivateKey, LineEnding},
SecretKey,
use sec1::{
der::{Decode, Encode, EncodePem},
pkcs8::{AlgorithmIdentifierRef, PrivateKeyInfo},
EcPrivateKey, LineEnding,
};
// We assume it is a P256 based secret-key, which is the most popular curve.
// Attempting to decode it as P256 is still better than just failing to read it.
let pkey: SecretKey<p256::NistP256> =
SecretKey::from_sec1_pem(pem).context("Read EC SEC1")?;
let pkcs8_pem = pkey.to_pkcs8_pem(LineEnding::default())?;
Ok(pkcs8_pem.as_str().to_owned())
// Get a SEC1 ECPrivateKey from the PEM string input
let pem = pem::parse(pem).context("Parse PEM string")?;
let pkey =
EcPrivateKey::from_der(pem.contents()).context("Decode PEM into SEC1 ECPrivateKey")?;
// Retrieve the curve name from the decoded private key's parameters
let params_oid = pkey.parameters.and_then(|params| params.named_curve());
// Get the proper types to construct a PKCS#8 PrivateKeyInfo
let private_key = &pkey.to_der()?;
let algorithm = AlgorithmIdentifierRef {
oid: sec1::ALGORITHM_OID,
parameters: params_oid.as_ref().map(Into::into),
};
let pkcs8 = PrivateKeyInfo {
algorithm,
private_key,
public_key: None,
};
Ok(pkcs8.to_pem(LineEnding::default())?)
} else {
Ok(pem.to_string())
}
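Note: the rewritten branch above drops the earlier assumption that every `EC PRIVATE KEY` block is P-256. The SEC1 `ECPrivateKey` is now decoded as-is, its named-curve OID is carried into the PKCS#8 `AlgorithmIdentifier`, and the result is re-encoded as a PKCS#8 PEM, so keys on other curves should convert as well instead of being forced through the P-256 code path.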


@ -71,17 +71,34 @@ pub fn private_key_to_pkcs8(pem: &str) -> Result<String> {
let pkcs8_pem = pkey.to_pkcs8_pem(LineEnding::default())?;
Ok(pkcs8_pem.as_str().to_owned())
} else if pem.contains("EC PRIVATE KEY") {
use elliptic_curve::{
pkcs8::{EncodePrivateKey, LineEnding},
SecretKey,
use sec1::{
der::{Decode, Encode, EncodePem},
pkcs8::{AlgorithmIdentifierRef, PrivateKeyInfo},
EcPrivateKey, LineEnding,
};
// We assume it is a P256 based secret-key, which is the most popular curve.
// Attempting to decode it as P256 is still better than just failing to read it.
let pkey: SecretKey<p256::NistP256> =
SecretKey::from_sec1_pem(pem).context("Read EC SEC1")?;
let pkcs8_pem = pkey.to_pkcs8_pem(LineEnding::default())?;
Ok(pkcs8_pem.as_str().to_owned())
// Get a SEC1 ECPrivateKey from the PEM string input
let pem = pem::parse(pem).context("Parse PEM string")?;
let pkey =
EcPrivateKey::from_der(pem.contents()).context("Decode PEM into SEC1 ECPrivateKey")?;
// Retrieve the curve name from the decoded private key's parameters
let params_oid = pkey.parameters.and_then(|params| params.named_curve());
// Get the proper types to construct a PKCS#8 PrivateKeyInfo
let private_key = &pkey.to_der()?;
let algorithm = AlgorithmIdentifierRef {
oid: sec1::ALGORITHM_OID,
parameters: params_oid.as_ref().map(Into::into),
};
let pkcs8 = PrivateKeyInfo {
algorithm,
private_key,
public_key: None,
};
Ok(pkcs8.to_pem(LineEnding::default())?)
} else {
Ok(pem.to_string())
}


@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::sync::LazyLock;
use anyhow::Result;
use async_trait::async_trait;
@ -19,10 +20,8 @@ use chirpstack_api::integration;
// implement re-connect on error. To reconnect within the Integration struct would require
// mutability of the Integration struct, which is not possible without changing the
// IntegrationTrait as we would need to change the (&self, ...) signatures to (&mut self, ...).
lazy_static! {
static ref CONNECTION: RwLock<Option<Connection>> = RwLock::new(None);
static ref CHANNEL: RwLock<Option<Channel>> = RwLock::new(None);
}
static CONNECTION: LazyLock<RwLock<Option<Connection>>> = LazyLock::new(|| RwLock::new(None));
static CHANNEL: LazyLock<RwLock<Option<Channel>>> = LazyLock::new(|| RwLock::new(None));
pub struct Integration<'a> {
templates: Handlebars<'a>,
@ -128,7 +127,7 @@ impl<'a> Integration<'a> {
}
#[async_trait]
impl<'a> IntegrationTrait for Integration<'a> {
impl IntegrationTrait for Integration<'_> {
async fn uplink_event(
&self,
_vars: &HashMap<String, String>,


@ -107,7 +107,7 @@ impl<'a> Integration<'a> {
}
#[async_trait]
impl<'a> IntegrationTrait for Integration<'a> {
impl IntegrationTrait for Integration<'_> {
async fn uplink_event(
&self,
_vars: &HashMap<String, String>,


@ -539,7 +539,7 @@ impl<'de> Deserialize<'de> for Eui64Wrapper {
struct Eui64WrapperVisitor;
impl<'de> Visitor<'de> for Eui64WrapperVisitor {
impl Visitor<'_> for Eui64WrapperVisitor {
type Value = Eui64Wrapper;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {


@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::sync::LazyLock;
use anyhow::Result;
use async_trait::async_trait;
@ -8,17 +9,22 @@ use chirpstack_api::integration;
use super::Integration as IntegrationTrait;
lazy_static! {
static ref UPLINK_EVENTS: RwLock<Vec<integration::UplinkEvent>> = RwLock::new(Vec::new());
static ref JOIN_EVENTS: RwLock<Vec<integration::JoinEvent>> = RwLock::new(Vec::new());
static ref ACK_EVENTS: RwLock<Vec<integration::AckEvent>> = RwLock::new(Vec::new());
static ref TXACK_EVENTS: RwLock<Vec<integration::TxAckEvent>> = RwLock::new(Vec::new());
static ref LOG_EVENTS: RwLock<Vec<integration::LogEvent>> = RwLock::new(Vec::new());
static ref STATUS_EVENTS: RwLock<Vec<integration::StatusEvent>> = RwLock::new(Vec::new());
static ref LOCATION_EVENTS: RwLock<Vec<integration::LocationEvent>> = RwLock::new(Vec::new());
static ref INTEGRATION_EVENTS: RwLock<Vec<integration::IntegrationEvent>> =
RwLock::new(Vec::new());
}
static UPLINK_EVENTS: LazyLock<RwLock<Vec<integration::UplinkEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static JOIN_EVENTS: LazyLock<RwLock<Vec<integration::JoinEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static ACK_EVENTS: LazyLock<RwLock<Vec<integration::AckEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static TXACK_EVENTS: LazyLock<RwLock<Vec<integration::TxAckEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static LOG_EVENTS: LazyLock<RwLock<Vec<integration::LogEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static STATUS_EVENTS: LazyLock<RwLock<Vec<integration::StatusEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static LOCATION_EVENTS: LazyLock<RwLock<Vec<integration::LocationEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
static INTEGRATION_EVENTS: LazyLock<RwLock<Vec<integration::IntegrationEvent>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
pub async fn reset() {
UPLINK_EVENTS.write().await.drain(..);


@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::LazyLock;
use anyhow::{Context, Result};
use async_trait::async_trait;
@ -33,11 +34,11 @@ mod postgresql;
mod redis;
mod thingsboard;
lazy_static! {
static ref GLOBAL_INTEGRATIONS: RwLock<Vec<Box<dyn Integration + Sync + Send>>> =
RwLock::new(Vec::new());
static ref MOCK_INTEGRATION: RwLock<bool> = RwLock::new(false);
}
static GLOBAL_INTEGRATIONS: LazyLock<RwLock<Vec<Box<dyn Integration + Sync + Send>>>> =
LazyLock::new(|| RwLock::new(Vec::new()));
#[cfg(test)]
static MOCK_INTEGRATION: LazyLock<RwLock<bool>> = LazyLock::new(|| RwLock::new(false));
pub async fn setup() -> Result<()> {
info!("Setting up global integrations");


@ -66,8 +66,8 @@ impl<'a> Integration<'a> {
// get client id, this will generate a random client_id when no client_id has been
// configured.
let client_id = if conf.client_id.is_empty() {
let mut rnd = rand::thread_rng();
let client_id: u64 = rnd.gen();
let mut rnd = rand::rng();
let client_id: u64 = rnd.random();
format!("{:x}", client_id)
} else {
conf.client_id.clone()


@ -33,7 +33,7 @@ pub mod update_uplink_list;
// This returns the mac-commands which must be sent back to the device as response and a bool
// indicating if a downlink must be sent. For some mac-commands, no mac-command answer is required,
// but the device expects a downlink as confirmation, even if the downlink frame is empty.
pub async fn handle_uplink<'a>(
pub async fn handle_uplink(
uplink_frame_set: &UplinkFrameSet,
cmds: &lrwn::MACCommandSet,
tenant: &tenant::Tenant,


@ -1,7 +1,5 @@
#![recursion_limit = "256"]
#[macro_use]
extern crate lazy_static;
extern crate diesel_migrations;
#[macro_use]
extern crate diesel;


@ -1,12 +1,10 @@
use std::sync::RwLock;
use std::sync::{LazyLock, RwLock};
use anyhow::Result;
use prometheus_client::encoding::text::encode;
use prometheus_client::registry::{Metric, Registry};
lazy_static! {
static ref REGISTRY: RwLock<Registry> = RwLock::new(<Registry>::default());
}
static REGISTRY: LazyLock<RwLock<Registry>> = LazyLock::new(|| RwLock::new(<Registry>::default()));
pub fn encode_to_string() -> Result<String> {
let registry_r = REGISTRY.read().unwrap();


@ -1,5 +1,5 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::sync::{Arc, LazyLock, RwLock};
use anyhow::{Context, Result};
use tracing::{info, span, trace, Level};
@ -7,10 +7,8 @@ use tracing::{info, span, trace, Level};
use crate::config;
use lrwn::region;
lazy_static! {
static ref REGIONS: RwLock<HashMap<String, Arc<Box<dyn region::Region + Sync + Send>>>> =
RwLock::new(HashMap::new());
}
static REGIONS: LazyLock<RwLock<HashMap<String, Arc<Box<dyn region::Region + Sync + Send>>>>> =
LazyLock::new(|| RwLock::new(HashMap::new()));
pub fn setup() -> Result<()> {
info!("Setting up regions");


@ -116,6 +116,7 @@ pub struct Device {
pub join_eui: EUI64,
pub secondary_dev_addr: Option<DevAddr>,
pub device_session: Option<fields::DeviceSession>,
pub app_layer_params: fields::device::AppLayerParams,
}
#[derive(AsChangeset, Debug, Clone, Default)]
@ -133,6 +134,7 @@ pub struct DeviceChangeset {
pub battery_level: Option<Option<fields::BigDecimal>>,
pub scheduler_run_after: Option<Option<DateTime<Utc>>>,
pub is_disabled: Option<bool>,
pub app_layer_params: Option<fields::device::AppLayerParams>,
}
impl Device {
@ -190,6 +192,7 @@ impl Default for Device {
join_eui: EUI64::default(),
secondary_dev_addr: None,
device_session: None,
app_layer_params: Default::default(),
}
}
}
@ -552,6 +555,7 @@ pub async fn update(d: Device) -> Result<Device, Error> {
device::tags.eq(&d.tags),
device::variables.eq(&d.variables),
device::join_eui.eq(&d.join_eui),
device::app_layer_params.eq(&d.app_layer_params),
))
.get_result(&mut get_async_db_conn().await?)
.await


@ -109,7 +109,7 @@ impl DeviceProfile {
if let Some(class_b_params) = &self.class_b_params {
ds.class_b_ping_slot_dr = class_b_params.ping_slot_dr as u32;
ds.class_b_ping_slot_freq = class_b_params.ping_slot_freq as u32;
ds.class_b_ping_slot_freq = class_b_params.ping_slot_freq;
ds.class_b_ping_slot_nb = 1 << class_b_params.ping_slot_nb_k as u32;
}
@ -133,7 +133,7 @@ impl DeviceProfile {
ds.rx1_delay = abp_params.rx1_delay as u32;
ds.rx1_dr_offset = abp_params.rx1_dr_offset as u32;
ds.rx2_dr = abp_params.rx2_dr as u32;
ds.rx2_frequency = abp_params.rx2_freq as u32;
ds.rx2_frequency = abp_params.rx2_freq;
}
}
}


@ -34,7 +34,7 @@ pub enum Error {
NotAllowed(String),
#[error("Multiple errors")]
MultiError(Vec<Error>),
Multi(Vec<Error>),
#[error(transparent)]
Diesel(#[from] diesel::result::Error),


@ -0,0 +1,52 @@
use diesel::backend::Backend;
use diesel::{deserialize, serialize};
#[cfg(feature = "postgres")]
use diesel::{pg::Pg, sql_types::Jsonb};
#[cfg(feature = "sqlite")]
use diesel::{sql_types::Text, sqlite::Sqlite};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, AsExpression, FromSqlRow)]
#[cfg_attr(feature = "postgres", diesel(sql_type = Jsonb))]
#[cfg_attr(feature = "sqlite", diesel(sql_type = Text))]
#[serde(default)]
#[derive(Default)]
pub struct AppLayerParams {
pub ts004_session_cnt: [u16; 4],
}
#[cfg(feature = "postgres")]
impl deserialize::FromSql<Jsonb, Pg> for AppLayerParams {
fn from_sql(value: <Pg as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
let value = <serde_json::Value as deserialize::FromSql<Jsonb, Pg>>::from_sql(value)?;
Ok(serde_json::from_value(value)?)
}
}
#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for AppLayerParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
let value = serde_json::to_value(self)?;
<serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
}
}
#[cfg(feature = "sqlite")]
impl deserialize::FromSql<Text, Sqlite> for AppLayerParams
where
*const str: deserialize::FromSql<Text, Sqlite>,
{
fn from_sql(value: <Sqlite as Backend>::RawValue<'_>) -> deserialize::Result<Self> {
let s =
<*const str as deserialize::FromSql<diesel::sql_types::Text, Sqlite>>::from_sql(value)?;
Ok(serde_json::from_str(unsafe { &*s })?)
}
}
#[cfg(feature = "sqlite")]
impl serialize::ToSql<Text, Sqlite> for AppLayerParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Sqlite>) -> serialize::Result {
out.set_value(serde_json::to_string(&self)?);
Ok(serialize::IsNull::No)
}
}
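Note: the new `AppLayerParams` type above (stored on `Device` in an earlier hunk) round-trips through serde_json: it is written to a `Jsonb` column on PostgreSQL and to a `Text` column holding the JSON string on SQLite. A hedged round-trip sketch, repeating a stand-in copy of the struct only so the snippet compiles on its own:

```rust
use serde::{Deserialize, Serialize};

// Stand-in copy of the struct above, so this sketch is self-contained.
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
struct AppLayerParams {
    ts004_session_cnt: [u16; 4],
}

fn main() -> Result<(), serde_json::Error> {
    // The diesel impls above serialize to JSON for both backends.
    let params = AppLayerParams { ts004_session_cnt: [1, 0, 0, 0] };
    let json = serde_json::to_string(&params)?;
    let back: AppLayerParams = serde_json::from_str(&json)?;
    assert_eq!(params, back);
    Ok(())
}
```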


@ -29,7 +29,7 @@ impl deserialize::FromSql<Jsonb, Pg> for AbpParams {
#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for AbpParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
let value = serde_json::to_value(&self)?;
let value = serde_json::to_value(self)?;
<serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
}
}
@ -77,7 +77,7 @@ impl deserialize::FromSql<Jsonb, Pg> for ClassBParams {
#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for ClassBParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
let value = serde_json::to_value(&self)?;
let value = serde_json::to_value(self)?;
<serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
}
}
@ -122,7 +122,7 @@ impl deserialize::FromSql<Jsonb, Pg> for ClassCParams {
#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for ClassCParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
let value = serde_json::to_value(&self)?;
let value = serde_json::to_value(self)?;
<serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
}
}
@ -189,7 +189,7 @@ impl deserialize::FromSql<Jsonb, Pg> for RelayParams {
#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for RelayParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
let value = serde_json::to_value(&self)?;
let value = serde_json::to_value(self)?;
<serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
}
}
@ -263,14 +263,9 @@ impl Default for AppLayerParams {
impl AppLayerParams {
pub fn is_app_layer_f_port(&self, f_port: u8) -> bool {
if (self.ts003_version.is_some() && self.ts003_f_port == f_port)
(self.ts003_version.is_some() && self.ts003_f_port == f_port)
|| (self.ts004_version.is_some() && self.ts004_f_port == f_port)
|| (self.ts005_version.is_some() && self.ts005_f_port == f_port)
{
true
} else {
false
}
}
}
@ -285,7 +280,7 @@ impl deserialize::FromSql<Jsonb, Pg> for AppLayerParams {
#[cfg(feature = "postgres")]
impl serialize::ToSql<Jsonb, Pg> for AppLayerParams {
fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, Pg>) -> serialize::Result {
let value = serde_json::to_value(&self)?;
let value = serde_json::to_value(self)?;
<serde_json::Value as serialize::ToSql<Jsonb, Pg>>::to_sql(&value, &mut out.reborrow())
}
}
@ -314,16 +309,19 @@ impl serialize::ToSql<Text, Sqlite> for AppLayerParams {
pub enum Ts003Version {
#[default]
V100,
V200,
}
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub enum Ts004Version {
#[default]
V100,
V200,
}
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub enum Ts005Version {
#[default]
V100,
V200,
}


@ -1,5 +1,6 @@
mod big_decimal;
mod dev_nonces;
pub mod device;
pub mod device_profile;
mod device_session;
mod fuota;


@ -36,6 +36,8 @@ pub struct FuotaDeployment {
pub multicast_class_b_ping_slot_nb_k: i16,
pub multicast_frequency: i64,
pub multicast_timeout: i16,
pub multicast_session_start: Option<DateTime<Utc>>,
pub multicast_session_end: Option<DateTime<Utc>>,
pub unicast_max_retry_count: i16,
pub fragmentation_fragment_size: i16,
pub fragmentation_redundancy_percentage: i16,
@ -69,6 +71,8 @@ impl Default for FuotaDeployment {
multicast_class_b_ping_slot_nb_k: 0,
multicast_frequency: 0,
multicast_timeout: 0,
multicast_session_start: None,
multicast_session_end: None,
unicast_max_retry_count: 0,
fragmentation_fragment_size: 0,
fragmentation_redundancy_percentage: 0,
@ -154,6 +158,7 @@ pub struct FuotaDeploymentJob {
pub max_retry_count: i16,
pub attempt_count: i16,
pub scheduler_run_after: DateTime<Utc>,
pub warning_msg: String,
pub error_msg: String,
}
@ -169,6 +174,7 @@ impl Default for FuotaDeploymentJob {
max_retry_count: 0,
attempt_count: 0,
scheduler_run_after: now,
warning_msg: "".into(),
error_msg: "".into(),
}
}
@ -220,6 +226,8 @@ pub async fn update_deployment(d: FuotaDeployment) -> Result<FuotaDeployment, Er
.eq(&d.multicast_class_b_ping_slot_nb_k),
fuota_deployment::multicast_frequency.eq(&d.multicast_frequency),
fuota_deployment::multicast_timeout.eq(&d.multicast_timeout),
fuota_deployment::multicast_session_start.eq(&d.multicast_session_start),
fuota_deployment::multicast_session_end.eq(&d.multicast_session_end),
fuota_deployment::unicast_max_retry_count.eq(&d.unicast_max_retry_count),
fuota_deployment::fragmentation_fragment_size.eq(&d.fragmentation_fragment_size),
fuota_deployment::fragmentation_redundancy_percentage
@ -328,7 +336,7 @@ pub async fn add_devices(fuota_deployment_id: Uuid, dev_euis: Vec<EUI64>) -> Res
if errors.is_empty() {
Ok(())
} else {
Err(Error::MultiError(errors))
Err(Error::Multi(errors))
}
}
@ -441,7 +449,7 @@ pub async fn set_device_timeout_error(
let mut q = diesel::update(fuota_deployment_device::table)
.set(fuota_deployment_device::dsl::error_msg.eq(&error_msg))
.filter(fuota_deployment_device::dsl::fuota_deployment_id.eq(&fuota_deployment_id))
.filter(fuota_deployment_device::dsl::error_msg.is_not_null())
.filter(fuota_deployment_device::dsl::error_msg.eq(""))
.into_boxed();
if mc_group_setup_timeout {
@ -544,7 +552,7 @@ pub async fn add_gateways(fuota_deployment_id: Uuid, gateway_ids: Vec<EUI64>) ->
if errors.is_empty() {
Ok(())
} else {
Err(Error::MultiError(errors))
Err(Error::Multi(errors))
}
}
@ -642,6 +650,7 @@ pub async fn update_job(j: FuotaDeploymentJob) -> Result<FuotaDeploymentJob, Err
fuota_deployment_job::completed_at.eq(&j.completed_at),
fuota_deployment_job::attempt_count.eq(&j.attempt_count),
fuota_deployment_job::scheduler_run_after.eq(&j.scheduler_run_after),
fuota_deployment_job::warning_msg.eq(&j.warning_msg),
fuota_deployment_job::error_msg.eq(&j.error_msg),
))
.get_result(&mut get_async_db_conn().await?)
@ -780,7 +789,7 @@ pub fn get_multicast_timeout(d: &FuotaDeployment) -> Result<usize> {
conf.network.scheduler.multicast_class_c_margin.as_secs() as usize;
// Multiply by the number of fragments (+1 for additional margin).
let mc_class_c_duration_secs = mc_class_c_margin_secs * (total_fragments + 1 as usize);
let mc_class_c_duration_secs = mc_class_c_margin_secs * (total_fragments + 1);
// Calculate the timeout value. In case of Class-C, timeout is defined as seconds,
// where the number of seconds is 2^timeout.
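Note (worked example, assuming the exponent is rounded up so that 2^timeout covers the computed duration): a Class-C session of roughly 300 seconds would need timeout = 9, since 2^8 = 256 s falls short and 2^9 = 512 s is the smallest power of two that covers it.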


@ -1,3 +1,4 @@
use std::sync::LazyLock;
use std::sync::RwLock;
use std::time::Instant;
@ -47,19 +48,18 @@ pub mod user;
use crate::monitoring::prometheus;
lazy_static! {
static ref ASYNC_REDIS_POOL: TokioRwLock<Option<AsyncRedisPool>> = TokioRwLock::new(None);
static ref REDIS_PREFIX: RwLock<String> = RwLock::new("".to_string());
static ref STORAGE_REDIS_CONN_GET: Histogram = {
let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12));
prometheus::register(
"storage_redis_conn_get_duration_seconds",
"Time between requesting a Redis connection and the connection-pool returning it",
histogram.clone(),
);
histogram
};
}
static ASYNC_REDIS_POOL: LazyLock<TokioRwLock<Option<AsyncRedisPool>>> =
LazyLock::new(|| TokioRwLock::new(None));
static REDIS_PREFIX: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
static STORAGE_REDIS_CONN_GET: LazyLock<Histogram> = LazyLock::new(|| {
let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12));
prometheus::register(
"storage_redis_conn_get_duration_seconds",
"Time between requesting a Redis connection and the connection-pool returning it",
histogram.clone(),
);
histogram
});
#[cfg(feature = "postgres")]
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations_postgres");


@ -1012,7 +1012,7 @@ pub mod test {
// invalid f_port
assert!(enqueue(
MulticastGroupQueueItem {
multicast_group_id: mg.id.into(),
multicast_group_id: mg.id,
gateway_id: gw.gateway_id,
f_cnt: 1,
f_port: 0,
@ -1026,7 +1026,7 @@ pub mod test {
assert!(enqueue(
MulticastGroupQueueItem {
multicast_group_id: mg.id.into(),
multicast_group_id: mg.id,
gateway_id: gw.gateway_id,
f_cnt: 1,
f_port: 256,
@ -1041,7 +1041,7 @@ pub mod test {
// Enqueue (Class-C) (delay)
let (ids, f_cnt) = enqueue(
MulticastGroupQueueItem {
multicast_group_id: mg.id.into(),
multicast_group_id: mg.id,
gateway_id: gw.gateway_id,
f_cnt: 1,
f_port: 2,
@ -1071,7 +1071,7 @@ pub mod test {
let mut mg = update(mg).await.unwrap();
let (ids, f_cnt) = enqueue(
MulticastGroupQueueItem {
multicast_group_id: mg.id.into(),
multicast_group_id: mg.id,
gateway_id: gw.gateway_id,
f_cnt: 1,
f_port: 2,
@ -1098,7 +1098,7 @@ pub mod test {
let mg = update(mg).await.unwrap();
let (ids, f_cnt) = enqueue(
MulticastGroupQueueItem {
multicast_group_id: mg.id.into(),
multicast_group_id: mg.id,
gateway_id: gw.gateway_id,
f_cnt: 1,
f_port: 2,


@ -1,4 +1,4 @@
use std::sync::RwLock;
use std::sync::{LazyLock, RwLock};
use std::time::Instant;
use anyhow::Result;
@ -20,18 +20,16 @@ use crate::helpers::tls::get_root_certs;
pub type AsyncPgPool = DeadpoolPool<AsyncPgConnection>;
pub type AsyncPgPoolConnection = DeadpoolObject<AsyncPgConnection>;
lazy_static! {
static ref ASYNC_PG_POOL: RwLock<Option<AsyncPgPool>> = RwLock::new(None);
static ref STORAGE_PG_CONN_GET: Histogram = {
let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12));
prometheus::register(
"storage_pg_conn_get_duration_seconds",
"Time between requesting a PostgreSQL connection and the connection-pool returning it",
histogram.clone(),
);
histogram
};
}
static ASYNC_PG_POOL: LazyLock<RwLock<Option<AsyncPgPool>>> = LazyLock::new(|| RwLock::new(None));
static STORAGE_PG_CONN_GET: LazyLock<Histogram> = LazyLock::new(|| {
let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12));
prometheus::register(
"storage_pg_conn_get_duration_seconds",
"Time between requesting a PostgreSQL connection and the connection-pool returning it",
histogram.clone(),
);
histogram
});
pub fn setup(conf: &config::Postgresql) -> Result<()> {
info!("Setting up PostgreSQL connection pool");


@ -65,6 +65,7 @@ diesel::table! {
join_eui -> Bytea,
secondary_dev_addr -> Nullable<Bytea>,
device_session -> Nullable<Bytea>,
app_layer_params -> Jsonb,
}
}
@ -203,6 +204,8 @@ diesel::table! {
multicast_class_b_ping_slot_nb_k -> Int2,
multicast_frequency -> Int8,
multicast_timeout -> Int2,
multicast_session_start -> Nullable<Timestamptz>,
multicast_session_end -> Nullable<Timestamptz>,
unicast_max_retry_count -> Int2,
fragmentation_fragment_size -> Int2,
fragmentation_redundancy_percentage -> Int2,
@ -249,6 +252,7 @@ diesel::table! {
max_retry_count -> Int2,
attempt_count -> Int2,
scheduler_run_after -> Timestamptz,
warning_msg -> Text,
error_msg -> Text,
}
}


@ -60,6 +60,7 @@ diesel::table! {
join_eui -> Binary,
secondary_dev_addr -> Nullable<Binary>,
device_session -> Nullable<Binary>,
app_layer_params -> Text,
}
}
@ -180,6 +181,8 @@ diesel::table! {
multicast_class_b_ping_slot_nb_k -> SmallInt,
multicast_frequency -> BigInt,
multicast_timeout -> SmallInt,
multicast_session_start -> Nullable<TimestamptzSqlite>,
multicast_session_end -> Nullable<TimestamptzSqlite>,
unicast_max_retry_count -> SmallInt,
fragmentation_fragment_size -> SmallInt,
fragmentation_redundancy_percentage -> SmallInt,
@ -224,6 +227,7 @@ diesel::table! {
max_retry_count -> SmallInt,
attempt_count -> SmallInt,
scheduler_run_after -> TimestamptzSqlite,
warning_msg -> Text,
error_msg -> Text,
}
}


@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::sync::LazyLock;
use anyhow::{Context, Result};
use diesel_async::RunQueryDsl;
@ -8,9 +9,7 @@ use uuid::Uuid;
use super::{error::Error, fields, get_async_db_conn};
use lrwn::EUI64;
lazy_static! {
static ref SEARCH_TAG_RE: Regex = Regex::new(r"([^ ]+):([^ ]+)").unwrap();
}
static SEARCH_TAG_RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"([^ ]+):([^ ]+)").unwrap());
#[derive(QueryableByName, PartialEq, Debug)]
pub struct SearchResult {
@ -449,8 +448,8 @@ pub mod test {
let _d = device::create(device::Device {
dev_eui: EUI64::from_str("0203040506070809").unwrap(),
name: "test-device".into(),
application_id: a.id.clone(),
device_profile_id: dp.id.clone(),
application_id: a.id,
device_profile_id: dp.id,
tags: build_tags(&[("common_tag", "value"), ("mytag", "dev_value")]),
..Default::default()
})


@ -1,4 +1,4 @@
use std::sync::RwLock;
use std::sync::{LazyLock, RwLock};
use std::time::Instant;
use anyhow::Result;
@ -19,18 +19,17 @@ use crate::config;
pub type AsyncSqlitePool = DeadpoolPool<SyncConnectionWrapper<SqliteConnection>>;
pub type AsyncSqlitePoolConnection = DeadpoolObject<SyncConnectionWrapper<SqliteConnection>>;
lazy_static! {
static ref ASYNC_SQLITE_POOL: RwLock<Option<AsyncSqlitePool>> = RwLock::new(None);
static ref STORAGE_SQLITE_CONN_GET: Histogram = {
let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12));
prometheus::register(
"storage_sqlite_conn_get_duration_seconds",
"Time between requesting a SQLite connection and the connection-pool returning it",
histogram.clone(),
);
histogram
};
}
static ASYNC_SQLITE_POOL: LazyLock<RwLock<Option<AsyncSqlitePool>>> =
LazyLock::new(|| RwLock::new(None));
static STORAGE_SQLITE_CONN_GET: LazyLock<Histogram> = LazyLock::new(|| {
let histogram = Histogram::new(exponential_buckets(0.001, 2.0, 12));
prometheus::register(
"storage_sqlite_conn_get_duration_seconds",
"Time between requesting a SQLite connection and the connection-pool returning it",
histogram.clone(),
);
histogram
});
pub fn setup(conf: &config::Sqlite) -> Result<()> {
info!("Setting up SQLite connection pool");


@ -325,7 +325,6 @@ pub mod test {
use crate::storage::user::test::create_user;
use crate::test;
use chrono::SubsecRound;
use std::str::FromStr;
use uuid::Uuid;
struct FilterTest<'a> {
@ -357,10 +356,19 @@ pub mod test {
async fn test_tenant() {
let _guard = test::prepare().await;
// delete default tenant
delete(&Uuid::from_str("52f14cd4-c6f1-4fbd-8f87-4025e1d49242").unwrap())
.await
.unwrap();
// delete existing tenants.
let tenants = list(
10,
0,
&Filters {
..Default::default()
},
)
.await
.unwrap();
for t in &tenants {
delete(&t.id).await.unwrap();
}
let mut t = create_tenant().await;
@ -379,7 +387,7 @@ pub mod test {
let tu = TenantUser {
tenant_id: t.id,
user_id: user.id.into(),
user_id: user.id,
is_admin: true,
..Default::default()
};
@ -480,7 +488,7 @@ pub mod test {
let tu = TenantUser {
tenant_id: t.id,
user_id: user.id.into(),
user_id: user.id,
is_admin: true,
..Default::default()
};


@ -4,10 +4,9 @@ use diesel::{dsl, prelude::*};
use diesel_async::RunQueryDsl;
use email_address::EmailAddress;
use pbkdf2::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Algorithm, Pbkdf2,
};
use rand_core::OsRng;
use tracing::info;
use uuid::Uuid;


@ -14,7 +14,6 @@ use crate::storage::{get_async_redis_conn, redis_key};
use chirpstack_api::{api, integration};
#[allow(clippy::enum_variant_names)]
pub async fn log_event_for_device(typ: &str, dev_eui: &str, b: &[u8]) -> Result<()> {
let conf = config::get();


@ -1,6 +1,7 @@
use std::future::Future;
use std::io::Cursor;
use std::pin::Pin;
use std::sync::LazyLock;
use std::time::Duration;
use prost::Message;
@ -17,9 +18,7 @@ use crate::storage::{
use chirpstack_api::{gw, integration as integration_pb, internal, stream};
use lrwn::EUI64;
lazy_static! {
static ref LAST_DOWNLINK_ID: RwLock<u32> = RwLock::new(0);
}
static LAST_DOWNLINK_ID: LazyLock<RwLock<u32>> = LazyLock::new(|| RwLock::new(0));
pub type Validator = Box<dyn Fn() -> Pin<Box<dyn Future<Output = ()>>>>;


@ -150,7 +150,7 @@ async fn test_fns_uplink() {
// Simulate uplink
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: data_phy.to_vec().unwrap(),


@ -5558,7 +5558,7 @@ async fn run_test(t: &Test) {
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: t.phy_payload.to_vec().unwrap(),


@ -462,7 +462,7 @@ async fn run_uplink_test(t: &UplinkTest) {
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: t.phy_payload.to_vec().unwrap(),


@ -1,5 +1,5 @@
use std::env;
use std::sync::{Mutex, Once};
use std::sync::{LazyLock, Mutex, Once};
use crate::{adr, config, region, storage};
@ -17,9 +17,7 @@ mod relay_otaa_test;
static TRACING_INIT: Once = Once::new();
lazy_static! {
static ref TEST_MUX: Mutex<()> = Mutex::new(());
}
static TEST_MUX: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
pub async fn prepare<'a>() -> std::sync::MutexGuard<'a, ()> {
dotenv::dotenv().ok();


@ -367,7 +367,7 @@ async fn run_test(t: &Test) {
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: t.phy_payload.to_vec().unwrap(),


@ -190,7 +190,7 @@ async fn test_fns() {
// Simulate uplink
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: jr_phy.to_vec().unwrap(),


@ -1250,7 +1250,7 @@ async fn run_test(t: &Test) {
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: t.phy_payload.to_vec().unwrap(),


@ -821,7 +821,7 @@ async fn run_test(t: &Test) {
uplink::handle_uplink(
CommonName::EU868,
"eu868".into(),
"eu868",
Uuid::new_v4(),
gw::UplinkFrameSet {
phy_payload: t.phy_payload.to_vec().unwrap(),

Some files were not shown because too many files have changed in this diff.