92 Commits

Author SHA1 Message Date
850f74f667 fix(deps): update rust crate futures to v0.3.32 2026-02-16 01:09:06 +00:00
76b21aab07 chore(deps): update rust crate tracing to v0.1.44 2025-12-19 01:09:16 +00:00
7d7e760e5e chore(deps): update tokio-tracing monorepo 2025-11-29 01:08:52 +00:00
947303c5e0 chore(deps): update rust crate tracing-subscriber to v0.3.20 2025-11-13 01:27:56 +00:00
06a9dd10e1 fix(deps): update tokio-prost monorepo to v0.13.5
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2025-02-13 01:11:41 +00:00
55befef95b chore(release): v0.1.0 (#17)
All checks were successful
continuous-integration/drone/tag Build is passing
continuous-integration/drone/push Build is passing
chore(release): 0.1.0

Co-authored-by: cuddle-please <bot@cuddle.sh>
Reviewed-on: https://git.front.kjuulh.io/kjuulh/churn-v2/pulls/17
2025-01-11 15:26:44 +01:00
53cc689dc4 docs: update readme
All checks were successful
continuous-integration/drone/push Build is passing
next up is differentiating the different agents, such that we can execute commands from the cli to for example update dependencies on all machines, restart machines etc.
2025-01-11 15:22:38 +01:00
1c20383de6 chore: update final repo
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 15:11:30 +01:00
53c15a653f feat: add cuddle please
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 15:10:59 +01:00
9c5cb6667e chore: update lock"
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 15:09:23 +01:00
b0c40196b6 docs: add installation docs
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 14:11:02 +01:00
a28a5ca6ee fix: use actual names for files
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 13:08:04 +01:00
ea6bfc9c04 feat: enable churn update service
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 13:04:54 +01:00
844f8519d5 feat: add updater to install script 2025-01-11 13:03:11 +01:00
1508fbb2bf feat: add updater to install script
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-11 13:02:57 +01:00
ef6ae3f2b1 chore: update default schedule
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-10 21:46:57 +01:00
8923c60d9e feat: add http client
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-10 21:42:35 +01:00
efec76d28c feat: run more often
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2025-01-05 20:50:49 +01:00
03e23c7d9d feat: enable checking if it should actually run
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2025-01-04 01:52:05 +01:00
83294306a4 feat: enable having get variable from local setup
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2025-01-04 01:28:32 +01:00
ceaad75057 feat: inherit output as well
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2025-01-04 00:35:18 +01:00
b86fa54671 fix(deps): update rust crate serde to v1.0.217
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-28 01:13:18 +00:00
aecdace4ee chore(deps): update rust crate anyhow to v1.0.95
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-23 01:10:59 +00:00
c98761622d fix(deps): update rust crate serde_json to v1.0.134
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-22 01:09:42 +00:00
825f612aea fix(deps): update all dependencies to v28
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-12-21 01:11:47 +00:00
c12b02ba92 fix(deps): update rust crate nodrift to 0.3.0
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-12-14 01:09:13 +00:00
c5e5307682 fix(deps): update rust crate serde to v1.0.216
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-11 05:09:41 +00:00
5fb59ad992 fix(deps): update tokio-prost monorepo to v0.13.4
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-07 01:13:20 +00:00
e21663c8bd chore(deps): update rust crate clap to v4.5.23
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-12-06 01:14:09 +00:00
39a01778b2 fix(deps): update rust crate tokio-util to v0.7.13
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-05 01:11:27 +00:00
f1cdf3ae20 chore(deps): update all dependencies
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-04 01:13:13 +00:00
355587234e feat: allow process from external code
Some checks failed
continuous-integration/drone/push Build is failing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-02 23:12:37 +01:00
21a13f3444 feat: add inherit
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-02 21:00:20 +01:00
5e1b585a2d feat: add default no labels
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-01 22:46:22 +01:00
94025a02ce feat: warn all targets
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-01 22:30:18 +01:00
db4cc98643 feat: update with web assembly components
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-01 22:21:17 +01:00
2387a70778 feat: add labels to config
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-01 14:38:10 +01:00
d6fdda0e4e feat: add abstraction around task
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-12-01 14:25:24 +01:00
974e1ee0d6 feat: enable webpki roots
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 12:31:46 +01:00
717b052a88 feat: add short connect timeout
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 12:30:16 +01:00
9b52376e7a feat: more error logging
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 12:22:44 +01:00
7b222af1dd feat: stop the service if running
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 12:20:51 +01:00
150c7c3c98 feat: setup stream logging
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 12:16:19 +01:00
8b064c2169 feat: update script with warn
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 12:02:52 +01:00
879737eedd feat: disable force again
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:52:50 +01:00
818cc6c671 feat: make curl silent"
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:49:23 +01:00
e759239243 feat: force update
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:48:40 +01:00
b37674987e feat: use public prod
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:48:26 +01:00
1dea5f29ac feat: run as root
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:45:49 +01:00
38a0a9fba4 feat: agent is already setup
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:44:51 +01:00
f7c7aef96b feat: allow errors
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:43:45 +01:00
7fefca47c9 feat: some more debugging
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:42:46 +01:00
36b1335fe9 feat: some more debugging
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:40:05 +01:00
eeabeda036 feat: stderr to stdout as well
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:37:28 +01:00
569fee52e6 feat: this should work
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:36:10 +01:00
b0ec41fa93 feat: when config has already been setup
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:35:24 +01:00
77f5ec7475 feat: add agent start as well
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:33:21 +01:00
f3926d0885 feat: update with agent setup
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:32:41 +01:00
b48b1ec886 feat: add install script
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-30 11:28:03 +01:00
79a8a34499 chore(deps): update rust crate tracing-subscriber to v0.3.19
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-11-30 01:19:48 +00:00
c9bbeb2439 fix(deps): update rust crate bytes to v1.9.0
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-11-29 01:10:19 +00:00
b19ff0b7e5 chore(deps): update rust crate tracing to v0.1.41
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-11-28 01:11:13 +00:00
eeaf59ac63 feat: add comments
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 22:04:07 +01:00
6f04d0cdda feat: use actual internal
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:44:46 +01:00
43c5fa1731 feat: reqwest as native build
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:42:41 +01:00
9badf8e193 feat: use internal
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:41:56 +01:00
55a0d294c5 feat: add external service host
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:37:08 +01:00
8508d3f640 feat: add grpc host
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:34:10 +01:00
dd8ade9798 feat: add external vars
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:29:58 +01:00
f1e6268a2d feat: add grpc and env
Some checks failed
continuous-integration/drone/push Build is failing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:28:06 +01:00
6647bb89be feat: add queue
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:24:50 +01:00
ea5adb2f93 feat: add common queue
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 21:08:37 +01:00
ee323e99e8 feat: add discovery
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 17:12:15 +01:00
c4434fd841 fix(deps): update rust crate tower-http to 0.6.0
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-11-24 01:15:41 +00:00
cb340ffb1e feat: add tonic
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 01:34:06 +01:00
670fd0d13b feat: added tonic
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 01:15:59 +01:00
deceaad4d1 feat: added longer timer
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 01:13:09 +01:00
32cd1cc617 feat: fix error message
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 00:54:19 +01:00
d1e9eb9eb5 feat: add agent
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-24 00:53:43 +01:00
7487e7336e feat: add churn v2
All checks were successful
continuous-integration/drone/push Build is passing
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-23 22:58:43 +01:00
610a465279 feat: initial v2 commit
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-23 22:39:13 +01:00
cfe21ad23c feat: reset
Signed-off-by: kjuulh <contact@kjuulh.io>
2024-11-23 22:38:25 +01:00
0f5249f620 chore(deps): update rust crate serde to v1.0.215 2024-11-12 01:07:53 +00:00
30ca5c540f chore(deps): update rust crate serde to v1.0.214 2024-10-29 01:11:07 +00:00
e0c622f8fd chore(deps): update rust crate serde to v1.0.213 2024-10-23 00:10:40 +00:00
8159603490 chore(deps): update rust crate serde to v1.0.210 2024-09-07 00:07:13 +00:00
736e166b76 chore(deps): update rust crate serde to v1.0.209 2024-08-24 04:06:37 +00:00
542e7aceaf chore(deps): update rust crate serde_json to v1.0.126 2024-08-23 20:25:55 +00:00
3e65cda2c9 chore(deps): update all dependencies 2024-08-22 13:39:37 +00:00
9f23dd935a chore(deps): update rust crate serde to v1.0.208 2024-08-21 20:32:58 +00:00
085123a1b0 fix(deps): update all dependencies 2024-07-06 13:16:57 +00:00
c3dda47512 chore(deps): update all dependencies 2024-05-25 19:03:43 +00:00
64 changed files with 4663 additions and 3343 deletions


@@ -1,2 +1,2 @@
kind: template
load: cuddle-rust-cli-plan.yaml
load: cuddle-rust-service-plan.yaml

.env (new file, 9 lines)

@@ -0,0 +1,9 @@
EXTERNAL_HOST=http://localhost:3000
PROCESS_HOST=http://localhost:7900
SERVICE_HOST=127.0.0.1:3000
DISCOVERY_HOST=http://127.0.0.1:3000
#EXTERNAL_HOST=http://localhost:3000
#PROCESS_HOST=http://localhost:7900
#SERVICE_HOST=127.0.0.1:3000
#DISCOVERY_HOST=https://churn.prod.kjuulh.app
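
The crates in this workspace load a `.env` like the one above via `dotenv::dotenv().ok()` before reading configuration from the environment (see the agent `main` further down). A minimal sketch of that pattern, assuming only the workspace's `dotenv` dependency; the variable choice and fallback address are illustrative:

```rust
// Minimal sketch (not from the repository): load .env, then read one of the
// variables defined above. The fallback address is a placeholder.
fn main() {
    dotenv::dotenv().ok(); // load .env into the process environment, if present
    let service_host = std::env::var("SERVICE_HOST")
        .unwrap_or_else(|_| "127.0.0.1:3000".to_string());
    println!("binding service on {service_host}");
}
```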


@@ -6,9 +6,65 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.1.0] - 2024-04-06
## [0.1.0] - 2025-01-11
### Added
- add cuddle please
- enable churn update service
- add updater to install script
- add updater to install script
- add http client
- run more often
- enable checking if it should actually run
- enable having get variable from local setup
- inherit output as well
- allow process from external code
- add inherit
- add default no labels
- warn all targets
- update with web assembly components
- add labels to config
- add abstraction around task
- enable webpki roots
- add short connect timeout
- more error logging
- stop the service if running
- setup stream logging
- update script with warn
- disable force again
- make curl silent"
- force update
- use public prod
- run as root
- agent is already setup
- allow errors
- some more debugging
- some more debugging
- stderr to stdout as well
- this should work
- when config has already been setup
- add agent start as well
- update with agent setup
- add install script
- add comments
- use actual internal
- reqwest as native build
- use internal
- add external service host
- add grpc host
- add external vars
- add grpc and env
- add queue
- add common queue
- add discovery
- add tonic
- added tonic
- added longer timer
- fix error message
- add agent
- add churn v2
- initial v2 commit
- reset
- update
- update
- update stuff
@@ -28,9 +84,58 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- add simple health check
### Docs
- update readme
next up is differentiating the different agents, such that we can execute commands from the cli to for example update dependencies on all machines, restart machines etc.
- add installation docs
- add notes
### Fixed
- use actual names for files
- *(deps)* update rust crate serde to v1.0.217
- *(deps)* update rust crate serde_json to v1.0.134
- *(deps)* update all dependencies to v28
- *(deps)* update rust crate nodrift to 0.3.0
- *(deps)* update rust crate serde to v1.0.216
- *(deps)* update tokio-prost monorepo to v0.13.4
- *(deps)* update rust crate tokio-util to v0.7.13
- *(deps)* update rust crate bytes to v1.9.0
- *(deps)* update rust crate tower-http to 0.6.0
- *(deps)* update all dependencies
- *(deps)* update rust crate capnp to 0.19.5
- *(deps)* update rust crate capnp to 0.19.4
### Other
- update final repo
- update lock"
- update default schedule
- *(deps)* update rust crate anyhow to v1.0.95
- *(deps)* update rust crate clap to v4.5.23
- *(deps)* update all dependencies
- *(deps)* update rust crate tracing-subscriber to v0.3.19
- *(deps)* update rust crate tracing to v0.1.41
- *(deps)* update rust crate serde to v1.0.215
- *(deps)* update rust crate serde to v1.0.214
- *(deps)* update rust crate serde to v1.0.213
- *(deps)* update rust crate serde to v1.0.210
- *(deps)* update rust crate serde to v1.0.209
- *(deps)* update rust crate serde_json to v1.0.126
- *(deps)* update all dependencies
- *(deps)* update rust crate serde to v1.0.208
- *(deps)* update all dependencies
- *(deps)* update rust crate serde to v1.0.203
- *(deps)* update rust crate anyhow to 1.0.86
- *(deps)* update rust crate anyhow to 1.0.85
- *(deps)* update rust crate anyhow to 1.0.84
- *(deps)* update rust crate itertools to 0.13.0
- *(deps)* update rust crate anyhow to 1.0.83
- *(deps)* update rust crate reqwest to 0.12.4
- *(deps)* update rust crate chrono to 0.4.38
- *(deps)* update rust crate anyhow to 1.0.82
- Merge pull request 'chore(release): v0.1.0' (#4) from cuddle-please/release into main
Reviewed-on: https://git.front.kjuulh.io/kjuulh/churn/pulls/4
- *(release)* 0.1.0
- *(test)* test commit
- *(test)* test commit
- *(test)* test commit

Cargo.lock (generated, 3322 lines changed)

File diff suppressed because it is too large.


@@ -2,36 +2,15 @@
members = ["crates/*"]
resolver = "2"
[workspace.package]
repository = "https://git.front.kjuulh.io/kjuulh/cuddle-please"
description = "Churn is a distributed configuration manager and engine"
readme = "README.md"
license-file = "LICENSE-MIT"
authors = ["kjuulh <contact@kjuulh.io>"]
version = "0.1.0"
edition = "2021"
publish = true
[workspace.dependencies]
churn = { path = "crates/churn" }
churn-agent = { path = "crates/churn-agent" }
churn-server = { path = "crates/churn-server" }
churn-domain = { path = "crates/churn-domain", version = "0.1.0" }
churn-capnp = { path = "crates/churn-capnp", version = "0.1.0" }
anyhow = { version = "1.0.86" }
anyhow = { version = "1" }
tokio = { version = "1", features = ["full"] }
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3.18" }
clap = { version = "4.5.4", features = ["derive", "env"] }
dotenv = { version = "0.15.0" }
axum = { version = "0.7.5", features = ["macros"] }
async-trait = "*"
serde = {version = "1", features = ["derive"]}
serde_json = "1"
reqwest = {version = "0.12.4", features = ["json"]}
uuid = {version = "1.8.0", features = ["v4", "serde"]}
itertools = {version = "0.13.0"}
clap = { version = "4", features = ["derive", "env"] }
dotenv = { version = "0.15" }
axum = { version = "0.7" }
sled = "0.34.7"
chrono = {version = "0.4.38", features = ["serde"]}
[workspace.package]
version = "0.1.0"


@@ -1,8 +0,0 @@
Copyright 2023 Kasper J. Hermansen
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,6 +0,0 @@
# Notes
- Building a ringbuffer like structure for sliding window logs in the server.
- Move logs to cold storage in rocksdb, after they're expelled from the ringbuffer
- Implement rocksdb in the agents to support settings and whatnot


@@ -1,3 +1,27 @@
# Churn - A distributed contriguration manager and engine
# churn
## (Work in progress)
## Installation
To install Churn, you first need a server and one or more agents.
The server can be run via Docker:
```shell
docker run docker.io/kjuulh/churn-v2:latest
```
To install an agent, run the following script:
```shell
curl https://git.front.kjuulh.io/kjuulh/churn-v2/raw/branch/main/install.sh | bash
```
Configure `~/.local/share/io.kjuulh.churn-agent/churn-agent.toml` with an editor of your choice. The Churn agent generates a semi-random name for each agent; consider changing it to something more semantically meaningful to you.
## CLI (TBD)
The churn CLI allows sending specific commands to a set of agents:
```
```

buf.gen.yaml (new file, 8 lines)

@@ -0,0 +1,8 @@
version: v2
plugins:
- remote: buf.build/community/neoeinstein-prost
out: crates/churn/src/grpc
- remote: buf.build/community/neoeinstein-tonic:v0.4.0
out: crates/churn/src/grpc
inputs:
- directory: crates/churn/proto


@@ -1,39 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v0.1.0 (2023-08-26)
### New Features
- <csr-id-8f8c5fd41aaa82a495dd0933060f0a3a095bbaf1/> with basic package
- <csr-id-821e14fb1256957a107220c6c775565f5abc58c4/> with publish
- <csr-id-569f5272e667deeef9f269db5eaf3dec57e2df1c/> with monitor
- <csr-id-97978df287ee42f523f509ac686a13fa0400a026/> add initial churn
- <csr-id-f61d0bbf120607e59145a80b65985ab93c938522/> add simple health check
### Commit Statistics
<csr-read-only-do-not-edit/>
- 5 commits contributed to the release over the course of 2 calendar days.
- 5 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- With basic package (8f8c5fd)
- With publish (821e14f)
- With monitor (569f527)
- Add initial churn (97978df)
- Add simple health check (f61d0bb)
</details>


@@ -1,24 +0,0 @@
[package]
name = "churn-agent"
description.workspace = true
authors.workspace = true
license-file.workspace = true
version= "0.1.0"
edition.workspace = true
publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
churn-domain.workspace = true
anyhow.workspace = true
tokio.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
clap.workspace = true
dotenv.workspace = true
axum.workspace = true
serde.workspace = true
serde_json.workspace = true
reqwest.workspace = true


@@ -1,74 +0,0 @@
use std::sync::Arc;
use axum::async_trait;
use churn_domain::ServerEnrollReq;
use tokio::sync::Mutex;
#[derive(Clone)]
pub struct AgentService(Arc<dyn AgentServiceTrait + Send + Sync + 'static>);
impl std::ops::Deref for AgentService {
type Target = Arc<dyn AgentServiceTrait + Send + Sync + 'static>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Default for AgentService {
fn default() -> Self {
Self(Arc::new(DefaultAgentService::default()))
}
}
#[derive(Default)]
struct DefaultAgentService {
server: Arc<Mutex<String>>,
leases: Arc<Mutex<Vec<String>>>,
}
#[async_trait]
pub trait AgentServiceTrait {
async fn enroll(&self, agent_name: &str, server: &str, lease: &str) -> anyhow::Result<()>;
}
#[async_trait]
impl AgentServiceTrait for DefaultAgentService {
async fn enroll(&self, agent_name: &str, server: &str, lease: &str) -> anyhow::Result<()> {
let mut cur_server = self.server.lock().await;
let mut leases = self.leases.lock().await;
let client = reqwest::Client::new();
let req = client
.post(format!("{server}/agent/enroll"))
.json(&ServerEnrollReq {
lease: lease.into(),
agent_name: agent_name.into(),
})
.build()?;
let resp = client.execute(req).await?;
if !resp.status().is_success() {
if let Ok(text) = resp.text().await {
anyhow::bail!(
"could not enroll agent: {} at server: {}, error: {}",
agent_name,
server,
text
)
}
anyhow::bail!(
"could not enroll agent: {} at server: {}",
agent_name,
server
)
}
*cur_server = server.to_string();
leases.push(lease.to_string());
Ok(())
}
}
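
For illustration, a minimal driver for the enrollment service above; a sketch only, assuming the types above are in scope, with placeholder server URL and lease values:

```rust
// Usage sketch (not from the repository): enroll via the AgentService above.
// The agent name, server URL, and lease are placeholders.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let agent = AgentService::default();
    agent
        .enroll("my-agent", "http://127.0.0.1:3000", "lease-token")
        .await?;
    Ok(())
}
```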


@@ -1,130 +0,0 @@
mod agent;
use std::net::SocketAddr;
use agent::AgentService;
use anyhow::Error;
use axum::{
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
routing::{get, post},
Json, Router,
};
use churn_domain::AgentEnrollReq;
use clap::{Parser, Subcommand};
use serde_json::json;
use tokio::net::TcpListener;
#[derive(Parser)]
#[command(author, version, about, long_about = None, subcommand_required = true)]
struct Command {
#[command(subcommand)]
command: Option<Commands>,
}
#[derive(Subcommand)]
enum Commands {
Daemon {
#[arg(env = "CHURN_ADDR", long)]
host: SocketAddr,
},
Connect {
/// agent name is the hostname which other agents or servers can resolve and connect via. It should be unique
#[arg(env = "CHURN_AGENT_NAME", long)]
agent_name: String,
#[arg(env = "CHURN_ADDR", long)]
host: SocketAddr,
#[arg(env = "CHURN_TOKEN", long)]
token: String,
},
}
#[derive(Clone, Default)]
struct AppState {
agent: AgentService,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
dotenv::dotenv().ok();
tracing_subscriber::fmt::init();
let cli = Command::parse();
handle_command(cli).await?;
Ok(())
}
async fn handle_command(cmd: Command) -> anyhow::Result<()> {
match cmd.command {
Some(Commands::Daemon { host }) => {
tracing::info!("Starting churn server");
let app = Router::new()
.route("/enroll", post(enroll))
.route("/ping", get(ping))
.with_state(AppState::default());
tracing::info!("churn server listening on {}", host);
let listener = TcpListener::bind(&host).await?;
axum::serve(listener, app.into_make_service())
.await
.unwrap();
Ok(())
}
Some(Commands::Connect {
host: _,
token: _,
agent_name: _,
}) => todo!(),
None => todo!(),
}
}
enum AppError {
Internal(Error),
}
impl IntoResponse for AppError {
fn into_response(self) -> Response {
let (status, error_message) = match self {
AppError::Internal(e) => {
tracing::error!("failed with error: {}", e);
(
StatusCode::INTERNAL_SERVER_ERROR,
"failed with internal error",
)
}
};
let body = Json(json!({
"error": error_message,
}));
(status, body).into_response()
}
}
async fn ping() -> impl IntoResponse {
"pong!"
}
async fn enroll(
State(state): State<AppState>,
Json(req): Json<AgentEnrollReq>,
) -> Result<(), AppError> {
state
.agent
.enroll(&req.agent_name, &req.server, &req.lease)
.await
.map_err(AppError::Internal)?;
Ok(())
}
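
A client enrolls against this daemon by POSTing JSON to `/enroll`. A hypothetical sketch using the workspace's reqwest and serde_json dependencies; the address and values are placeholders, and the field names mirror the handler's use of `req.agent_name`, `req.server`, and `req.lease`:

```rust
// Hypothetical enroll client for the daemon above (not from the repository);
// address and values are placeholders.
use serde_json::json;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = reqwest::Client::new();
    client
        .post("http://127.0.0.1:3000/enroll")
        .json(&json!({
            "agent_name": "my-agent",
            "server": "http://127.0.0.1:3000",
            "lease": "lease-token",
        }))
        .send()
        .await?
        .error_for_status()?; // surface non-2xx responses as errors
    Ok(())
}
```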


@@ -1,23 +0,0 @@
[package]
name = "churn-capnp"
repository.workspace = true
description.workspace = true
readme.workspace = true
license-file.workspace = true
authors.workspace = true
version.workspace = true
edition.workspace = true
publish.workspace = true
[dependencies]
churn-domain.workspace = true
uuid.workspace = true
anyhow.workspace = true
chrono.workspace = true
capnp = "0.19.5"
[build-dependencies]
capnpc = "0.19.0"


@@ -1,10 +0,0 @@
extern crate capnpc;
fn main() {
capnpc::CompilerCommand::new()
.output_path("src/")
.src_prefix("schemas/")
.file("schemas/models.capnp")
.run()
.unwrap();
}


@@ -1,17 +0,0 @@
@0xf23adf24ffd8aca4;
struct LogEvent {
id @0 :Text;
author @1 :Text;
content @2 :Text;
datetime @3 :Int64;
}
struct Agent {
name @0 :Text;
}
struct Lease {
id @0 :Text;
lease @1 :Text;
}


@@ -1,105 +0,0 @@
use capnp::message::{Builder, HeapAllocator};
use capnp::message::{ReaderOptions, TypedReader};
use capnp::serialize::{self, BufferSegments};
use capnp::traits::Owned;
use churn_domain::{Agent, Lease, LogEvent};
mod models_capnp;
pub trait CapnpPackExt {
type Return;
fn serialize_capnp(&self) -> Vec<u8>;
fn deserialize_capnp(content: &[u8]) -> anyhow::Result<Self::Return>;
fn capnp_to_string(builder: &Builder<HeapAllocator>) -> Vec<u8> {
serialize::write_message_to_words(builder)
}
fn string_to_capnp<S>(mut content: &[u8]) -> TypedReader<BufferSegments<&[u8]>, S>
where
S: Owned,
{
let log_event =
serialize::read_message_from_flat_slice(&mut content, ReaderOptions::new()).unwrap();
log_event.into_typed::<S>()
}
}
impl CapnpPackExt for LogEvent {
type Return = Self;
fn serialize_capnp(&self) -> Vec<u8> {
let mut builder = Builder::new_default();
let mut log_event = builder.init_root::<models_capnp::log_event::Builder>();
log_event.set_id(&self.id.to_string());
log_event.set_author(&self.author);
log_event.set_content(&self.content);
log_event.set_datetime(self.timestamp.timestamp());
Self::capnp_to_string(&builder)
}
fn deserialize_capnp(content: &[u8]) -> anyhow::Result<Self> {
let log_event = Self::string_to_capnp::<models_capnp::log_event::Owned>(content);
let log_event = log_event.get()?;
Ok(Self {
id: uuid::Uuid::parse_str(log_event.get_id()?.to_str()?)?,
author: log_event.get_author()?.to_string()?,
content: log_event.get_content()?.to_string()?,
timestamp: chrono::DateTime::<chrono::Utc>::from_utc(
chrono::NaiveDateTime::from_timestamp_opt(log_event.get_datetime(), 0).unwrap(),
chrono::Utc,
),
})
}
}
impl CapnpPackExt for Agent {
type Return = Self;
fn serialize_capnp(&self) -> Vec<u8> {
let mut builder = Builder::new_default();
let mut item = builder.init_root::<models_capnp::agent::Builder>();
item.set_name(&self.name);
Self::capnp_to_string(&builder)
}
fn deserialize_capnp(content: &[u8]) -> anyhow::Result<Self::Return> {
let item = Self::string_to_capnp::<models_capnp::agent::Owned>(content);
let item = item.get()?;
Ok(Self {
name: item.get_name()?.to_string()?,
})
}
}
impl CapnpPackExt for Lease {
type Return = Self;
fn serialize_capnp(&self) -> Vec<u8> {
let mut builder = Builder::new_default();
let mut item = builder.init_root::<models_capnp::lease::Builder>();
item.set_id(&self.id.to_string());
item.set_lease(&self.lease.to_string());
Self::capnp_to_string(&builder)
}
fn deserialize_capnp(content: &[u8]) -> anyhow::Result<Self::Return> {
let item = Self::string_to_capnp::<models_capnp::lease::Owned>(content);
let item = item.get()?;
Ok(Self {
id: uuid::Uuid::parse_str(item.get_id()?.to_str()?)?,
lease: uuid::Uuid::parse_str(item.get_lease()?.to_str()?)?,
})
}
}
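
A round-trip sketch for the `CapnpPackExt` trait above, assuming `churn_domain::Agent` is constructible with the `name` field the impl uses; values are placeholders:

```rust
// Illustrative round-trip (not from the repository); assumes churn_domain::Agent
// exposes the `name` field used by the impl above.
use churn_capnp::CapnpPackExt;
use churn_domain::Agent;

fn main() -> anyhow::Result<()> {
    let agent = Agent { name: "my-agent".to_string() };
    let bytes = agent.serialize_capnp(); // encode into Cap'n Proto message words
    let decoded = Agent::deserialize_capnp(&bytes)?; // decode back into the domain type
    assert_eq!(decoded.name, agent.name);
    Ok(())
}
```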


@@ -1,771 +0,0 @@
// @generated by the capnpc-rust plugin to the Cap'n Proto schema compiler.
// DO NOT EDIT.
// source: models.capnp
pub mod log_event {
#[derive(Copy, Clone)]
pub struct Owned(());
impl ::capnp::introspect::Introspect for Owned { fn introspect() -> ::capnp::introspect::Type { ::capnp::introspect::TypeVariant::Struct(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types, annotation_types: _private::get_annotation_types }).into() } }
impl ::capnp::traits::Owned for Owned { type Reader<'a> = Reader<'a>; type Builder<'a> = Builder<'a>; }
impl ::capnp::traits::OwnedStruct for Owned { type Reader<'a> = Reader<'a>; type Builder<'a> = Builder<'a>; }
impl ::capnp::traits::Pipelined for Owned { type Pipeline = Pipeline; }
pub struct Reader<'a> { reader: ::capnp::private::layout::StructReader<'a> }
impl <'a,> ::core::marker::Copy for Reader<'a,> {}
impl <'a,> ::core::clone::Clone for Reader<'a,> {
fn clone(&self) -> Self { *self }
}
impl <'a,> ::capnp::traits::HasTypeId for Reader<'a,> {
const TYPE_ID: u64 = _private::TYPE_ID;
}
impl <'a,> ::core::convert::From<::capnp::private::layout::StructReader<'a>> for Reader<'a,> {
fn from(reader: ::capnp::private::layout::StructReader<'a>) -> Self {
Self { reader, }
}
}
impl <'a,> ::core::convert::From<Reader<'a,>> for ::capnp::dynamic_value::Reader<'a> {
fn from(reader: Reader<'a,>) -> Self {
Self::Struct(::capnp::dynamic_struct::Reader::new(reader.reader, ::capnp::schema::StructSchema::new(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types::<>, annotation_types: _private::get_annotation_types::<>})))
}
}
impl <'a,> ::core::fmt::Debug for Reader<'a,> {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::result::Result<(), ::core::fmt::Error> {
core::fmt::Debug::fmt(&::core::convert::Into::<::capnp::dynamic_value::Reader<'_>>::into(*self), f)
}
}
impl <'a,> ::capnp::traits::FromPointerReader<'a> for Reader<'a,> {
fn get_from_pointer(reader: &::capnp::private::layout::PointerReader<'a>, default: ::core::option::Option<&'a [::capnp::Word]>) -> ::capnp::Result<Self> {
::core::result::Result::Ok(reader.get_struct(default)?.into())
}
}
impl <'a,> ::capnp::traits::IntoInternalStructReader<'a> for Reader<'a,> {
fn into_internal_struct_reader(self) -> ::capnp::private::layout::StructReader<'a> {
self.reader
}
}
impl <'a,> ::capnp::traits::Imbue<'a> for Reader<'a,> {
fn imbue(&mut self, cap_table: &'a ::capnp::private::layout::CapTable) {
self.reader.imbue(::capnp::private::layout::CapTableReader::Plain(cap_table))
}
}
impl <'a,> Reader<'a,> {
pub fn reborrow(&self) -> Reader<'_,> {
Self { .. *self }
}
pub fn total_size(&self) -> ::capnp::Result<::capnp::MessageSize> {
self.reader.total_size()
}
#[inline]
pub fn get_id(self) -> ::capnp::Result<::capnp::text::Reader<'a>> {
::capnp::traits::FromPointerReader::get_from_pointer(&self.reader.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn has_id(&self) -> bool {
!self.reader.get_pointer_field(0).is_null()
}
#[inline]
pub fn get_author(self) -> ::capnp::Result<::capnp::text::Reader<'a>> {
::capnp::traits::FromPointerReader::get_from_pointer(&self.reader.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn has_author(&self) -> bool {
!self.reader.get_pointer_field(1).is_null()
}
#[inline]
pub fn get_content(self) -> ::capnp::Result<::capnp::text::Reader<'a>> {
::capnp::traits::FromPointerReader::get_from_pointer(&self.reader.get_pointer_field(2), ::core::option::Option::None)
}
#[inline]
pub fn has_content(&self) -> bool {
!self.reader.get_pointer_field(2).is_null()
}
#[inline]
pub fn get_datetime(self) -> i64 {
self.reader.get_data_field::<i64>(0)
}
}
pub struct Builder<'a> { builder: ::capnp::private::layout::StructBuilder<'a> }
impl <'a,> ::capnp::traits::HasStructSize for Builder<'a,> {
const STRUCT_SIZE: ::capnp::private::layout::StructSize = ::capnp::private::layout::StructSize { data: 1, pointers: 3 };
}
impl <'a,> ::capnp::traits::HasTypeId for Builder<'a,> {
const TYPE_ID: u64 = _private::TYPE_ID;
}
impl <'a,> ::core::convert::From<::capnp::private::layout::StructBuilder<'a>> for Builder<'a,> {
fn from(builder: ::capnp::private::layout::StructBuilder<'a>) -> Self {
Self { builder, }
}
}
impl <'a,> ::core::convert::From<Builder<'a,>> for ::capnp::dynamic_value::Builder<'a> {
fn from(builder: Builder<'a,>) -> Self {
Self::Struct(::capnp::dynamic_struct::Builder::new(builder.builder, ::capnp::schema::StructSchema::new(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types::<>, annotation_types: _private::get_annotation_types::<>})))
}
}
impl <'a,> ::capnp::traits::ImbueMut<'a> for Builder<'a,> {
fn imbue_mut(&mut self, cap_table: &'a mut ::capnp::private::layout::CapTable) {
self.builder.imbue(::capnp::private::layout::CapTableBuilder::Plain(cap_table))
}
}
impl <'a,> ::capnp::traits::FromPointerBuilder<'a> for Builder<'a,> {
fn init_pointer(builder: ::capnp::private::layout::PointerBuilder<'a>, _size: u32) -> Self {
builder.init_struct(<Self as ::capnp::traits::HasStructSize>::STRUCT_SIZE).into()
}
fn get_from_pointer(builder: ::capnp::private::layout::PointerBuilder<'a>, default: ::core::option::Option<&'a [::capnp::Word]>) -> ::capnp::Result<Self> {
::core::result::Result::Ok(builder.get_struct(<Self as ::capnp::traits::HasStructSize>::STRUCT_SIZE, default)?.into())
}
}
impl <'a,> ::capnp::traits::SetterInput<Owned<>> for Reader<'a,> {
fn set_pointer_builder(mut pointer: ::capnp::private::layout::PointerBuilder<'_>, value: Self, canonicalize: bool) -> ::capnp::Result<()> { pointer.set_struct(&value.reader, canonicalize) }
}
impl <'a,> Builder<'a,> {
pub fn into_reader(self) -> Reader<'a,> {
self.builder.into_reader().into()
}
pub fn reborrow(&mut self) -> Builder<'_,> {
Builder { builder: self.builder.reborrow() }
}
pub fn reborrow_as_reader(&self) -> Reader<'_,> {
self.builder.as_reader().into()
}
pub fn total_size(&self) -> ::capnp::Result<::capnp::MessageSize> {
self.builder.as_reader().total_size()
}
#[inline]
pub fn get_id(self) -> ::capnp::Result<::capnp::text::Builder<'a>> {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_id(&mut self, value: impl ::capnp::traits::SetterInput<::capnp::text::Owned>) {
::capnp::traits::SetterInput::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false).unwrap()
}
#[inline]
pub fn init_id(self, size: u32) -> ::capnp::text::Builder<'a> {
self.builder.get_pointer_field(0).init_text(size)
}
#[inline]
pub fn has_id(&self) -> bool {
!self.builder.is_pointer_field_null(0)
}
#[inline]
pub fn get_author(self) -> ::capnp::Result<::capnp::text::Builder<'a>> {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_author(&mut self, value: impl ::capnp::traits::SetterInput<::capnp::text::Owned>) {
::capnp::traits::SetterInput::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false).unwrap()
}
#[inline]
pub fn init_author(self, size: u32) -> ::capnp::text::Builder<'a> {
self.builder.get_pointer_field(1).init_text(size)
}
#[inline]
pub fn has_author(&self) -> bool {
!self.builder.is_pointer_field_null(1)
}
#[inline]
pub fn get_content(self) -> ::capnp::Result<::capnp::text::Builder<'a>> {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(2), ::core::option::Option::None)
}
#[inline]
pub fn set_content(&mut self, value: impl ::capnp::traits::SetterInput<::capnp::text::Owned>) {
::capnp::traits::SetterInput::set_pointer_builder(self.builder.reborrow().get_pointer_field(2), value, false).unwrap()
}
#[inline]
pub fn init_content(self, size: u32) -> ::capnp::text::Builder<'a> {
self.builder.get_pointer_field(2).init_text(size)
}
#[inline]
pub fn has_content(&self) -> bool {
!self.builder.is_pointer_field_null(2)
}
#[inline]
pub fn get_datetime(self) -> i64 {
self.builder.get_data_field::<i64>(0)
}
#[inline]
pub fn set_datetime(&mut self, value: i64) {
self.builder.set_data_field::<i64>(0, value);
}
}
pub struct Pipeline { _typeless: ::capnp::any_pointer::Pipeline }
impl ::capnp::capability::FromTypelessPipeline for Pipeline {
fn new(typeless: ::capnp::any_pointer::Pipeline) -> Self {
Self { _typeless: typeless, }
}
}
impl Pipeline {
}
mod _private {
pub static ENCODED_NODE: [::capnp::Word; 78] = [
::capnp::word(0, 0, 0, 0, 5, 0, 6, 0),
::capnp::word(50, 25, 14, 89, 91, 12, 143, 231),
::capnp::word(13, 0, 0, 0, 1, 0, 1, 0),
::capnp::word(164, 172, 216, 255, 36, 223, 58, 242),
::capnp::word(3, 0, 7, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(21, 0, 0, 0, 178, 0, 0, 0),
::capnp::word(29, 0, 0, 0, 7, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(25, 0, 0, 0, 231, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(109, 111, 100, 101, 108, 115, 46, 99),
::capnp::word(97, 112, 110, 112, 58, 76, 111, 103),
::capnp::word(69, 118, 101, 110, 116, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 1, 0, 1, 0),
::capnp::word(16, 0, 0, 0, 3, 0, 4, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(97, 0, 0, 0, 26, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(92, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(104, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(1, 0, 0, 0, 1, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 1, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(101, 0, 0, 0, 58, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(96, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(108, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(2, 0, 0, 0, 2, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 2, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(105, 0, 0, 0, 66, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(100, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(112, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(3, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 3, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(109, 0, 0, 0, 74, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(108, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(120, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(105, 100, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(97, 117, 116, 104, 111, 114, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(99, 111, 110, 116, 101, 110, 116, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(100, 97, 116, 101, 116, 105, 109, 101),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(5, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(5, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
];
pub fn get_field_types(index: u16) -> ::capnp::introspect::Type {
match index {
0 => <::capnp::text::Owned as ::capnp::introspect::Introspect>::introspect(),
1 => <::capnp::text::Owned as ::capnp::introspect::Introspect>::introspect(),
2 => <::capnp::text::Owned as ::capnp::introspect::Introspect>::introspect(),
3 => <i64 as ::capnp::introspect::Introspect>::introspect(),
_ => panic!("invalid field index {}", index),
}
}
pub fn get_annotation_types(child_index: Option<u16>, index: u32) -> ::capnp::introspect::Type {
panic!("invalid annotation indices ({:?}, {}) ", child_index, index)
}
pub static RAW_SCHEMA: ::capnp::introspect::RawStructSchema = ::capnp::introspect::RawStructSchema {
encoded_node: &ENCODED_NODE,
nonunion_members: NONUNION_MEMBERS,
members_by_discriminant: MEMBERS_BY_DISCRIMINANT,
members_by_name: MEMBERS_BY_NAME,
};
pub static NONUNION_MEMBERS : &[u16] = &[0,1,2,3];
pub static MEMBERS_BY_DISCRIMINANT : &[u16] = &[];
pub static MEMBERS_BY_NAME : &[u16] = &[1,2,3,0];
pub const TYPE_ID: u64 = 0xe78f_0c5b_590e_1932;
}
}
pub mod agent {
#[derive(Copy, Clone)]
pub struct Owned(());
impl ::capnp::introspect::Introspect for Owned { fn introspect() -> ::capnp::introspect::Type { ::capnp::introspect::TypeVariant::Struct(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types, annotation_types: _private::get_annotation_types }).into() } }
impl ::capnp::traits::Owned for Owned { type Reader<'a> = Reader<'a>; type Builder<'a> = Builder<'a>; }
impl ::capnp::traits::OwnedStruct for Owned { type Reader<'a> = Reader<'a>; type Builder<'a> = Builder<'a>; }
impl ::capnp::traits::Pipelined for Owned { type Pipeline = Pipeline; }
pub struct Reader<'a> { reader: ::capnp::private::layout::StructReader<'a> }
impl <'a,> ::core::marker::Copy for Reader<'a,> {}
impl <'a,> ::core::clone::Clone for Reader<'a,> {
fn clone(&self) -> Self { *self }
}
impl <'a,> ::capnp::traits::HasTypeId for Reader<'a,> {
const TYPE_ID: u64 = _private::TYPE_ID;
}
impl <'a,> ::core::convert::From<::capnp::private::layout::StructReader<'a>> for Reader<'a,> {
fn from(reader: ::capnp::private::layout::StructReader<'a>) -> Self {
Self { reader, }
}
}
impl <'a,> ::core::convert::From<Reader<'a,>> for ::capnp::dynamic_value::Reader<'a> {
fn from(reader: Reader<'a,>) -> Self {
Self::Struct(::capnp::dynamic_struct::Reader::new(reader.reader, ::capnp::schema::StructSchema::new(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types::<>, annotation_types: _private::get_annotation_types::<>})))
}
}
impl <'a,> ::core::fmt::Debug for Reader<'a,> {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::result::Result<(), ::core::fmt::Error> {
core::fmt::Debug::fmt(&::core::convert::Into::<::capnp::dynamic_value::Reader<'_>>::into(*self), f)
}
}
impl <'a,> ::capnp::traits::FromPointerReader<'a> for Reader<'a,> {
fn get_from_pointer(reader: &::capnp::private::layout::PointerReader<'a>, default: ::core::option::Option<&'a [::capnp::Word]>) -> ::capnp::Result<Self> {
::core::result::Result::Ok(reader.get_struct(default)?.into())
}
}
impl <'a,> ::capnp::traits::IntoInternalStructReader<'a> for Reader<'a,> {
fn into_internal_struct_reader(self) -> ::capnp::private::layout::StructReader<'a> {
self.reader
}
}
impl <'a,> ::capnp::traits::Imbue<'a> for Reader<'a,> {
fn imbue(&mut self, cap_table: &'a ::capnp::private::layout::CapTable) {
self.reader.imbue(::capnp::private::layout::CapTableReader::Plain(cap_table))
}
}
impl <'a,> Reader<'a,> {
pub fn reborrow(&self) -> Reader<'_,> {
Self { .. *self }
}
pub fn total_size(&self) -> ::capnp::Result<::capnp::MessageSize> {
self.reader.total_size()
}
#[inline]
pub fn get_name(self) -> ::capnp::Result<::capnp::text::Reader<'a>> {
::capnp::traits::FromPointerReader::get_from_pointer(&self.reader.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn has_name(&self) -> bool {
!self.reader.get_pointer_field(0).is_null()
}
}
pub struct Builder<'a> { builder: ::capnp::private::layout::StructBuilder<'a> }
impl <'a,> ::capnp::traits::HasStructSize for Builder<'a,> {
const STRUCT_SIZE: ::capnp::private::layout::StructSize = ::capnp::private::layout::StructSize { data: 0, pointers: 1 };
}
impl <'a,> ::capnp::traits::HasTypeId for Builder<'a,> {
const TYPE_ID: u64 = _private::TYPE_ID;
}
impl <'a,> ::core::convert::From<::capnp::private::layout::StructBuilder<'a>> for Builder<'a,> {
fn from(builder: ::capnp::private::layout::StructBuilder<'a>) -> Self {
Self { builder, }
}
}
impl <'a,> ::core::convert::From<Builder<'a,>> for ::capnp::dynamic_value::Builder<'a> {
fn from(builder: Builder<'a,>) -> Self {
Self::Struct(::capnp::dynamic_struct::Builder::new(builder.builder, ::capnp::schema::StructSchema::new(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types::<>, annotation_types: _private::get_annotation_types::<>})))
}
}
impl <'a,> ::capnp::traits::ImbueMut<'a> for Builder<'a,> {
fn imbue_mut(&mut self, cap_table: &'a mut ::capnp::private::layout::CapTable) {
self.builder.imbue(::capnp::private::layout::CapTableBuilder::Plain(cap_table))
}
}
impl <'a,> ::capnp::traits::FromPointerBuilder<'a> for Builder<'a,> {
fn init_pointer(builder: ::capnp::private::layout::PointerBuilder<'a>, _size: u32) -> Self {
builder.init_struct(<Self as ::capnp::traits::HasStructSize>::STRUCT_SIZE).into()
}
fn get_from_pointer(builder: ::capnp::private::layout::PointerBuilder<'a>, default: ::core::option::Option<&'a [::capnp::Word]>) -> ::capnp::Result<Self> {
::core::result::Result::Ok(builder.get_struct(<Self as ::capnp::traits::HasStructSize>::STRUCT_SIZE, default)?.into())
}
}
impl <'a,> ::capnp::traits::SetterInput<Owned<>> for Reader<'a,> {
fn set_pointer_builder(mut pointer: ::capnp::private::layout::PointerBuilder<'_>, value: Self, canonicalize: bool) -> ::capnp::Result<()> { pointer.set_struct(&value.reader, canonicalize) }
}
impl <'a,> Builder<'a,> {
pub fn into_reader(self) -> Reader<'a,> {
self.builder.into_reader().into()
}
pub fn reborrow(&mut self) -> Builder<'_,> {
Builder { builder: self.builder.reborrow() }
}
pub fn reborrow_as_reader(&self) -> Reader<'_,> {
self.builder.as_reader().into()
}
pub fn total_size(&self) -> ::capnp::Result<::capnp::MessageSize> {
self.builder.as_reader().total_size()
}
#[inline]
pub fn get_name(self) -> ::capnp::Result<::capnp::text::Builder<'a>> {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_name(&mut self, value: impl ::capnp::traits::SetterInput<::capnp::text::Owned>) {
::capnp::traits::SetterInput::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false).unwrap()
}
#[inline]
pub fn init_name(self, size: u32) -> ::capnp::text::Builder<'a> {
self.builder.get_pointer_field(0).init_text(size)
}
#[inline]
pub fn has_name(&self) -> bool {
!self.builder.is_pointer_field_null(0)
}
}
pub struct Pipeline { _typeless: ::capnp::any_pointer::Pipeline }
impl ::capnp::capability::FromTypelessPipeline for Pipeline {
fn new(typeless: ::capnp::any_pointer::Pipeline) -> Self {
Self { _typeless: typeless, }
}
}
impl Pipeline {
}
mod _private {
pub static ENCODED_NODE: [::capnp::Word; 32] = [
::capnp::word(0, 0, 0, 0, 5, 0, 6, 0),
::capnp::word(160, 129, 44, 52, 151, 203, 164, 244),
::capnp::word(13, 0, 0, 0, 1, 0, 0, 0),
::capnp::word(164, 172, 216, 255, 36, 223, 58, 242),
::capnp::word(1, 0, 7, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(21, 0, 0, 0, 154, 0, 0, 0),
::capnp::word(29, 0, 0, 0, 7, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(25, 0, 0, 0, 63, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(109, 111, 100, 101, 108, 115, 46, 99),
::capnp::word(97, 112, 110, 112, 58, 65, 103, 101),
::capnp::word(110, 116, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 1, 0, 1, 0),
::capnp::word(4, 0, 0, 0, 3, 0, 4, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(13, 0, 0, 0, 42, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(8, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(20, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(110, 97, 109, 101, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
];
pub fn get_field_types(index: u16) -> ::capnp::introspect::Type {
match index {
0 => <::capnp::text::Owned as ::capnp::introspect::Introspect>::introspect(),
_ => panic!("invalid field index {}", index),
}
}
pub fn get_annotation_types(child_index: Option<u16>, index: u32) -> ::capnp::introspect::Type {
panic!("invalid annotation indices ({:?}, {}) ", child_index, index)
}
pub static RAW_SCHEMA: ::capnp::introspect::RawStructSchema = ::capnp::introspect::RawStructSchema {
encoded_node: &ENCODED_NODE,
nonunion_members: NONUNION_MEMBERS,
members_by_discriminant: MEMBERS_BY_DISCRIMINANT,
members_by_name: MEMBERS_BY_NAME,
};
pub static NONUNION_MEMBERS : &[u16] = &[0];
pub static MEMBERS_BY_DISCRIMINANT : &[u16] = &[];
pub static MEMBERS_BY_NAME : &[u16] = &[0];
pub const TYPE_ID: u64 = 0xf4a4_cb97_342c_81a0;
}
}
pub mod lease {
#[derive(Copy, Clone)]
pub struct Owned(());
impl ::capnp::introspect::Introspect for Owned { fn introspect() -> ::capnp::introspect::Type { ::capnp::introspect::TypeVariant::Struct(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types, annotation_types: _private::get_annotation_types }).into() } }
impl ::capnp::traits::Owned for Owned { type Reader<'a> = Reader<'a>; type Builder<'a> = Builder<'a>; }
impl ::capnp::traits::OwnedStruct for Owned { type Reader<'a> = Reader<'a>; type Builder<'a> = Builder<'a>; }
impl ::capnp::traits::Pipelined for Owned { type Pipeline = Pipeline; }
pub struct Reader<'a> { reader: ::capnp::private::layout::StructReader<'a> }
impl <'a,> ::core::marker::Copy for Reader<'a,> {}
impl <'a,> ::core::clone::Clone for Reader<'a,> {
fn clone(&self) -> Self { *self }
}
impl <'a,> ::capnp::traits::HasTypeId for Reader<'a,> {
const TYPE_ID: u64 = _private::TYPE_ID;
}
impl <'a,> ::core::convert::From<::capnp::private::layout::StructReader<'a>> for Reader<'a,> {
fn from(reader: ::capnp::private::layout::StructReader<'a>) -> Self {
Self { reader, }
}
}
impl <'a,> ::core::convert::From<Reader<'a,>> for ::capnp::dynamic_value::Reader<'a> {
fn from(reader: Reader<'a,>) -> Self {
Self::Struct(::capnp::dynamic_struct::Reader::new(reader.reader, ::capnp::schema::StructSchema::new(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types::<>, annotation_types: _private::get_annotation_types::<>})))
}
}
impl <'a,> ::core::fmt::Debug for Reader<'a,> {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::result::Result<(), ::core::fmt::Error> {
core::fmt::Debug::fmt(&::core::convert::Into::<::capnp::dynamic_value::Reader<'_>>::into(*self), f)
}
}
impl <'a,> ::capnp::traits::FromPointerReader<'a> for Reader<'a,> {
fn get_from_pointer(reader: &::capnp::private::layout::PointerReader<'a>, default: ::core::option::Option<&'a [::capnp::Word]>) -> ::capnp::Result<Self> {
::core::result::Result::Ok(reader.get_struct(default)?.into())
}
}
impl <'a,> ::capnp::traits::IntoInternalStructReader<'a> for Reader<'a,> {
fn into_internal_struct_reader(self) -> ::capnp::private::layout::StructReader<'a> {
self.reader
}
}
impl <'a,> ::capnp::traits::Imbue<'a> for Reader<'a,> {
fn imbue(&mut self, cap_table: &'a ::capnp::private::layout::CapTable) {
self.reader.imbue(::capnp::private::layout::CapTableReader::Plain(cap_table))
}
}
impl <'a,> Reader<'a,> {
pub fn reborrow(&self) -> Reader<'_,> {
Self { .. *self }
}
pub fn total_size(&self) -> ::capnp::Result<::capnp::MessageSize> {
self.reader.total_size()
}
#[inline]
pub fn get_id(self) -> ::capnp::Result<::capnp::text::Reader<'a>> {
::capnp::traits::FromPointerReader::get_from_pointer(&self.reader.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn has_id(&self) -> bool {
!self.reader.get_pointer_field(0).is_null()
}
#[inline]
pub fn get_lease(self) -> ::capnp::Result<::capnp::text::Reader<'a>> {
::capnp::traits::FromPointerReader::get_from_pointer(&self.reader.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn has_lease(&self) -> bool {
!self.reader.get_pointer_field(1).is_null()
}
}
pub struct Builder<'a> { builder: ::capnp::private::layout::StructBuilder<'a> }
impl <'a,> ::capnp::traits::HasStructSize for Builder<'a,> {
const STRUCT_SIZE: ::capnp::private::layout::StructSize = ::capnp::private::layout::StructSize { data: 0, pointers: 2 };
}
impl <'a,> ::capnp::traits::HasTypeId for Builder<'a,> {
const TYPE_ID: u64 = _private::TYPE_ID;
}
impl <'a,> ::core::convert::From<::capnp::private::layout::StructBuilder<'a>> for Builder<'a,> {
fn from(builder: ::capnp::private::layout::StructBuilder<'a>) -> Self {
Self { builder, }
}
}
impl <'a,> ::core::convert::From<Builder<'a,>> for ::capnp::dynamic_value::Builder<'a> {
fn from(builder: Builder<'a,>) -> Self {
Self::Struct(::capnp::dynamic_struct::Builder::new(builder.builder, ::capnp::schema::StructSchema::new(::capnp::introspect::RawBrandedStructSchema { generic: &_private::RAW_SCHEMA, field_types: _private::get_field_types::<>, annotation_types: _private::get_annotation_types::<>})))
}
}
impl <'a,> ::capnp::traits::ImbueMut<'a> for Builder<'a,> {
fn imbue_mut(&mut self, cap_table: &'a mut ::capnp::private::layout::CapTable) {
self.builder.imbue(::capnp::private::layout::CapTableBuilder::Plain(cap_table))
}
}
impl <'a,> ::capnp::traits::FromPointerBuilder<'a> for Builder<'a,> {
fn init_pointer(builder: ::capnp::private::layout::PointerBuilder<'a>, _size: u32) -> Self {
builder.init_struct(<Self as ::capnp::traits::HasStructSize>::STRUCT_SIZE).into()
}
fn get_from_pointer(builder: ::capnp::private::layout::PointerBuilder<'a>, default: ::core::option::Option<&'a [::capnp::Word]>) -> ::capnp::Result<Self> {
::core::result::Result::Ok(builder.get_struct(<Self as ::capnp::traits::HasStructSize>::STRUCT_SIZE, default)?.into())
}
}
impl <'a,> ::capnp::traits::SetterInput<Owned<>> for Reader<'a,> {
fn set_pointer_builder(mut pointer: ::capnp::private::layout::PointerBuilder<'_>, value: Self, canonicalize: bool) -> ::capnp::Result<()> { pointer.set_struct(&value.reader, canonicalize) }
}
impl <'a,> Builder<'a,> {
pub fn into_reader(self) -> Reader<'a,> {
self.builder.into_reader().into()
}
pub fn reborrow(&mut self) -> Builder<'_,> {
Builder { builder: self.builder.reborrow() }
}
pub fn reborrow_as_reader(&self) -> Reader<'_,> {
self.builder.as_reader().into()
}
pub fn total_size(&self) -> ::capnp::Result<::capnp::MessageSize> {
self.builder.as_reader().total_size()
}
#[inline]
pub fn get_id(self) -> ::capnp::Result<::capnp::text::Builder<'a>> {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_id(&mut self, value: impl ::capnp::traits::SetterInput<::capnp::text::Owned>) {
::capnp::traits::SetterInput::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false).unwrap()
}
#[inline]
pub fn init_id(self, size: u32) -> ::capnp::text::Builder<'a> {
self.builder.get_pointer_field(0).init_text(size)
}
#[inline]
pub fn has_id(&self) -> bool {
!self.builder.is_pointer_field_null(0)
}
#[inline]
pub fn get_lease(self) -> ::capnp::Result<::capnp::text::Builder<'a>> {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_lease(&mut self, value: impl ::capnp::traits::SetterInput<::capnp::text::Owned>) {
::capnp::traits::SetterInput::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false).unwrap()
}
#[inline]
pub fn init_lease(self, size: u32) -> ::capnp::text::Builder<'a> {
self.builder.get_pointer_field(1).init_text(size)
}
#[inline]
pub fn has_lease(&self) -> bool {
!self.builder.is_pointer_field_null(1)
}
}
pub struct Pipeline { _typeless: ::capnp::any_pointer::Pipeline }
impl ::capnp::capability::FromTypelessPipeline for Pipeline {
fn new(typeless: ::capnp::any_pointer::Pipeline) -> Self {
Self { _typeless: typeless, }
}
}
impl Pipeline {
}
mod _private {
pub static ENCODED_NODE: [::capnp::Word; 47] = [
::capnp::word(0, 0, 0, 0, 5, 0, 6, 0),
::capnp::word(98, 86, 14, 197, 84, 8, 214, 176),
::capnp::word(13, 0, 0, 0, 1, 0, 0, 0),
::capnp::word(164, 172, 216, 255, 36, 223, 58, 242),
::capnp::word(2, 0, 7, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(21, 0, 0, 0, 154, 0, 0, 0),
::capnp::word(29, 0, 0, 0, 7, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(25, 0, 0, 0, 119, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(109, 111, 100, 101, 108, 115, 46, 99),
::capnp::word(97, 112, 110, 112, 58, 76, 101, 97),
::capnp::word(115, 101, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 1, 0, 1, 0),
::capnp::word(8, 0, 0, 0, 3, 0, 4, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(41, 0, 0, 0, 26, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(36, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(48, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(1, 0, 0, 0, 1, 0, 0, 0),
::capnp::word(0, 0, 1, 0, 1, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(45, 0, 0, 0, 50, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(40, 0, 0, 0, 3, 0, 1, 0),
::capnp::word(52, 0, 0, 0, 2, 0, 1, 0),
::capnp::word(105, 100, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(108, 101, 97, 115, 101, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(12, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
::capnp::word(0, 0, 0, 0, 0, 0, 0, 0),
];
pub fn get_field_types(index: u16) -> ::capnp::introspect::Type {
match index {
0 => <::capnp::text::Owned as ::capnp::introspect::Introspect>::introspect(),
1 => <::capnp::text::Owned as ::capnp::introspect::Introspect>::introspect(),
_ => panic!("invalid field index {}", index),
}
}
pub fn get_annotation_types(child_index: Option<u16>, index: u32) -> ::capnp::introspect::Type {
panic!("invalid annotation indices ({:?}, {}) ", child_index, index)
}
pub static RAW_SCHEMA: ::capnp::introspect::RawStructSchema = ::capnp::introspect::RawStructSchema {
encoded_node: &ENCODED_NODE,
nonunion_members: NONUNION_MEMBERS,
members_by_discriminant: MEMBERS_BY_DISCRIMINANT,
members_by_name: MEMBERS_BY_NAME,
};
pub static NONUNION_MEMBERS : &[u16] = &[0,1];
pub static MEMBERS_BY_DISCRIMINANT : &[u16] = &[];
pub static MEMBERS_BY_NAME : &[u16] = &[0,1];
pub const TYPE_ID: u64 = 0xb0d6_0854_c50e_5662;
}
}

View File

@@ -1,43 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v0.1.0 (2023-08-26)
### Chore
- <csr-id-1ae70ac5258ae9f8f5471923fefd3e8ab02f46c1/> with changelog
### New Features
- <csr-id-8f8c5fd41aaa82a495dd0933060f0a3a095bbaf1/> with basic package
- <csr-id-821e14fb1256957a107220c6c775565f5abc58c4/> with publish
- <csr-id-e0545c726c44dccfb8ea179266c1da93389c07e4/> with monitoring
- <csr-id-569f5272e667deeef9f269db5eaf3dec57e2df1c/> with monitor
### Commit Statistics
<csr-read-only-do-not-edit/>
- 6 commits contributed to the release.
- 5 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- With changelog (1ae70ac)
- Release churn-domain v0.1.0, churn v0.1.0 (34bc81e)
- With basic package (8f8c5fd)
- With publish (821e14f)
- With monitoring (e0545c7)
- With monitor (569f527)
</details>

View File

@@ -1,23 +0,0 @@
[package]
name = "churn-domain"
description.workspace = true
authors.workspace = true
license-file.workspace = true
version= "0.1.0"
edition.workspace = true
publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow.workspace = true
tokio.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
clap.workspace = true
dotenv.workspace = true
axum.workspace = true
reqwest.workspace = true
serde.workspace = true
uuid.workspace = true
chrono.workspace = true

View File

@@ -1,55 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct LeaseResp {
pub token: String,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct AgentEnrollReq {
pub lease: String,
pub server: String,
pub agent_name: String,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ServerEnrollReq {
pub lease: String,
pub agent_name: String,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ServerMonitorResp {
pub cursor: Option<uuid::Uuid>,
pub logs: Vec<String>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LogEvent {
pub id: uuid::Uuid,
pub author: String,
pub content: String,
pub timestamp: chrono::DateTime<chrono::Utc>,
}
impl LogEvent {
pub fn new(author: impl Into<String>, content: impl Into<String>) -> Self {
Self {
id: uuid::Uuid::new_v4(),
author: author.into(),
content: content.into(),
timestamp: chrono::Utc::now(),
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Agent {
pub name: String,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Lease {
pub id: uuid::Uuid,
pub lease: uuid::Uuid,
}

View File

@@ -1,44 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v0.1.0 (2023-08-26)
### New Features
- <csr-id-8f8c5fd41aaa82a495dd0933060f0a3a095bbaf1/> with basic package
- <csr-id-821e14fb1256957a107220c6c775565f5abc58c4/> with publish
- <csr-id-e0545c726c44dccfb8ea179266c1da93389c07e4/> with monitoring
- <csr-id-569f5272e667deeef9f269db5eaf3dec57e2df1c/> with monitor
- <csr-id-8c41e1004c11bc3018d36a72be6e38b2e410c362/> with enroll
- <csr-id-97978df287ee42f523f509ac686a13fa0400a026/> add initial churn
- <csr-id-f61d0bbf120607e59145a80b65985ab93c938522/> add simple health check
### Commit Statistics
<csr-read-only-do-not-edit/>
- 8 commits contributed to the release over the course of 2 calendar days.
- 7 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- Release churn-domain v0.1.0, churn v0.1.0 (34bc81e)
- With basic package (8f8c5fd)
- With publish (821e14f)
- With monitoring (e0545c7)
- With monitor (569f527)
- With enroll (8c41e10)
- Add initial churn (97978df)
- Add simple health check (f61d0bb)
</details>

View File

@@ -1,29 +0,0 @@
[package]
name = "churn-server"
authors.workspace = true
description.workspace = true
license-file.workspace = true
version= "0.1.0"
edition.workspace = true
publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
churn-domain.workspace = true
churn-capnp.workspace = true
anyhow.workspace = true
tokio.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
clap.workspace = true
dotenv.workspace = true
axum.workspace = true
serde.workspace = true
serde_json.workspace = true
uuid.workspace = true
async-trait.workspace = true
itertools.workspace = true
sled.workspace = true

View File

@@ -1,60 +0,0 @@
use std::sync::Arc;
use axum::async_trait;
use churn_capnp::CapnpPackExt;
use churn_domain::{Agent, ServerEnrollReq};
use crate::db::Db;
#[derive(Clone)]
pub struct AgentService(Arc<dyn AgentServiceTrait + Send + Sync + 'static>);
impl AgentService {
pub fn new(db: Db) -> Self {
Self(Arc::new(DefaultAgentService::new(db)))
}
}
impl std::ops::Deref for AgentService {
type Target = Arc<dyn AgentServiceTrait + Send + Sync + 'static>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
struct DefaultAgentService {
agents: Db,
}
impl DefaultAgentService {
pub fn new(db: Db) -> Self {
Self { agents: db }
}
}
#[async_trait]
pub trait AgentServiceTrait {
async fn enroll(&self, req: ServerEnrollReq) -> anyhow::Result<String>;
}
#[async_trait]
impl AgentServiceTrait for DefaultAgentService {
async fn enroll(&self, req: ServerEnrollReq) -> anyhow::Result<String> {
let agent_name = req.agent_name;
self.agents
.insert(
"agents",
&agent_name,
&Agent {
name: agent_name.clone(),
}
.serialize_capnp(),
)
.await?;
Ok(agent_name)
}
}

View File

@@ -1,64 +0,0 @@
use core::slice::SlicePattern;
use std::path::{Path};
use std::sync::Arc;
use async_trait::async_trait;
#[derive(Clone)]
pub struct Db(Arc<dyn DbTrait + Send + Sync + 'static>);
impl Db {
pub fn new_sled(path: &Path) -> Self {
Self(Arc::new(DefaultDb::new(path)))
}
}
impl std::ops::Deref for Db {
type Target = Arc<dyn DbTrait + Send + Sync + 'static>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
struct DefaultDb {
db: sled::Db,
}
impl DefaultDb {
pub fn new(path: &Path) -> Self {
Self {
db: sled::open(path).expect("to be able open a sled path"),
}
}
}
#[async_trait]
pub trait DbTrait {
async fn insert(&self, namespace: &str, key: &str, value: &[u8]) -> anyhow::Result<()>;
async fn get_all(&self, namespace: &str) -> anyhow::Result<Vec<Vec<u8>>>;
}
#[async_trait]
impl DbTrait for DefaultDb {
async fn insert(&self, namespace: &str, key: &str, value: &[u8]) -> anyhow::Result<()> {
let tree = self.db.open_tree(namespace)?;
tree.insert(key, value)?;
//tree.flush_async().await?;
Ok(())
}
async fn get_all(&self, namespace: &str) -> anyhow::Result<Vec<Vec<u8>>> {
let tree = self.db.open_tree(namespace)?;
Ok(tree
.iter()
.flatten()
.map(|(_, val)| val.as_slice().to_vec())
.collect::<Vec<_>>())
}
}

View File

@@ -1,108 +0,0 @@
use std::sync::Arc;
use axum::async_trait;
use churn_domain::LogEvent;
use itertools::Itertools;
use churn_capnp::CapnpPackExt;
use crate::db::Db;
#[derive(Clone)]
pub struct EventService(Arc<dyn EventServiceTrait + Send + Sync + 'static>);
impl EventService {
pub fn new(db: Db) -> Self {
Self(Arc::new(DefaultEventService::new(db)))
}
}
impl std::ops::Deref for EventService {
type Target = Arc<dyn EventServiceTrait + Send + Sync + 'static>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
struct DefaultEventService {
db: Db,
}
impl DefaultEventService {
pub fn new(db: Db) -> Self {
Self { db }
}
}
#[async_trait]
pub trait EventServiceTrait {
async fn append(&self, req: LogEvent) -> anyhow::Result<()>;
async fn get_from_cursor(&self, cursor: uuid::Uuid) -> anyhow::Result<Vec<LogEvent>>;
async fn get_from_beginning(&self) -> anyhow::Result<Vec<LogEvent>>;
async fn get_latest_cursor(&self) -> anyhow::Result<uuid::Uuid>;
}
#[async_trait]
impl EventServiceTrait for DefaultEventService {
async fn append(&self, req: LogEvent) -> anyhow::Result<()> {
self.db
.insert("events_log", &req.id.to_string(), &req.serialize_capnp())
.await?;
Ok(())
}
async fn get_from_cursor(&self, cursor: uuid::Uuid) -> anyhow::Result<Vec<LogEvent>> {
let events = self.db.get_all("events_log").await?;
let events = events
.iter()
.flat_map(|e| match LogEvent::deserialize_capnp(e) {
Ok(o) => Ok(o),
Err(e) => {
tracing::error!("failed to deserialize capnp: {e}");
Err(e)
}
})
.sorted_by_key(|i| i.timestamp)
.skip_while(|item| item.id != cursor)
.skip(1)
.collect();
Ok(events)
}
async fn get_from_beginning(&self) -> anyhow::Result<Vec<LogEvent>> {
let events = self.db.get_all("events_log").await?;
let events = events
.iter()
.map(|x| x.as_slice())
.flat_map(LogEvent::deserialize_capnp)
.sorted_by_key(|i| i.timestamp)
.collect();
Ok(events)
}
async fn get_latest_cursor(&self) -> anyhow::Result<uuid::Uuid> {
let events = self.db.get_all("events_log").await?;
let event = events
.iter()
.flat_map(|e| match LogEvent::deserialize_capnp(e) {
Ok(o) => Ok(o),
Err(e) => {
tracing::error!("failed to deserialize capnp: {e}");
Err(e)
}
})
.sorted_by_key(|i| i.timestamp)
.last();
match event {
Some(x) => Ok(x.id),
None => anyhow::bail!("no events found"),
}
}
}

View File

@@ -1,59 +0,0 @@
use std::sync::Arc;
use axum::async_trait;
use churn_capnp::CapnpPackExt;
use churn_domain::Lease;
use crate::db::Db;
#[derive(Clone)]
pub struct LeaseService(Arc<dyn LeaseServiceTrait + Send + Sync + 'static>);
impl LeaseService {
pub fn new(db: Db) -> Self {
Self(Arc::new(DefaultLeaseService::new(db)))
}
}
impl std::ops::Deref for LeaseService {
type Target = Arc<dyn LeaseServiceTrait + Send + Sync + 'static>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
struct DefaultLeaseService {
db: Db,
}
impl DefaultLeaseService {
pub fn new(db: Db) -> Self {
Self { db }
}
}
#[async_trait]
pub trait LeaseServiceTrait {
async fn create_lease(&self) -> anyhow::Result<String>;
}
#[async_trait]
impl LeaseServiceTrait for DefaultLeaseService {
async fn create_lease(&self) -> anyhow::Result<String> {
let lease = uuid::Uuid::new_v4();
let id = uuid::Uuid::new_v4();
self.db
.insert(
"lease",
&lease.to_string(),
&Lease { id, lease }.serialize_capnp(),
)
.await?;
Ok(lease.to_string())
}
}

View File

@@ -1,238 +0,0 @@
#![feature(slice_pattern)]
mod agent;
mod db;
mod event;
mod lease;
use std::net::SocketAddr;
use std::path::PathBuf;
use agent::AgentService;
use anyhow::Error;
use axum::extract::{Query, State};
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{Json, Router};
use churn_domain::{Agent, LeaseResp, LogEvent, ServerEnrollReq, ServerMonitorResp};
use clap::{Args, Parser, Subcommand, ValueEnum};
use event::EventService;
use lease::LeaseService;
use serde::Deserialize;
use serde_json::json;
use tokio::net::TcpListener;
use crate::db::Db;
#[derive(Parser)]
#[command(author, version, about, long_about = None, subcommand_required = true)]
struct Command {
#[command(subcommand)]
command: Option<Commands>,
#[clap(flatten)]
global: GlobalArgs,
}
#[derive(Args)]
struct GlobalArgs {
#[arg(env = "CHURN_DATABASE", long, default_value = "sled")]
database: DatabaseType,
#[arg(env = "CHURN_SLED_PATH", long, default_value = "churn-server.sled")]
sled_path: PathBuf,
}
#[derive(ValueEnum, Clone)]
enum DatabaseType {
Sled,
}
#[derive(Subcommand)]
enum Commands {
Serve {
#[arg(env = "SERVICE_HOST", long, default_value = "127.0.0.1:3000")]
host: SocketAddr,
},
}
#[derive(Clone)]
struct AppState {
agent: AgentService,
leases: LeaseService,
events: EventService,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
dotenv::dotenv().ok();
tracing_subscriber::fmt::init();
let cli = Command::parse();
if let Some(Commands::Serve { host }) = cli.command {
tracing::info!("Starting churn server");
let db = match cli.global.database {
DatabaseType::Sled => Db::new_sled(&cli.global.sled_path),
};
let app = Router::new()
.route("/ping", get(ping))
.route("/logs", get(logs))
.nest(
"/agent",
Router::new()
.route("/enroll", post(enroll))
.route("/ping", post(agent_ping))
.route("/events", post(get_tasks))
.route("/lease", post(agent_lease)),
)
.with_state(AppState {
agent: AgentService::new(db.clone()),
leases: LeaseService::new(db.clone()),
events: EventService::new(db.clone()),
});
tracing::info!("churn server listening on {}", host);
let listener = TcpListener::bind(&host).await?;
axum::serve(listener, app.into_make_service())
.await
.unwrap();
}
Ok(())
}
enum AppError {
Internal(Error),
}
impl IntoResponse for AppError {
fn into_response(self) -> Response {
let (status, error_message) = match self {
AppError::Internal(e) => {
tracing::error!("failed with error: {}", e);
(
StatusCode::INTERNAL_SERVER_ERROR,
"failed with internal error",
)
}
};
let body = Json(json!({
"error": error_message,
}));
(status, body).into_response()
}
}
async fn enroll(
State(state): State<AppState>,
Json(req): Json<ServerEnrollReq>,
) -> Result<Json<Agent>, AppError> {
state
.events
.append(LogEvent::new(&req.agent_name, "attempting to enroll agent"))
.await
.map_err(AppError::Internal)?;
let name = state.agent.enroll(req).await.map_err(AppError::Internal)?;
state
.events
.append(LogEvent::new(&name, "enrolled agent"))
.await
.map_err(AppError::Internal)?;
Ok(Json(Agent { name }))
}
async fn agent_lease(State(state): State<AppState>) -> Result<Json<LeaseResp>, AppError> {
let lease = state
.leases
.create_lease()
.await
.map_err(AppError::Internal)?;
Ok(Json(LeaseResp { token: lease }))
}
async fn agent_ping() -> impl IntoResponse {
todo!()
}
async fn get_tasks() -> impl IntoResponse {
todo!()
}
async fn ping() -> impl IntoResponse {
"pong!"
}
#[derive(Clone, Deserialize)]
struct LogsQuery {
cursor: Option<uuid::Uuid>,
}
async fn logs(
State(state): State<AppState>,
Query(cursor): Query<LogsQuery>,
) -> Result<Json<ServerMonitorResp>, AppError> {
state
.events
.append(LogEvent::new(
"author",
format!(
"logs called: {}",
cursor
.cursor
.as_ref()
.map(|c| format!("(cursor={c})"))
.unwrap_or("".to_string())
),
))
.await
.map_err(AppError::Internal)?;
match cursor.cursor {
Some(cursor) => {
tracing::debug!("finding logs from cursor: {}", cursor);
}
None => {
tracing::debug!("finding logs from beginning");
}
}
match cursor.cursor {
Some(c) => {
let events = state
.events
.get_from_cursor(c)
.await
.map_err(AppError::Internal)?;
Ok(Json(ServerMonitorResp {
cursor: events.last().map(|e| e.id),
logs: events
.iter()
.map(|e| format!("{}: {}", e.author, e.content))
.collect(),
}))
}
None => {
let cursor = state
.events
.get_latest_cursor()
.await
.map_err(AppError::Internal)?;
Ok(Json(ServerMonitorResp {
cursor: Some(cursor),
logs: Vec::new(),
}))
}
}
}

View File

@@ -1,53 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v0.1.0 (2023-08-26)
<csr-id-1ae70ac5258ae9f8f5471923fefd3e8ab02f46c1/>
### Chore
- <csr-id-1ae70ac5258ae9f8f5471923fefd3e8ab02f46c1/> with changelog
### New Features
- <csr-id-8f8c5fd41aaa82a495dd0933060f0a3a095bbaf1/> with basic package
- <csr-id-821e14fb1256957a107220c6c775565f5abc58c4/> with publish
- <csr-id-e0545c726c44dccfb8ea179266c1da93389c07e4/> with monitoring
- <csr-id-569f5272e667deeef9f269db5eaf3dec57e2df1c/> with monitor
- <csr-id-10eae9b36cfe82b86fe0bf4d7c02f99d727b839d/> with extra churning repl thingy
- <csr-id-97978df287ee42f523f509ac686a13fa0400a026/> add initial churn
- <csr-id-f61d0bbf120607e59145a80b65985ab93c938522/> add simple health check
### Commit Statistics
<csr-read-only-do-not-edit/>
- 11 commits contributed to the release over the course of 2 calendar days.
- 8 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- Release churn v0.1.0 (d5212f0)
- Release churn-domain v0.1.0, churn v0.1.0 (e4e05bc)
- With changelog (1ae70ac)
- Release churn-domain v0.1.0, churn v0.1.0 (34bc81e)
- With basic package (8f8c5fd)
- With publish (821e14f)
- With monitoring (e0545c7)
- With monitor (569f527)
- With extra churning repl thingy (10eae9b)
- Add initial churn (97978df)
- Add simple health check (f61d0bb)
</details>

View File

@@ -1,15 +1,9 @@
[package]
name = "churn"
authors.workspace = true
description.workspace = true
license-file.workspace = true
version= "0.1.0"
edition.workspace = true
publish.workspace = true
version = "0.1.0"
edition = "2021"
[dependencies]
churn-domain.workspace = true
anyhow.workspace = true
tokio.workspace = true
tracing.workspace = true
@@ -17,5 +11,30 @@ tracing-subscriber.workspace = true
clap.workspace = true
dotenv.workspace = true
axum.workspace = true
reqwest.workspace = true
uuid.workspace = true
serde = { version = "1.0.197", features = ["derive"] }
uuid = { version = "1.7.0", features = ["v4"] }
tower-http = { version = "0.6.0", features = ["cors", "trace"] }
notmad = "0.7.1"
tokio-util = "0.7.12"
async-trait = "0.1.83"
nodrift = "0.3.0"
rusqlite = { version = "0.32.1", features = ["bundled"] }
prost-types = "0.13.3"
prost = "0.13.3"
bytes = "1.8.0"
tonic = { version = "0.12.3", features = ["tls", "tls-roots"] }
toml = "0.8.19"
dirs = "5.0.1"
futures = "0.3.31"
reqwest = { version = "0.12.9", default-features = false, features = [
"json",
"http2",
"charset",
"native-tls-vendored",
"stream",
] }
serde_json = "1.0.133"
wasmtime = "28.0.0"
wasmtime-wasi = "28.0.0"
petname = "2.0.2"

View File

@@ -0,0 +1,35 @@
syntax = "proto3";
package churn.v1;
service Churn {
rpc GetKey(GetKeyRequest) returns (GetKeyResponse);
rpc SetKey(SetKeyRequest) returns (SetKeyResponse);
rpc ListenEvents(ListenEventsRequest) returns (stream ListenEventsResponse);
}
message GetKeyRequest {
string namespace = 1;
optional string id = 2;
string key = 3;
}
message GetKeyResponse {
optional string value = 1;
}
message SetKeyRequest {
string namespace = 1;
optional string id = 2;
string key = 3;
string value = 4;
}
message SetKeyResponse {}
message ListenEventsRequest {
string namespace = 1;
optional string id = 2;
}
message ListenEventsResponse {
string id = 1;
string value = 2;
}
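
For context only (not a file in this diff): a minimal sketch of driving the tonic-generated client for this service. The module path and endpoint are assumptions — the generated `churn.v1` code is shown further down and the real wiring lives in `GrpcClient`; `127.0.0.1:7900` is just the default `grpc_host` from the CLI.

// Illustrative sketch, not part of the repository. Assumes the generated
// client module is exposed as `crate::grpc`, as GrpcClient does below.
use crate::grpc::{churn_client::ChurnClient, GetKeyRequest};

async fn read_key() -> anyhow::Result<Option<String>> {
    // Default grpc_host from the `serve` command; adjust for a real deployment.
    let mut client = ChurnClient::connect("http://127.0.0.1:7900").await?;
    let resp = client
        .get_key(GetKeyRequest {
            namespace: "agents".into(), // namespace and key are placeholder values
            id: None,
            key: "example".into(),
        })
        .await?;
    Ok(resp.into_inner().value)
}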

crates/churn/src/agent.rs (new file, 36 lines)
View File

@@ -0,0 +1,36 @@
use agent_state::AgentState;
use event_handler::EventHandler;
use refresh::AgentRefresh;
pub use config::setup_config;
pub mod models;
pub(crate) mod task;
mod agent_state;
mod config;
mod discovery_client;
mod event_handler;
mod grpc_client;
mod plugins;
mod queue;
mod refresh;
mod scheduler;
mod handlers;
mod actions;
pub async fn execute() -> anyhow::Result<()> {
let state = AgentState::new().await?;
notmad::Mad::builder()
.add(AgentRefresh::new(&state))
.add(EventHandler::new(&state))
.add(state.queue.clone())
.cancellation(Some(std::time::Duration::from_secs(2)))
.run()
.await?;
Ok(())
}

View File

@@ -0,0 +1,24 @@
use apt::AptTask;
use plugin_task::PluginTask;
use super::{plugins::PluginStore, task::IntoTask};
pub mod apt;
pub mod plugin_task;
pub struct Plan {
store: PluginStore,
}
impl Plan {
pub fn new(store: PluginStore) -> Self {
Self { store }
}
pub async fn tasks(&self) -> anyhow::Result<Vec<impl IntoTask>> {
Ok(vec![
AptTask::new().into_task(),
PluginTask::new("alloy@0.1.0", self.store.clone()).into_task(),
PluginTask::new("dev_packages@0.1.0", self.store.clone()).into_task(),
])
}
}

View File

@@ -0,0 +1,49 @@
use anyhow::Context;
use crate::agent::task::Task;
pub struct AptTask {}
impl AptTask {
pub fn new() -> Self {
Self {}
}
}
#[async_trait::async_trait]
impl Task for AptTask {
async fn id(&self) -> anyhow::Result<String> {
Ok("apt".into())
}
async fn execute(&self) -> anyhow::Result<()> {
let mut cmd = tokio::process::Command::new("apt-get");
cmd.args(["update", "-q"]);
let output = cmd.output().await.context("failed to run apt update")?;
match output.status.success() {
true => tracing::info!("successfully ran apt update"),
false => {
anyhow::bail!(
"failed to run apt update: {}",
std::str::from_utf8(&output.stderr)?
);
}
}
let mut cmd = tokio::process::Command::new("apt-get");
cmd.env("DEBIAN_FRONTEND", "noninteractive")
.args(["upgrade", "-y"]);
let output = cmd.output().await.context("failed to run apt upgrade")?;
match output.status.success() {
true => tracing::info!("successfully ran apt upgrade"),
false => {
anyhow::bail!(
"failed to run apt upgrade: {}",
std::str::from_utf8(&output.stderr)?
);
}
}
Ok(())
}
}

View File

@@ -0,0 +1,30 @@
use crate::agent::{plugins::PluginStore, task::Task};
pub struct PluginTask {
plugin: String,
store: PluginStore,
}
impl PluginTask {
pub fn new(plugin: impl Into<String>, store: PluginStore) -> Self {
Self {
plugin: plugin.into(),
store,
}
}
}
#[async_trait::async_trait]
impl Task for PluginTask {
async fn id(&self) -> anyhow::Result<String> {
let id = self.store.id(&self.plugin).await?;
Ok(id)
}
async fn execute(&self) -> anyhow::Result<()> {
self.store.execute(&self.plugin).await?;
Ok(())
}
}

View File

@@ -0,0 +1,60 @@
use std::{ops::Deref, sync::Arc};
use crate::api::Discovery;
use super::{
config::AgentConfig, discovery_client::DiscoveryClient, grpc_client::GrpcClient,
handlers::scheduled_tasks::ScheduledTasks, plugins::PluginStore, queue::AgentQueue,
scheduler::Scheduler,
};
#[derive(Clone)]
pub struct AgentState(Arc<State>);
impl AgentState {
pub async fn new() -> anyhow::Result<Self> {
Ok(Self(Arc::new(State::new().await?)))
}
}
impl From<&AgentState> for AgentState {
fn from(value: &AgentState) -> Self {
value.clone()
}
}
impl Deref for AgentState {
type Target = Arc<State>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct State {
pub grpc: GrpcClient,
pub config: AgentConfig,
pub discovery: Discovery,
pub queue: AgentQueue,
pub plugin_store: PluginStore,
}
impl State {
pub async fn new() -> anyhow::Result<Self> {
let config = AgentConfig::new().await?;
let discovery = DiscoveryClient::new(&config.discovery).discover().await?;
let grpc = GrpcClient::new(&discovery.process_host);
let plugin_store = PluginStore::new(config.clone())?;
let scheduled_tasks = ScheduledTasks::new(plugin_store.clone());
let scheduler = Scheduler::new(scheduled_tasks);
let queue = AgentQueue::new(scheduler);
Ok(Self {
grpc,
config,
discovery,
queue,
plugin_store,
})
}
}

View File

@@ -0,0 +1,96 @@
use std::collections::BTreeMap;
use anyhow::Context;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Clone)]
pub struct AgentConfig {
pub agent_id: String,
pub discovery: String,
pub labels: BTreeMap<String, String>,
}
impl AgentConfig {
pub async fn new() -> anyhow::Result<Self> {
let config = ConfigFile::load().await?;
Ok(Self {
agent_id: config.agent_id,
discovery: config.discovery,
labels: config.labels.unwrap_or_default(),
})
}
}
#[derive(Serialize, Deserialize)]
struct ConfigFile {
agent_id: String,
discovery: String,
labels: Option<BTreeMap<String, String>>,
}
impl ConfigFile {
pub async fn load() -> anyhow::Result<Self> {
let directory = dirs::data_dir()
.ok_or(anyhow::anyhow!("failed to get data dir"))?
.join("io.kjuulh.churn-agent")
.join("churn-agent.toml");
if !directory.exists() {
anyhow::bail!(
"No churn agent file was setup, run `churn agent setup` to setup the defaults"
)
}
let contents = tokio::fs::read_to_string(&directory).await?;
toml::from_str(&contents).context("failed to parse the contents of the churn agent config")
}
pub async fn write_default(
discovery: impl Into<String>,
force: bool,
labels: impl Into<BTreeMap<String, String>>,
) -> anyhow::Result<Self> {
let s = Self {
agent_id: Uuid::new_v4().to_string(),
discovery: discovery.into(),
labels: Some(labels.into()),
};
let directory = dirs::data_dir()
.ok_or(anyhow::anyhow!("failed to get data dir"))?
.join("io.kjuulh.churn-agent")
.join("churn-agent.toml");
if let Some(parent) = directory.parent() {
tokio::fs::create_dir_all(&parent).await?;
}
if !force && directory.exists() {
anyhow::bail!("config file already exists, consider moving it to a backup before trying again: {}", directory.display());
}
let contents = toml::to_string_pretty(&s)
.context("failed to convert default implementation to string")?;
tokio::fs::write(directory, contents.as_bytes())
.await
.context("failed to write to agent file")?;
Ok(s)
}
}
pub async fn setup_config(
discovery: impl Into<String>,
force: bool,
labels: impl Into<BTreeMap<String, String>>,
) -> anyhow::Result<()> {
ConfigFile::write_default(discovery, force, labels).await?;
Ok(())
}

View File

@@ -0,0 +1,21 @@
use crate::api::Discovery;
pub struct DiscoveryClient {
host: String,
}
impl DiscoveryClient {
pub fn new(discovery_host: impl Into<String>) -> Self {
Self {
host: discovery_host.into(),
}
}
pub async fn discover(&self) -> anyhow::Result<Discovery> {
tracing::info!(
"getting details from discovery endpoint: {}/discovery",
self.host.trim_end_matches('/')
);
crate::api::Discovery::get_from_host(&self.host).await
}
}

View File

@@ -0,0 +1,63 @@
use notmad::{Component, MadError};
use crate::agent::models::Commands;
use super::{
agent_state::AgentState, config::AgentConfig, grpc_client::GrpcClient, queue::AgentQueue,
};
#[derive(Clone)]
pub struct EventHandler {
config: AgentConfig,
grpc: GrpcClient,
queue: AgentQueue,
}
impl EventHandler {
pub fn new(state: impl Into<AgentState>) -> Self {
let state: AgentState = state.into();
Self {
config: state.config.clone(),
grpc: state.grpc.clone(),
queue: state.queue.clone(),
}
}
}
#[async_trait::async_trait]
impl Component for EventHandler {
fn name(&self) -> Option<String> {
Some("event_handler".into())
}
async fn run(
&self,
cancellation_token: tokio_util::sync::CancellationToken,
) -> Result<(), notmad::MadError> {
tokio::select! {
_ = cancellation_token.cancelled() => {},
res = self.grpc.listen_events("agents", None::<String>, self.clone()) => {
res.map_err(MadError::Inner)?;
},
res = self.grpc.listen_events("agents", Some(&self.config.agent_id), self.clone()) => {
res.map_err(MadError::Inner)?;
}
}
Ok(())
}
}
#[async_trait::async_trait]
impl super::grpc_client::ListenEventsExecutor for EventHandler {
async fn execute(&self, event: crate::grpc::ListenEventsResponse) -> anyhow::Result<()> {
tracing::info!(value = event.id, "received event");
let event: Commands = serde_json::from_str(&event.value)?;
self.queue.publish(event).await?;
Ok(())
}
}

View File

@@ -0,0 +1,108 @@
use tonic::transport::{Channel, ClientTlsConfig};
use crate::grpc::{churn_client::ChurnClient, *};
#[derive(Clone)]
pub struct GrpcClient {
host: String,
}
impl GrpcClient {
pub fn new(host: impl Into<String>) -> Self {
Self { host: host.into() }
}
pub async fn get_key(
&self,
namespace: &str,
id: Option<impl Into<String>>,
key: &str,
) -> anyhow::Result<Option<String>> {
let mut client = self.client().await?;
let resp = client
.get_key(GetKeyRequest {
key: key.into(),
namespace: namespace.into(),
id: id.map(|i| i.into()),
})
.await?;
let resp = resp.into_inner();
Ok(resp.value)
}
pub async fn set_key(
&self,
namespace: &str,
id: Option<impl Into<String>>,
key: &str,
value: &str,
) -> anyhow::Result<()> {
let mut client = self.client().await?;
client
.set_key(SetKeyRequest {
key: key.into(),
value: value.into(),
namespace: namespace.into(),
id: id.map(|i| i.into()),
})
.await?;
Ok(())
}
pub async fn listen_events(
&self,
namespace: &str,
id: Option<impl Into<String>>,
exec: impl ListenEventsExecutor,
) -> anyhow::Result<()> {
let mut client = self.client().await?;
tracing::debug!("creating stream for listening to events on: {}", namespace);
let resp = client
.listen_events(ListenEventsRequest {
namespace: namespace.into(),
id: id.map(|i| i.into()),
})
.await
.inspect_err(|e| tracing::warn!("failed to establish a connection: {}", e))?;
tracing::debug!("setup stream: {}", namespace);
let mut inner = resp.into_inner();
while let Ok(Some(message)) = inner.message().await {
tracing::debug!("received message: {}", namespace);
exec.execute(message)
.await
.inspect_err(|e| tracing::warn!("failed to handle message: {}", e))?;
}
Ok(())
}
async fn client(&self) -> anyhow::Result<ChurnClient<tonic::transport::Channel>> {
tracing::debug!("setting up client");
let channel = if self.host.starts_with("https") {
Channel::from_shared(self.host.to_owned())?
.tls_config(ClientTlsConfig::new().with_native_roots())?
.connect_timeout(std::time::Duration::from_secs(5))
.connect()
.await?
} else {
Channel::from_shared(self.host.to_owned())?
.connect()
.await?
};
let client = ChurnClient::new(channel);
Ok(client)
}
}
#[async_trait::async_trait]
pub trait ListenEventsExecutor {
async fn execute(&self, event: ListenEventsResponse) -> anyhow::Result<()>;
}

View File

@@ -0,0 +1 @@
pub mod scheduled_tasks;

View File

@@ -0,0 +1,46 @@
use std::collections::BTreeMap;
use crate::agent::{
actions::Plan,
plugins::PluginStore,
task::{ConcreteTask, IntoTask},
};
#[derive(Clone)]
pub struct ScheduledTasks {
store: PluginStore,
}
impl ScheduledTasks {
pub fn new(store: PluginStore) -> Self {
Self { store }
}
pub async fn handle(
&self,
task: &str,
_properties: BTreeMap<String, String>,
) -> anyhow::Result<()> {
tracing::info!("scheduling: {}", task);
let plan = Plan::new(self.store.clone());
let tasks: Vec<ConcreteTask> = plan
.tasks()
.await?
.into_iter()
.map(|i| i.into_task())
.collect();
for task in tasks {
let id = task.id().await?;
if !task.should_run().await? {
tracing::debug!(task = id, "skipping run");
continue;
}
tracing::info!(task = id, "executing task");
task.execute().await?;
}
Ok(())
}
}

View File

@@ -0,0 +1,20 @@
use std::{collections::BTreeMap, fmt::Display};
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(tag = "type")]
pub enum Commands {
ScheduleTask {
task: String,
properties: BTreeMap<String, String>,
},
}
impl Display for Commands {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Commands::ScheduleTask { .. } => "schedule_task",
})
}
}
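
A hedged aside, not part of the repository: because of `#[serde(tag = "type")]`, a command travels over the event stream as an internally tagged JSON object, which is what `EventHandler` deserializes from `ListenEventsResponse.value`. A minimal sketch, assuming `Commands` from the file above is in scope:

// Sketch of the wire format implied by the serde attributes above.
use std::collections::BTreeMap;

fn wire_format() -> anyhow::Result<()> {
    let cmd = Commands::ScheduleTask {
        task: "update".into(),
        properties: BTreeMap::new(),
    };
    // Internally tagged: the variant name lands in the "type" field.
    assert_eq!(
        serde_json::to_string(&cmd)?,
        r#"{"type":"ScheduleTask","task":"update","properties":{}}"#
    );
    Ok(())
}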

View File

@@ -0,0 +1,328 @@
use anyhow::Context;
use component::churn_tasks::process::HostProcess;
use futures::StreamExt;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use wasmtime::component::*;
use wasmtime::{Config, Engine, Store};
use wasmtime_wasi::{DirPerms, FilePerms, WasiCtx, WasiCtxBuilder, WasiView};
use super::config::AgentConfig;
wasmtime::component::bindgen!({
path: "wit/world.wit",
//world: "churn",
async: true,
with: {
"component:churn-tasks/process/process": CustomProcess,
"component:churn-tasks/http/client": http::HttpClient
}
});
mod http;
pub struct CustomProcess {
agent_config: AgentConfig,
}
impl CustomProcess {
pub fn new(agent_config: AgentConfig) -> Self {
Self { agent_config }
}
pub fn run(&self, args: Vec<String>) -> String {
tracing::info!("calling function");
match args.split_first() {
Some((item, rest)) => {
let mut cmd = std::process::Command::new(item);
match cmd.args(rest).output() {
Ok(output) => std::str::from_utf8(&output.stdout)
.expect("to be able to parse utf8")
.to_string(),
Err(e) => {
tracing::error!("command failed with output: {e}");
e.to_string()
}
}
}
None => {
tracing::warn!("failed to call function because it is empty");
panic!("failed to call function because it is empty")
}
}
}
pub fn get_label(&self, label_key: &str) -> Option<String> {
self.agent_config.labels.get(label_key).cloned()
}
}
#[derive(Clone)]
pub struct PluginStore {
inner: Arc<Mutex<InnerPluginStore>>,
}
impl PluginStore {
pub fn new(config: AgentConfig) -> anyhow::Result<Self> {
Ok(Self {
inner: Arc::new(Mutex::new(InnerPluginStore::new(config)?)),
})
}
pub async fn id(&self, plugin: &str) -> anyhow::Result<String> {
let mut inner = self.inner.lock().await;
inner.id(plugin).await
}
pub async fn execute(&self, plugin: &str) -> anyhow::Result<()> {
let mut inner = self.inner.lock().await;
// FIXME: hack to avoid memory leak issues from instantiating plugins
*inner = InnerPluginStore::new(inner.agent_config.clone())?;
inner.execute(plugin).await
}
}
pub struct InnerPluginStore {
store: wasmtime::Store<ServerWasiView>,
linker: wasmtime::component::Linker<ServerWasiView>,
engine: wasmtime::Engine,
agent_config: AgentConfig,
}
impl InnerPluginStore {
pub fn new(agent_config: AgentConfig) -> anyhow::Result<Self> {
let mut config = Config::default();
config.wasm_component_model(true);
config.async_support(true);
let engine = Engine::new(&config)?;
let mut linker: wasmtime::component::Linker<ServerWasiView> = Linker::new(&engine);
// Add the command world (aka WASI CLI) to the linker
wasmtime_wasi::add_to_linker_async(&mut linker).context("Failed to link command world")?;
component::churn_tasks::process::add_to_linker(
&mut linker,
|state: &mut ServerWasiView| state,
)?;
component::churn_tasks::http::add_to_linker(&mut linker, |state: &mut ServerWasiView| {
state
})?;
let wasi_view = ServerWasiView::new(agent_config.clone());
let store = Store::new(&engine, wasi_view);
Ok(Self {
store,
linker,
engine,
agent_config,
})
}
pub async fn id(&mut self, plugin: &str) -> anyhow::Result<String> {
let plugin = self.ensure_plugin(plugin).await?;
plugin
.interface0
.call_id(&mut self.store)
.await
.context("Failed to call add function")
}
pub async fn execute(&mut self, plugin: &str) -> anyhow::Result<()> {
let plugin = self.ensure_plugin(plugin).await?;
self.store.gc_async().await;
if plugin
.interface0
.call_should_run(&mut self.store)
.await
.context("Failed to call should run")?
{
tracing::info!("job was marked as required to run");
return plugin
.interface0
.call_execute(&mut self.store)
.await
.context("Failed to call add function");
}
Ok(())
}
async fn ensure_plugin(&mut self, plugin: &str) -> anyhow::Result<Churn> {
let cache = dirs::cache_dir()
.ok_or(anyhow::anyhow!("failed to find cache dir"))?
.join("io.kjuulh.churn");
let (plugin_name, plugin_version) = plugin.split_once("@").unwrap_or((plugin, "latest"));
let plugin_path = cache
.join("plugins")
.join(plugin_name)
.join(plugin_version)
.join(format!("{plugin_name}.wasm"));
let no_cache: bool = std::env::var("CHURN_NO_CACHE")
.unwrap_or("false".into())
.parse()?;
if !plugin_path.exists() || no_cache {
tracing::info!(
plugin_name = plugin_name,
plugin_version = plugin_version,
"downloading plugin"
);
if let Some(parent) = plugin_path.parent() {
tokio::fs::create_dir_all(parent).await?;
}
let req = reqwest::get(format!("https://api-minio.front.kjuulh.io/churn-registry/{plugin_name}/{plugin_version}/{plugin_name}.wasm")).await.context("failed to get plugin from registry")?;
let mut stream = req.bytes_stream();
tracing::info!(
plugin_name = plugin_name,
plugin_path = plugin_path.display().to_string(),
"writing plugin to file"
);
let mut file = tokio::fs::File::create(&plugin_path).await?;
while let Some(chunk) = stream.next().await {
let chunk = chunk?;
file.write_all(&chunk).await?;
}
file.flush().await?;
}
let component =
Component::from_file(&self.engine, plugin_path).context("Component file not found")?;
tracing::debug!(
plugin_name = plugin_name,
plugin_version = plugin_version,
"instantiating plugin"
);
let instance = Churn::instantiate_async(&mut self.store, &component, &self.linker)
.await
.context("Failed to instantiate the churn plugin world")?;
Ok(instance)
}
}
struct ServerWasiView {
table: ResourceTable,
ctx: WasiCtx,
processes: ResourceTable,
clients: ResourceTable,
agent_config: AgentConfig,
}
impl ServerWasiView {
fn new(agent_config: AgentConfig) -> Self {
let table = ResourceTable::new();
let ctx = WasiCtxBuilder::new()
.inherit_stdio()
.inherit_stdout()
.inherit_env()
.inherit_stderr()
.inherit_network()
.preopened_dir("/", "/", DirPerms::all(), FilePerms::all())
.expect("to be able to open root")
.build();
Self {
table,
ctx,
processes: ResourceTable::default(),
clients: ResourceTable::default(),
agent_config,
}
}
}
impl WasiView for ServerWasiView {
fn table(&mut self) -> &mut ResourceTable {
&mut self.table
}
fn ctx(&mut self) -> &mut WasiCtx {
&mut self.ctx
}
}
impl component::churn_tasks::process::Host for ServerWasiView {}
#[async_trait::async_trait]
impl HostProcess for ServerWasiView {
async fn new(
&mut self,
) -> wasmtime::component::Resource<component::churn_tasks::process::Process> {
self.processes
.push(CustomProcess::new(self.agent_config.clone()))
.unwrap()
}
async fn run_process(
&mut self,
self_: wasmtime::component::Resource<component::churn_tasks::process::Process>,
inputs: wasmtime::component::__internal::Vec<String>,
) -> String {
let process = self.processes.get(&self_).unwrap();
process.run(inputs)
}
async fn get_variable(
&mut self,
self_: wasmtime::component::Resource<component::churn_tasks::process::Process>,
key: wasmtime::component::__internal::String,
) -> String {
let process = self.processes.get(&self_).unwrap();
process.get_label(&key).unwrap()
}
async fn drop(
&mut self,
rep: wasmtime::component::Resource<component::churn_tasks::process::Process>,
) -> wasmtime::Result<()> {
self.processes.delete(rep)?;
Ok(())
}
}
impl component::churn_tasks::http::Host for ServerWasiView {}
#[async_trait::async_trait]
impl component::churn_tasks::http::HostClient for ServerWasiView {
async fn new(&mut self) -> wasmtime::component::Resource<component::churn_tasks::http::Client> {
self.clients.push(http::HttpClient::new()).unwrap()
}
async fn get(
&mut self,
self_: wasmtime::component::Resource<component::churn_tasks::http::Client>,
url: wasmtime::component::__internal::String,
) -> Vec<u8> {
let process = self.clients.get(&self_).unwrap();
process
.get(&url)
.await
.expect("to be able to make http call")
}
async fn drop(
&mut self,
rep: wasmtime::component::Resource<component::churn_tasks::http::Client>,
) -> wasmtime::Result<()> {
self.clients.delete(rep)?;
Ok(())
}
}

View File

@@ -0,0 +1,12 @@
pub struct HttpClient {}
impl HttpClient {
pub fn new() -> Self {
Self {}
}
pub async fn get(&self, url: &str) -> anyhow::Result<Vec<u8>> {
let bytes = reqwest::get(url).await?.bytes().await?;
Ok(bytes.into())
}
}

View File

@@ -0,0 +1,67 @@
use std::sync::Arc;
use notmad::{Component, MadError};
use tokio::sync::Mutex;
use super::{models::Commands, scheduler::Scheduler};
#[derive(Clone)]
pub struct AgentQueue {
sender: Arc<tokio::sync::mpsc::Sender<Commands>>,
receiver: Arc<Mutex<tokio::sync::mpsc::Receiver<Commands>>>,
scheduler: Scheduler,
}
impl AgentQueue {
pub fn new(scheduler: Scheduler) -> Self {
let (tx, rx) = tokio::sync::mpsc::channel(5);
Self {
sender: Arc::new(tx),
receiver: Arc::new(Mutex::new(rx)),
scheduler,
}
}
pub async fn handler(&self, command: Commands) -> anyhow::Result<()> {
tracing::debug!("handling task");
self.scheduler.handle(command).await?;
Ok(())
}
pub async fn publish(&self, command: Commands) -> anyhow::Result<()> {
tracing::debug!("publishing task: {}", command.to_string());
self.sender.send(command).await?;
Ok(())
}
}
#[async_trait::async_trait]
impl Component for AgentQueue {
async fn run(
&self,
cancellation_token: tokio_util::sync::CancellationToken,
) -> Result<(), notmad::MadError> {
loop {
let mut recv = self.receiver.lock().await;
tokio::select! {
res = recv.recv() => {
if let Some(res) = res {
self.handler(res).await.map_err(MadError::Inner)?;
}
}
_ = cancellation_token.cancelled() => {
break
}
}
}
Ok(())
}
}

View File

@@ -0,0 +1,63 @@
use std::collections::BTreeMap;
use crate::agent::models::Commands;
use super::{agent_state::AgentState, queue::AgentQueue};
#[derive(Clone)]
pub struct AgentRefresh {
process_host: String,
queue: AgentQueue,
}
impl AgentRefresh {
pub fn new(state: impl Into<AgentState>) -> Self {
let state: AgentState = state.into();
Self {
process_host: state.discovery.process_host.clone(),
queue: state.queue.clone(),
}
}
}
#[async_trait::async_trait]
impl notmad::Component for AgentRefresh {
fn name(&self) -> Option<String> {
Some("agent_refresh".into())
}
async fn run(
&self,
cancellation_token: tokio_util::sync::CancellationToken,
) -> Result<(), notmad::MadError> {
// let cancel =
// nodrift::schedule_drifter(std::time::Duration::from_secs(60 * 10), self.clone());
let cancel =
nodrift::schedule_drifter(std::time::Duration::from_secs(60 * 5), self.clone());
tokio::select! {
_ = cancel.cancelled() => {},
_ = cancellation_token.cancelled() => {
tracing::debug!("cancelling agent refresh");
cancel.cancel();
}
}
Ok(())
}
}
#[async_trait::async_trait]
impl nodrift::Drifter for AgentRefresh {
async fn execute(&self, _token: tokio_util::sync::CancellationToken) -> anyhow::Result<()> {
tracing::info!(process_host = self.process_host, "refreshing agent");
self.queue
.publish(Commands::ScheduleTask {
task: "update".into(),
properties: BTreeMap::default(),
})
.await?;
Ok(())
}
}

View File

@@ -0,0 +1,22 @@
use super::{handlers::scheduled_tasks::ScheduledTasks, models::Commands};
#[derive(Clone)]
pub struct Scheduler {
scheduled_tasks: ScheduledTasks,
}
impl Scheduler {
pub fn new(scheduled_tasks: ScheduledTasks) -> Self {
Self { scheduled_tasks }
}
pub async fn handle(&self, command: Commands) -> anyhow::Result<()> {
match command {
Commands::ScheduleTask { task, properties } => {
self.scheduled_tasks.handle(&task, properties).await?;
}
}
Ok(())
}
}

View File

@@ -0,0 +1,45 @@
use std::sync::Arc;
#[async_trait::async_trait]
pub trait Task {
async fn id(&self) -> anyhow::Result<String>;
async fn should_run(&self) -> anyhow::Result<bool> {
Ok(true)
}
async fn execute(&self) -> anyhow::Result<()>;
}
pub trait IntoTask {
fn into_task(self) -> ConcreteTask;
}
#[derive(Clone)]
pub struct ConcreteTask {
inner: Arc<dyn Task + Sync + Send + 'static>,
}
impl ConcreteTask {
pub fn new<T: Task + Sync + Send + 'static>(t: T) -> Self {
Self { inner: Arc::new(t) }
}
}
impl std::ops::Deref for ConcreteTask {
type Target = Arc<dyn Task + Sync + Send + 'static>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl IntoTask for ConcreteTask {
fn into_task(self) -> ConcreteTask {
self
}
}
impl<T: Task + Sync + Send + 'static> IntoTask for T {
fn into_task(self) -> ConcreteTask {
ConcreteTask::new(self)
}
}
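
For illustration only (this type is hypothetical and not a file in the diff): any type implementing `Task` picks up the blanket `IntoTask` impl above, so a new task can be handed to the planner as a `ConcreteTask` without extra glue. A minimal sketch, assuming `anyhow`, `async_trait`, and `tracing` are available as in the rest of the crate:

// Hypothetical task, shown only to illustrate the trait.
struct NoopTask;

#[async_trait::async_trait]
impl Task for NoopTask {
    async fn id(&self) -> anyhow::Result<String> {
        Ok("noop".into())
    }
    // `should_run` falls back to the default `Ok(true)`.
    async fn execute(&self) -> anyhow::Result<()> {
        tracing::info!("nothing to do");
        Ok(())
    }
}

// The blanket impl converts it into the type-erased ConcreteTask:
// let task: ConcreteTask = NoopTask.into_task();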

crates/churn/src/api.rs (new file, 94 lines)
View File

@@ -0,0 +1,94 @@
use std::net::SocketAddr;
use axum::{
extract::{MatchedPath, State},
http::Request,
routing::get,
Json, Router,
};
use serde::{Deserialize, Serialize};
use tokio_util::sync::CancellationToken;
use tower_http::trace::TraceLayer;
use crate::state::SharedState;
pub struct Api {
state: SharedState,
host: SocketAddr,
}
impl Api {
pub fn new(state: impl Into<SharedState>, host: impl Into<SocketAddr>) -> Self {
Self {
state: state.into(),
host: host.into(),
}
}
pub async fn serve(&self) -> anyhow::Result<()> {
let app = Router::new()
.route("/", get(root))
.route("/discovery", get(discovery))
.with_state(self.state.clone())
.layer(
TraceLayer::new_for_http().make_span_with(|request: &Request<_>| {
// Log the matched route's path (with placeholders not filled in).
// Use request.uri() or OriginalUri if you want the real path.
let matched_path = request
.extensions()
.get::<MatchedPath>()
.map(MatchedPath::as_str);
tracing::info_span!(
"http_request",
method = ?request.method(),
matched_path,
some_other_field = tracing::field::Empty,
)
}), // ...
);
tracing::info!("listening on {}", self.host);
let listener = tokio::net::TcpListener::bind(&self.host).await.unwrap();
axum::serve(listener, app.into_make_service())
.await
.unwrap();
Ok(())
}
}
async fn root() -> &'static str {
"Hello, churn!"
}
#[derive(Serialize, Deserialize)]
pub struct Discovery {
pub external_host: String,
pub process_host: String,
}
impl Discovery {
pub async fn get_from_host(host: &str) -> anyhow::Result<Self> {
let resp = reqwest::get(format!("{}/discovery", host.trim_end_matches('/'))).await?;
let s: Self = resp.json().await?;
Ok(s)
}
}
async fn discovery(State(state): State<SharedState>) -> Json<Discovery> {
Json(Discovery {
external_host: state.config.external_host.clone(),
process_host: state.config.process_host.clone(),
})
}
#[async_trait::async_trait]
impl notmad::Component for Api {
async fn run(&self, _cancellation_token: CancellationToken) -> Result<(), notmad::MadError> {
self.serve().await.map_err(notmad::MadError::Inner)?;
Ok(())
}
}

crates/churn/src/cli.rs (new file, 95 lines)
View File

@@ -0,0 +1,95 @@
use std::{collections::BTreeMap, net::SocketAddr};
use clap::{Parser, Subcommand};
use crate::{agent, server};
pub async fn execute() -> anyhow::Result<()> {
let cli = Command::parse();
match cli.command.expect("to have a subcommand") {
Commands::Serve {
host,
grpc_host,
config,
} => {
tracing::info!("Starting service");
server::execute(host, grpc_host, config).await?;
}
Commands::Agent { commands } => match commands {
AgentCommands::Start {} => {
tracing::info!("starting agent");
agent::execute().await?;
tracing::info!("shut down agent");
}
AgentCommands::Setup {
force,
discovery,
labels,
} => {
let mut setup_labels = BTreeMap::new();
for (k, v) in labels {
setup_labels.insert(k, v);
}
if !setup_labels.contains_key("node_name") {
setup_labels.insert(
"node_name".into(),
petname::petname(2, "-").expect("to be able to generate a valid petname"),
);
}
agent::setup_config(discovery, force, setup_labels).await?;
tracing::info!("wrote default agent config");
}
},
}
Ok(())
}
#[derive(Parser)]
#[command(author, version, about, long_about = None, subcommand_required = true)]
struct Command {
#[command(subcommand)]
command: Option<Commands>,
}
#[derive(Subcommand)]
enum Commands {
Serve {
#[arg(env = "SERVICE_HOST", long, default_value = "127.0.0.1:3000")]
host: SocketAddr,
#[arg(env = "SERVICE_GRPC_HOST", long, default_value = "127.0.0.1:7900")]
grpc_host: SocketAddr,
#[clap(flatten)]
config: server::config::ServerConfig,
},
Agent {
#[command(subcommand)]
commands: AgentCommands,
},
}
#[derive(Subcommand)]
enum AgentCommands {
Start {},
Setup {
#[arg(long, default_value = "false")]
force: bool,
#[arg(env = "DISCOVERY_HOST", long = "discovery")]
discovery: String,
#[arg(long = "label", short = 'l', value_parser = parse_key_val, action = clap::ArgAction::Append)]
labels: Vec<(String, String)>,
},
}
fn parse_key_val(s: &str) -> Result<(String, String), String> {
let (key, value) = s
.split_once("=")
.ok_or_else(|| format!("invalid key=value: no `=` found in `{s}`"))?;
Ok((key.to_string(), value.to_string()))
}

View File

@@ -0,0 +1,52 @@
// @generated
// This file is @generated by prost-build.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetKeyRequest {
#[prost(string, tag="1")]
pub namespace: ::prost::alloc::string::String,
#[prost(string, optional, tag="2")]
pub id: ::core::option::Option<::prost::alloc::string::String>,
#[prost(string, tag="3")]
pub key: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetKeyResponse {
#[prost(string, optional, tag="1")]
pub value: ::core::option::Option<::prost::alloc::string::String>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SetKeyRequest {
#[prost(string, tag="1")]
pub namespace: ::prost::alloc::string::String,
#[prost(string, optional, tag="2")]
pub id: ::core::option::Option<::prost::alloc::string::String>,
#[prost(string, tag="3")]
pub key: ::prost::alloc::string::String,
#[prost(string, tag="4")]
pub value: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct SetKeyResponse {
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListenEventsRequest {
#[prost(string, tag="1")]
pub namespace: ::prost::alloc::string::String,
#[prost(string, optional, tag="2")]
pub id: ::core::option::Option<::prost::alloc::string::String>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListenEventsResponse {
#[prost(string, tag="1")]
pub id: ::prost::alloc::string::String,
#[prost(string, tag="2")]
pub value: ::prost::alloc::string::String,
}
include!("churn.v1.tonic.rs");
// @@protoc_insertion_point(module)

View File

@@ -0,0 +1,435 @@
// @generated
/// Generated client implementations.
pub mod churn_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
#[derive(Debug, Clone)]
pub struct ChurnClient<T> {
inner: tonic::client::Grpc<T>,
}
impl ChurnClient<tonic::transport::Channel> {
/// Attempt to create a new client by connecting to a given endpoint.
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> ChurnClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::Error: Into<StdError>,
T::ResponseBody: Body<Data = Bytes> + Send + 'static,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_origin(inner: T, origin: Uri) -> Self {
let inner = tonic::client::Grpc::with_origin(inner, origin);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> ChurnClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T::ResponseBody: Default,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
>>::Error: Into<StdError> + Send + Sync,
{
ChurnClient::new(InterceptedService::new(inner, interceptor))
}
/// Compress requests with the given encoding.
///
/// This requires the server to support it otherwise it might respond with an
/// error.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.send_compressed(encoding);
self
}
/// Enable decompressing responses.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.accept_compressed(encoding);
self
}
/// Limits the maximum size of a decoded message.
///
/// Default: `4MB`
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_decoding_message_size(limit);
self
}
/// Limits the maximum size of an encoded message.
///
/// Default: `usize::MAX`
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
pub async fn get_key(
&mut self,
request: impl tonic::IntoRequest<super::GetKeyRequest>,
) -> std::result::Result<tonic::Response<super::GetKeyResponse>, tonic::Status> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/churn.v1.Churn/GetKey");
let mut req = request.into_request();
req.extensions_mut().insert(GrpcMethod::new("churn.v1.Churn", "GetKey"));
self.inner.unary(req, path, codec).await
}
pub async fn set_key(
&mut self,
request: impl tonic::IntoRequest<super::SetKeyRequest>,
) -> std::result::Result<tonic::Response<super::SetKeyResponse>, tonic::Status> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/churn.v1.Churn/SetKey");
let mut req = request.into_request();
req.extensions_mut().insert(GrpcMethod::new("churn.v1.Churn", "SetKey"));
self.inner.unary(req, path, codec).await
}
pub async fn listen_events(
&mut self,
request: impl tonic::IntoRequest<super::ListenEventsRequest>,
) -> std::result::Result<
tonic::Response<tonic::codec::Streaming<super::ListenEventsResponse>>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/churn.v1.Churn/ListenEvents",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(GrpcMethod::new("churn.v1.Churn", "ListenEvents"));
self.inner.server_streaming(req, path, codec).await
}
}
}
/// Generated server implementations.
pub mod churn_server {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
/// Generated trait containing gRPC methods that should be implemented for use with ChurnServer.
#[async_trait]
pub trait Churn: Send + Sync + 'static {
async fn get_key(
&self,
request: tonic::Request<super::GetKeyRequest>,
) -> std::result::Result<tonic::Response<super::GetKeyResponse>, tonic::Status>;
async fn set_key(
&self,
request: tonic::Request<super::SetKeyRequest>,
) -> std::result::Result<tonic::Response<super::SetKeyResponse>, tonic::Status>;
/// Server streaming response type for the ListenEvents method.
type ListenEventsStream: tonic::codegen::tokio_stream::Stream<
Item = std::result::Result<super::ListenEventsResponse, tonic::Status>,
>
+ Send
+ 'static;
async fn listen_events(
&self,
request: tonic::Request<super::ListenEventsRequest>,
) -> std::result::Result<
tonic::Response<Self::ListenEventsStream>,
tonic::Status,
>;
}
#[derive(Debug)]
pub struct ChurnServer<T: Churn> {
inner: _Inner<T>,
accept_compression_encodings: EnabledCompressionEncodings,
send_compression_encodings: EnabledCompressionEncodings,
max_decoding_message_size: Option<usize>,
max_encoding_message_size: Option<usize>,
}
struct _Inner<T>(Arc<T>);
impl<T: Churn> ChurnServer<T> {
pub fn new(inner: T) -> Self {
Self::from_arc(Arc::new(inner))
}
pub fn from_arc(inner: Arc<T>) -> Self {
let inner = _Inner(inner);
Self {
inner,
accept_compression_encodings: Default::default(),
send_compression_encodings: Default::default(),
max_decoding_message_size: None,
max_encoding_message_size: None,
}
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> InterceptedService<Self, F>
where
F: tonic::service::Interceptor,
{
InterceptedService::new(Self::new(inner), interceptor)
}
/// Enable decompressing requests with the given encoding.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.accept_compression_encodings.enable(encoding);
self
}
/// Compress responses with the given encoding, if the client supports it.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.send_compression_encodings.enable(encoding);
self
}
/// Limits the maximum size of a decoded message.
///
/// Default: `4MB`
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.max_decoding_message_size = Some(limit);
self
}
/// Limits the maximum size of an encoded message.
///
/// Default: `usize::MAX`
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.max_encoding_message_size = Some(limit);
self
}
}
impl<T, B> tonic::codegen::Service<http::Request<B>> for ChurnServer<T>
where
T: Churn,
B: Body + Send + 'static,
B::Error: Into<StdError> + Send + 'static,
{
type Response = http::Response<tonic::body::BoxBody>;
type Error = std::convert::Infallible;
type Future = BoxFuture<Self::Response, Self::Error>;
fn poll_ready(
&mut self,
_cx: &mut Context<'_>,
) -> Poll<std::result::Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: http::Request<B>) -> Self::Future {
let inner = self.inner.clone();
match req.uri().path() {
"/churn.v1.Churn/GetKey" => {
#[allow(non_camel_case_types)]
struct GetKeySvc<T: Churn>(pub Arc<T>);
impl<T: Churn> tonic::server::UnaryService<super::GetKeyRequest>
for GetKeySvc<T> {
type Response = super::GetKeyResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::GetKeyRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as Churn>::get_key(&inner, request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = GetKeySvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/churn.v1.Churn/SetKey" => {
#[allow(non_camel_case_types)]
struct SetKeySvc<T: Churn>(pub Arc<T>);
impl<T: Churn> tonic::server::UnaryService<super::SetKeyRequest>
for SetKeySvc<T> {
type Response = super::SetKeyResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::SetKeyRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as Churn>::set_key(&inner, request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = SetKeySvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/churn.v1.Churn/ListenEvents" => {
#[allow(non_camel_case_types)]
struct ListenEventsSvc<T: Churn>(pub Arc<T>);
impl<
T: Churn,
> tonic::server::ServerStreamingService<super::ListenEventsRequest>
for ListenEventsSvc<T> {
type Response = super::ListenEventsResponse;
type ResponseStream = T::ListenEventsStream;
type Future = BoxFuture<
tonic::Response<Self::ResponseStream>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::ListenEventsRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as Churn>::listen_events(&inner, request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = ListenEventsSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.server_streaming(method, req).await;
Ok(res)
};
Box::pin(fut)
}
_ => {
Box::pin(async move {
Ok(
http::Response::builder()
.status(200)
.header("grpc-status", "12")
.header("content-type", "application/grpc")
.body(empty_body())
.unwrap(),
)
})
}
}
}
}
impl<T: Churn> Clone for ChurnServer<T> {
fn clone(&self) -> Self {
let inner = self.inner.clone();
Self {
inner,
accept_compression_encodings: self.accept_compression_encodings,
send_compression_encodings: self.send_compression_encodings,
max_decoding_message_size: self.max_decoding_message_size,
max_encoding_message_size: self.max_encoding_message_size,
}
}
}
impl<T: Churn> Clone for _Inner<T> {
fn clone(&self) -> Self {
Self(Arc::clone(&self.0))
}
}
impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self.0)
}
}
impl<T: Churn> tonic::server::NamedService for ChurnServer<T> {
const NAME: &'static str = "churn.v1.Churn";
}
}
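A hedged usage sketch of the generated client above (not part of the generated file). It assumes the generated code is included into a single module, as main.rs does below, and the endpoint, namespace, and key values are placeholders only:

use tonic::Request;

// Hypothetical helper: one unary call and one server-streaming call.
async fn example(endpoint: String) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = churn_client::ChurnClient::connect(endpoint).await?;

    // Unary call: look up a value by namespace/key.
    let resp = client
        .get_key(Request::new(GetKeyRequest {
            namespace: "agents".to_string(),
            id: None,
            key: "example-key".to_string(),
        }))
        .await?;
    println!("value: {:?}", resp.into_inner().value);

    // Server-streaming call: consume events until the server closes the stream.
    let mut stream = client
        .listen_events(Request::new(ListenEventsRequest {
            namespace: "agents".to_string(),
            id: Some("example-agent".to_string()),
        }))
        .await?
        .into_inner();
    while let Some(event) = stream.message().await? {
        println!("event {}: {}", event.id, event.value);
    }
    Ok(())
}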

View File

@@ -1,154 +1,19 @@
use churn_domain::{AgentEnrollReq, LeaseResp, ServerMonitorResp};
use clap::{Parser, Subcommand};
#[derive(Parser)]
#[command(author, version, about, long_about = None, subcommand_required = true)]
struct Command {
#[command(subcommand)]
command: Option<Commands>,
mod api;
mod cli;
mod state;
mod grpc {
include!("grpc/churn.v1.rs");
}
#[derive(Subcommand)]
enum Commands {
Auth {
#[arg(env = "CHURN_SERVER", long)]
server: String,
#[arg(env = "CHURN_SERVER_TOKEN", long)]
server_token: String,
},
Bootstrap {
#[arg(env = "CHURN_AGENT", long)]
agent: String,
#[arg(env = "CHURN_AGENT_NAME", long)]
agent_name: String,
#[arg(env = "CHURN_SERVER", long)]
server: String,
#[arg(env = "CHURN_SERVER_TOKEN", long)]
server_token: String,
},
Health {
#[arg(env = "CHURN_SERVER", long)]
server: String,
#[arg(env = "CHURN_AGENT", long)]
agent: String,
},
Monitor {
#[arg(env = "CHURN_SERVER", long)]
server: String,
#[arg(env = "CHURN_SERVER_TOKEN", long)]
server_token: String,
},
}
mod agent;
mod server;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
dotenv::dotenv().ok();
tracing_subscriber::fmt::init();
let cli = Command::parse();
handle_command(cli).await?;
cli::execute().await?;
Ok(())
}
async fn handle_command(cmd: Command) -> anyhow::Result<()> {
if let Some(cmd) = cmd.command {
match cmd {
Commands::Bootstrap {
agent,
agent_name,
server,
server_token: _,
} => {
tracing::info!("enrolling agent: {} for server: {}", agent, server);
let client = reqwest::Client::new();
let req = client.post(format!("{server}/agent/lease")).build()?;
let lease_resp = client.execute(req).await?;
let lease = lease_resp.json::<LeaseResp>().await?;
let req = client
.post(format!("{agent}/enroll"))
.json(&AgentEnrollReq {
lease: lease.token,
server,
agent_name,
})
.build()?;
let lease_resp = client.execute(req).await?;
if !lease_resp.status().is_success() {
if let Ok(text) = lease_resp.text().await {
tracing::warn!(
"could not enroll because agent server encoutered error: {}",
text
);
anyhow::bail!("encountered error: {}", text);
}
anyhow::bail!("encountered error");
}
Ok(())
}
Commands::Health { server, agent } => {
tracing::info!("connecting to server: {}", server);
reqwest::get(format!("{server}/ping")).await?;
tracing::info!("connected to server successfully");
tracing::info!("connecting to agent: {}", agent);
reqwest::get(format!("{agent}/ping")).await?;
tracing::info!("connected to agent successfully");
Ok(())
}
Commands::Auth {
server: _,
server_token: _,
} => todo!(),
Commands::Monitor {
server,
server_token: _,
} => {
tracing::info!("monitoring server: {}", server);
let mut cursor: Option<uuid::Uuid> = None;
loop {
tracing::debug!("reading logs from server: {}", server);
let resp = reqwest::get(format!(
"{server}/logs{}",
match &cursor {
None => "".to_string(),
Some(cursor) => format!("?cursor={}", cursor),
}
))
.await?;
if !resp.status().is_success() {
if let Ok(text) = resp.text().await {
anyhow::bail!("encountered error: {}", text);
}
anyhow::bail!("encountered error");
}
match resp.json::<ServerMonitorResp>().await {
Ok(resp) => {
for line in resp.logs {
tracing::info!("event: {}", line);
}
cursor = resp.cursor;
}
Err(e) => {
tracing::warn!("failed to call server (error={})", e);
}
}
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
}
}
} else {
panic!("no command supplied")
}
}

View File

@@ -0,0 +1,23 @@
use std::net::SocketAddr;
pub mod config;
mod grpc_server;
use crate::{api, state::SharedState};
pub async fn execute(
host: impl Into<SocketAddr>,
grpc_host: impl Into<SocketAddr>,
config: config::ServerConfig,
) -> anyhow::Result<()> {
let state = SharedState::new(config).await?;
notmad::Mad::builder()
.add(api::Api::new(&state, host))
.add(grpc_server::GrpcServer::new(grpc_host.into()))
.run()
.await?;
Ok(())
}
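A hedged sketch of a call site for execute, mirroring the serve subcommand and the host values in the cuddle.yaml further down; the literal addresses and hosts are placeholders:

// Hypothetical call site; not part of the diff.
async fn example() -> anyhow::Result<()> {
    let config = config::ServerConfig {
        external_host: "https://churn.example.internal".to_string(),
        process_host: "https://grpc.churn.example.internal".to_string(),
    };
    execute(
        "0.0.0.0:3000".parse::<SocketAddr>()?,
        "0.0.0.0:4001".parse::<SocketAddr>()?,
        config,
    )
    .await
}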

View File

@@ -0,0 +1,7 @@
#[derive(clap::Args)]
pub struct ServerConfig {
#[arg(long = "external-host", env = "EXTERNAL_HOST")]
pub external_host: String,
#[arg(long = "process-host", env = "PROCESS_HOST")]
pub process_host: String,
}

View File

@@ -0,0 +1,102 @@
use std::{collections::BTreeMap, net::SocketAddr, pin::Pin};
use anyhow::Context;
use futures::Stream;
use notmad::{Component, MadError};
use tonic::transport::Server;
use crate::{agent::models::Commands, grpc::*};
#[derive(Clone)]
pub struct GrpcServer {
grpc_host: SocketAddr,
}
impl GrpcServer {
pub fn new(grpc_host: SocketAddr) -> Self {
Self { grpc_host }
}
}
#[async_trait::async_trait]
impl Component for GrpcServer {
async fn run(
&self,
cancellation_token: tokio_util::sync::CancellationToken,
) -> Result<(), notmad::MadError> {
let task = Server::builder()
.add_service(crate::grpc::churn_server::ChurnServer::new(self.clone()))
.serve(self.grpc_host);
tokio::select! {
_ = cancellation_token.cancelled() => {},
res = task => {
res.context("failed to run grpc server").map_err(MadError::Inner)?;
}
}
Ok(())
}
}
#[async_trait::async_trait]
impl crate::grpc::churn_server::Churn for GrpcServer {
async fn get_key(
&self,
request: tonic::Request<GetKeyRequest>,
) -> std::result::Result<tonic::Response<GetKeyResponse>, tonic::Status> {
todo!()
}
async fn set_key(
&self,
request: tonic::Request<SetKeyRequest>,
) -> std::result::Result<tonic::Response<SetKeyResponse>, tonic::Status> {
todo!()
}
/// Server streaming response type for the ListenEvents method.
type ListenEventsStream =
Pin<Box<dyn Stream<Item = Result<ListenEventsResponse, tonic::Status>> + Send>>;
async fn listen_events(
&self,
request: tonic::Request<ListenEventsRequest>,
) -> std::result::Result<tonic::Response<Self::ListenEventsStream>, tonic::Status> {
let (tx, rx) = tokio::sync::mpsc::channel(128);
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(60 * 10));
loop {
interval.tick().await;
let Ok(schedule_task) = serde_json::to_string(&Commands::ScheduleTask {
task: "refresh".into(),
properties: BTreeMap::default(),
}) else {
tracing::warn!("failed to serialize event");
continue;
};
if let Err(e) = tx
.send(Ok(ListenEventsResponse {
id: uuid::Uuid::new_v4().to_string(),
value: schedule_task,
}))
.await
{
tracing::warn!("failed to send response: {}", e);
break;
}
}
});
let stream = futures::stream::unfold(rx, |mut msg| async move {
let next = msg.recv().await?;
Some((next, msg))
});
Ok(tonic::Response::new(Box::pin(stream)))
}
}
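A side note on the stream construction in listen_events above: assuming the tokio-stream crate were available as a dependency (it is not shown in this diff), the futures::stream::unfold over the mpsc receiver could be replaced by a ready-made wrapper with the same item type:

// Equivalent construction of the response stream (assumption: tokio-stream
// is a dependency). ReceiverStream yields the Result items sent on `tx`.
let stream = tokio_stream::wrappers::ReceiverStream::new(rx);
Ok(tonic::Response::new(Box::pin(stream)))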

crates/churn/src/state.rs Normal file
View File

@@ -0,0 +1,36 @@
use std::{ops::Deref, sync::Arc};
use crate::server::config::ServerConfig;
#[derive(Clone)]
pub struct SharedState(Arc<State>);
impl SharedState {
pub async fn new(config: ServerConfig) -> anyhow::Result<Self> {
Ok(Self(Arc::new(State::new(config).await?)))
}
}
impl From<&SharedState> for SharedState {
fn from(value: &SharedState) -> Self {
value.clone()
}
}
impl Deref for SharedState {
type Target = Arc<State>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct State {
pub config: ServerConfig,
}
impl State {
pub async fn new(config: ServerConfig) -> anyhow::Result<Self> {
Ok(Self { config })
}
}
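A brief illustrative sketch (not part of the diff) of how SharedState is shared across components; cloning is cheap because of the Arc, and the Deref impl above exposes the inner State's fields:

// Hypothetical usage of SharedState.
async fn example(config: ServerConfig) -> anyhow::Result<()> {
    let state = SharedState::new(config).await?;
    let for_api = state.clone();
    println!("external host: {}", for_api.config.external_host);
    Ok(())
}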

View File

@@ -0,0 +1,28 @@
package component:churn-tasks@0.1.0;
interface process {
resource process {
constructor();
run-process: func(inputs: list<string>) -> string;
get-variable: func(key: string) -> string;
}
}
interface http {
resource client {
constructor();
get: func(url: string) -> list<u8>;
}
}
interface task {
id: func() -> string;
should-run: func() -> bool;
execute: func();
}
world churn {
export task;
import process;
import http;
}

View File

@@ -1,12 +0,0 @@
[package]
name = "churning"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
dagger-sdk = "0.9.8"
dagger-rust = {git = "https://git.front.kjuulh.io/kjuulh/dagger-components.git", ref = "main"}
tokio.workspace = true
eyre = "*"

View File

@@ -1,136 +0,0 @@
use std::{path::PathBuf, sync::Arc};
use dagger_rust::build::{RustVersion, SlimImage};
use dagger_sdk::Query;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
#[tokio::main]
async fn main() -> eyre::Result<()> {
// let mut config = Config::default();
// config.logger = None;
println!("Building churning...");
//let client = dagger_sdk::connect_opts(config).await?;
let client = dagger_sdk::connect().await?;
let cli = build_container(client.clone(), "churn").await?;
let server = build_container(client.clone(), "churn-server").await?;
let server = server
.with_env_variable("CHURN_DATABASE", "sled")
.with_env_variable("CHURN_SLED_PATH", "/mnt/sled")
.with_mounted_cache("/mnt/sled", client.cache_volume("sled"))
.with_exec(vec!["churn-server", "serve", "--host", "0.0.0.0:3000"])
.with_exposed_port(3000);
let server_service = server.as_service();
let agent = build_container(client.clone(), "churn-agent").await?;
let agent = agent
.with_service_binding("churn-server", server_service.clone())
.with_exec(vec!["churn-agent", "daemon", "--host", "0.0.0.0:3000"])
.with_exposed_port(3000);
let agent_service = agent.as_service();
let churning = cli
.with_service_binding("churn-agent", agent_service)
.with_service_binding("churn-server", server_service)
.with_env_variable("CHURN_SERVER", "http://churn-server:3000")
.with_env_variable("CHURN_SERVER_TOKEN", "something")
.with_env_variable("CHURN_AGENT", "http://churn-agent:3000")
.with_env_variable("CHURN_AGENT_NAME", "churn-agent")
.with_exec(vec![
"churn",
"health",
"--server",
"http://churn-server:3000",
"--agent",
"http://churn-agent:3000",
]);
let stdout = churning.stdout().await?;
println!("{stdout}");
let stderr = churning.stderr().await?;
println!("{stderr}");
churning.sync().await?;
println!("Finished building churning...");
repl(churning).await?; //.with_entrypoint(vec!["churn"])).await?;
Ok(())
}
async fn repl(container: dagger_sdk::Container) -> eyre::Result<()> {
loop {
let stdin = tokio::io::stdin();
let mut stdout = tokio::io::stdout();
stdout.write_all(b"> ").await?;
stdout.flush().await?;
let mut input = String::new();
let mut stdin = tokio::io::BufReader::new(stdin);
stdin.read_line(&mut input).await?;
let input = input.trim();
if input == "q" {
break;
}
let container = container.with_exec(input.split(' ').collect());
match container.stdout().await {
Ok(stdout) => {
println!("{stdout}");
}
Err(e) => {
eprintln!("{}", e);
}
}
match container.sync().await {
Ok(_) => {}
Err(_e) => {
//eprintln!("encountered error: {}", e);
}
}
}
Ok(())
}
async fn build_container(client: Query, bin_name: &str) -> eyre::Result<dagger_sdk::Container> {
let crates = &["crates/*", "ci"];
let debian_deps = &[
"libssl-dev",
"pkg-config",
"openssl",
"git",
"jq",
"capnproto",
];
let debian_image = "debian:bullseye".to_string();
let images = dagger_rust::build::RustBuild::new(client.clone())
.build_release(
None::<PathBuf>,
RustVersion::Nightly,
crates,
debian_deps,
vec![SlimImage::Debian {
image: debian_image,
deps: debian_deps
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>(),
architecture: dagger_rust::build::BuildArchitecture::Amd64,
}],
bin_name,
)
.await?;
Ok(images.first().take().unwrap().clone())
}

View File

@@ -1,24 +1,41 @@
# yaml-language-server: $schema=https://git.front.kjuulh.io/kjuulh/cuddle/raw/branch/main/schemas/base.json
base: "git@git.front.kjuulh.io:kjuulh/cuddle-rust-cli-plan.git"
base: "git@git.front.kjuulh.io:kjuulh/cuddle-rust-service-plan.git"
vars:
service: "churn"
registry: kasperhermansen
database:
crdb: "false"
ingress:
- external: "true"
- internal: "true"
- internal_grpc: "true"
please:
project:
owner: kjuulh
repository: churn
repository: churn-v2
branch: main
settings:
api_url: https://git.front.kjuulh.io
actions:
rust:
components:
packages:
debian:
cuddle/clusters:
dev:
- capnproto
release:
- capnproto
env:
service.host: "0.0.0.0:3000"
service.grpc.host: "0.0.0.0:4001"
process.host: "https://grpc.churn.dev.internal.kjuulh.app"
external.host: "https://churn.internal.dev.kjuulh.app"
rust.log: "h2=warn,debug"
prod:
env:
service.host: "0.0.0.0:3000"
service.grpc.host: "0.0.0.0:4001"
process.host: "https://grpc.churn.prod.internal.kjuulh.app"
external.host: "https://churn.internal.prod.kjuulh.app"
rust.log: "h2=warn,debug"

install.sh Normal file
View File

@@ -0,0 +1,134 @@
#!/usr/bin/env bash
set -e
# Configuration
APP_NAME="churn"
APP_VERSION="latest" # or specify a version
S3_BUCKET="rust-artifacts"
BINARY_NAME="churn"
SERVICE_NAME="${APP_NAME}.service"
SERVICE_UPDATE_NAME="${APP_NAME}-update.service"
TIMER_UPDATE_NAME="${APP_NAME}-update.timer"
INSTALL_DIR="/usr/local/bin"
CONFIG_DIR="/etc/${APP_NAME}"
CHURN_DISCOVERY="https://churn.prod.kjuulh.app"
LOG="/var/log/churn-install.log"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
exec > >(tee -i ${LOG})
exec 2>&1
echo "Starting churn install $(date)"
# Check if running as root
if [ "$EUID" -ne 0 ]; then
echo -e "${RED}Please run as root${NC}"
exit 1
fi
# Create necessary directories
echo "Creating directories..."
mkdir -p "${INSTALL_DIR}"
mkdir -p "${CONFIG_DIR}"
if systemctl is-active --quiet churn.service; then
echo "Stopping existing churn service..."
systemctl stop churn.service
fi
# Download binary from S3
echo "Downloading binary..."
curl -L -s "https://api-minio.front.kjuulh.io/${S3_BUCKET}/releases/${APP_NAME}/${APP_VERSION}/${BINARY_NAME}" -o "${INSTALL_DIR}/${BINARY_NAME}"
# Make binary executable
chmod +x "${INSTALL_DIR}/${BINARY_NAME}"
echo "Starting churn agent setup..."
set +e
res=$(churn agent setup --discovery "${CHURN_DISCOVERY}" 2>&1)
exit_code=$?
set -e
if [[ "$exit_code" -ne 0 ]]; then
if [[ "$res" != *"config file already exists"* ]] && [[ "$res" != *"already exists"* ]]; then
echo "Error detected: $res"
exit 1
else
echo "Ignoring setup error: agent is already set up"
fi
fi
# Create systemd service file
echo "Creating systemd service..."
cat > "/etc/systemd/system/${SERVICE_NAME}" << EOF
[Unit]
Description=${APP_NAME} service
After=network.target
[Service]
Type=simple
User=root
Group=root
ExecStart=${INSTALL_DIR}/${BINARY_NAME} agent start
Restart=always
RestartSec=10
Environment=RUST_LOG=h2=warn,hyper=warn,churn=debug,warn
[Install]
WantedBy=multi-user.target
EOF
echo "Creating churn update service..."
cat > "/etc/systemd/system/${SERVICE_UPDATE_NAME}" <<EOF
[Unit]
Description=Daily Churn Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/bin/bash -c 'curl -s https://git.front.kjuulh.io/kjuulh/churn-v2/raw/branch/main/install.sh | bash'
User=root
[Install]
WantedBy=multi-user.target
EOF
cat > "/etc/systemd/system/${TIMER_UPDATE_NAME}" <<EOF
[Unit]
Description=Run Churn Update Daily
[Timer]
OnCalendar=daily
Persistent=true
[Install]
WantedBy=timers.target
EOF
# Reload systemd and enable service
echo "Configuring systemd service..."
systemctl daemon-reload
systemctl enable "${SERVICE_NAME}"
systemctl start "${SERVICE_NAME}"
systemctl enable "${SERVICE_UPDATE_NAME}"
systemctl enable "${TIMER_UPDATE_NAME}"
systemctl start "${TIMER_UPDATE_NAME}"
# Check service status
if systemctl is-active --quiet "${SERVICE_NAME}"; then
echo -e "${GREEN}Installation successful! ${APP_NAME} is running.${NC}"
echo "You can check the status with: systemctl status ${SERVICE_NAME}"
else
echo -e "${RED}Installation completed but service failed to start. Check logs with: journalctl -u ${SERVICE_NAME}${NC}"
exit 1
fi

upload.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/usr/bin/env zsh
set -e
cargo build --release --target x86_64-unknown-linux-musl
aws s3 cp target/x86_64-unknown-linux-musl/release/churn s3://rust-artifacts/releases/churn/latest/churn --endpoint-url https://api-minio.front.kjuulh.io