4 Commits

SHA1        Message                  CI (continuous-integration/drone/push)  Date
4bb6b0228a  feat: add blog contents  Build is failing                         2025-07-31 11:01:22 +02:00
da0771ccb9  feat: update ci          Build is failing                         2025-07-26 22:43:05 +02:00
c17986182b  feat: remove dind        Build is failing                         2025-06-02 09:09:22 +02:00
cf60c5bcdd  feat: remove priviledge  Build is failing                         2025-06-02 09:08:54 +02:00
82 changed files with 19789 additions and 950 deletions


@@ -1,68 +1,150 @@
type: docker
kind: pipeline
name: "shuttle-drone-templates"
name: default
type: docker
steps:
- name: "wait for dind"
image: docker:dind
- name: build ci
image: rustlang/rust:nightly
volumes:
- name: dockersock
path: /var/run
- name: ci
path: /mnt/ci
environment:
DOCKER_BUILDKIT: 1
DOCKER_PASSWORD:
from_secret: docker_password
DOCKER_USERNAME:
from_secret: docker_username
PKG_CONFIG_SYSROOT_DIR: "/"
CI_PREFIX: "/mnt/ci"
commands:
- ls /var/run
- set -eu
- sleep 10
- echo "$${DOCKER_PASSWORD}" | docker login --password-stdin --username="$${DOCKER_USERNAME}"
- name: "build"
image: docker.io/kasperhermansen/shuttle-drone:1690570181415
- set -e
- apt update
- apt install musl-tools pkg-config libssl-dev openssl build-essential musl-dev -y
- rustup target add x86_64-unknown-linux-musl
- cargo build --target=x86_64-unknown-linux-musl -p ci --bin ci
- mv target/x86_64-unknown-linux-musl/debug/ci "$CI_PREFIX/ci"
- name: load_secret
image: debian:buster-slim
volumes:
- name: dockersock
path: /var/run
- name: ssh
path: /root/.ssh/
environment:
DOCKER_BUILDKIT: 1
DOCKER_PASSWORD:
from_secret: docker_password
DOCKER_USERNAME:
from_secret: docker_username
DRONE_TOKEN:
from_secret: drone_token
SSH_KEY:
from_secret: gitea_id_ed25519
GIT_USERNAME:
from_secret: GIT_USERNAME
GIT_PASSWORD:
from_secret: GIT_PASSWORD
commands:
- set -eu
- apt update && apt install -y libssl-dev pkg-config openssl # musl-tools
- mkdir -p $HOME/.ssh/
- echo "$SSH_KEY" | base64 -d > $HOME/.ssh/id_ed25519
- chmod -R 600 ~/.ssh
- |
cat >$HOME/.ssh/config <<EOL
Host git.front.kjuulh.io
IdentityFile $HOME/.ssh/id_ed25519
IdentitiesOnly yes
UserKnownHostsFile=/dev/null
StrictHostKeyChecking no
EOL
- chmod 700 ~/.ssh/config
- name: build pr
image: kasperhermansen/cuddle:latest
pull: always
volumes:
- name: ssh
path: /root/.ssh/
- name: ci
path: /mnt/ci
commands:
- eval `ssh-agent`
- mkdir -p ~/.ssh
- echo "$SSH_KEY" | base64 -d > ~/.ssh/id_ed25519
- chmod -R 600 ~/.ssh
- ssh-add
- echo "$DOCKER_PASSWORD" | docker login --password-stdin --username="$DOCKER_USERNAME" docker.io
#- rustup target add x86_64-unknown-linux-musl
- cd ci || exit 1
- cargo build
- cd - || exit 1
# - sha1sum ci/target/x86_64-unknown-linux-musl/debug/ci
# - ldd ci/target/x86_64-unknown-linux-musl/debug/ci
# - "./ci/target/x86_64-unknown-linux-musl/debug/ci"
- sha1sum ci/target/debug/ci
- ldd ci/target/debug/ci
- "./ci/target/debug/ci"
services:
- name: docker
image: docker:dind
privileged: true
- apk add git
- $CI_PREFIX pr
environment:
DAGGER_CLOUD_TOKEN:
from_secret: dagger_cloud_token
DRONE_HOST: "https://ci.i.kjuulh.io"
DRONE_USER: "kjuulh"
DRONE_TOKEN:
from_secret: drone_token
DOCKER_BUILDKIT: 1
DOCKER_PASSWORD:
from_secret: docker_password
DOCKER_USERNAME:
from_secret: docker_username
CUDDLE_SECRETS_PROVIDER: 1password
CUDDLE_ONE_PASSWORD_DOT_ENV: ".env.ci"
CUDDLE_SSH_AGENT: "true"
CI_PREFIX: "/mnt/ci/ci"
CUDDLE_PLEASE_TOKEN:
from_secret: cuddle_please_token
OP_SERVICE_ACCOUNT_TOKEN:
from_secret: op_service_account_token
when:
event:
- pull_request
exclude:
- main
- master
depends_on:
- "load_secret"
- "build ci"
- name: build main
image: kasperhermansen/cuddle:latest
pull: always
volumes:
- name: dockersock
path: /var/run
- name: ssh
path: /root/.ssh/
- name: ci
path: /mnt/ci
commands:
- eval `ssh-agent`
- ssh-add
- echo "$DOCKER_PASSWORD" | docker login --password-stdin --username="$DOCKER_USERNAME" docker.io
- apk add git
- cuddle --version
- $CI_PREFIX main
environment:
DAGGER_CLOUD_TOKEN:
from_secret: dagger_cloud_token
DRONE_HOST: "https://ci.i.kjuulh.io"
DRONE_USER: "kjuulh"
DRONE_TOKEN:
from_secret: drone_token
REGISTRY_CACHE_USERNAME:
from_secret: registry_cache_username
REGISTRY_CACHE_PASSWORD:
from_secret: registry_cache_password
REGISTRY_CACHE_TOKEN:
from_secret: registry_cache_token
REGISTRY_CACHE_url:
from_secret: registry_cache_url
DOCKER_BUILDKIT: 1
DOCKER_PASSWORD:
from_secret: docker_password
DOCKER_USERNAME:
from_secret: docker_username
CUDDLE_SECRETS_PROVIDER: 1password
CUDDLE_ONE_PASSWORD_DOT_ENV: ".env.ci"
CUDDLE_SSH_AGENT: "true"
GIT_PASSWORD:
from_secret: git_password
CI_PREFIX: "/mnt/ci/ci"
DOCKER_HOST: "tcp://192.168.1.155:2376"
CUDDLE_PLEASE_TOKEN:
from_secret: cuddle_please_token
OP_SERVICE_ACCOUNT_TOKEN:
from_secret: op_service_account_token
when:
event:
- push
branch:
- main
- master
exclude:
- pull_request
depends_on:
- "load_secret"
- "build ci"
volumes:
- name: dockersock
- name: ssh
temp: {}
- name: ci
temp: {}

ci/Cargo.lock (generated): 1355 lines changed; diff suppressed because it is too large.


@@ -8,8 +8,13 @@ edition = "2021"
[dependencies]
chrono = "0.4.26"
color-eyre = "0.6.2"
dagger-sdk = "0.16.0"
dagger-sdk = "0.18.3"
eyre = "0.6.8"
tokio = { version = "1.31.0", features = ["full"] }
tokio-scoped = "0.2.0"
dotenv = "0.15.0"
dotenvy = "0.15.0"
dagger-components = { git = "https://git.kjuulh.io/kjuulh/dagger-components", branch = "main" }
dagger-rust = { git = "https://git.kjuulh.io/kjuulh/dagger-components", branch = "main" }
cuddle-ci = { git = "https://git.kjuulh.io/kjuulh/dagger-components", branch = "main" }
async-trait = "0.1.88"


@@ -1,13 +1,62 @@
use std::sync::Arc;
use cuddle_ci::{Context, MainAction, PullRequestAction};
use dagger_sdk::{
Container, HostDirectoryOptsBuilder, PortForward, QueryContainerOptsBuilder,
ServiceUpOptsBuilder,
};
use dagger_sdk::{HostDirectoryOptsBuilder, QueryContainerOptsBuilder};
use tokio::sync::Mutex;
const UPDATE_DEPLOYMENT_IMAGE: &str = "docker.io/kasperhermansen/update-deployment:1690401410";
#[tokio::main]
async fn main() -> eyre::Result<()> {
let _ = dotenv::dotenv();
let client = dagger_sdk::connect().await?;
const ZOLA_VERSION: &str = "0.17.2-1";
const DEBIAN_EDITION: &str = "bullseye";
const DEBIAN_PLATFORM: &str = "amd64";
#[derive(Clone)]
pub struct BlogComponent {
client: dagger_sdk::Query,
}
impl BlogComponent {
pub async fn run(&self) -> eyre::Result<()> {
let (_, container) = build(self.client.clone()).await?;
container
.as_service()
.up_opts(
ServiceUpOptsBuilder::default()
.ports(vec![PortForward {
backend: 8000,
frontend: 8000,
protocol: dagger_sdk::NetworkProtocol::Tcp,
}])
.build()?,
)
.await?;
Ok(())
}
}
#[async_trait::async_trait]
impl PullRequestAction for BlogComponent {
async fn execute_pull_request(&self, _ctx: &mut Context) -> eyre::Result<()> {
let (tag, container) = build(self.client.clone()).await?;
deploy(self.client.clone(), tag, container).await?;
Ok(())
}
}
#[async_trait::async_trait]
impl MainAction for BlogComponent {
async fn execute_main(&self, _ctx: &mut Context) -> eyre::Result<()> {
let (tag, container) = build(self.client.clone()).await?;
deploy(self.client.clone(), tag, container).await?;
Ok(())
}
}
async fn build(client: dagger_sdk::Query) -> eyre::Result<(i64, Container)> {
let src = client.host().directory_opts(
".",
HostDirectoryOptsBuilder::default()
@@ -15,6 +64,7 @@ async fn main() -> eyre::Result<()> {
.build()?,
);
let github_zola_download: String = format!("https://github.com/barnumbirr/zola-debian/releases/download/v{ZOLA_VERSION}/zola_{ZOLA_VERSION}_{DEBIAN_PLATFORM}_{DEBIAN_EDITION}.deb");
let node_cache = client.cache_volume("node_cache");
let debian_cache = client.cache_volume("debian_cache");
@@ -28,40 +78,15 @@ async fn main() -> eyre::Result<()> {
.with_exec(vec!["yarn", "compile"])
.file("static/styles/styles.css");
let zola_version = "0.17.2-1";
let debian_edition = "bullseye";
let debian_platform = "amd64";
let tag = chrono::Utc::now().timestamp();
let github_zola_download = format!("https://github.com/barnumbirr/zola-debian/releases/download/v{zola_version}/zola_{zola_version}_{debian_platform}_{debian_edition}.deb");
let pull_articles_docker_image = "docker.io/kasperhermansen/pull-articles:1690413158";
let update_deployments_docker_image = "docker.io/kasperhermansen/update-deployment:1701123940";
let pull_articles = client
.container()
.from(pull_articles_docker_image)
.with_exec(vec!["echo", &format!("{}", tag)])
.with_env_variable("GIT_USERNAME", "kjuulh")
.with_env_variable("GIT_PASSWORD", std::env::var("GIT_PASSWORD").unwrap())
.with_exec(vec![
"pull-articles",
"--repo",
"https://git.front.kjuulh.io/kjuulh/obsidian.git",
"--path",
"areas/blog/posts",
"--out",
"/mnt/posts",
"--static-out=/mnt/static",
])
.directory("/mnt/");
let dist_dir = client
.container_opts(
dagger_sdk::QueryContainerOptsBuilder::default()
.platform(format!("linux/{debian_platform}"))
.platform(format!("linux/{DEBIAN_PLATFORM}"))
.build()?,
)
.from(format!("debian:{debian_edition}"))
.from(format!("debian:{DEBIAN_EDITION}"))
.with_exec(vec!["apt", "update"])
.with_exec(vec!["apt", "install", "wget", "-y"])
.with_workdir("/mnt")
@@ -70,76 +95,33 @@ async fn main() -> eyre::Result<()> {
.with_exec(vec![
"dpkg",
"-i",
format!("zola_{zola_version}_{debian_platform}_{debian_edition}.deb").as_str(),
format!("zola_{ZOLA_VERSION}_{DEBIAN_PLATFORM}_{DEBIAN_EDITION}.deb").as_str(),
])
.with_workdir("/app")
.with_directory(".", src.id().await?)
.with_directory(
"content/posts",
pull_articles.directory("posts").id().await?,
)
.with_directory(
"static/assets",
pull_articles.directory("static").id().await?,
)
.with_file(
"content/posts/_index.md",
src.file("content/posts/_index.md").id().await?,
)
.with_file("static/styles/styles.css", styles_file.id().await?)
.with_exec(vec!["zola", "build"])
.directory("public");
let caddy_file = client.host().directory("deployment").file("Caddyfile");
let variants = vec!["linux/amd64", "linux/arm64"];
let platform_variants = Arc::new(Mutex::new(Vec::new()));
let dep_image = client
.container_opts(QueryContainerOptsBuilder::default().build().unwrap())
.from("caddy")
.with_directory("/usr/share/caddy", dist_dir.id().await.unwrap())
.with_file("/etc/caddy/Caddyfile", caddy_file.id().await.unwrap());
tokio_scoped::scope(|s| {
for platform in variants {
let caddy_file = caddy_file.clone();
let client = client.clone();
let dist_dir = dist_dir.clone();
let platform_variants = platform_variants.clone();
Ok((tag, dep_image))
}
s.spawn(async move {
let dep_image = client
.container_opts(
QueryContainerOptsBuilder::default()
.platform(platform)
.build()
.unwrap(),
)
.from("caddy")
.with_directory("/usr/share/caddy", dist_dir.id().await.unwrap())
.with_file("/etc/caddy/Caddyfile", caddy_file.id().await.unwrap());
let mut platform_variants = platform_variants.lock().await;
platform_variants.push(dep_image.id().await.unwrap())
});
}
});
let variants = platform_variants
.lock()
.await
.iter()
.map(|c| c.clone())
.collect::<Vec<_>>();
let version = client
.container()
.publish_opts(
format!("kasperhermansen/kasperhermansen-blog:{tag}"),
dagger_sdk::ContainerPublishOptsBuilder::default()
.platform_variants(variants)
.build()?,
)
async fn deploy(client: dagger_sdk::Query, tag: i64, container: Container) -> eyre::Result<()> {
container
.publish(format!("kasperhermansen/kasperhermansen-blog:{tag}"))
.await?;
let update_deployment = client
client
.container()
.from(update_deployments_docker_image)
.from(UPDATE_DEPLOYMENT_IMAGE)
.with_env_variable("GIT_USERNAME", "kjuulh")
.with_env_variable("GIT_PASSWORD", std::env::var("GIT_PASSWORD").unwrap())
.with_exec(vec![
@@ -156,3 +138,29 @@ async fn main() -> eyre::Result<()> {
Ok(())
}
#[tokio::main]
async fn main() -> eyre::Result<()> {
let _ = dotenvy::dotenv();
dagger_sdk::connect(|client| async move {
let blog_component = &BlogComponent { client };
let run = std::env::var("RUN").unwrap_or_default();
if !run.is_empty() {
blog_component.run().await?;
return Ok(());
}
cuddle_ci::CuddleCI::default()
.with_main(blog_component)
.execute(std::env::args())
.await?;
Ok(())
})
.await?;
Ok(())
}


@@ -1,6 +1,6 @@
# The URL the site will be built for
base_url = "https://blog.kasperhermansen.com"
generate_feed = true
generate_feeds = true
[markdown]
# Whether to do syntax highlighting


@@ -0,0 +1,264 @@
---
type: "blog-post"
title: "Advancing career"
description: "This blog post reflects on the author's career journey in software engineering, detailing the various roles they have held and the challenges they faced. The author shares their realization that they need to take a more deliberate approach to their career growth and decide on a particular area of expertise. They also discuss their experiences as an inexperienced senior developer, including their decision not to fight for a higher title and salary, and the subsequent career limitations that resulted from this choice. The post concludes with the author's decision to transition into platform engineering and the benefits and challenges of this move. Overall, the post highlights the importance of intentional career planning and the potential career limitations that can result from not advocating for oneself."
draft: false
date: "2023-04-01"
authors:
- "kjuulh"
tags:
- '#blog'
---
# Introduction
My current challenge is figuring out how to advance my career, which directions
to take, and what options to consider. During my studies and at the start of my
career, I wanted to discover what type of work I enjoyed and was suited for.
Although nothing is set in stone and there's no perfect choice, I explored
various areas to find my niche.
I've already tried several things, including software engineering roles such as
mainframe development and cryptography. I've also been a tech lead for feature
teams, focusing on cryptography-related areas such as authentication for complex
systems like OpenID Connect, Webauthn, and custom signature protocols. I've
worked as a senior platform engineer, focused on developer tooling and general
operations tooling, and as a data platform engineer, maintaining and creating
strategies for sustainable data platforms. Additionally, I've contributed to a
variety of open-source projects over the years.
These experiences have given me different perspectives and helped me identify
what I want to pursue on a deeper level. Until now, I've simply taken the
opportunities available to me in various companies, regardless of how good or
bad they were. I consider myself an agreeable person, and I've been fortunate to
have had many opportunities come my way. However, this approach hasn't been
ideal for my career growth, which is why I'm writing this blog post to reflect
on my approach.
Until now, my goal was to gain experience in different areas of product
development and software engineering by learning from experienced peers.
However, I realize that it's time to decide on a career path and focus on a
particular area of expertise. Although I've enjoyed most of the staff-type roles
I've had, I didn't prepare for them, and I wasn't always compensated for them.
Therefore, I need to take a more deliberate approach to advance my career.
Fortunately, I have a willing mentor, supportive peers, and an understanding
manager who I can be transparent with. So, it's an excellent time for me to
tackle this issue.
If you're not interested in my history, feel free to skip to "Deciding on a
career path."
## Immature senior
I have a unique situation, and I consider myself lucky with how things turned
out. Right after completing my bachelor's degree, I got the opportunity to
continue working for the company I had interned with as a student worker. They
moved me to a new team as an Associate software engineer, which was essentially
a Junior software engineer position.
Although I enjoyed working as a mainframe programmer, the outdated and defective
tools made the experience unpleasant. Therefore, when I began working with C#
and Typescript, I was thrilled. However, the new paradigm took some getting used
to since I was accustomed to systems development in C and PL1. Within a few
months, I was asked to join a new project, which I accepted despite having just
moved to a new team.
The project was a greenfield project on a new internal infrastructure platform
built on OpenShift (Red Hat Kubernetes), with a backend in .NET and a frontend
in React. I was assigned to a split team that already had some developers
working on it and a tech lead. Due to various reasons, I found myself taking
over the reins of the React and .NET work. Even though my title was still
Associate developer, I ended up doing the same job as a senior developer.
We hired interns and student workers at this point, and I also mentored them.
However, being a fresh graduate myself, it was challenging to teach someone else
the ropes without having much experience or forming my own opinions.
As time passed, I became increasingly involved in the project, taking on more
leadership responsibilities. I also did some hardcore architecting to get the
project audited and approved, along with a few other skilled team members. The
project was launched and received positive feedback, despite being built by
relatively inexperienced people. It passed various audits, pentests, and
refinement over a year, and we even received compensation for the extra effort.
However, I didn't want a higher title, as I didn't want to be perceived as an
inexperienced senior, even though that's what I was.
This decision turned out to be a mistake because I had to switch jobs before the
project was put in maintenance mode due to some other circumstances. My new job
only recognized me as a software engineer, and I was placed in a position that I
didn't care much for, given my years of experience as an engineer.
This was my first significant internal conflict as an engineer, and it was a
result of my inexperience as a senior developer. I had the expertise and was
used to taking on a certain level of responsibility, but I lacked basic skills
in other areas. Instead of fighting for a senior role, I took a software
engineer role, which was not a good career choice. I should have sought a pay
raise when switching jobs, especially since I have the ability to pick up
various responsibilities quickly. I ended up doing the work of a senior
developer for a standard software engineer salary.
In summary, a senior developer can independently maintain, operate and iterate
on a product while providing guidance and consulting for other developers in the
nearest teams. However, due to my inexperience in certain areas, I ended up
doing the work of a senior developer while being paid as a regular software
engineer.
## Move to platform engineering
I was becoming tired of working on feature teams as it lacked the depth I was
seeking. Handling tickets day in and day out was burning me out, and my
motivation was waning. My top priority was being excited about going to work.
After a great conversation with my current manager, I decided to accept an
internal transfer to the platform engineering team. This move is a step up in my
career, and it feels like platform engineers are more specialized and operate at
a different level than other types of engineers who deal with product
development more closely.
I love the work of creating products for other developers, getting feedback, and
having a close feedback loop. Usually, I come up with ideas, create a tool, use
it myself for a few weeks, work out the bugs, release it, and get feedback.
If you've ever worked on a feature team, you know that it can take months to
years before a product is ready for consumers.
Working with open-source software and building a great product has resulted in
some great synergies. I have also been working on outreach and a whole host of
other things that have helped me grow as an engineer.
Although I still maintain a few products on the side, one of the things holding
me back from working on a team is the bureaucracy in teams and organizations. I
have found that it is hindering my progress as I need approval for everything.
Currently, I am working with my mentor to find a balance.
## Dataplatform fixing
Due to some circumstances, our data platform team was let go, and my manager
asked me if I wanted to work on getting the data platform back on track and work
on it for a few months. I accepted, and together with an SRE, we worked on it
for the past few months. We have just wound down the project, and it was a
great success.
## Deciding on a career path
As such, I have completed what I set out to do - I've explored all the major
software engineering areas that interest me, and now I have to decide on my next
step. While I'm not interested in the management route, there are several
options available to me as an individual contributor.
### Category
When deciding on my path forward, I need to consider what role I want to work in
and how I want to collaborate with a team and products. At a higher level of
seniority, these factors are intertwined and more complicated.
Some recent posts have categorized senior roles into different types such as
Fixer, Right hand man, Tech lead, Architect, and so on. However, these
categories are on a spectrum and there may be other options that better suit me
or my organization.
In particular, I am interested in two roles: Fixer and Tech lead. Although I
currently work as a Fixer, I have also worked as a Tech lead before. To better
understand these roles, a brief description of each would be helpful.
#### Fixer
A fixer is a person who is called upon to solve critical issues within a
particular area of expertise. For example, an expert in server infrastructure
may help teams with problematic projects or specific business goals on a
case-by-case basis.
While the term "fixer" might conjure up the image of a 10x engineer who can
solve any problem, it's important to note that this is not always the case.
Rather, a fixer is simply someone with a great deal of experience and deep
knowledge who can guide a team to solve the problem at hand.
Once the issue is resolved, the fixer will move on to the next project, leaving
the team to continue their work.
#### Tech lead
The tech lead role is typically more static than the fixer role. A tech lead is
responsible for the technical strategy of a single team or area, and has a
variety of mentoring capabilities to guide the team in the right direction and
be of service to the team. This person typically possesses a mix of development
skills, architecture skills, management skills, and more.
The tech lead role, along with the architect role, can be problematic as they
share names with roles often found in a company. However, it's important to
focus on the idea of the role itself rather than any specific title.
As the name suggests, the tech lead is a leader of technology, and all the rules
of leadership can apply here. This can include being an active or passive
leader, a servant leader, a conductor, and so on.
### Speciality
When it comes to choosing a specialization, there are two main factors to
consider: the technology or business domain you want to focus on, and the level
of specificity you're comfortable with.
The more specialized you become, the more the balance will swing towards
compensation, but the fewer job opportunities you might find. On the other hand,
the less specialized you are, the more job options you'll have, but the
compensation might be lower.
It's important to find a balance that works for you. However, there's also an
"uncanny valley" to consider. At a certain level of seniority, if you have a
very niche specialization and high compensation expectations, you might become
unattractive to most companies.
For example, if you have the experience of a CTO and specialize in database core
contributions, it might be challenging to find job opportunities in the general
business market. In this case, you might need to get in early in a database
company or make yourself more attractive to other teams.
## Decision
Currently, I find the fixer role more enjoyable because of its spontaneity and
because it aligns with the work I have done throughout my career. However, to
advance my career, I need to decide whether to specialize for a company or for a
technology, which may or may not be compatible.
For instance, as a fixer for Hashicorp, specializing in Golang and developer
tools would work well. But for other commercial companies, specialization may be
required in specific technologies or in business domains such as payments or
card infrastructure, which could limit my job prospects.
I am interested in a wide spectrum of technologies, but my interests lean
heavily towards pure technological capabilities. This will limit my earning
potential and may make it difficult to find a job, but it is a sacrifice I am
willing to make because I am comfortable with my current job and growth
trajectory. I am willing to build my network and wait for the right opportunity.
Therefore, my aim is to become a fixer specializing in platform engineering,
with expertise in Rust, Golang, Linux, PostgreSQL, cloud native enablement, and
infrastructure. In this role, I can solve platform-level problems often found in
the backbone of tech companies and focus on enhancing the effectiveness of
interacting with the platform, while also being able to solve business-critical
problems that affect the security, productivity, or availability of the
platform.
My pitch for this role is that I am an engineer who can solve common pain-points
associated with the platform and be of service in solving business-critical
problems.
### How to get there
To achieve my goal of becoming a fixer specializing in platform engineering, I
plan to pursue multiple avenues:
- _Work_: I will seek out tasks and projects at my current job that are related
to my goal, and work with my manager to create a plan to get there.
- _Home_: I will dedicate time outside of work to specialize in the different
areas of platform engineering and deepen my knowledge of them.
- _Events_: I will attend industry events and participate in speaking
opportunities or publishing work to expand my network and establish
credibility as an expert in my field.
- _Open Source_: I will contribute to open source projects to gain practical
experience and establish a presence in the community.
While I don't expect to reach my goal quickly, I want to start taking steps in
the right direction. Removing any obstacles at work that hinder my progress is
currently my top priority.


@@ -0,0 +1,133 @@
---
type: "blog-post"
coverImage: "https://images.unsplash.com/photo-1512187849-463fdb898f21?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1932&q=80"
title: "Platform Engineering: The next era of ops"
description: "Platform engineering is a new methodology that takes a user-centered approach and treats developers as stakeholders, ultimately leading to improved software quality and time-to-market. Platform engineering provides an opinionated and curated view of the platform, benefiting feature squads by streamlining development and improving collaboration between teams."
draft: false
authors:
- "kjuulh"
date: "2023-04-05"
updates:
- time: "2023-04-05"
description: "first iteration"
tags:
- '#blog'
---
# Introduction
In recent years, the field of software development has witnessed the emergence
of several new trends and methodologies. DevOps, which combines operations with
software development, was one of the first such trends to gain widespread
popularity. While it brought about significant improvements in the development
process, it ultimately reached a point of stagnation, leaving room for further
innovation.
One of the newer trends that has emerged is platform engineering. This approach
builds upon the foundations laid by DevOps but takes a more product-oriented
mindset to delivering value. Platform engineering involves creating and managing
the technology infrastructure that supports an organization's applications and
services. It combines operations with systems development, with the aim of
improving efficiency, reducing downtime, increasing scalability, and enhancing
security.
The key goal of platform engineering is to bring value to engineers and treat
them as stakeholders. This means taking a user-centered approach to building and
maintaining the technology infrastructure that supports the development process.
By doing so, organizations can increase developer productivity and job
satisfaction, ultimately leading to improved software quality and faster
time-to-market.
## Why platform engineering
Adopting a new methodology requires a reason. Platform engineering may not be
suitable for every company, but it is ideal for medium-sized companies with a
sizable engineering team. It's worth noting that platform engineering isn't
solely for operations; it can be used for any type of platform, not just
infrastructure services.
The primary motivation for implementing platform engineering is to serve a large
number of developers who rely on a relatively uniform tech stack. When starting
from scratch, platform engineering allows teams to collaborate with
stakeholders, such as developers, to define the product that is the platform itself.
However, platform engineering can also be adopted or evolved from existing
DevOps or SRE teams, or it may grow from an enabler squad if their product is
beginning to see significant usage within the company.
Several principles guide platform engineering:
- The platform must be under control and in service of its stakeholders.
- The platform should bring value to the business through its developers.
- If its services are needed beyond the company, the platform must evolve into a
feature squad while maintaining its previous connections. Maintaining a direct
and stable connection to developers is critical to keeping the platform
focused.
## Differences to existing methodologies
Platform engineering operates at a higher level of abstraction than SRE or
DevOps. While SRE oversees infrastructure or direct services for the business,
and DevOps aims to bring together developers and operations, platform
engineering provides an opinionated and curated platform to feature squads. This
can take various forms depending on the company, ranging from CLI tools or
Jenkins plugins to overseeing the entire development journey, from project
creation to day-2 operations.
Platform engineering can enhance feature squads to the extent necessary, without
ever interfering with the actual business domain. A platform engineering team
can collaborate with a feature squad to develop a business feature, as long as
the feature squad retains ownership and the platform engineer is there to gain
experience in using the product they build. However, this should not be the
first option, as it can be a costly exercise.
Overall, platform engineering provides a valuable perspective that can benefit
feature squads. Its focus on providing an opinionated and curated view of the
platform can help streamline development and improve collaboration between
teams. By working together, feature squads and platform engineering can create a
more efficient and effective development process while maintaining clear
ownership of each project.
## Modes of Operation in Platform Engineering
Platform engineering has two primary modes of operation, which in some ways
mirror how a business serves another business (B2B):
- Product discovery
- Product maintenance and evaluation
This is a continuous cycle of discovering, building, maintaining, and evaluating
products to ensure that they meet their intended goals and remain relevant. The
key difference between B2B and platform engineering is that the latter is far
more opinionated and direct, building opinionated tools rather than generalized
products to serve the needs of developers and business services.
It's important that platform engineering teams operate independently rather than
being embedded within business teams, although they should have reasonable
feedback mechanisms in place. This can include joining guilds to participate in
general discussions with business teams, spending time with business teams to
collaborate on development, or providing outreach through talks and training
sessions. This two-way exchange provides an opportunity for platform engineers
to receive feedback on their products while also enabling business teams to
learn how to use them effectively.
By maintaining this feedback loop and focusing on opinionated tools that meet
the needs of developers and business services, platform engineering teams can
ensure that they provide a valuable service to the organization as a whole.
# Conclusion
Platform engineering is a methodology that combines operations with systems
development to create and manage the technology infrastructure that supports an
organization's applications and services. By taking a user-centered approach and
treating developers as stakeholders, platform engineering aims to increase
efficiency, reduce downtime, enhance security, and ultimately improve software
quality and time-to-market. It provides an opinionated and curated view of the
platform, which can benefit feature squads by streamlining development and
improving collaboration between teams.
Platform engineering operates in two primary modes of operation: product
discovery and product maintenance and evaluation, and it's essential that
platform engineering teams operate independently but maintain feedback
mechanisms to receive feedback on their products while also enabling business
teams to learn how to use them effectively.


@@ -0,0 +1,291 @@
---
type: "blog-post"
title: "Supercharging My Workflow with GPT-4: A Love Story"
description: |
  Once a skeptic, the author has embraced GPT-4 (ChatGPT Plus) and experienced a complete transformation of their workflow. GPT-4 has helped them conquer the blank page problem, effortlessly generate scripts, blog posts, and Rust CLI apps, and even adopt a witty writing style. The author is excited about the potential of integrating GPT-4's API into their tool stack and looks forward to a bright future powered by this game-changing technology.
draft: false
date: "2023-04-07"
updates:
- time: "2023-04-07"
description: "first iteration"
tags:
- '#blog'
- '#gpt4'
- '#openai'
- '#workflow'
- '#rust'
authors:
- "kjuulh"
---
# Introduction
This post is about my current workflow with GPT-4 and GPT-3.5. I've written it
to show how I use them, and why I think they are the future.
I was previously a skeptic about GPT and what it would mean for developers,
engineers, and thought workers in general. That changed after I actually tried
it: first GPT-3.5 (ChatGPT), and because it enhanced my workflow so much, I
decided to purchase GPT-4 (ChatGPT Plus).
My primary work is building tools and infrastructure. Kind of a mix between a
fixer and a hacker. That means that I usually switch between a lot of languages
and systems, some of which I don't have deep expertise in.
## Workflow
The next few sections cover a few situations where I've used ChatGPT.
### The blank page problem
I give it a very basic draft or summary of what I want to create.
Example prompts could be:
#### Script
```
User> Please create a script to extract docker image layers. You should download the image, extract it to a tmp folder and further extract the layers into a shared directory lets call it layers
GPT> <code>
User> <further corrections>
```
It doesn't give the right answer immediately, but it gets close enough that I
can either nudge it in the right direction or fix the minor mistakes myself,
which is what I did in this case because it was so close.
This was the final result:
```bash
#!/bin/bash
set -euo pipefail
IMAGE_NAME="" # initialize so the -z check below works under set -u when --image is omitted
# Parse command line arguments
while [[ $# -gt 0 ]]; do
key="$1"
case "$key" in
--image)
IMAGE_NAME="$2"
shift
shift
;;
*)
echo "Error: Unknown option: $key"
echo "Usage: $0 --image IMAGE_NAME"
exit 1
;;
esac
done
# Define the path to the temporary directory where the files will be extracted
DESTINATION="/tmp/docker_images/${IMAGE_NAME}"
MANIFEST="${DESTINATION}/manifest.json"
# Define a function to download and extract the Docker image layers
function extract_docker_image {
# Save the Docker image as a tar file
echo "Saving Docker image as a tar file..." >&2
docker save "$1" -o "${DESTINATION}/${IMAGE_NAME}.tar"
# Extract the Docker image layers to the destination directory
echo "Extracting Docker image layers..." >&2
mkdir -p "${DESTINATION}/layers"
tar -xf "${DESTINATION}/${IMAGE_NAME}.tar" -C "${DESTINATION}/"
# Rename the layer directories to their respective layer IDs
LAYERS=$(jq -r '.[0].Layers[]' "${MANIFEST}")
# Extract each layer
for LAYER in ${LAYERS}; do
BLOB="${LAYER}"
# Extract the layer tar file to the destination directory
echo "Extracting layer ${LAYER}..." >&2
mkdir -p "${DESTINATION}/$(dirname ${BLOB})"
tar -xf "${DESTINATION}/${BLOB}" -C "${DESTINATION}/layers"
echo "Layer ${LAYER} extracted" >&2
done
}
# Ensure that the user has provided the Docker image name using the --image flag
if [ -z "$IMAGE_NAME" ]; then
echo "Error: Docker image name not provided." >&2
echo "Usage: $0 --image IMAGE_NAME" >&2
exit 1
fi
# Create the destination directory if it doesn't already exist
rm -rf "${DESTINATION}"
mkdir -p "${DESTINATION}"
# Call the function to download and extract the Docker image layers
extract_docker_image "$IMAGE_NAME"
```
#### Blog post
Next I've asked it to generate a skeleton of a blog post for me.
```
User> Please generate a draft of a blogpost about why platform engineering is the next era of infrastructure development. Only write the sections so that I can fill it out myself
GPT> <article>
```
See my last post for the headings.
I then went ahead and wrote the post; finally, I asked it to proofread the
entire article.
### Rust cli
I gave it a few pointers to generate a CLI. This was probably the most wonky
example, as clap has had major revisions since 2021. However, I just pasted the
most recent major revisions and it corrected itself.
```
User> Please generate a cli app for releasing software based on semantic versioning, include commands such as release, validate and bump, all which should use semantic versioning, please use rust and the clap library
GPT> <code>
User> Please use these clap revisions and update the code to use these instead: <clap revisions>
GPT> <code>
```
The code still had some issues, but it was a great starting point.
### Proof reading and writing
ChatGPT is great at writing articles and can quickly infer what points you want
to argue for. I've given it small sections and had it expand on them and
proofread them. However, it is usually quite stiff and neutral in tone.
You can tune ChatGPT to your liking, e.g.:
```
User> From now on please act like a well known blog post writer, write in a whitty, concise and clear manner, but include humor where possible. Write OK if you understand
GPT> OK
User> please expand on this article and write it in the manner mentioned above:
<draft>
GPT> <post>
```
This actually makes a big difference and produces the post in a clear but
personal manner, totally different from the neutral tone it holds when prompted
normally.
### Generation of stuff
I've asked GPT to generate various things for me, such as the YAML front matter
for these blog posts; it can infer the tags and, given an example of the YAML,
will automatically fill it out.
I've asked it to generate tests for various functions, which it has done to
great success.
I've also just given it a CLI help description and asked it to produce a README
for me.
## API
I've not gotten access to the API yet, but I've already simulated what I want it
to do using the web interface. That is: generate a meal plan from a recipe list
I've curated myself, choose recipes for the given day, write descriptions, add
recipes, etc.
All these steps have been tested individually and I am just waiting for access
before I can implement it in my meal planner.
# Conclusion
The possibilities are endless. I feel like my development has been
supercharged: I can quickly generate boilerplate and syntax even from vague
descriptions. ChatGPT has solved the blank page problem for me, as I can
quickly throw an idea out there and it generates a draft, which I can refine or
use as an idea generator.
I've used it for development, and I am preparing to integrate it into my tool
stack and everyday workflow. It is an awesome tool, and if this continues, the
future is bright.
# ChatGPT's take
```
User> From now on please act as a well known tech blog writer, write in a whitty, clear and concise manner. Write OK if you understand
GPT4> OK
User> <entire post copy/pasta>
```
I don't remember gushing over GPT that much, but it really sings its own
praises.
## From Skeptic to Devoted Fan: How GPT-4 Transformed My Workflow
There I was, a once-skeptical soul, questioning the value of GPT and its
potential impact on developers, engineers, and thought workers alike. But then,
I dipped my toes into the refreshing waters of GPT-3.5 (ChatGPT), and I was
hooked! Now a proud GPT-4 (ChatGPT Plus) owner, I'm here to share how it
revolutionized my workflow.
As a jack-of-all-trades, I find myself juggling a variety of languages and
systems, often with less-than-perfect expertise. But fear not, for GPT-4 has
come to the rescue!
### The GPT-4 Workflow Chronicles
Allow me to regale you with tales of how ChatGPT has supercharged my
productivity.
#### Conquering the Blank Page Problem
No more staring into the abyss of a blank page! I simply provide a basic draft
or summary, and ChatGPT whips up something marvelous.
##### Scriptwriting Made Easy
Imagine needing a script to extract Docker image layers. Instead of sweating
over every detail, I give GPT-4 a quick rundown of my requirements, and voilà, a
near-perfect script is born!
##### Blogging Like a Pro
When tasked with writing a blog post, I ask GPT-4 for a skeleton, and it
promptly delivers. Then, I fill out the content, and let ChatGPT proofread the
masterpiece. Simple, efficient, and delightful!
#### Rust CLI Adventures
Though my initial request for a Rust CLI app using the Clap library was slightly
off due to outdated information, a quick update on Clap revisions had GPT-4
churning out a solid starting point.
#### Unleashing the Wordsmith Within
While ChatGPT is adept at generating content, it can be a bit neutral in tone.
However, a simple instruction to adopt a witty, concise, and clear writing
style, sprinkled with humor, makes all the difference.
#### Masterful Generation of Stuff
From YAML files for blog posts to function tests and even READMEs, GPT-4 has me
covered. It's like having a digital personal assistant for all things code.
### The API Possibilities
Though I haven't yet accessed the API, I've been testing individual steps to
create a meal planner. Once I gain access, the sky's the limit!
## Conclusion: A Bright Future Ahead
GPT-4 has truly supercharged my workflow, solving the blank page problem and
providing endless possibilities. From development to everyday tasks, it's a game
changer. If this is the future, my friends, it's going to be a wild ride.


@@ -0,0 +1,161 @@
---
type: "blog-post"
title: "Evolving Software: Embracing AI-Driven Development"
description: "Dive into the world of AI-driven software development as we explore a system that evolves with AI capabilities and context. Learn how starting with a minimal viable product and gradually increasing responsibility can lead to AI-managed software systems. This blog post delves into the challenges of context limitations and expansion, and discusses potential solutions and strategies to optimize AI-generated code. Join us as we envision the future of AI-managed software systems and their potential for transforming the software development landscape."
draft: false
date: "2023-04-08"
updates:
- time: "2023-04-08"
description: "first iteration"
tags:
- '#blog'
- '#ai'
- '#software-development'
- '#ai-driven-development'
- '#evolving-software'
- '#GPT-4'
authors:
- "kjuulh"
---
## Introduction
In this post, we'll explore a system of software development that allows for
evolution and greater responsibilities as AI capabilities and context grow.
Unlike emergent AI functions, where native functions interact with AI
capabilities through an interface, this approach enables AI to build and
maintain its own responsibilities based on a set of goals or directives.
## Initial thoughts
The AI generative model/system would have a set of goals and requirements to
fulfill. It would build the initial version of the code, or spawn sub-AIs to
build capabilities for it.
It would handle requirements as they come in, and may even be set up to
automatically improve the code by updating to newer libraries, improving
performance, and creating more maintainable code.
## Starting Small: A Minimal Viable Product
Let's begin with a single function or unit of work, similar to what you'd find
in a unit test. The AI generative model would be responsible for this function,
but not as a black box. Instead, it would resemble the following:
```rust
enum SomeFunctionError {
...
}
struct SomeModel {
...
}
fn some_function() -> Result<...> {
let resp = call_api().map_err(|e| SomeFunctionError::ServerErr(e))?;
let content: SomeModel = resp.content()?;
return Ok(content)
}
```
As more requirements arise, the generative system can be informed of them,
allowing it to evolve the function or system accordingly:
```rust
enum SomeFunctionError {
...
}
struct SomeModel {
...
}
fn some_function() -> Result<...> {
let resp = call_api().map_err(|e| SomeFunctionError::ServerErr(e))?;
let content: SomeModel = resp.content()?;
log_metrics(content)?; // new
return Ok(content)
}
```
The generative model would automatically refresh its context when it cycles,
allowing developers to directly modify the code without any runtime magic.
## Scaling Up: Introducing More Responsibility
As the capabilities and context of the AI model evolve, abstraction levels can
be increased, allowing each AI layer to manage its own capabilities. The
hierarchy would look like this:
`service has modules, which have files.`
Each file maintains its own context and responsibility within a module, which
itself is a single AI instance. The primary AI module can direct and query
sub-AIs for their capabilities, prompting them to fix bugs, add features, and
even spawn new AIs for emerging requirements.
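To make this concrete, below is a minimal sketch of how those layers could be
modeled as data. Everything here is hypothetical: the agent types, fields, and
method names are invented for illustration, and Go is used only as a neutral
sketching language. In a real system, Delegate and Spawn would prompt sub-AIs
rather than append strings.
```go
package main

// Hypothetical sketch of the service -> module -> file hierarchy.
// Each layer owns its own context and delegates directives downward.

type FileAgent struct {
	Path    string
	Context []string // knowledge scoped to this one file
}

type ModuleAgent struct {
	Name  string
	Files []*FileAgent
}

// Delegate records a directive with every file agent in the module;
// a real implementation would prompt each file-level sub-AI instead.
func (m *ModuleAgent) Delegate(directive string) {
	for _, f := range m.Files {
		f.Context = append(f.Context, directive)
	}
}

type ServiceAgent struct {
	Goals   []string
	Modules []*ModuleAgent
}

// Spawn creates a module agent for a requirement that has no owner yet.
func (s *ServiceAgent) Spawn(name string) *ModuleAgent {
	m := &ModuleAgent{Name: name}
	s.Modules = append(s.Modules, m)
	return m
}

func main() {
	svc := &ServiceAgent{Goals: []string{"serve the API reliably"}}
	auth := svc.Spawn("auth")
	auth.Files = append(auth.Files, &FileAgent{Path: "auth/login.go"})
	auth.Delegate("log metrics on every call")
}
```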
## Interaction: System Level and Public API
Interaction with the AI should be possible both at the system level and via a
public API. Primary engineers can prompt the AI directly, enabling it to update
its goals and delegate tasks to its child systems.
Through a public API like GitHub, the AI would have its own user account,
allowing developers to mention or assign it to issues. The AI would then handle
the issue directly, offering help, closing it, or fixing the submitted bug.
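As a rough sketch of the GitHub-facing half, a webhook receiver could forward
issue mentions to the service-level AI. The bot handle and the handleIssue stub
are hypothetical; only the payload fields (comment.body, issue.number) follow
GitHub's real issue_comment event shape.
```go
package main

import (
	"encoding/json"
	"net/http"
	"strings"
)

// Subset of GitHub's issue_comment webhook payload.
type issueCommentEvent struct {
	Comment struct {
		Body string `json:"body"`
	} `json:"comment"`
	Issue struct {
		Number int `json:"number"`
	} `json:"issue"`
}

// handleIssue is a placeholder for prompting the service-level AI.
func handleIssue(number int, body string) {}

func webhookHandler(w http.ResponseWriter, r *http.Request) {
	var ev issueCommentEvent
	if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
		http.Error(w, "bad payload", http.StatusBadRequest)
		return
	}
	// "@my-ai-bot" is a hypothetical bot account name.
	if strings.Contains(ev.Comment.Body, "@my-ai-bot") {
		go handleIssue(ev.Issue.Number, ev.Comment.Body)
	}
	w.WriteHeader(http.StatusAccepted)
}

func main() {
	http.HandleFunc("/webhook", webhookHandler)
	http.ListenAndServe(":8080", nil)
}
```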
## A Thought Experiment: Real-World Viability
While this concept requires testing in real-world scenarios, tools like AutoGPT
and GPT4All could potentially be adapted for this purpose. The groundwork laid
by AutoGPT makes integration with existing systems like Git, GitHub, and web
search feasible, along with delegation and supervision tasks.
## The Future of AI-Managed Software Systems
An automated AI-managed software system may soon become a reality, and this post
outlines a potential model for incrementally increasing AI responsibility as its
capabilities grow. Although AI models are currently intelligent and capable,
their context and long-term memory are not as mature, making a gradual model
more suitable for implementation.
A practical example will follow as I experiment more.
### Reflecting on the AI Experience
Working with these AI models has yielded surprising results. Initially, I
anticipated that AI would generate obscure and difficult-to-maintain code.
However, the opposite has proven true: AI can create incredibly readable and
maintainable code. The key is providing concise and directed requirements, as
the AI is quite adept at discerning nuances within them and taking appropriate
action.
The primary challenges I face involve context limitations and context expansion
(acquiring new knowledge). The current context for models like ChatGPT or GPT-4
is quite restricted, with a maximum of 32k tokens (around 20k words). This
constraint must accommodate all the directives driving the generative software
system, its acquired knowledge, and any new requirements.
The central issue is the lack of an easy way for AI to gain knowledge without
exceeding its context cache. While GPT could read an entire library's source
code to understand it, doing so would result in a biased perspective based on
that specific implementation. Alternatively, GPT could read a library's API, but
there is no standard method that's general enough for our use case. Developing
an ingestion function for each language, package manager, and documentation
system would be necessary.
A practical solution involves using AI to optimize context for another AI. In
other words, one AI fetches and digests the documentation, then compresses it as
succinctly as possible for another AI to use. While this approach may not be
perfect, as the AI is not specifically designed to optimize for another AI, it
offers a promising workaround.
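Sketched as a pipeline under those assumptions, the librarian-style compression
might look like the following; summarize is a stand-in for a real model call,
and the token budget handling is deliberately naive.
```go
package main

import (
	"fmt"
	"strings"
)

// summarize stands in for a call to a language model; a real system
// would send the prompt and input to an API. Entirely hypothetical.
func summarize(prompt, input string) string {
	// Placeholder: return a truncated "digest" instead of a model reply.
	if len(input) > 200 {
		return input[:200]
	}
	return input
}

// compressDocs runs a "librarian" pass that digests raw documentation,
// then a final pass that squeezes the combined digest into the context
// budget of the code-generating AI.
func compressDocs(rawDocs []string, tokenBudget int) string {
	digests := make([]string, 0, len(rawDocs))
	for _, doc := range rawDocs {
		digests = append(digests, summarize("Compress this API doc for another AI:", doc))
	}
	combined := strings.Join(digests, "\n")
	return summarize(fmt.Sprintf("Fit this into %d tokens:", tokenBudget), combined)
}

func main() {
	docs := []string{
		"endpoint GET /users returns a JSON list of users",
		"endpoint POST /users creates a user",
	}
	fmt.Println(compressDocs(docs, 1024))
}
```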
Long-term storage is another viable option that I plan to explore. However, its
effectiveness in practice and the extent of context it can restore remain to be
seen.


@@ -0,0 +1,222 @@
---
type: "blog-post"
title: "Dismantling a Monolithic Golang Application"
description: "The follow-up article showcases a practical example of how to apply the strangler pattern to a monolithic system, using a Go application as an example. The process involves incrementally extracting small pieces of functionality into separate APIs, testing them, and gradually replacing the old monolithic code. This approach allows for a smoother and less risky transition to a microservices architecture, with minimal disruption to the existing system."
draft: false
date: "2023-04-13"
updates:
- time: "2023-04-13"
description: "first iteration"
tags:
- '#blog'
- '#microservices'
- '#monolith'
- '#softwarearchitecture'
- '#golang'
- '#stranglerpattern'
- '#migration'
- '#api'
- '#deployment'
---
In this follow-up article to
[Strategies for Dismantling Monolithic Systems](https://blog.kasperhermansen.com/posts/breaking-down-the-monolith/),
we will explore a practical example of dismantling a monolithic Golang
application using the strategies discussed in the previous article. We will walk
through the process step by step, demonstrating the application of the
Strangler, Decorator, and Sprig strategies, and provide a simple diagram to
illustrate the architectural changes.
## Initial Monolithic Application
Consider a simple monolithic Golang application that handles user registration
and authentication:
```go
package main
import (
"fmt"
"net/http"
)
func main() {
http.HandleFunc("/register", registerHandler)
http.HandleFunc("/login", loginHandler)
http.ListenAndServe(":8080", nil)
}
func registerHandler(w http.ResponseWriter, r *http.Request) {
// Register user logic
fmt.Fprint(w, "User registered")
}
func loginHandler(w http.ResponseWriter, r *http.Request) {
// Authenticate user logic
fmt.Fprint(w, "User logged in")
}
```
This application has two main functionalities: registering a new user and
logging in an existing user.
## Breaking Down the Monolith
### Step 1: Identify the functionalities to be extracted
We will start by identifying the functionalities that can be extracted into
separate microservices. In our example, we will extract the user registration
and authentication functionalities into two separate services:
1. User Registration Service
2. Authentication Service
### Step 2: Apply the Strangler Pattern
Next, we will apply the Strangler Pattern to gradually replace the monolithic
application with the new microservices.
First, create the new User Registration and Authentication services:
```go
// User Registration Service
func newUserRegistrationHandler(w http.ResponseWriter, r *http.Request) {
// New user registration logic
fmt.Fprint(w, "New user registered")
}
// Authentication Service
func newAuthenticationHandler(w http.ResponseWriter, r *http.Request) {
// New authentication logic
fmt.Fprint(w, "New user authenticated")
}
```
Now, we will modify the main function of the application to use these new
services:
```go
func main() {
http.HandleFunc("/register", newUserRegistrationHandler)
http.HandleFunc("/login", newAuthenticationHandler)
http.ListenAndServe(":8080", nil)
}
```
During this transition, we can use feature flags or canary deployments to
control the traffic between the old and new services.
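As one possible illustration, a deterministic percentage-based canary can live
inside the monolith's router while the extracted services come online. This is
only a sketch, not a prescribed rollout mechanism: the 10% figure and the
IP-based bucketing are assumptions, and it reuses the handlers defined earlier
in the article.
```go
package main

import (
	"hash/fnv"
	"net"
	"net/http"
)

// canaryHandler sends roughly `percent` percent of requests to the new
// handler and the rest to the legacy one. Clients are bucketed by IP so
// a given client consistently hits the same backend.
func canaryHandler(percent uint32, newH, oldH http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		host, _, err := net.SplitHostPort(r.RemoteAddr)
		if err != nil {
			host = r.RemoteAddr
		}
		h := fnv.New32a()
		h.Write([]byte(host))
		if h.Sum32()%100 < percent {
			newH(w, r)
			return
		}
		oldH(w, r)
	}
}

func main() {
	// Route 10% of traffic to the extracted services during the transition.
	http.HandleFunc("/register", canaryHandler(10, newUserRegistrationHandler, registerHandler))
	http.HandleFunc("/login", canaryHandler(10, newAuthenticationHandler, loginHandler))
	http.ListenAndServe(":8080", nil)
}
```
Raising the percentage over time, or reading it from a feature-flag service,
turns this into a gradual rollout.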
### Step 3: Apply the Decorator and Sprig Patterns
As we develop new features, we can leverage the Decorator and Sprig Patterns to
add functionality to the new microservices without further complicating the
monolithic application.
For example, if we want to implement a password reset functionality, we can
create a new endpoint in the Authentication Service:
```go
// Password Reset
func passwordResetHandler(w http.ResponseWriter, r *http.Request) {
// Password reset logic
fmt.Fprint(w, "Password reset")
}
// Updated main function
func main() {
http.HandleFunc("/register", newUserRegistrationHandler)
http.HandleFunc("/login", newAuthenticationHandler)
http.HandleFunc("/reset-password", passwordResetHandler)
http.ListenAndServe(":8080", nil)
}
```
By following these strategies, we can gradually dismantle the monolithic
application while maintaining a functional system throughout the process.
## System Diagram
### Step 1: Identify and Isolate a Component
The first step is to identify and isolate a component that can be extracted from
the monolith. This should be a well-defined, self-contained unit that can be
broken off and turned into a separate service without affecting the rest of the
application. Once you have identified the component, you should create a new API
that can handle its responsibilities.
```mermaid
graph TD
A[User] --> B[Monolith]
B --> C[Monolithic Application]
subgraph C["Monolithic Application"]
E[UserRegistrationService]
F[AuthenticationService]
end
```
### Step 2: Create a New API
Next, you need to create a new API that can handle the responsibilities of the
isolated component. This API should be designed to work independently of the
monolith, so it can be easily swapped in or out as needed. The API should be
thoroughly tested to ensure it works as expected.
### Step 3: Test and Roll Out the New API
Once you have created the new API, you need to test it to ensure it works as
expected. You can use a canary rollout or feature flags to gradually roll out
the new API while still keeping the old one in place. This will allow you to
catch any issues or bugs before fully switching over to the new API.
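A minimal sketch of such a test, using Go's standard net/http/httptest package
against the newUserRegistrationHandler defined earlier; the expected body
matches the toy handler above, and a real service would assert on richer
behavior.
```go
package main

import (
	"io"
	"net/http"
	"net/http/httptest"
	"testing"
)

// TestNewUserRegistrationHandler verifies the extracted service on its
// own before any production traffic reaches it.
func TestNewUserRegistrationHandler(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(newUserRegistrationHandler))
	defer srv.Close()

	resp, err := http.Post(srv.URL+"/register", "application/json", nil)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	if string(body) != "New user registered" {
		t.Errorf("unexpected response body: %q", body)
	}
}
```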
### Step 4: Switch Over to the New API
Once you have thoroughly tested the new API, it's time to switch over to it. You
can do this by updating the monolith to use the new API instead of the old one.
You should monitor the application closely to ensure there are no issues or
bugs, and be prepared to roll back if necessary.
```mermaid
graph TD
A[User] --> B[Monolith]
B --> C[Monolithic Application]
B --> D
subgraph C[Monolithic Application]
E[UserRegistrationService]
F[AuthenticationService]
end
subgraph D[Microservices]
NewUserRegistrationService
NewAuthenticationService
end
```
### Step 5: Remove monolithic application
Once you're satisfied with the performance of the new API, delete the old parts
of the monolith. This process can take a long time, as the old code will exist
as a form of backup for a while.
```mermaid
graph TD
A[User] --> B[Microservices]
subgraph B["Microservices"]
C[UserRegistrationService]
D[AuthenticationService]
E[PasswordResetService]
end
```
### Step 6: Repeat
Finally, you should repeat the process by identifying and isolating another
component that can be extracted from the monolith. This process can be repeated
until the monolith has been completely broken down into a set of smaller,
independent services.
## Conclusion
By following these strategies, we can gradually dismantle a monolithic Golang
application while maintaining a functional system throughout the process. The
practical example and the Mermaid diagrams demonstrate how the Strangler,
Decorator, and Sprig Patterns can be applied to effectively break down a
monolithic application into smaller, more manageable microservices.


@@ -0,0 +1,275 @@
---
type: blog-post
title: "Streamlining Developer Workflows Across Your Organization"
description: "In this post, the author explores how to distribute a development platform and deployment system across an organization to share robust tools and workflows. They discuss the importance of opinionated workflows and blessed paths, which are focused on specific technologies and tooling. By choosing a blessed path, developers can share a common set of tools and reduce cognitive load, allowing them to focus on feature development."
draft: false
date: '2023-04-11'
updates:
- time: '2023-04-11'
description: first iteration
tags:
- '#blog'
- "Distributed Systems"
- "Modular Architecture"
- "Developer Workflows"
- "Shared Tooling"
- "Platform Teams"
- "Scalability"
- "Continuous Integration"
- "Continuous Deployment"
---
In this blog post, we'll delve into distributing a continuous integration and
deployment system throughout an organization, enabling robust tools and
workflows to be shared across various languages and domains.
As an organization evolves, its developer tooling undergoes various stages based
on its size, maturity, and technology stack. A typical progression includes:
1. Creating shell scripts for project deployment.
2. Packaging shell scripts or developing basic tools, then sharing them via
brew, copying, etc.
3. Developing dedicated tools, often by forming a dedicated operations team.
4. Establishing opinionated workflows and endorsed paths.
5. Assigning responsibilities to operations feature squads and consolidating
them in endorsed paths.
Of course, this process isn't one-size-fits-all, and it can vary depending on an
organization's size, the products being developed, and other factors.
We'll concentrate on steps 4 and 5, examining their implications for your
company and how they can empower and accelerate your operations teams.
First, let's define opinionated workflows.
## Opinionated workflows and endorsed paths
For a medium-sized company, a smart strategy is to mature a specific tool stack
by focusing on a handful of technologies and creating exceptional tooling around
them.
It's often believed that if you're using Kubernetes, Fargate, service mesh,
etc., you can choose any language for a given service. However, many companies
have discovered that building a service involves more than just selecting a new
language; it also requires investments in infrastructure, libraries, monitoring,
expertise, and more.
For instance, if we select Java for a project where we typically build C# with
.NET, we'll find that the languages share very little in terms of tooling.
To run these services in production on Kubernetes, you'll need:
- Dockerfiles for declarative deployment
- CI tools for testing and static code analysis
- Libraries for:
- Logging
- Monitoring
- Error handling
- Circuit breaking
- Retrying
- API abstraction layer
- Database ORM/querier
- Authorization
- And numerous other convenience libraries.
While it's not essential to build all these solutions yourself, you do need to
consider and adapt them for your needs.
Thus, it's wise to choose an endorsed path, providing developers with a
consistent set of tools and clear expectations.
An endorsed path is a predetermined service journey that automates developers'
decision-making, offering immediate and ongoing support out of the box.
You can tailor your path to your developers' needs. In our case, we designed a
journey from day-0 to day-2. For example, we didn't prioritize deprecation,
which is acceptable for our use case, even if it involves some manual work.
This encompasses:
1. Service/Library creation
2. Dependency management, allowing teams to automatically receive updated
dependencies
3. CI, with Jenkins jobs automatically created based on service type
4. CD, performed automatically according to service type and configuration,
eliminating the need for teams to create Dockerfiles or Kubernetes manifests
themselves
5. Libraries, including logging, monitoring, and more (see the library list
above)
However, it's crucial to note that we only endorse a few languages and types and
maintain different maturity levels for them.
Think of this as developing a product centered around the platform, as I
recently discussed in a post:
[Platform Engineering: The Next Era of Ops](https://blog.kasperhermansen.com/posts/platform-engineering-the-next-era-of-ops/).
The advantages of these services include reduced cognitive load on developers
and shifting incidental complexity to expert squads. This enables feature squads
to concentrate on developing features, only tapping into the platform layer when
necessary to extend, modify, or enhance the offerings, usually with the
assistance of the platform team.
Keep in mind, this isn't a one-size-fits-all solution; it's most effective for
organizations with a homogenous, large sprawl of features or services. If you
have only a single product or a few, invest time in building highly performant
and tailored platforms for them.
One issue with this architecture is the potential for unclear ownership and the
challenge of applying software development principles to platform squads. We'll
address this in the next section:
## Distributing Responsibilities for platform squads and friends
When establishing a platform organization as a product-oriented entity
interacting with the actual business organization, an API must be developed to
govern how the business domain interacts with the platform. Building this can be
incredibly challenging (think AWS or any other large product you've used).
However, unlike these organizations, you likely have direct feedback from your
users and the ability to create more opinionated solutions, allowing you to
exert more control over the workloads running on your platforms.
An alternative model involves assigning responsibilities for specific parts of
your product to each squad within your platform organization while consolidating
these responsibilities into a single API for your developers.
This results in a single tool with a wide range of capabilities, enabling:
```bash
$ platform-tool build
$ platform-tool test
```
The challenge now lies in determining ownership of the entire build and test
pipeline and of the components that make it up. Drawing inspiration from
product development and software engineering, it becomes evident that we need
to create modules and plugin architectures for sharing these tools, allowing
teams to follow this organizational structure:
- Team responsible for the `platform-tool` itself, as well as creating
individual commands such as build, test, etc.
- Teams responsible for building parts of the tools, such as templating for
Kubernetes, integration testing, static analysis, code generation, etc.
There might come a time when you want to empower feature squads to develop parts
of the platform tailored to their needs, such as specific tools they've created
that necessitate a proper developer journey.
You might wonder why this hasn't been done yet or why this approach is more
challenging than it appears. The problem stems from a gap in tooling between
standard software development tools and the tools used for building the software
itself. These tools are often monolithic, require configuration, and are not
well-suited for distribution (see Dockerfiles, Helm, bash, makefiles). Most of
these products are designed to be defined within a single application, which
works for their use case but isn't scalable enough for our needs.
Large companies have gone the other way, developing highly scalable tools for
their own requirements, but these tools are generally not practical for small to
medium-sized companies to adopt (e.g., Bazel, Buck, Fabricator, etc.).
Additionally, they typically focus on a single specific use case and excel at
it.
The goal here is to introduce flexibility and autonomy into the pipeline,
enabling teams to leverage their expertise using a standard software development
paradigm.
To do so, we want to adopt a product strategy internally for the platform teams:
- The platform organization agrees on a protocol for sharing among plugins,
templates, etc. (a minimal sketch of such a protocol follows this list).
- The customer-facing platform tool composes these tools using the agreed-upon
protocol.
- Each platform/feature team can own its features, which are then assembled in
an opinionated way by the platform team responsible for the developer journey.
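To make this concrete, here is a minimal sketch of what such an agreed-upon
protocol could look like in Go; the names (`Plugin`, `Session`, `Compose`) are
hypothetical and only illustrate the shape of the contract, not an actual
implementation:
```go
package platform

import "context"

// Session carries shared pipeline state (credentials, caches, loggers, ...)
// bootstrapped once by the customer-facing tool and handed to every plugin.
type Session struct{}

// Plugin is the protocol each platform/feature team implements for the part
// of the pipeline they own (SAST, templating, integration testing, ...).
type Plugin interface {
	Name() string
	Run(ctx context.Context, session *Session) error
}

// Compose runs a curated, ordered set of plugins; this is how the developer
// journey team assembles the opinionated pipeline behind commands such as
// `platform-tool build`.
func Compose(ctx context.Context, session *Session, plugins ...Plugin) error {
	for _, p := range plugins {
		if err := p.Run(ctx, session); err != nil {
			return err
		}
	}
	return nil
}
```
Each team ships its own `Plugin` implementation as a module, and the developer
journey team decides the order and composition.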
### Example: Golang feature service
Let's consider the example of building a Go service.
A customer-facing platform team would have already defined the main API
functions, such as build, test, code coverage, etc.
The developer journey team then composes plugins and templates into these APIs.
```go
func Build(ctx context.Context) error {
session := ci.BootstrapSession(ctx)
defer session.Close()
if err := golangbin.Build(ctx, session); err != nil {
return err
}
if err := golang.Test(ctx, session); err != nil {
return err
}
if err := sast.Scan(ctx, session); err != nil {
return err
}
if err := docker.Publish(ctx, session); err != nil {
return err
}
return nil
}
```
While this is a simplified example and real-world situations would be far more
complex with numerous interdependencies, it illustrates that each team can own
their packages. For instance, a team might be responsible for the SAST plugin,
having their own submodule or repo, defining their own tests, workflow, etc.
When we publish our tool through our organization's preferred distribution
mechanism, each team can build a Go feature service using the same tooling,
automatically receiving all platform features relevant to their domain.
The same concept applies to a Node.js service:
```go
func Build(ctx context.Context) error {
session := ci.BootstrapSession(ctx)
defer session.Close()
if err := nodebin.Build(ctx, session); err != nil {
return err
}
if err := node.Test(ctx, session); err != nil {
return err
}
if err := sast.Scan(ctx, session); err != nil {
return err
}
if err := docker.Publish(ctx, session); err != nil {
return err
}
return nil
}
```
Each team can use the same tooling and benefit from platform features tailored
to their specific needs, streamlining the development process and fostering a
consistent developer experience across different languages and projects. This
modular approach enables teams to focus on their core responsibilities,
delivering high-quality features and improvements while leveraging the shared
expertise and resources of the platform organization.
Despite the changes, various tools can still be reused, even if the build and
test components have evolved. While this may seem trivial to some, for those
transitioning from a shell-driven workflow, this new approach unlocks the full
potential of a traditional software development workflow, extending it to
platform squads as well.
## Conclusion
This blog post expands upon a previous post,
[Platform Engineering: The Next Era of Ops](https://blog.kasperhermansen.com/posts/platform-engineering-the-next-era-of-ops/),
by advocating for a distributed, modular approach to crafting shared tools and
workflows throughout an organization. Implementing this strategy paves the way
for a more scalable, adaptable, and efficient development process, benefiting
platform squads and feature teams alike.


@@ -0,0 +1,196 @@
---
type: "blog-post"
title: "Strategies for Dismantling Monolithic Systems"
description: "This article addresses the challenges of dismantling monolithic systems and presents strategies such as the strangler pattern, decorator strategy, and sprig strategy for a smoother transition. It emphasizes the importance of maintaining momentum and balancing the organization's structure. The concepts and strategies apply to both business and application levels, aiming to improve scalability, maintainability, and performance."
draft: false
date: "2023-04-12"
updates:
- time: "2023-04-12"
description: "first iteration"
tags:
- '#blog'
- '#monolithic'
- '#microservices'
- '#migration'
- '#stranglerpattern'
- '#decorators'
- '#sprigstrategy'
- '#architecture'
- '#applicationlevel'
---
Breaking down a monolithic system can be a daunting task. In this article, we'll
explore effective strategies for dismantling monolithic systems and transforming
them into more manageable, scalable, and flexible architectures. We'll provide
clear definitions, real-world examples, and practical advice to make this
complex process more accessible.
## Understanding Monolithic Systems
A monolithic system is a software application where all components are
interconnected and interdependent, making it difficult to modify or scale
individual parts without affecting the entire system. These systems can become
unwieldy over time, leading to slower development cycles, increased maintenance
costs, and reduced agility.
## Strategies for Dismantling Monolithic Systems
To successfully dismantle a monolithic system, you need a well-thought-out plan,
strong support from key stakeholders, and a thorough understanding of the
potential risks and drawbacks of each strategy. Let's explore three common
strategies: the Strangler Strategy, Decorator Strategy, and Sprig Strategy.
### Strangler Strategy
The strangler pattern is a technique for incrementally replacing parts of a
monolithic system with new, more flexible components. This strategy is named
after the strangler fig tree, which gradually envelops and replaces its host
tree.
The key to success with the strangler pattern is to start small and avoid
overambitious goals. Focus on extracting isolated, low-level functionality and
gradually work your way up the stack. Choose functions that can be easily
tested, integrated, and scaled. Consider using tools like feature flags, canary
rollouts, or API gateways to switch between old and new functionality with
minimal disruption.
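As a rough sketch of such a switch at the edge, a tiny Go reverse proxy can
strangle one path at a time; the hostnames below are placeholders, and it
assumes a user-facing slice has already been extracted into its own service:
```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	monolith, err := url.Parse("http://monolith.internal:8080")
	if err != nil {
		log.Fatal(err)
	}
	users, err := url.Parse("http://users.internal:9090")
	if err != nil {
		log.Fatal(err)
	}

	mux := http.NewServeMux()
	// The strangled path is served by the new microservice...
	mux.Handle("/users/", httputil.NewSingleHostReverseProxy(users))
	// ...while every other path still hits the monolith.
	mux.Handle("/", httputil.NewSingleHostReverseProxy(monolith))

	log.Fatal(http.ListenAndServe(":8080", mux))
}
```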
Keep in mind that the strangler pattern can be applied to both code and system
architecture. It's essential to carefully consider the impact of changes on the
system's overall stability and performance.
### Decorator Strategy
The decorator strategy involves adding or modifying the behavior of existing
components without altering their core functionality. This can be achieved by
wrapping existing components in a new layer, such as a reverse proxy or an API
gateway.
This approach is best suited for situations where the monolithic system is
difficult or risky to modify directly. However, be cautious when using this
strategy, as it doesn't address the underlying issues in the monolithic system,
and knowledge gaps may persist.
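As an illustration, here is a minimal Go sketch of a decorating layer that
adds behavior (request logging) around an existing handler without touching
its internals; the same idea scales up to a reverse proxy or API gateway in
front of the whole monolith:
```go
package main

import (
	"log"
	"net/http"
	"time"
)

// decorate wraps an existing handler with request logging, adding behavior
// without altering the handler's core functionality.
func decorate(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		log.Printf("%s %s took %s", r.Method, r.URL.Path, time.Since(start))
	})
}
```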
### Sprig Strategy
The sprig strategy involves building new, independent modules or APIs that
interact with the monolithic system, rather than adding new features directly to
the monolith. This approach can help reduce the complexity of the monolithic
system while allowing for the development of new features.
The strangler and sprig strategies are often used together to achieve a balance
between dismantling the monolith and supporting new feature development.
## Best Practices for Dismantling Monolithic Systems
1. **Maintain momentum:** Keep a steady pace during the migration process and
prioritize stability.
2. **Don't aim for perfection:** Focus on making incremental improvements rather
than a complete rewrite.
3. **Communicate effectively:** Ensure all team members and stakeholders are
informed of the progress and challenges.
4. **Monitor and measure:** Track the success of your migration using key
performance indicators and other metrics.
5. **Prioritize security and data consistency:** Address potential security
risks and data migration challenges throughout the process.
## Addressing Organizational Challenges
Dismantling a monolithic system is not just a technical challenge; it also
requires addressing organizational issues. Conway's Law observes that a
system's design tends to mirror its organization's communication structure, so
the two must be aligned to avoid conflicts. To achieve this alignment, divide
the system into smaller, autonomous components that reflect your
organization's structure, empowering teams to make decisions and own their
work.
## Conclusion
Dismantling a monolithic system is a complex and time-consuming process that
demands commitment, support, and a strategic approach. By employing a
combination of the Strangler Strategy, Decorator Strategy, and Sprig Strategy,
you can gradually transform a monolithic system into a more modular, scalable,
and flexible architecture that better aligns with your organization's needs.
As you progress through the dismantling process, keep in mind the importance of
communication, collaboration, and adaptability. Be prepared to adjust your
approach as new challenges and opportunities arise, and remember that the
ultimate goal is to create a system that is more maintainable, efficient, and
responsive to change.
Successful dismantling of a monolithic system can lead to transformative changes
within your organization, fueling rapid growth and enhancing both team
productivity and satisfaction. By investing in this process and adhering to best
practices, you can create a robust foundation for future development and
innovation.
# Appendix: Real-World Examples of Monolith Dismantling on an Architectural Level
## Example 1: Amazon
Amazon, one of the largest e-commerce and cloud computing companies, initially
began with a monolithic architecture. As the company grew, Amazon recognized the
need to move towards a microservices architecture to increase scalability,
reliability, and maintainability. The transition took place gradually, using the
strangler pattern to replace the monolith components with microservices.
Amazon's architectural transformation was not just about technology, but also
about organizational restructuring. Each microservice team was responsible for
their service's entire lifecycle, enabling faster development cycles and better
ownership.
## Example 2: Netflix
Netflix, a global streaming service provider, also started with a monolithic
architecture. As the company rapidly grew its user base and expanded to
international markets, it faced scalability issues. To overcome these
challenges, Netflix transitioned to a microservices architecture, using the
strangler pattern to systematically replace parts of the monolith with
individual services.
The migration to microservices helped Netflix improve scalability, resilience,
and the overall customer experience. It also allowed the company to innovate and
experiment with new features more efficiently.
## Example 3: Spotify
Spotify, a leading music streaming service, initially began as a monolithic
application. To support its rapidly growing user base and ensure a seamless
listening experience, Spotify chose to transition to a microservices
architecture. The company used the sprig strategy to build new features outside
the monolith, while gradually breaking down the existing application.
By moving to microservices, Spotify was able to improve the scalability and
reliability of its system. The company also embraced an organizational model
with autonomous teams, known as "squads," responsible for individual services,
which aligned with the microservices architecture.
---
These real-world examples illustrate the importance of transitioning from a
monolithic system to a more modular, scalable, and maintainable architecture. By
leveraging different strategies such as the strangler pattern, decorator
strategy, and sprig strategy, companies can systematically dismantle their
monoliths and build robust systems that support growth and innovation.
## Disclaimer: Applicability at the Application Level
While the real-world examples presented in this appendix focus on the
architectural transformation of entire businesses, it is essential to note that
the same strategies and principles can also be applied at the application level.
The transition from monolithic to microservices or modular architectures is not
limited to the overarching systems of a business but can be beneficial for
individual applications as well.
When addressing the challenges of scalability, maintainability, and flexibility
within specific applications, implementing the strangler pattern, decorator
strategy, or sprig strategy can yield significant improvements. These strategies
can help to break down monolithic applications into smaller, more manageable
components, allowing for more efficient development, testing, and deployment.
In summary, the concepts and strategies discussed in this article and appendix
are applicable and beneficial at both the business and application levels. They
provide a framework for transitioning from monolithic systems to more scalable
and maintainable architectures that can support growth, innovation, and improved
performance.
Practical examples could be added, but they are usually far more difficult to
illustrate given the nature of the task at hand.


@@ -0,0 +1,389 @@
---
type: "blog-post"
title: "Revisiting my Personal Platform in 2023"
description: "The tech landscape moves fast, and as a Platform/Developer experience engineer I like to stay up to date with recent technology and approaches so that I can deliver the best and most solid approach for my engineers. As such in this blog post I will explore what my personal development stack looks like, and how I want to kick it up a notch and reflect on the challenges I've had in the previous year. Strap in because this is gonna be a janky ride. But first I will dig into why I've got a personal platform and why that might be useful for you as well."
draft: false
date: "2023-07-23"
updates:
- time: "2023-07-23"
description: "first iteration"
tags:
- '#blog'
---
The tech landscape moves fast, and as a Platform/Developer experience engineer I
like to stay up to date with recent technology and approaches so that I can
deliver the best and most solid approach for my engineers. As such in this blog
post I will explore what my personal development stack looks like, and how I
want to kick it up a notch and reflect on the challenges I've had in the
previous year. Strap in because this is gonna be a janky ride. But first I will
dig into why I've got a personal platform and why that might be useful for you
as well.
## What do I mean by a personal platform
You may have heard the terms self-hosted or homelab thrown around. These terms
overlap a bit, but are also somewhat orthogonal. A homelab is a personal or
small-scale deployment of things you can tinker with, experiment on, and enjoy
using; it usually consists of parts such as HomeAssistant, Plex/Emby, various
vms, and such. Self-hosted basically means off-the-shelf tools you can host
yourself, whether for personal or enterprise use.
By a personal platform I partly mean a homelab, but taken a step further and
specialized for development usage. The goal is to develop a platform like a
small to medium-sized company would: one capable of rolling out software, with
the amenities you want to select for (more on that later). It should be useful
and not just an experiment; you should actually use the platform to roll out
software. One of the most important parts of developing a platform is actually
using it yourself (dogfooding), otherwise you will never learn where the sharp
edges are and where your requirements break down.
So for me the basic requirements for a platform is:
1. A place to host deployments, this may be a vm, a raspberry pi, fly.io, aws.
It doesn't matter too much, it all depends on your needs and what you want to
develop.
2. A place to store source code, again the easiest option is just to choose
GitHub, but you can also choose to go a step further and actually host the
code yourself in the spirit of a homelab. I do this personally.
3. A domain or a way to interact with the services and deployments you build.
You want the things you build to be accessible to however wide an audience you
choose, whether that is only yourself, your closest family and friends, or the
public. I personally do a mix: some things like the platform internals are
only accessible internally, other services are public, and some are invite
only.
If it is difficult to picture, you can think of the platform as the same
things you would get if you used fly.io, aws, gcp, or any of the Platform as a
Service solutions out there.
## Why build a platform only for yourself
This is a question I get a lot: I seemingly spend a lot of effort building
tools, services, and whatnot, which is incredible overkill for my personal
needs. I think of it like so:
> Get comfortable with advanced tooling and services, so when you actually need
> to do it in practice it is easy
It is partly personal development, but also builds up a certain expertise that
can be difficult to acquire in a job. It is also incredibly fun and filled
with challenges.
It should also be noted that while a personal platform may seem like
incredible overkill, it is an incremental process; you may already have parts
of it, just implicitly.
## The beginning
My personal platform began as an old workstation running Linux (the distro
doesn't really matter), with `docker` and `docker-compose` installed. Then I ran
various homelab deployments, such as `gitea`, `drone-ci`, `plex`, etc.
My workflow would be to simply build a docker image for the service I was
working on: `make ci`, which would `docker build .` and `docker push`, and
finally I would ssh into the workstation and bump the image version using
`image:latest`. It was a fairly basic platform, and a lot of the details
weren't documented or automated.
In the beginning everything was just accessible internally, and I would use
the hostname given by `dhcp`, such as `http://home-server:8081/todo-list` or
something like that.
It worked fine for a while, but I began to need some of those tools when I
left the house, and as my tool stack grew and there were more hostnames and
ports to remember, I began to look for enhancements to the stack.
> This is actually the most important part of building a personal platform.
> Start small, and grow in the direction of your requirements and needs. Do not
> start with a self-hosted kubernetes with all the bells and whistles. And don't
> copy another person's stack; it will not fit your needs and you won't be able
> to maintain it.
In the beginning I chose tools such as upnp and ngrok to expose these
services, as well as a dashboard service for discoverability. However, that
didn't work out. First of all, ngrok and upnp weren't the most stable, and I
didn't want to expose my home network to the internet in that way. I also
didn't use the dashboard service much, as just that extra step made me use the
tools I'd built less; I would select only those I remembered the hostname and
port for, and not the more niche ones.
### Getting a VPS
Getting my first vps for personal use was a decision I made once I figured out
how many amenities I would get out of the box: a stable machine running nearly
24/7, with a public static ip, reachable from anywhere.
I chose hetzner because it was the cheapest option I could get where I am,
with the required bandwidth cap and such.
I chose namecheap for a domain, and cloudflare for dns. Cloudflare technically
isn't needed, but the tooling is nice.
At this point my stack was like this:
```
namecheap -> cloudflare -> hetzner vps
```
This was sort of useful, but not that much: I could host some things on the
vps, but I wanted to use the cheap compute I had at home while still making it
reachable. I then began searching for a mesh vpn. I looked at openvpn and a
bunch of other options, but finally landed on `wireguard`, because it seemed
to be the most performant and suited my needs quite perfectly.
In the beginning I wanted to just use the vpn as a proxy.
```
namecheap -> cloudflare -> hetzner vps -> wireguard -> home workstation
```
However, setting up `iptables` rules and such turned out to be a nightmare, so
I kept it simple and just installed `caddy` and `nginx` on the vps: caddy for
TLS certificates, and nginx for TCP load balancing and reverse proxying.
(Caddy doesn't officially support TCP load balancing, only via a plugin, which
I didn't want to use because of its ergonomics.)
So now the stack was like this:
```
namecheap -> cloudflare -> hetzner vps -> caddy/nginx -> wireguard -> home workstation
```
I was really happy with this stack, and actually still use it.
The wireguard setup is a set of point-to-point connections all pointing at the
ingress node.
```
home workstation (interface) -> hetzner ingress vps (peer)
hetzner ingress vps (interface) -> home workstation (peer)
```
Home workstation:
```
[Interface]
PrivateKey = <home-workstation-priv-key>
Address = 10.0.9.2
ListenPort = 55107
[Peer]
PublicKey = <ingress-vps-public-key>
AllowedIPs = 10.0.9.0/16 # allows receiving a wide range of traffic from the wireguard peer
Endpoint = <ingress-vps-public-static-ip>:51194
PersistentKeepalive = 25
```
Hetzner vps:
```
[Interface]
Address = 10.0.9.0
ListenPort = 51194
PrivateKey = <ingress-vps-private-key>
# packet forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1
[Peer]
PublicKey = <home-workstation-public-key>
AllowedIPs = 10.0.9.2/32 # this peer should only provide a single ip
PersistentKeepalive = 25
```
It is incredibly simple and effective. I even have entries on the vps for my
android phone, mac, you name it. Super easy to set up, but it requires some
manual handling. Tailscale can be used to automate this, but when I set this
up it wasn't really a mature solution yet; if I started today I would probably
use it.
The important part is that registration is only needed between each peer and
the hetzner ingress vps. So if I add another vps at some point, only that and
the ingress vps will need registration, but my phone would still be able to
talk to it because of the 10.0.9.0/16 AllowedIPs range. That is of course as
long as they share a subnet, i.e. 10.0.9.1 and 10.0.9.2.
Now my caddy things can just reverse proxy to my home workstation, without it
needing a public port.
```
hetzner ingress vps -> caddy -> wireguard ip for home workstation and port for service -> home workstation -> docker service
```
Because of docker bridge networking, even if caddy is running in a docker
container, it can still use the wireguard network interface and reverse proxy
through it. This is what bound, and still binds, all my own services together,
even if they don't share a physical network subnet.
## Hosting
My hosting of personal services is now a mix: the home workstation for plex
and other compute-intensive services, and a few more rented hetzner nodes for
services I use frequently, like `gitea`, `grafana`, and so on.
![infra](2023-07-22-infra.png)
As you may imagine, plex, drone, grafana, etc. shouldn't be exposed to the
internet, but I'd still like the convenience, so I've set up caddy to only
allow the wireguard subnet and to use wildcard domain certs, such that it can
still provision internal https certificates using Let's Encrypt.
There are a bunch more services I've left out, especially my own home-built
things. However, the deployment model is still as hands-on as I mentioned in
the beginning; the services are now just spread across the vps and private
nodes.
## Development
My next iteration for development was using an open-source tool I've helped
develop at work: https://github.com/lunarway/shuttle. The idea is to eliminate
the need for sharing shell scripts, makefiles and configuration between
different repositories. Now, just initialize a template `shuttle.yaml` file and
fill it out with a parent template plan, and you've got all you need. I usually
develop a mix of `nextjs`, `sveltekit`, `rust-axum`, `rust-cron`, `rust-cli` and
finally `go-service`. All of these plans contain everything needed to build a
docker image, prepare a docker-compose file, and publish it. These again aren't
public, because they specifically suit my needs.
I've ended up building my own incarnation of `shuttle` called `cuddle`
(https://git.front.kjuulh.io/kjuulh/cuddle). It isn't made for public
consumption, and was one of the first projects I built when I was learning
rust.
My workflow has changed to simply be `cuddle x ci`, which will automatically
build, test, and prepare configs for deployment. It won't actually do the
deployment step; that is left for CI in drone, which actually runs
`cuddle x ci --dryrun=false`. I've developed a homegrown docker-compose gitops
approach, where deployment simply means creating a commit to a central
repository with a docker-compose file with a proper image version set, usually
a prefix plus a uuid.
My vps simply has a cronjob that every 5 minutes does a `git pull` and
executes a script:
```bash
#!/bin/bash
set -e
LOG="/var/log/docker-refresh/refresh.log"
GIT_REPO="/home/<user>/git-repo"
exec > >(tee -i ${LOG})
exec 2>&1
echo "##### docker refresh started $(date) #####"
cd "$GIT_REPO" || return 1
git fetch origin main
git reset --hard origin/main
# note: "up" does not take -v; -V/--renew-anon-volumes is the closest flag if
# recreating anonymous volumes was intended
command_to_execute="/usr/local/bin/docker-compose up -d --remove-orphans"
find "$GIT_REPO" -type f \( -name "docker-compose.yml" -o -name "docker-compose.yaml" \) -print0 | while IFS= read -r -d '' file; do
dir=$(dirname "$file")
cd "$dir" || return 1
echo "Executing command in $dir"
$command_to_execute
done
# Monitor health check
curl -m 10 --retry 5 <uptime-kuma endpoint>
echo "##### docker refresh ended $(date) ##### "
```
This is simply run by cron and works just fine. I've set up uptime kuma to
send me a slack message if it hasn't run within the last hour.
## The problems
This is my current state, except for some small experiments; you can never
capture everything in a blog post.
The main problems now are mostly related to the manual tasks I've got to do
when creating a new web service, i.e. axum, nextjs, svelte, go, etc.:
1. Create a new repository (manual)
2. Git push first (manual)
3. CI drone enable (manual)
4. GitOps repo update (automated)
5. Hostname inserted into caddy (manual)
6. If using authentication; setup (Zitadel manual)
7. Prometheus setup (manual registration)
8. Uptime kuma setup (manual registration)
9. Repeat for production deployment from step 5
Cuddle actually gives a lot out of the box, and I would quite easily be able
to automate most of it if a lot of the configuration for drone, prometheus,
etc. were driven by GitOps, but it isn't.
For services such as this blog, which is a rust-zola deployment, I also always
have downtime on deployments, because I only run a single replica. This isn't
the end of the world, but I'd like the option of a more declarative platform.
## Visions of the future
I want to spend the next good while converting as many of the manual tasks as
possible into automated ones.
The plan is to solve the root of the issues, and that is the deployment of the
services and, simply, service discovery. I could continue with docker-compose
and build more tooling around it, maybe some heuristics on what is in the
docker gitops repo. However, I could also venture down the path that is
kubernetes.
We already maintain a fully declarative cluster setup at my day job, using
ClusterAPI and flux, so that is the option I will go with.
### Kubernetes
After some investigation and experiments, I've chosen to go with Talos and
Flux. I simply have to copy a vm and register it, and I've got controller or
worker nodes. I sadly have to run some Talos stuff imperatively, but to avoid
the complexity around ClusterAPI this is a suitable approach for now. Flux
simply points at a gitops repo with a cluster path and maintains the services
I want to run.
This means I can run `fluentbit`, `prometheus`, `traefik` and such in kubernetes
and automatically get deployments rolled out.
### Cuddle
From the development point of view, I simply change the docker-compose
templates to kubernetes templates and get the same benefit. Not much to say
here. A release to master will automatically release to prod, and a release to
a branch will create a preview environment for that deployment, which will
automatically be pruned some time after the branch has been deleted.
A prometheus and grafana dashboard maintains a list of which preview
environments are available and how long they've been active.
## Future list of steps
1. Create a new repository (manual)
2. Git push first (manual)
3. CI drone enable (manual)
4. GitOps repo update (automated)
5. Hostname inserted into caddy (automated)
6. If using authentication; setup (Zitadel manual)
7. Prometheus setup (automated)
8. Uptime kuma setup (automated)
9. Repeat for production deployment from step 5
I've got some ideas for step 3, but they will have to rely on a kubernetes
operator or something similar. The same goes for step 6, as long as both have
sufficient apis.
I've moved some of the operations from manual work into kubernetes, but that
also means that maintaining kubernetes is a bigger problem, as docker-compose
didn't really have that many day-2 operations.
Instead, I will have to rely on a semi-automated talos setup for automatically
creating vm images and doing cluster failovers, for maximum uptime and
comfort.
# Conclusion
I've designed a future setup which will move things into kubernetes to relieve
a lot of manual tasks. I will still need to develop tooling for handling
kubernetes and various pain points around it, as well as think up new
solutions for the last manual tasks. Some may move into kubernetes operators,
others into either chatops or clis.


@@ -0,0 +1,30 @@
---
type: "blog-post"
title: "2023-07-23-2023-feature-plan"
description: "2023-07-23-2023-feature-plan"
draft: true
date: "2023-07-24"
updates:
- time: "2023-07-24"
description: "first iteration"
tags:
- '#blog'
---
# Schedule
- [x] get talos to 3 control plane nodes
- [x] get talos to run in dev environment ✅ 2023-10-25
- [x] Make sure proxmox is running fiiiine and dandy ✅ 2023-10-25
- [x] Look into wireguard vdev point to site ✅ 2023-10-25
- [x] Requires replacing 0.0.0.0 with a private subnet ✅ 2023-10-25
- [x] Probably not gonna do that now ✅ 2023-10-25
The plan for now is to consolidate the bootstrapping into a single repository
- Setting up terraform for each node and just spinning it up.
# Cuddle refactor
Cuddle right now is a mess. All the code runs in pretty much the same loop
with no structuring or reason. To


@@ -0,0 +1,170 @@
---
type: "blog-post"
title: "Running Grafana Kiosk on a Raspberry pi"
description: "The motivation behind setting up a raspberry pi for kiosk mode, and sacrificing a tv, is that at work we've got kind of this control center factor, where at a glance, we can quickly get an overview of a problem. If an alert goes, it is super easy to just look up and there you go, you can see the spike in CPU, requests you name it. "
draft: false
date: "2023-07-26"
updates:
- time: "2023-07-26"
description: "first iteration"
tags:
- '#blog'
---
# Introduction
The motivation behind setting up a raspberry pi for kiosk mode, and sacrificing
a tv, is that at work we've got kind of this control center factor, where at a
glance, we can quickly get an overview of a problem. If an alert goes, it is
super easy to just look up and there you go, you can see the spike in CPU,
requests you name it.
And it looks super cool :D
![grafana-dash](assets/2023-07-26-grafana-dash.jpg)
# Prerequisites
You are gonna need a raspberry pi (ofc) or another small mini pc. It doesn't
technically have to be a raspberry pi, but the official docs and this guide
are certainly geared towards it.
I am using:
- A Raspberry pi 4
- A monitor / TV (I usually use the tv)
- A grafana server somewhere reachable by the raspberry pi
# Installation
I won't go too deep into the installation. I've used the Raspberry Pi Imager
to set up an sd-card with Raspberry Pi OS 64-bit; you may have to choose the
32-bit version if you're running on an older pi. I am choosing the default,
not the lite version, as you want the desktop to be set up.
You can change the name of the user if you want; you will just have to swap
out the pi username later.
Simply hook it up to a display and get a keyboard and mouse (or an ssh
connection); I did everything through ssh personally.
## Grafana-kiosk
Grafana actually publishes an app for running a kiosk-mode grafana dashboard.
It is just a single binary and can be fetched from their github page:
https://github.com/grafana/grafana-kiosk
First figure out which architecture you're on, so you can download the binary
that you need:
```bash
lscpu | grep Arch
```
A Raspberry pi 4 returns aarch64, which maps to linux-arm64. Older raspberry
pi editions map to armv7 or below (I believe).
Next grab the release (make sure to look for an updated version):
```bash
export VERSION="v1.0.6" # as of 2023-07-26
export KIOSK_ARCH="linux.arm64"
wget "https://github.com/grafana/grafana-kiosk/releases/download/$VERSION/grafana-kiosk.$KIOSK_ARCH"
chmod +x "grafana-kiosk.$KIOSK_ARCH"
```
Now you should have the executable in the home directory. You are free to move
it to a more appropriate place, but make sure to update the paths later.
Now give it a whirl using the default settings; this will show stats from a
template grafana dashboard provided by grafana themselves.
```bash
./grafana-kiosk.$KIOSK_ARCH
```
After a few seconds the built-in chromium browser will open and you should see
the grafana page loading. To turn it off, simply `Ctrl+C` the process.
Now we have a few choices for auto-setup. The official docs mention a few
different ways you can do it:
https://github.com/grafana/grafana-kiosk/blob/main/README.md#automatic-startup
I will personally use systemd, as it is a fairly convenient and homogenous way
of doing it. I am also not running LXDE on my workstation, so I'd rather choose
something I am slightly more familiar with.
## Setup Systemd
First we need to create a systemd service. I am personally using neovim, but
you are free to choose regular vim or nano if you prefer.
Grab yourself a super user session:
```bash
sudo su
<enter password>
```
And create the file
```bash
nvim /etc/systemd/system/grafana-kiosk.service
```
Now enter the contents:
```bash
[Unit]
Description=Grafana Kiosk
Documentation=https://github.com/grafana/grafana-kiosk
Documentation=https://grafana.com/blog/2019/05/02/grafana-tutorial-how-to-create-kiosks-to-display-dashboards-on-a-tv
After=network.target
[Service]
User=pi
Environment="DISPLAY=:0"
Environment="XAUTHORITY=/home/pi/.Xauthority"
ExecStartPre=/bin/sleep 30
# absolute paths keep Exec* working regardless of systemd's PATH lookup
ExecStartPre=/usr/bin/xset s off
ExecStartPre=/usr/bin/xset -dpms
ExecStartPre=/usr/bin/xset s noblank
ExecStart=/home/pi/grafana-kiosk.linux.arm64 -URL <your-playlist-url> -login-method local -username <your-user> -password <your-password> -playlists true -lxde-home /home/pi/ -lxde true
[Install]
WantedBy=graphical.target
```
This is specifically set up to use playlists. If you want to do the same,
simply create or view a playlist in grafana and copy the shareable link or url
to it. Simple as that. I am also just using a local user here, but check out
the options for using api tokens or an oauth setup if this is used
professionally.
Now we need to instruct systemd to reload all unit files
```bash
systemctl daemon-reload
```
And enable our service
```bash
systemctl enable --now grafana-kiosk
```
This will both enable and start the grafana-kiosk service. Note that this will
take at least 30 seconds to take effect, because of the 30-second sleep; you
can reduce or remove that at your own risk.
If it succeeds, you should after a while see the same as when you executed the
grafana-kiosk binary manually, though now it should display your dashboards
instead.
If nothing happens or you get an error message, simply check with
`systemctl status grafana-kiosk` or `journalctl -u grafana-kiosk`.
That should be it: you should now have a live dashboard view, which will be
automatically updated when you update your grafana setup.


@@ -0,0 +1,343 @@
---
type: "blog-post"
title: "Streamlining Tooling Management: The Idea Behind `Cuddle`"
description: "In this post I go over `cuddle`, which is an advanced code-sharing tool built so solve maintainability issues around templating, scaffolding and sharing of project specific tools"
draft: false
date: "2023-07-28"
updates:
- time: "2023-07-28"
description: "first iteration"
- time: "2023-07-28"
description: "Fixed some typos"
tags:
- '#blog'
---
In a
[previous post](https://blog.kasperhermansen.com/posts/development-stack-2023/)
I explained that I've got my own internal cloud platform I am hosting for my own
applications. In this post I'd like to drill down into a foundational tool I am
using to manage the code both running the cloud, but also the applications
underneath.
First, a small story to explain some of the motivation: when I started out
programming I, like many others, didn't know about git or other vcs, so I just
used files and folders in a brute-force manner; when I had to share some code
I just packed it up in a zip file and yeeted it wherever it needed to go.
That was fine for a while, but eventually I found git and began using GitHub
Desktop, which probably still is the cleanest option for getting someone up and
running with git, even if it can become a shackle quite quickly, as you really
need to know git to use git.
However, now armed with git, github, and so on, I began to have a problem kind
of like the zip situation: every time I started a new project I basically
copied an old project, removed the cruft I didn't need, and then simply began
working anew. A process filled with `rm -rf`, `sed`, and more.
I also didn't like the mono-repository approach; my reasons include that I
wasn't as proficient in building CI systems, and that my projects are so
diverse that they don't need to be in lock step. I would also like to share
some things publicly, which complicates the whole monorepo approach.
A mono repository is a great way to remove some of the repetition, but you pay
for it in terms of continuous integration complexity, as such you need a much
more sophisticated building and testing solution than a regular multi-repository
approach, where each significant component has its own repo.
Anyways, I tried to solve the issue with various templating tools, but that
simply led to another problem: templating is great at scaffolding, but
horrible at maintenance. You get drift in various parts of the system.
My repositories usually consist of a few layers:
1. Project setup
2. CI definitions
3. Code
4. Various artifacts and docs
Most of these can be scaffolded, but in later projects I may change my mind
and add some stuff to CI, a new way of handling environment variables, or
change the general infrastructure code.
And will I go back and update 20 other repositories manually with the same
changes? No.
There are some solutions to this problem, specifically code-doctoring, or
codemods. However, that is like driving a screw with a sledgehammer: it is
simply an incompatible problem, with some overlap. (Codemodding is primarily
built for handling small breaking changes in large open-source projects;
Next.js has used this approach in the past, and it is fairly common in the web
scene.)
## Shuttle
At work we've got an open source tool called
[lunarway/shuttle](https://github.com/lunarway/shuttle). It is basically a
golang cli application, which allows a repository to link to another for the
various parts shown above.
It can link (not as in `ln`, but on a more implicit basis):
1. Project setup, such as .env files, docker-compose setups, various commands,
etc., e.g. spinning up a dev environment.
2. A skeleton of a CI system, such that a project will only need either a
small bootstrap ci, basically just telling it to use shuttle to handle it, or
nothing if using Jenkins (which we do at work).
3. Various artifacts and docs (setup).
This is extremely nice, as we remove a lot of the boilerplate from our
projects, so that we can focus on what is important: the code. This tool, as
we've found out, kind of gives the same benefit as having a monorepository,
though with a staggered update cycle.
It is run like so:
```
shuttle run build
shuttle run test
shuttle run *
```
Each of these commands will trigger some shell scripts either in the local
repository or in the parent plan.
The same shuttle is used in CI to kick off various steps, again such as
`build`, `test`, `generate-k8s-config`, etc.
A shuttle spec at its most basic is just a file pointing at a parent plan, along
with some variables to be used in templating or discovery purposes.
```yaml
plan: "git@github.com/lunarway/shuttle-plan-example"
vars:
name: my-plan
squad: some-squad
```
A parent plan looks the same, but is called `plan.yaml` instead of
`shuttle.yaml`. Scripts can also be defined in either the plan or shuttle
files:
```yaml
...
scripts:
build:
actions:
- shell: go build main.go
- shell: $scripts/build.sh
```
You are free to choose whatever floats your boat. I've also added native
golang actions, which don't require this setup, but that isn't relevant for
this post.
This is a very useful tool, and I could just use that. But I like to tinker
with my own things, so I've built my own to expand on its capabilities, some
of which would require buy-in from the company, which I am not interested in
pursuing for personal projects.
Shuttle itself is also a fairly simple tool, as what is important is what it
provides, not the tool itself.
## Cuddle
As such I've built a tool called `cuddle`, which is a CLI written in rust. My
vision for `cuddle` is that it can support the same features, but on a wider
spectrum, as well as making people able to go one step further.
It runs in nearly the exact same way as above.
One of the problems with shuttle is that it heavily implies that commands
should be written in `shell`; this is great for hacks and small tools, but not
great for delivering a product. I actually solved this for shuttle by allowing
it to call natively into golang without having to write a line of shell
script:
[lunarway/shuttle#159](https://github.com/lunarway/shuttle/commit/1a57a736391d9e500bdf0dce9467627e0eb9430c).
It works pretty well, if I do say so myself, and if you don't have golang
installed, it will use docker in the background to build the plugins needed
for the commands to be executable.
I want some of the same features myself. I've already gotten `rhai` and `lua`
to work in cuddle, but I want something more. I want to use `rust`, and I want
it to be a bigger focus in the tooling, allowing for greater extensibility and
pluggability.
### Code sharing
Right now shuttle always has this structure
```
shuttle service -> shuttle plan
```
This means that a repository can inherit stuff from just a single plan, which
can then include the pipeline and whatnot. But the plan itself cannot inherit
from further plans, which prevents a deep dependency chain. A shuttle plan can
act like a shuttle service inheriting from another plan, but that way it
cannot distribute the base plan's files.
I have already solved this for `cuddle`, such that we can have as deep a
dependency chain as we want. However, I would like to flip this on its head a
bit; see my post on
[distributing continuous integration](https://blog.kasperhermansen.com/posts/distributing-continuous-integration/).
Cuddle right now has a dependency graph like so
```
cuddle service -> cuddle plan -> cuddle plan ->*
```
This basically means that cuddle can have infinite plans (or as deep as the
nesting in file systems allows), however only one at a time. I'd like to split
this out into more well-defined components.
## Cuddle components
Kind of like a more traditional software development flow.
Such as:
```
cuddle service ->* cuddle component
-> cuddle plan -> cuddle plan ->*
->* cuddle component
```
A cuddle component is technically a hybrid between a library and a plugin. It
builds like a library, but functions as a plugin. That is because it should be
cross-platform executable, like a step in a CI platform, but provide more
fine-grained features and an api, such as a cli script; it should execute
either as a docker run, a webassembly function, or one of the built-in
scripting languages. A compiled language is typically a no-go, as compiling on
the fly is simply too slow for immediate execution, unless you use golang,
which typically compiles fast enough for this use case.
Now you may well have a good question: why not just use a regular package
manager and execution environment like rust/cargo, ts/deno, or another
language of choice?
### Cuddle constraints
There are a few reasons; to show them I will first have to highlight why this
is different from regular software development.
Cuddle is a traditional cli; as such it needs to uphold a few guarantees:
1. Firstly `cuddle` as a tool needs to be _fast_, fast enough that you don't
notice that it runs a lot of stuff underneath.
2. It needs to provide a good developer experience. `cuddle` provides its tools
as a product, as such we need a good experience using said products.
3. `cuddle` calls need to be composable, such that you can pipe `cuddle`
output into regular unix or windows tools, depending on your needs.
4. `cuddle` services should not require maintenance to stay up to date, unless
the developers choose to use some of the various escape hatches.
Also I see `cuddle` as an enabler. This means that workflows should be built
around it. You may want to script the usage of `cuddle` runs yourself. This
should only be for the individual. If a squad needs a curated list of tools,
they can simply maintain either their own component or plan and inherit from
that.
For example I've built a tmux workflow around it, which opens a new tab,
splits the window into multiple panes, giving me an auto-runner for tests, as
well as
the binary (so I can access the webserver), a shell, and access to a test or
local database for debugging purposes.
This is highly opinionated towards me, and won't in its present form be useful
for others.
### Releasing plans and components
As such, a traditional package manager won't work. This is mainly because
package managers rely on versioning and lock files to maintain a consistent
set of libraries to use. This is pretty good for a tool's needs, but not great
if we don't want to offload that burden onto developers. If we chose that
approach, we would have a few problems:
1. Each time cuddle or one of its dependent components was updated, we would
need to release a new semantic version, which would require the developers to
update. This can move quite fast; as such it is nearly a full-time job for
developers with big portfolios to maintain said dependencies.
2. Another approach, as we've done at Lunar, is simply pulling a fresh plan
every time. This makes sure we're always up to date, at least as long as the
projects are actually run and released. Here we allow various escape hatches
for setting static commits, branches, tags, what have you.
Without sacrificing too much developer experience on the publishing side, we
need to come up with a good approach for decoupling development from
releasing, like traditional software.
In this case, the plugins and services will internally use semver for
signaling breaking changes. This is useful for showing diffs and whatnot to
developers using the tool.
However, when we release, releasing on a channel instead provides a great deal
of benefit. First, we can choose which appetite a service should run on:
either pre-release or default (stable). Pre-release allows me to dogfood the
plans during testing without breaking all my tools and services. Stable, which
is the default, will as mentioned provide a more thoroughly reviewed change
set.
A semver release is required to release to a channel. This is for a few
reasons, but mostly for providing release artifacts: the services shouldn't
need to build anything themselves, to maintain speed and usability. Each
component will simply function like a regular library, releasing software as
normal.
Each plan will curate a set of components to release, and will handle them like
normal software releases, i.e. version and lock files and all that jazz. For
each release it will receive pull-requests with updated dependencies provided by
`renovate`.
This allows each plan to curate an experience for developers. A backend
engineer will not have the same needs as a frontend engineer, a db admin, or
an SRE, etc. However, this should provide a sufficiently sophisticated
dependency chain that things can actually be built with it, while remaining
maintainable and stable enough.
## Plans as binaries
This means that each plan on release can be turned into binaries, either regular
elf binaries, or wasm. I haven't decided yet, but wasm may have too many
constraints to be viable.
When `cuddle` runs for the first time in a service, it will simply look at the
binary and its self-reported included files, such as a cuddle spec. It will
then form the dependency graph as it goes, downloading all plans as it
navigates the chain.
This is done serially for now, as doing anything smarter would require a
registry to form these graph relationships, which isn't needed right now while
the projects are small.
A `cuddle` service can also contain components; however, those will be built
ad hoc and function like a normal software project. There is no way to get
around that, other than surfacing the components as binaries as well, which
may become a tad complicated to manage.
## Options for not breaking git history
Right now `cuddle` services rely on an external project to function. This
makes history non-viable out of the box, because it implies that _everything_
in the service has to be forward compatible. For example, would `git bisect`
be able to run on a 3-year-old `cuddle` plan, including changes to `cuddle`
itself? Probably not, and it doesn't fit the spirit of bisect, as you wouldn't
get the same binaries.
Instead, what should be done is that `cuddle` detects if it is running under a
bisect or some such (I haven't figured out entirely how to do this yet), and
then picks a release with a release date older than the commit itself.
This gets us as close as we can to reproducible builds, though it is
definitely a downside, so if this is a deal breaker then `cuddle`, or
`shuttle` for that matter, isn't for you. It isn't something I did that often
myself, so it doesn't bother me. Sadly, it is mostly one of those tools you
don't need until you **really** need it.
# Conclusion
In this post I've gone over my own home-built `cuddle` code-sharing tool,
explained why it is useful, and what is wrong with current workflows in
multi-repository organisations. It is a tad bit more complicated than it needs
to be, but it provides a useful way of exploring new use cases and removing
pain points I am currently experiencing.

View File

@@ -0,0 +1,397 @@
---
type: "blog-post"
title: "Superior caching with dagger"
description: "Dagger is an up-and-coming ci/cd orchestration tool as code, this may sound abstract, but it is quite simple, read on to learn more."
draft: false
date: "2023-08-02"
updates:
- time: "2023-08-02"
description: "first iteration"
tags:
- '#blog'
---
Dagger is an up-and-coming ci/cd orchestration tool as code, this may sound
abstract, but it is quite simple, read on to learn more.
## Introduction
This post is about me finding a solution to a problem I've faced for a while
with `rust` caching for docker images. I was building a new tool I am working on
called `cuddle-please` (a release manager inspired by
[release-please](https://github.com/googleapis/release-please)).
I will start with a brief introduction to dagger, then the problem and how
dagger solves it, in comparison to docker.
## What is dagger
> If you already know what dagger is, feel free to skip ahead. I will explain
> briefly what it is, and give a short example.
Dagger is a tool where you can define your pipelines as code. Dagger doesn't
desire to replace your tools, such as bash, CLIs, APIs and whatnot; it wants
to allow you to orchestrate them to your heart's content, and at the same time
bring proper engineering principles to it, such as testing, packaging, and
ergonomics.
Dagger allows you to write your pipelines in one of the supported languages
(which are rapidly expanding).
The official languages by the dagger team are:
- Go
- Python
- Typescript
Community based ones are:
- Rust (I am currently the author and maintainer of this one, but I don't work
for `dagger`)
- Elixir
- Dotnet (in progress)
- Java (in progress)
- Ruby, etc.
Dagger at its simplest is an API on top of `docker`, or rather `buildkit`, but
brings with it so much more. You can kind of think of `dagger` as a juiced-up
`Dockerfile`, but it brings more interactivity and programmability to it. It
even has elements of `docker-compose` as well. I personally call it
`Programmatic Orchestration`.
Anyways, a sample pipeline could be:
```rust
#[tokio::main]
async fn main() -> eyre::Result<()> {
    // connect to the dagger engine (the crate is `dagger_sdk`)
    let client = dagger_sdk::connect().await?;

    let output = client
        .container()
        .from("alpine")
        .with_exec(vec!["echo", "hello-world"])
        .stdout()
        .await?;

    println!("stdout: {output}");

    Ok(())
}
```
Now simply build and run it.
```bash
cargo run
```
This will go ahead and download the image and run the `echo "hello-world"`
command, whose output we can then extract and print. This is a very basic
example. The equivalent `Dockerfile` would look like this.
The equivalent `Dockerfile` would look like this.
```Dockerfile
FROM alpine
RUN echo "hello-world"
```
> The only prerequisite is a newer version of `docker`, but you can also install
> `dagger` as well, for better ergonomics and output.
However, dagger, as its namesake suggests, runs on DAGs. This means that where
you would normally use `multi-stage Dockerfiles`:
```Dockerfile
FROM alpine as base
FROM base as builder
RUN ...
FROM base as production
COPY --from=builder /mnt/... .
```
This forms a DAG when you run `docker build .`, where:
```
base runs first, because builder depends on it.
after builder is done, production runs, because it depends on builder.
```
Dagger does the same things behind the scenes, but with a much more capable
API. In dagger you can easily share sockets, files, folders, containers,
stdout, etc. All of this can be done in a programming language, instead of a
recipe-like declarative file such as a `Dockerfile`.
It should be noted that dagger transforms your code into a declarative
manifest behind the scenes, kind of like `Pulumi`, though it is still
interactive; think `SQL`, where each query is a declarative command.
## Why orchestration matters
Dagger is a paradigm shift because you can now apply proper engineering on top
of your pipelines. Normally, in Dockerfiles, you would download all sorts of
CLIs to manage your package managers, and tooling such as `jq` and whatnot, to
perform small changes to scripts to transform them into something compatible
with `docker build`.
## The problem
A good example is building production images for rust. Building ci docker
images for rust is a massive pain. This is because when you run `cargo build`,
or any of its siblings, you refresh the package registry if needed, download
dependencies, form the dependency chain between crates, and build the final
crates / binaries.
This is very bad for caching, because you can't tell `cargo` to only fetch
dependencies and compile them, but leave your own crates alone.
This in general means that you will cache-bust your dependencies each time you
make a code change to your crates, no matter how small. `Dockerfile`, or
rather `Buildkit`, on its own isn't able to properly split the cache between
these steps, because from its point of view it is all a single atomic command.
Existing solutions are downloading tools to handle it for you, but those are
cumbersome and, tbh, incompatible. For example, `cargo-chef`: it allows you to
create a recipe.json file, which contains a list of all your dependencies, and
which you can move from a planner step into your build step, caching the
dependencies that way. I've honestly found this really flaky, as the
recipe.json-producing image would cache-bust all the time.
```Dockerfile
FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
WORKDIR /app
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
# Build dependencies - this is the caching Docker layer!
RUN cargo chef cook --release --recipe-path recipe.json
# Build application
COPY . .
RUN cargo build --release --bin app
# We do not need the Rust toolchain to run the binary!
FROM debian:buster-slim AS runtime
WORKDIR /app
COPY --from=builder /app/target/release/app /usr/local/bin
ENTRYPOINT ["/usr/local/bin/app"]
```
The above is the original example, but there are some flaws: it relies on the
checksum of the recipe.json staying the same. If you make a change in one of
your crates, it will bust the hash of the recipe.json, because we just load
all the files with `COPY . .`.
Instead, what we would like to do is load in just the `Cargo.toml` and
`Cargo.lock` files for our workspace, as well as for any crates we've got, and
then dynamically construct empty main.rs and lib.rs files to act as the
binaries. This is the simplest approach, but very bothersome in a
`Dockerfile`.
```Dockerfile
FROM rustlang/rust:nightly as base
FROM base as dep-builder
WORKDIR /mnt/src
COPY **/Cargo.toml .
COPY **/Cargo.lock .
RUN echo "fn main() {}" >> crates/<some-crate>/src/main.rs
RUN echo "fn main() {}" >> crates/<some-crate>/src/lib.rs
RUN echo "fn main() {}" >> crates/<some-other-crate>/src/main.rs
RUN echo "fn main() {}" >> crates/<some-other-crate>/src/lib.rs
# ...
RUN cargo build # refreshes the registry, fetches deps, compiles them, and links them into a dummy binary
FROM base as builder
WORKDIR /mnt/src
COPY --from=dep-builder target target
COPY **/Cargo.toml .
COPY **/Cargo.lock .
COPY crates crates
RUN cargo build # Compiles user code and links everything together, reuses cache from incremental build done previously
```
This is very cumbersome, as you have to remember to update the `echo` lines
shown above. You can script your way out of it, but it is just an ugly
approach that is hard to maintain and grok.
## The solution built in dagger
Instead what we can do in `dagger` is to use a proper programmatic tool for
this.
```rust
// Some stuff omitted for brevity

// 1
let mut rust_crates = vec![PathBuf::from("ci")];

// 2
let mut dirs = tokio::fs::read_dir("crates").await?;
while let Some(entry) = dirs.next_entry().await? {
    if entry.metadata().await?.is_dir() {
        rust_crates.push(entry.path())
    }
}

// 3
fn create_skeleton_files(
    directory: dagger_sdk::Directory,
    path: &Path,
) -> eyre::Result<dagger_sdk::Directory> {
    let main_content = r#"fn main() {}"#;
    let lib_content = r#"fn some() {}"#;

    let directory = directory.with_new_file(
        path.join("src").join("main.rs").display().to_string(),
        main_content,
    );
    let directory = directory.with_new_file(
        path.join("src").join("lib.rs").display().to_string(),
        lib_content,
    );

    Ok(directory)
}

// 4
let mut directory = directory;
for rust_crate in rust_crates.into_iter() {
    directory = create_skeleton_files(directory, &rust_crate)?
}
```
You can find this in
[cuddle-please](https://git.front.kjuulh.io/kjuulh/cuddle-please/src/branch/main/ci/src/main.rs).
which uses dagger as part of its `ci`. Anyways, for those not versed in
`rust`, which most people probably aren't, here is what is happening in rough
terms:
1. We create a list of known crates. In this case `ci` is added manually,
   because it is a bit special.
2. We list all the folders in the `crates` folder and add them to
   `rust_crates`.
3. An inline function is created, which can add new files to an existing
   directory; in this case it adds both a main.rs and a lib.rs file with some
   dummy content at a given path.
4. Here we apply these files for all the crates we found above.
This is roughly equivalent to what we had above, but this time we can test
individual parts of the code, or even share it. For example, I could create a
rust library containing this functionality which I could reuse across all of my
projects. This is a game-changer!
> Note that rust is a bit more verbose than the other sdks, especially in
> comparison to the dynamic ones, such as Python or Elixir. But to me this is
> a plus, because it allows us to work in the language we're most comfortable
> with, which in my case is `rust`.
You can look at the rest of the
[file](https://git.front.kjuulh.io/kjuulh/cuddle-please/src/branch/main/ci/src/main.rs),
but now if I actually build using `cargo run -p ci`, it will first do
everything while it builds its cache; afterwards, if I make a code change in
any of the files, only the binary will be recompiled and linked.
This is mainly because of these two imports of files (which are equivalent to
`COPY` in Dockerfiles):
```rust
// 1
let dep_src = client.host().directory_opts(
    args.source
        .clone()
        .unwrap_or(PathBuf::from("."))
        .display()
        .to_string(),
    dagger_sdk::HostDirectoryOptsBuilder::default()
        .include(vec!["**/Cargo.toml", "**/Cargo.lock"])
        .build()?,
);

// 2
let src = client.host().directory_opts(
    args.source
        .clone()
        .unwrap_or(PathBuf::from("."))
        .display()
        .to_string(),
    dagger_sdk::HostDirectoryOptsBuilder::default()
        .exclude(vec!["node_modules/", ".git/", "target/"])
        .build()?,
);
```
1. This loads in only the Cargo files, which allows us to only cache-bust if
   any of those files change.
2. This loads in everything except for some ignored paths; it is a mix of
   `COPY` and `.dockerignore`.
Now we simply load them at different times and execute builds in between:
```rust
// 1
let rust_build_image = client.container().from(
    args.rust_builder_image
        .as_ref()
        .unwrap_or(&"rustlang/rust:nightly".into()),
);

// 2
let target_cache = client.cache_volume("rust_target");

// 3
let rust_build_image = rust_build_image
    .with_workdir("/mnt/src")
    .with_directory("/mnt/src", dep_src.id().await?)
    .with_exec(vec!["cargo", "build"])
    .with_mounted_cache("/mnt/src/target/", target_cache.id().await?)
    .with_directory("/mnt/src/crates", src.directory("crates").id().await?);

// 4
let rust_exe_image = rust_build_image.with_exec(vec!["cargo", "build"]);

// 5
rust_exe_image.exit_code().await?;
```
1. Do a `FROM` equivalent, creating a base container.
2. Build a cache volume. This is extremely useful, because you can set up a
   shared cache pool for these volumes, so that you don't have to rely on
   buildkit layer caching (what is normally used in Dockerfiles).
3. Here we build the image:
   1. First we set the workdir,
   2. then load in the directory fetched above, which includes the Cargo
      files as well as the stub main.rs and lib.rs files.
   3. Next we fire off a normal build with `with_exec`, which functions like
      a `RUN`. Here we build the stub, with a refreshed registry and
      downloaded, compiled dependencies.
   4. We load in the rest of the source and replace `crates` with our own
      crates; this loads in the proper `.rs` files.
4. We now build the actual binary.
5. We trigger exit_code to actually run the DAG. Everything previously was
   lazy, so if we didn't fire off exit_code, or do another code action on it,
   we wouldn't actually execute the steps. Now dagger will figure out the
   most optimal way of running our pipeline for maximum performance and
   cacheability.
## This is very verbose
Rust is a bit more verbose than other languages, especially in comparison to
scripting languages. In the future, I would probably package this up and
publish it as a `crate` I can depend on myself. That would be super nice, and
would make it quite easy to share this across all of my projects.
That project, like in my previous
[post](https://blog.kasperhermansen.com/posts/cuddle/), could serve as a
singular component, which could be tested in isolation and serve as a proper
API and tool in general. This is something very hard, if not impossible, with
regular `Dockerfiles` (without templating).
# Conclusion
I've shown a rough outline of what dagger is, why it is useful, and how you
can do stuff with it that isn't possible using a `Dockerfile` proper. The code
examples show some contrived code that highlights that you can solve real
problems using this new paradigm of mixing code with orchestration, in this
case an unholy union of `rust` and `buildkit` through `dagger`.

View File

@@ -0,0 +1,12 @@
---
type: "blog-post"
title: "A programmers warmup"
description: "This post goes into some of the struggles I've had when starting either a workday, or a new project. And how I fix it with a small warmup"
draft: true
date: "2023-08-24"
updates:
- time: "2023-08-24"
description: "first iteration"
tags:
- '#blog'
---

View File

@@ -0,0 +1,27 @@
---
type: blog-post
title: It is okay to be a Generalist
description: 2023-09-08-it-is-okay-to-be-a-generalist
draft: true
date: 2023-09-09
updates:
- time: 2023-09-09
description: first iteration
tags:
- "#blog"
---
I've had a nagging feeling brewing for a while: I myself am a generalist. Just
look at my [posts](https://blog.kasperhermansen.com/posts/); self-promotion
aside, I have a lot of interests, and lose interest quickly. I look up to my
fellow engineers and envy their talent and work, but I don't have the focus to
keep such a consistent direction.
I build a lot of random projects in all sorts of environments: web
development, backend engineering, development experience, platform
engineering, now data engineering, you name it. However, even if I can solve
problems in any of these fields, and architect solutions built on them, I tend
to lose interest quite quickly; as such I have a whole bunch of half-finished,
somewhat useful projects.
I've found that having a well rounded team is extremely useful, but that

View File

@@ -0,0 +1,85 @@
---
type: blog-post
title: I am cramped
description: This post describes the ideals and values of my upcoming data platform for my homelab
draft: false
date: 2023-09-09
updates:
- time: 2023-09-09
description: first iteration
tags:
- "#blog"
---
Cramped is an experiment in building a fully open-source data platform from
both new and old components. This project is heavily biased towards my ideals:
this generally means no Java where it can be avoided (even if extremely
illogical in the data world; I will dig into this in a later post), and the
system should scale, but should also be able to run in somewhat
resource-constrained environments, like a normal developer workstation.
My primary ideals are:
- Opinionated: I want to get 80% of the way there with 20% of the effort; if
  your use case is in the remaining 20, then this project is not for you.
- Efficient: The project should feel fast, in execution, but also to develop
  on.
- Minimal: The project should feel minimal, even if it has a lot of
  complexities. What is done here is trying to create actual abstractions, and
  keep cross-talk between components as limited as possible.
- Immutable and compose-able architecture: The architecture should feature
  immutable steps, and present a consistent API on which real applications can
  build as their bedrock. Each component should be developed in and of itself;
  this doesn't mean that components shouldn't communicate, but that each will
  have a valid migration or use pattern on its own.
## The name is "Cramped"
I like to use words that describe a trait I'd like to avoid; I usually call
the project exactly what I am trying to avoid doing while developing it.
Sometimes it succeeds, other times it doesn't, but it is my ideal. For
example, in World of Warcraft I played a Warrior whose play style would best
be described as _Coked out of its mind Squirrel_, aptly named Reckless. I gave
it that name to try to better my behavior, and actually play like a sane
person.
Cramped is the feeling of being stuck in a small space, where even if you can
make room to do stuff, you feel constrained, annoyed and uncomfortable. I'd
like exactly the opposite: a platform where you feel like you can make the
changes you need and iterate as you'd like, while having fun working on new
and old parts of the system, and generally trusting that the changes you make,
make sense.
## The goal
As mentioned, Cramped is first and foremost an experiment. I've maintained an
existing data platform single-handedly (to mixed success) since January (we're
in September now), so I've not had the experience of actually building a data
platform from scratch; and with a lot of dependent engineers and analysts, I
can't really do as much experimentation as I'd like. Which gave rise to the
feeling associated with the project name.
I hope this project will help me work out some of the frustrations I've had
with our existing system, as well as gain some knowledge on how to improve it,
and what current best practice is atm.
I am a platform engineer first and foremost, so definitely not a Data engineer
or Data Ops engineer. I am doing this for fun, but also to get some more
experience, so please go easy on me as I work through and share my decisions.
## Approach
I will start by defining my top-level architecture components, and simply
start small on developing each part. Each part should provide insight into
which decisions I am going for and why. I will use esoteric languages and
tools; however, most of them have alternatives that may or may not be useful
for your platform.
Each piece will be developed by itself, and be deployed independently. I will
also use my own tools `cuddle` and `churn`, respectively for
`Development Experience Platform` and `Deployment and Orchestration`. I don't
know why I have a fascination with *C*s... Neither of these tools is ready to
see the light of day yet, but they're incredibly useful for my own
development.
## Next
I will add a Table of Contents of the different parts as we go.
- TBA

View File

@@ -0,0 +1,205 @@
---
type: blog-post
title: "Cramped: Part 1 - initial-architecture"
description: This post goes into the initial architecture and requirements of the cramped project
draft: false
date: 2023-09-10
updates:
- time: 2023-09-09
description: first iteration
tags:
- "#blog"
---
In this post I will dig into the architecture of `Cramped`. I will start by
setting the most immediate requirements for me to get started on actually
selecting components, and then finally produce a diagram of the most major
components and their communication patterns and dependencies.
In total, after this post we should have a good idea of why `cramped` exists,
which components to develop, which metrics to select for, and how the
components should be domain-sliced if they were developed by independent teams
(all of it will be developed by me, however).
The overall process will be somewhat informal, and won't stick to
industry-standard enterprise-architect bullshit; mainly because I despise it,
and have not once gotten a good result from doing it.
## Requirements
This will be a little bit special, as I will act as both the stakeholder and
the developer. But I am pretty hard on myself about what I want, so this
should be a cinch. I also tend to not have any respect for future _Kasper_, so
this should be fun.
1. The system should handle homogenous events, such as IoT data, application
events, and such.
2. The system should be queryable for long-term analytics (3-month history),
   with a median 95th tail latency of 30 seconds
3. The system should be used for real time analytics with a refresh window of 1
minute
4. The system should be exposable via APIs, for third-party services
5. The system should be run on multiple hosts
6. Each part of the system should be deployable on its own
7. As a backend engineer, the only thing I should provide is a schema for my
event, and the upload of the data
8. As a data ops guy, the only thing I do is build my component and hook it
   up in a central repository, configuring where in the pipeline it goes.
9. The service should be versioned, so that schemas can evolve and old snapshots
can be compacted, to remove potential PII
10. As a data engineer, I don't want to deal with tuning the JVM; as such,
    alternatives should be found. This is also to avoid relying too much on
    the knowledge I've already got from doing this.
These requirements are pretty loose. We'll dig into how to actually go about
achieving them, as well as defining metrics for the ones that need it.
## Architecture
The backend services already exist, and look like this:
### Backend
![backend services](assets/2023-09-09-backend-services.png)
What happens here is that we have a variety of business services, which are
used and serviced by either a user or an engineer directly. All the models are
event-sourced using a home-built library (something like `eventstore` could
also be used). Some of these events are then republished as domain events,
which can be used by other services through a common communication layer,
which I will share later.
These domain events are what end up in the data platform.
### Internet of Things
![iot](assets/2023-09-09-iot-service.png)
Like the backend services, we have IoT services for various things, such as
storing measured data, or controllers for doing various tasks. Most of these
call, or are called by, Home Assistant directly. A `Domain event` is created
if needed, which ends up in the data platform.
### Existing communication layer
Services communicate with each other through both synchronous and asynchronous
messaging. For synchronous calls, protobuf, JSON and capnp are used: JSON for
the frontend, usually through GraphQL; internally a variety of protobuf (if
using go) or capnp (if using rust). Usually I don't mix go and rust services
in a single domain, as such this fits fine with my progression into adopting
capnp.
For asynchronous messaging I've developed an in-house format based on capnp,
which travels over a database (using an outbox pattern), and is then finally
captured by a NATS stream.
It sort of looks like this (written in JSON here to be more easily understood,
as capnp is a binary format):
```json
{
"event": "user.identity.created",
"sequence": 142,
"metadata": {
"published": "<some timestamp>",
"domain": "user",
"entity": "identity",
"name": "created"
// "..."
},
"content": {
"userId": "<some-uuid>",
"email": "<some@email>",
"name": "<some full name>"
}
}
```
A schema will be supporting this data, which will also be ingested and applied
later down the line. It will also contain information on how to handle the
contents of the events; some fields may be sensitive, and the data platform
really, really needs to be careful in handling those.
```protobuf
@<id>;
using Metadata = import "cramped.metadata.capnp";
struct Event {
userId @0 :Text $Metadata.id;
email @1 :Text $Metadata.hash.sha256;
name @2 :Text $Metadata.tokenize;
}
```
These annotations can evolve as well. If a change happens to a schema, the
data lake should automatically produce a new version of the data with the
changes applied. This may mean rolling over all the data, and applying the
transformations required.
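Conceptually, ingest would dispatch on those annotations per field, something
like this sketch. The annotation names mirror the schema above, while
`sha256_hex` and `tokenize` are hypothetical stand-ins for the real
implementations:

```rust
/// The field-handling annotations from the schema above.
enum Annotation {
    Id,
    HashSha256,
    Tokenize,
}

// Hypothetical stand-ins for the real implementations.
fn sha256_hex(value: &str) -> String {
    format!("sha256:{value}") // a real version would hash the value
}
fn tokenize(value: &str) -> String {
    format!("token:{value}") // a real version would swap in a vault token
}

/// Apply a field's annotation before the value ever reaches the lake.
fn apply(annotation: &Annotation, value: &str) -> String {
    match annotation {
        Annotation::Id => value.to_string(),         // kept as-is, used for joins
        Annotation::HashSha256 => sha256_hex(value), // irreversibly hashed
        Annotation::Tokenize => tokenize(value),     // reversible via a token vault
    }
}

fn main() {
    println!("{}", apply(&Annotation::HashSha256, "some@email"));
}
```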
These messages are sent over NATS; the event type is used as the NATS routing
key, though the format is changed to NATS' specific flavor.
## Data platform ingest
The data platform ingest pretty much consists of two parts: handling events
with a schema, and handling events without one (or where schema application
failed).
An event will flow through a global NATS event consumer, which will get a
schema applied while being transported to a DataFusion pipeline (an Apache
Spark alternative).
![data ingest](assets/2023-09-09-data-ingest.png)
This is not very different from normal data ingestion pipelines, though
without Kafka and Spark. If schema application fails, or a schema cannot be
found, the event will be stored in the error lake; otherwise it will be put in
the data lake through DataFusion.
DataFusion works like a sort of orchestrator, so a variety of jobs can be
scheduled on it for doing various transformations on the data lake, be it
transforming the data into Parquet, Iceberg or something else.
It may also be used for back-filling from the error lake, or versioning /
re-partitioning the data lake.
## Data lake(house)
The idea behind the data lake is to make it a queryable place, with up-to-date
base models and transformations integrated by the users. It includes support
for compaction, versioning, scale-out queries, transformations and much more.
![data lakehouse](assets/2023-09-09-data-lakehouse.png)
In here we see the data entering the system from the NATS listener, which
pulls data out of the NATS stream and ingests it into the DataFusion pipeline
running on Apache Ballista (a Rust-based, Spark-like thingy). Using Delta Lake
as the query and application layer, we're able to place data in the data lake
in a streamed approach.
Other jobs are run as well, for example compaction and versioning, for
automatically optimizing the query layer, as well as removing sensitive data
marked during ingestion.
A user can then query the lake directly using a regular workflow built on
Delta Lake for querying; the SQL dialect is ANSI SQL, as it is queried through
Trino (distributed SQL).
## Realtime analytics
I will have a need for realtime aggregations; as such, a set of allowed
transformations should end up in ClickHouse for rapid querying.
![real time analytics](assets/2023-09-09-data-real-time.png)
Much of the same architecture from before is used (see the data lake section);
however, we also put data in `clickhouse`. This is to enable rapid querying on
a small subset, and possibly a sliding window, of aggregations. `Trino` also
integrates with `clickhouse` for querying.
Ingest sends streamed data in much smaller batches, but still has the same
guarantees for schema application, as that is done in an earlier step. A
backfill is run regularly to fetch certain transformations, or to restore
precise data consistency in the ClickHouse stream. This means that technically
ClickHouse can be removed entirely and restored to a fresh instance, and you'd
still get the same data.
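That removability property follows from ClickHouse only ever holding derived
data; here is a hedged sketch of the idea, with all types hypothetical:

```rust
/// ClickHouse holds only derived data, so rebuilding it is just a replay.
struct DataLake;
struct ClickHouse {
    derived_tables: Vec<String>,
}

impl DataLake {
    /// Stand-in for reading the allowed transformations back out of the lake.
    fn allowed_transformations(&self) -> Vec<String> {
        vec!["daily_active_users".into(), "sensor_rollups".into()]
    }
}

/// Rebuild a fresh ClickHouse instance purely from the lake.
fn backfill(lake: &DataLake) -> ClickHouse {
    ClickHouse {
        derived_tables: lake.allowed_transformations(),
    }
}

fn main() {
    let lake = DataLake;
    // Dropping and recreating ClickHouse loses nothing:
    let ch = backfill(&lake);
    println!("restored {} derived tables", ch.derived_tables.len());
}
```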

View File

@@ -0,0 +1,187 @@
---
type: blog-post
title: I don't like MacOS (Software dev PoV)
description: This post goes into some of the reasons why I do and don't like MacOS but mostly don't
draft: true
date: 2023-09-12
updates:
- time: 2023-09-12
description: first iteration
tags:
- "#blog"
---
I don't like MacOS. I've been using it professionally for the past year and a
half, and it hasn't gotten better. This post goes into some of the things I
like, and some of the things I don't. This is mostly a post shitting on MacOS,
so fanboys please steer clear.
## Some background
I am a systems / platform / data engineer, so what I need is generally a tool
where I can be productive writing code in a variety of environments, with some
virtualization, mostly docker though. I need a browser for browser stuff, and
in our case for Google Meet as well. I also have a variety of utility apps,
such as Mailspring, Linear, Obsidian and so on, which I use on a daily basis.
I don't use MacOS because I need to build apps; it is simply the company
default and what is recommended, which is why I am using it. The laptop I am
on is a pretty beefy 1st gen M1 MacBook Pro, and the hardware has been
awesome. But this post isn't about the hardware, it is about the software, and
why I think it is crippling the hardware's potential, at least for me.
## The things I like
I should probably start off a little positive, so here are some of the things
I like:
- The user interface is generally quite polished, I haven't felt the need to
change the look of the UI, so that is a plus.
- I like the dock and the top bar. The dock is generally quite clean, even if
  it is more _animated_ than I'd like. The top bar is quite useful; I like the
  integration with my globally available apps, though the navigation leaves
  much to be desired. I work on a large monitor 50% of the time, and it is
  just so, so far away. I tend to only use the `File`, `Edit` and so on menus
  when I absolutely need to; otherwise key-binds are preferred, as they're
  right there.
- It generally just works, even if the bar is quite low. I trust that when I
  update the OS, the OS itself doesn't break, which sometimes isn't the case
  with the Linux and Windows installs I normally run on my personal machine.
  (This is mostly my own doing, as I prefer cutting-edge distributions.)
- The fingerprint integration is quite good, it is responsive, and quite a few
of my apps can use it for various effects.
Quite a small list, but that is how it goes.
## The things I don't like
This will be quite a hefty section, and I should preface it by saying that
I've tried to fix a lot of the following issues using various third-party
apps. But generally they all feel like they are fighting against the OS to
actually provide a good user experience. I will go into some of the specifics
as we go.
### Display manager
I like to be organized and have opinionated workflows; as such, the defaults
of dragging and resizing windows and spaces are clunky at best. I've tried to
use Stage Manager, but I didn't like how fluid it was. It felt like my windows
were in a pool of water, drifting around when I moved another window.
I would like to jump between windows and spaces using keybinds, and feel fast
doing so. I've been using `Rectangle` and have been fairly happy with it. I've
built my own little manual i3wm workflow around it, but it is quite a manual
experience. Recently I've tried `yabai` and friends, and that is actually what
prompted me to write this post. Not because of the tool, but because of all
the limitations MacOS imposes on it.
I ended up switching away from `yabai` again. Not because `yabai` was missing
features, or had an obscure command set. No, because MacOS made it too
unstable to use properly (and it required super-root access, i.e. turning off
the OS's root disk protection).
I simply wanted a display manager which organizes my windows for me, allows me
to create new ones, moves them between spaces, and lets me jump between
spaces. But no. MacOS will automatically change the numbering of the spaces if
you use one more than another; it will think: hippity hoppity, let me move
that space for you. This was not great when yabai would reassign an app to a
space. I would get ghost applications, where MacOS thought they were in one
space, but yabai had moved them to another (where I actually wanted them to
be).
MacOS doesn't allow removing transitions between space changes by default; you
can enable reduced-animation mode, where the space transition changes to a
fade in and out instead of a sliding thingy. I just want a snappy change from
one space to another. I don't care for the transition; yes, it is pretty, and
yes, the Mac display is high-refresh, it is smooth and all that. I just don't
care. I want to quickly change to my email client, Slack or something else and
back in a jiffy. Mac doesn't allow this workflow.
### Display
I often have to share a part of a screen or use an external monitor, either
for a presentation, or at the office because we have free seating. The Mac
often forgets window placements, or craps out entirely, requiring me to unplug
the monitor and plug it in again until it works. This is frustrating, and I'd
hoped that the experience would at least be better than what we have on Linux,
but to be honest it isn't much better.
### Sound
Don't get me started on the sound system in MacOS; it has a life of its own.
Sometimes it just randomly changes my microphone to the built-in one, even
when the Mac is closed and docked. The same goes for the output. I honestly
don't know what is going on, but it sucks when you're speaking to a large
group of people and you just disappear.
### The Dock and app lifecycles
You can hide apps in the dock, but they may also be unhidden if you do a
global action on them. Let's say I have a VPN client I'd like to hide, because
I use it once in the morning and turn it off when I stop for the day. I don't
care for looking at it in my dock the entire day.
When you close an app, for the most part it is just pseudo-dead; it is
actually still stateful, and can be started nearly instantly again. This is
somewhat nice, but when I want to close an app, it should just die. It
shouldn't keep running in the background. `Cmd+q` will kill it for me, which
is nice, so it isn't a huge deal, just annoying, given how troublesome MacOS
generally makes building routine apps that run in the background for a root
user.
### Package manager / App store and updates
Homebrew, or rather its packages, are often abysmal; often the packages aren't
binary builds, which means homebrew builds the binaries on my machine instead
of downloading them. This can be hefty, both in time and battery usage,
especially as homebrew runs sequentially, which I don't understand: it should
know the dependency graph of the different packages, so why couldn't it
resolve them after downloading the taps and casks? No idea, but it is by far
the slowest part of my normal day-to-day update cycle. I've also got a lot of
packages, as I work in multiple different development environments.
I don't use the App Store; I haven't found a lot of value in it, maybe Slack
and so on. Instead I use either `dmg`s or `homebrew`. The installation flow
for apps is childish and obtuse; you have to know how MacOS works to actually
install an app properly. First you download an app off of the internet, a
random dmg file. Then you double-click it and a draggable application pops up;
you move it to the application folder, and then nothing happens. You usually
don't get a confirmation that anything was installed. You have to manually
open the application to verify that it works, and then you can close the popup
and delete the dmg file. This is quite strange; a few family members have
called me because they couldn't understand why an app wasn't installed, when
it turns out it actually was, they just hadn't opened it yet.
There is also the story of actually getting a dmg to install: if you don't
have a signed dmg, it is quite bothersome to install, and probably not
something a non-technical person would be able to figure out on their own.
Though that may not be true.
### Inconsistencies
MacOS default apps work a lot like Windows apps, in that they tell the OS to
play by certain rules, but don't actually follow those rules themselves. For
example, the new settings panel cannot be made wider, and Finder cannot be
hard-killed using `Cmd+q`.
Xcode is rolled out with OS updates (as far as I can tell); this breaks on
every single goddamn update, and I have to reinstall the CLI tools package
again. This also often fails, and I have to retry it a few times for it to
actually update properly. It has been reported multiple times, and has been a
problem for years as far as I can tell from the support tickets, but it
doesn't look like it will be fixed anytime soon. I don't even use Xcode, but
it is still giving me grief.
### Shady stuff
I don't generally care all that much about privacy, especially on a work
machine like this one, as I don't use the Mac for anything other than work.
But seeing background processes sometimes consuming a lot of memory, and then
googling the process names to reveal that it is some kind of trial run by
Apple doing image scans, feature toggling, telemetry and all sorts of crap...
it doesn't feel nice, and I don't want to give a trial process a few gigabytes
of RAM when I don't benefit from it at all.
## Conclusion
There is a bunch more, but I think this is probably enough from me for now. I
can keep using the Mac and MacOS, but it isn't as impressive as people say. I
am probably not the intended user, and that is fair. It is just frustrating,
as MacOS generally markets itself as a general-purpose OS, but also one for
professionals, and after this experience I think it is anything but.

View File

@@ -0,0 +1,21 @@
---
type: "blog-post"
title: "2023-09-29-architecture-for-homelab-kubernetes-clusters"
description: "2023-09-29-architecture-for-homelab-kubernetes-clusters"
draft: true
date: "2023-09-30"
updates:
- time: "2023-09-30"
description: "first iteration"
tags:
- '#blog'
---
Let me start by saying that this is incredible overkill. I am a platform/systems
engineer by trade, as such this is what I am interested in. I like developing
stuff from first principle, and find it fun developing my own version of popular
tools.
I won't be spending too much time on why I am not selecting tools such as
Ansible, Chef, Talos etc., but I will give a reason as to why I am choosing to
go a different route.

View File

@@ -0,0 +1,119 @@
---
type: blog-post
title: A developer platform doesn't have to be flashy
description: Building a developer platform is a messy ordeal, this blog post will go into some of the pitfalls of building/working on one. Getting the most out of your time, and saving those pesky interns from themselves.
draft: false
date: 2023-10-23
updates:
- time: 2023-10-23
description: first iteration
tags:
- "#blog"
---
Your life as a Platform Engineer revolves around the developer platform, a
hodgepodge of pre-built and homegrown tools. But let's get real: not all tools
are equally useful or even necessary.
## The ~~Honey~~ Tooling Trap
We all have our darlings: Kubernetes, Dagger, Docker. But falling
head-over-heels can be dangerous. Soon you're building a UI not to solve
problems but to admire YAML going to production. It's like creating a theatre
for paint to dry. Are you really building a spectator sport?
The real mistake isn't favoring a tool; it's pushing that tool onto your users.
What you end up delivering is a leaky abstraction. Worst case? Years invested in
a platform with no cohesive experience to show for it.
## What a Platform Should Be
A platform doesn't need to be a Swiss Army knife. It should, however, shield
users from internal complexity. Giving raw access to Kubernetes? That's a chaos
generator.
You're not building a consumer product; you're building a tool that your
developers are somewhat obligated to use. It doesn't need to be
feature-complete; it needs to be useful and maintainable. Aiming for a
pristine API when no one's losing sleep over it is misplaced effort.
## The Danger of Complete Flexibility
Over-flexibility bites you back. Take `lunarway/shuttle`, our CLI tool, used for
sharing deployment strategies (think golang service preset). We let users
override scripts, which made internal changes a hellish ordeal. Flexibility done
wrong can fragment your platform until you're supporting as many versions as
there are teams.
Striking the right balance is key. Not every problem requires a sledgehammer;
sometimes a simple mallet will do. Aim for a platform that's robust but not a
maze, flexible but not a free-for-all. The important bit is keeping it
intentional.
## Being at the mercy of others
Having a strong opinion matters, but it's crucial to adapt when better ideas
arise. Leadership like an Architect or Tech Lead gives you the confidence to
build intentionally. Without buy-in, you risk building a golden hammer sitting
collecting dust.
It's a two-way street: meet user requirements or risk shelfware. Never
underestimate the value of feedback in building a robust platform.
Also remember that users want to solve **their** needs; they don't care about
what the platform does or how it works. You have to take feedback and apply it
with your style, opinion and view of the land. It is okay to just say thank
you for the feedback, and ruminate on it.
Maybe you need to solve their exact need; but can you get some synergy by
spending an extra week on your tool to solve the problem globally? Maybe,
maybe not. It is a decision you need to make.
## You don't need that fancy UI
I think most Platform Engineers have spent a good amount of time building a
fancy UI to solve a problem, only for it to be barely used, while the little
shitty shell script you spent 15 minutes on and distributed via Slack is
seeing use everywhere. It sucks and doesn't feel right.
But here you have to take a step back and reflect. Should you really have spent
3 months building a glorified React app driving copious amounts of graphs,
before finding out if there is actually a need for such a thing?
## Coffee-Station Testimonials
The reason your shitty shell script is used so much is that it was born to
solve an immediate need. When you've got a ticketing system or public channel
where people can post their problems, usually only the most confident members
of a team, typically a tech lead, will be the ones asking questions on their
squad members' behalf, especially if those members are interns. This means
that one simple ticket could potentially have 3-4 very enthusiastic interns
behind it.
Simple solutions can bring real value, especially for enthusiastic interns who
might not have the confidence to speak up, but will spread your dirty laundry
like a virus if it seems remotely useful.
## Trimming the fat
You should always keep your platform lean enough to have a gateway for new
tools, which as they mature will get a fresh coat of paint, be redone, etc. At
some point you will end up with a host of mature, robust tools that nobody
needs to touch except to bump for security fixes; tools that are in the
experimental phase; and tools that you're working on replacing because they
don't fit your strategy. But none of these need a UI unless there is an actual
need for it, or your users are simply incapable of using a terminal app.
In our experience, it has been as easy, if not easier, to ship
`lunarway/shuttle` features than it has been to ship various Backstage plugins
(insert fancy UI of choice here), which are all very pretty and seemingly
user-friendly. But most people have to get a feel for how a UI works, because
there is no standard for how to build complex user inputs. There is for
terminal apps, on the other hand.
# Conclusion
Start simple. CLI first, UI later, if needed. Your goal should be a platform
that balances flexibility, functionality, and complexity, not a collection of
shiny, unused tools. Value is derived from utility, not aesthetics. The more
aligned your tools are with the real needs of your users, the more impact your
work will have. I feel like I'm preaching to the choir, but I've been
surprised more times than I'd like.

View File

@@ -0,0 +1,79 @@
---
type: blog-post
title: Why your AI Chatbot sucks
description: Please don't burn too much money
draft: false
date: 2023-10-24
updates:
- time: 2023-10-23
description: first iteration
tags:
- "#blog"
---
The title may seem editorialized, but honestly it is true. The current
landscape of AI bots doesn't deliver anything and isn't interesting to use.
And no, I won't use your shitty AI bot to ask questions about my groceries.
## Yet another AI chatbot
AI bots, like the iPhone apps of yore, are being churned out like warm bread
out of an oven. And funnily, they all look the same, even if they may have
different seasonings. It is actually crazy how much AI bots mimic how we built
apps when apps first became a thing. In the beginning everything was generic
cookie-cutter stuff, such as yet another:
- Downloader for ringtones
- Instructional app
- Tic-Tac-Toe
- Horrible company marketing app with no functionality
With AI bots we fall straight into the 4th item. Putting a small prompt on
your AI bot doesn't make it useful or interesting. Not unless you're creative,
like Wendy's was on Twitter, until every fast food brand caught on and did the
same.
Or useful: can your AI bot actually do stuff? Can I actually buy groceries
directly using my chatbot? No? Then why in all that is holy would I spend the
time to phrase a question for it? If your AI bot doesn't deliver a better
experience or an original take on your product, why would anyone use it? If it
is just yet another user interface for your product, people will just use what
is most familiar to them.
For new products like this, it has to be either extremely useful or original.
If neither fits, you won't get any traction.
A lot of these AI bots feel like talking to a generic receptionist, who likes
to talk way, way too much. Not that I can relate :,)
## Gain traction before it becomes a commodity
If you can get the same product everywhere, you're gonna choose what you're most
familiar with. Get users before this happens, otherwise you would just have
burned a lot of money on compute and R&D.
Already you can tell that many of these companies raking in billions of
dollars in investment building foundational models (the stuff that powers your
favorite chatbot) will go bust in the next few years. Building these models is
expensive, risky, and very short-lived. If these companies cannot out-compete
their rivals, then they will just have burned a ton of money to no avail.
Building these models looks a lot like chip manufacturing: it requires tons of
capital, but in just a few years that old investment is pretty much worthless.
All the users and money go to the ones at the forefront of their field,
whether that is in monthly users, cheapest operations, originality, etc.
I fear for those playing in the generic foundation model field. You may be
able to buy 100 A100s now, but in two years that investment is pretty much
obsolete, so unless you get proper mindshare in that amount of time, you are
doomed. We're probably gonna see some interesting attempts from some of these
companies over the next years, trying to diversify their models into specific
fields or products, because right now being a generic general-purpose chatbot
is a difficult journey, especially up against giants such as Google, OpenAI
(Microsoft), Apple, etc.
GPT4 and rivals are already proving to be very reliable. In a few years, once
the next models come out and they're further optimized for speed and cost,
GPT4 and its fellows will become a commodity. There will probably still be a
market for the best and newest models, like with phones, but for most people
the older, cheaper model will probably be just fine.

View File

@@ -0,0 +1,400 @@
---
type: blog-post
title: Getting through a big project
description: In this article, I share how to navigate a project, identify key moments of crisis, and how to solve them.
draft: false
date: 2023-12-09
updates:
- time: 2023-12-09
description: first iteration
tags:
- "#blog"
---
## Introduction
As an engineer you will work on a variety of projects: some small, some large,
some complex, and some you can handle in 15 minutes. But once in a while you
will be faced with a big project. You may not know initially that it is gonna
be a big project, but somehow it turns out to be one.
In this article I am gonna share some of the experience I've accumulated
working on big projects: the various stages and surprises you will face, and
why it is gonna be okay.
This article is mainly geared at new software engineers, but is applicable to
most engineers as it focuses mostly on the problem domain, not the technical
skills.
## What is a big project
I categorize a big project as around 3 months or longer. This depends on your
own internal barometer of what you can stomach, but I like to do small changes
fast, iterate and so on. 3 months or longer would be a project the team and I
would have to work on for a long time, facing a natural project development
lifecycle which I will share later.
A big project usually constitutes a few factors; each factor you add to the
project multiplies the amount of effort required.
1. Cross-team collaboration:
   1. Do you need external parties to succeed?
   2. Do you need to break some interface to another team, i.e. does a design
      change require work from one or more other teams?
2. Complexity:
   1. Does your team have the skills to solve the problem at hand? This may be
      rooted in how new the team is; if it is new, add a multiplicative
      factor. If the problem is highly technical and outside your immediate
      skillset, add a multiplicative factor.
3. External domain:
   1. Is your team developing this feature on behalf of another domain or
      product?
4. Team size:
   1. Do you have either a too-large team, or a too-small one? This may seem
      counterintuitive: in some cases it is preferable to have a small team to
      stay agile, but for other kinds of projects, getting a good culture and
      collaboration going can take longer than actually developing the
      project.
I'd go as far as to say that if you estimate a timeline for the project
initially, you should multiply it by each of the above.
```
4 weeks * 1.5 ^ (1 + 1 external parties + 1 external domain + 1 small team)
= 4 * 1.5 ^ 4
= 4 * 1.5 * 1.5 * 1.5 * 1.5
~= 20
```
The above may seem extreme, but if you need to involve that many parties there
is a high likelihood of _anything_ slipping the timeline: a simple
miscommunication, etc. The above is just a naive example, but I've seen big
projects exceed timelines by years because of the complexity of collaboration,
or for purely technical reasons.
In my experience you don't get a `big project` unless you've got at least one
of the above. I will dive more in depth into each, but think of it like this:
a project has multiple stages of development, each of the above adds road
bumps at each stage, and learning how to navigate them requires experience,
leadership and persistence.
### Why big projects can feel like pulling teeth out
I don't think I've ever met a person who likes these long drawn-out projects,
fraught with extended deadlines, problems, conflicts and whatnot. In this
section I will dig into my experience of why this happens and, tied to the
previous section, how to navigate this space and keep a cool head.
Like many things in acquiring knowledge about a subject, it is not a clean
`sin` curve; it is a journey filled with entropy and seemingly random events,
which are frustrating and cause problems.
Like the `Dunning-Kruger` effect, it follows something like this:
- Stage 1: The project has been pitched.
  - At this point you feel optimism, confidence and hubris. You've got this;
    you and the team are just beginning to collect requirements (hopefully),
    and everything looks good. It may seem a little overwhelming, but you feel
    like it is gonna be great; you've learned from past projects and this is
    gonna be a breeze.
- Stage 2: The requirements just keep coming in, and nobody wants to sign off
  on what the scope is.
  - At this point you are still in good spirits. You've maybe just finished
    the first part of your PoC, and have a plan to solve the initial
    requirements. You and the team are beginning to get frustrated with the
    ever-expanding requirements, and meetings. But you're still confident you
    can solve the problem within a reasonable amount of time.
- Stage 3: The requirements just don't stop, meetings with stakeholders don't
  go as expected, major delays from third-party vendors are happening, sister
  teams have shifting priorities, it is impossible to get sign-off on the
  project, and your solution is half-baked at best. You also lack key
  personnel to finish the job in time.
  - At this point you feel burnout, exhaustion, and an assurance that you're
    not going to finish in time. However, at this point you communicate that
    you can't make the scope, and with the stakeholders you have a crisis
    meeting where, finally, you decide on a minimal set of features required.
- Stage 4: The project is still half-baked, and you continue to face problems,
  but you have confidence that the problem can be solved. You have pretty much
  full domain knowledge at this point, but the sheer amount of work required
  is daunting. Now you just want to get it over with.
- Stage 5-: At this point either you round off the project, or you continue in
  smaller iterations of the previous stages; it depends on how long the
  project is and what the scope was decided to be.
  - At this point you will discover flaws in your solution, but have no
    allocated time to fix them. You may have decided with your project owner
    to hack something together in the earlier steps to finish the project in
    time. But now you don't have time to go and fix it. The cycle either
    continues with another project, or the same project in another iteration.
The above bullets are a bit extreme, but I've faced variants of them in nearly
all of the big projects I've seen. Some have been easier to deal with than
others, but all in all, experience is what counts. It is also why some
engineers seemingly have higher titles than others; you may feel like your
technical skills are on par, but what counts is that you help navigate these
projects on behalf of your team: shield them, guide them and, not least,
mentor them. (Thanks: Anders Keller Carstensen, Martin Anker Have and Bjørn
Hald Sørensen.)
It is always easiest to see these projects in hindsight, but the most valuable
thing is to identify when you are about to enter one, so that you can get off
to the best start humanly possible. You will not be perfect, but doing the
best work you can, and taking the conflicts required, will help mitigate a
potential disaster.
## Experiences
In the next section I will dive into how to mitigate each of the above. It
will not be a complete list, as my experience isn't complete, but it should
still be useful. It also won't go bullet point by bullet point, but rather
cover some specific topics which should help solve key problems.
### Requirements, requirements, requirements
Requirements are the absolute most important part of a project. It doesn't
matter how experienced or skilled an engineer you are; if the requirements
aren't clear, you are either toiling away to no avail, or trying to build the
Tower of Babel. It is not gonna happen.
Requirements seem like such a benign thing, but the difficulty of gathering
requirements is twofold:
1. You don't have complete knowledge
2. Requirements are decided by a non-technical partner
You don't know everything initially, and discussing solutions with a
non-technical partner is a learned skill that is difficult to acquire. You've
got to be confident in your team's solution and methodology, but also remain
steadfast and be an effective communicator with a non-technical partner.
My best advice here is to act as a unit. You may decide to be the person to
gather the requirements: gather the responsible people in a room or meeting,
poke for the requirements over several interactions with a clear agenda. Get
consensus, and clarify twice or thrice. And if you're being pressed to agree on
a solution, use `we`: say you need to discuss it with the team, and get back to
the steering committee for the project.
An example could be:
> Business person A: We need to have National login implemented within 2 months,
> otherwise people cannot log in to our platform
> You: It will potentially require a lot of work, I will need to discuss it with
> the team to tell whether it is possible or not
In the past, I would try to estimate this on my own and give an immediate
answer. _Don't_. Whatever you give here will set the wrong precedent. Work as
a team, and rely on them. If you are the sole technical lead, discuss the best
course of action with your team and manager. There is no silver bullet here,
but for software the best approach is to say that it is possible within given
criteria: you may need extra hands, extra focus, or it simply isn't
possible.
We are engineers, not magicians. Don't present a concrete answer to a question
you don't actually have an answer for yet; it always ends in grief.
### The interface is everything
If you need ongoing consensus between teams or external parties, you are doomed
from the start. If you need to continually interact with a second or third party
to develop your project, then you will move at a snail's pace, and won't be able
to deliver on time.
The best strategy is:
- Sit together
- Have tech leads collaborate continually
- Scope the project differently
If you can't do any of the above, the project simply isn't possible or sound.
The first is ideal: if you need a delivery to another team, actually
transferring that domain knowledge to them directly by peering is the best.
Usually, because of business politics and bureaucracy, it isn't possible, sadly.
The second is a stopgap, and will take away resources from the team. This only
works if the tech lead isn't the only person on the team with the technical
experience to drive a project. It requires a lot of work from said person, but
can make the project succeed. It is usually done by having the two teams'
senior technical leaders communicate on requirements day by day.
The third is thinking critically: can you scope the project in a way that
another team doesn't need a concrete delivery from the team you're on? This may
be deciding on an early interface or design between the teams, and then
implementing it afterwards. This usually requires follow-up from either of the
previous points as well, but to a lesser degree. Requirements will change and
you will need to change the design on an ongoing basis, so plan for the
communication.
### The conflict not taken
As engineers it can be damned difficult to take on a crucial conflict. This is both
a learned skill and a natural inclination. Some people are just willing to fight
tooth and nail for a solution, and others don't even want to look a stakeholder
in the eyes.
This is a make or break moment. I don't have this natural inclination for
conflict, but have learned to follow through and rely on certain techniques to
make it easier to actually remain steadfast and resolve said conflicts in a
respectful way.
If you are new, you can be owned by a seasoned business professional: be aware,
and use the `we` technique described above. Sometimes other engineers or
business professionals can step over the line; if they do so, you should rely on
your leadership. This may be either a manager or a director. Sometimes you may
need that distance, and that is okay, especially as you learn to tackle these
things.
### Why are we doing team meetings all the time
A problem requires a certain set of skills and conditions to solve, and it is
likely that your team doesn't fit the bill perfectly. You may either be too
small, too large, or not have the technical skills to give a proper evaluation
of the project. I define a small team as 2-3 people, and a large team as
5-10+.
A too-small team means that you will be impacted greatly by changes to the team,
or simply lack the hands or capability to solve said problem. If this is you,
you need to set the correct expectations with your stakeholders that the
solution will have an undefined deadline. Let your leadership have the word
here. It can be very difficult to communicate this in a good way.
A too-large team: funnily enough this can be even more difficult to solve
than a too-small team. It is not actually as rare as it might seem, but given a
large team that wants to act as a unit, consensus is often required. It can
take a long time to get consensus for even simple ideas or features. My
solution here is simply to communicate that we shouldn't have consensus for
everything. And rebuke the notion that you need a scrum master or agile coach or
whatever to drive your team's culture. It is fine to have team events, encouraged
even, but don't fall for the company culture events. It is bullshit in my
experience: you learn by working together, and not all at once. Work on problems
1 to 1, and solve problems that way. Large discussions can be had, but if you
decide to open said discussion, be prepared with what you think will be an
acceptable answer beforehand and be ready to defend it.
### Damn, this project is drawing out
This will never become a solved problem, but you may become more equipped to
handle it. A project will have many cycles, over weeks, months, years. It can
change at the flip of a coin or be drawn out until you don't feel like you're
making any progress anymore.
This point is the simplest and hardest at the same time. It means that you
will in time gain the experience to face these projects, but will never find
the perfect tools to fix them. We're sometimes prone to refine the tools for the
project we just solved, but those tools won't be sufficient to handle the next
project in the pipeline. Because if they were, then it wouldn't be a big project.
With more experience you will learn to guide your team and yourself through
these phases. You won't be unaffected, but you've been here before, so you know
what is going on in the trenches, even though you're still in the trenches. It
is a constant balance, and the most important thing is to listen to your team:
we each have different capacities, and we have different breaking points. Be
aware that especially junior developers should be shielded from too much
complexity, but also that stress needs to be manageable for everyone.
### Deadlines, always deadlines
A deadline is such a frightening thing. It is a target and a guillotine at the
same time. But from experience that is not actually true, at least not in a
reasonable organisation. A deadline is a target, yes, but not a guillotine.
After a few projects you will notice some patterns. A project can always be
extended given proper cause. It sucks having to say that something cannot be
done in time. But it has to be done.
As I always say to the teams I am working on, a project has three levers you can
change:
1. Scope
2. Resources
3. Time
If requirements are set in stone then scope cannot change; if you cannot get
extra headcount then resources cannot change. As such the only lever you've got
is time. Time is your last resort, but it is possible to extend a deadline.
Have a talk with your manager / product owner; often it isn't as bad as it may
seem.
### I only see walls
Tunnel vision is a real thing in these projects. When you've worked on them for
a long time, you lose your objective sense of how important the project is, how
valued it is, how well your solution fits, etc.
There isn't a concrete solution here, other than being aware that if your
project isn't the business' primary goal, then it won't get extended funding,
or that the deadline isn't as important as it may seem.
If it is as important as the business says it is, they will throw money at the
problem to get it fixed. If not, then it isn't as important as it may seem to
you. This can be a bitter pill to swallow, as the project you've toiled away
on may not actually bring the value to others that it initially seemed to.
This is also why it is great to work iteratively, while never giving up quality:
solve a problem just as well as you can, but not better. I've never seen
gold-plating give any benefits in the past, other than having to break it all
down and start from scratch.
### We'll do this now, but fix it later
This is a lie we tell ourselves when giving estimates: we'll just do this little
hack here, and then get back to it next quarter to fix it. If this is what you
tell your business leader, in 90% of cases your team doesn't have the maturity
or agency you think it does.
You won't get back to fix this dirty hack; it will be a stain on your conscience
for the next years, and the business won't care. If you can't professionally
agree with a solution then don't propose it. If it sets you back 10 steps and
moves you forward 1.5, is that really a win? No.
### Communication, I'd rather not
Communication is the most vital part of a big project. You'll have stakeholders
and partners. There is a fine balance between oversharing and undersharing. You
may choose to keep key learnings to yourself, because you know they are gonna
risk the deadline. Or you may overshare, causing teams to have `collaboration`
meetings each week with no direct benefit.
_Do not_ plan regular meetings unless many teams need to succeed on a high-level
task. Do share your learnings internally and ask for feedback from your manager
/ product owner. They will need the information to share more broadly, and if
you have to do it yourself, know the recipient. Being an effective communicator
is a learned skill, one I've struggled with my entire career; sharing concise
information in a language that the recipient can understand and act on is
crucial.
I will go into this in a future article on self development in this space. Just
know that if all your communication is intentional, and targeted at a specific
reader, you're off to a good start.
### I can't do this anymore
It may not happen in every project, but especially on newer teams, someone will
break. Whether it is stress, the wrong expectations, giving up, etc. It is
important to listen to yourself and the team. We don't have the same ideals,
work ethic, interests, etc. As such you may feel that the project is bearable,
but another may be on the brink. Make sure you communicate. If you're the
technical leader, make sure to set the right expectations of your teammates;
they are not you. Don't project your skills onto them. You may have a vision for
the project, but they might not know it or have the skills to implement it.
Doing these projects is much more a team sport than a personal one. You will
find that as a technical leader, you will spend more and more time
communicating and mentoring rather than implementing. As such, keep your ear to
the ground and learn to mentor instead of ripping the keyboard away from your
teammates. They are not you!
If you're a junior and you see your tech lead being stressed, open the
communication as well. They may be bottling up all the technical problems
themselves. They can break just as well as anyone else.
I've been in either situation and it is just as hard. But I've never had a bad
experience asking a person how they felt. And if they rebuke you, then the
company is probably not one that values you.
# Conclusion
In this article I've shared some of my thoughts on how big projects are done,
and some of the experiences and learnings I've had while doing them. It is by
no means a complete list (and if you feel something is lacking, reach out at
`contact@kjuulh.io`; I am also available for general mentoring). But it should
hopefully give some ideas, and help newer engineers navigate the difficult
space that is project development and collaboration.

View File

@@ -0,0 +1,19 @@
---
type: "blog-post"
title: "2023-12-29-Don't be afraid of bespoke solutions"
description: "2023-12-29-Don't be afraid of bespoke solutions"
draft: true
date: "2023-12-30"
updates:
- time: "2023-12-30"
description: "first iteration"
tags:
- '#blog'
---
This is an argument I see a lot, both at work and online: we shouldn't build
this solution ourselves, there must be something out there to solve it for us.
It is a nice sentiment, and often it is worth researching whether there truly
is a solution out there for your issues. But it is also sometimes a sign that
you don't understand the problem fully, and/or don't yet have the skills to
solve said problem.

View File

@@ -0,0 +1,165 @@
---
type: blog-post
title: Play test your tools
description: You just had this brilliant idea, you had this one problem that you just found the solution to while showering, you couldn't wait to go back to your pc and put it in ink, or rather bytes. By 1 in the morning you succeed, filled with excitement you write in slack to your coworkers or peers that you've just burnt the candle again and have something exciting to show in the morning.
draft: false
date: 2024-01-26
updates:
- time: 2024-01-26
description: first iteration
tags:
- "#blog"
---
You had this one problem that you just found the solution for while showering;
you couldn't wait to go back to your pc and put it in ink, or rather bytes. By
1 in the morning you succeed. Filled with excitement, you write in slack to
your coworkers or peers that you've just burnt the candle again and have
something exciting to show in the morning.
In the morning, still filled with excitement and without having prepared a
demo, you jump straight in and show your groggy coworkers your new tool, which
by the way barely limps along, but in your mind is enough to show the idea, and
the glorious promise of the behemoth you just constructed.
Jumping straight into the technical bits, you can slowly feel the tired looks
from your coworkers; once again it happened. As you explain the idea and
solution to the degree you're capable of so early in the morning, questions come
in - Is this needed? What does it do for us? Do we really need the complexity?
Questions that in your mind suggest negativity, and maybe a slight hint of you
being a bother.
You leave the demo meeting slightly dejected and disappointed - was what you
built a waste? Should you just throw it in the bin?
> No
No is often the answer. But it is clear that the tool will need a host of
iterations to be useful. And that is exactly what you need to keep in mind.
What gave your coworkers the tired look was not necessarily your idea; it was
the ratio of complexity vs benefit. Did your barely-limping-along behemoth
provide enough benefit for your team and your company to be worth the
complexity? Maybe, maybe not. But that is a discussion you should have.
Don't be dejected. It is okay to grieve for an idea and a solution (we're
engineers after all). But get back on the horse, try again, and this time
include your peers.
## Focus on the right thing
When doing these demos you need to do one thing and one thing only: communicate
the idea of your solution to a problem. Showing technical details often doesn't
matter if they're not part of the problem themselves. This means that as you
demo these tools, the focus should be on the solution and not on anything else.
It should be presented in the manner you would expect from a keynote: the demo
should be practiced, and work without showing the bits that are held together
with duct tape. And more often than not, it should be short and straightforward.
It usually isn't a good idea to plan demos for the morning when people are just
getting into work; do it just before lunch, that way you can also talk about it
during lunch if there is enough interest.
But take feedback like you would during a pull request; as software engineers
we're used to giving and taking feedback all the time, though often for trivial
features. This is different: you often can't help being married to your
solution, and as such you can't take these questions for what they are; feedback.
To build products, which is usually what these tools and ideas are in some form
at least, you need to take objective feedback: take a step back, pull the wool
from your eyes and look at what you've built with clear eyes. Take a day or two
away from it, and come back to it, so that you can evaluate it as well.
## Someone stole my idea
This happened to me a bunch of times when I was a junior engineer, and
occasionally happens now and then as well. You may feel like you successfully
presented your solution to your peers. It wasn't met with much interest, but
either during the discussions afterwards or a few weeks later, someone builds or
presents a solution that you feel is eerily similar to your idea, but is met
with all the praise and excitement that yours didn't get.
This can of course be nefarious, but more often than not, it was your fault. You
felt like you built a solution to a problem, but you didn't actually communicate
the solution sufficiently. As such no one was able to see it for what it was;
someone else may even have been inspired by the idea of what your tool could be
and taken their own spin at it. As they'd churned on the idea a bit more, and
may've been a better communicator, they could properly present the idea and
solution.
> Your idea is only as good as your ability to convey it
As tool builders we need to get it out of our heads that building a tool is the
same as conveying it. It isn't, at least not by itself. One of the reasons
Kelsey Hightower is as successful as he is, is that he is a master communicator
and tool builder. He knows exactly what he needs to communicate and how much of
a solution to show to get buy-in.
But don't accept someone stealing your idea; if you feel like they got all the
credit for your work, have an honest talk about it. Don't become bitter
because of what may've simply been a misunderstanding.
## As a playtest
A playtest is usually the practice of testing a piece of a game to check it for
a variety of factors. It may be a general playtest to test if the game is fun to
play, is cohesive, or in general if it works at all. Collect feedback and
iterate.
Your tools are not so special that they don't need refinement, and you're
absolutely married to your solution, as you've just built it. You need external
feedback. You may want this feedback from your team, but they might not be as
unbiased as you'd like, for some of the factors given above.
Instead reach out to a few colleagues on another team, or ask in a forum whether
someone would like to try your tool out. It will help you practice your
presentation, and get some feedback from someone a little less biased towards
your idea. That way you have room to answer the difficult questions when you
end up presenting the idea to your team during a demo.
I really recommend this practice, but remember to ask for feedback, and choose
your testers with intention, such that it doesn't just become a show and tell,
but an intentional session that improves your ability to convey the idea you'd
like to. You may also get enough feedback simply from seeing them interact with
your tool, and from the questions they ask. If they ask the same questions as
your colleagues did in the earlier section, then the idea may not have been
conveyed properly, or been too confusing.
## Your idea isn't yours anymore
Once the idea, or rather the solution, is accepted, you need to realize that it
isn't yours anymore. You may still be the initial author, but you probably
aren't gonna be able to recognize it a few iterations from now.
As your tool is let out in the wild, people will struggle with certain parts,
find bugs; some of your smart ideas will turn into sharp corners and will need
to be remolded entirely. The tool may even need a pivot, because your initial
solution didn't actually cover the right problem space, as you didn't have
full knowledge of said problem space when you began developing the solution.
## Ask for meta feedback
If you feel like your presentation didn't go the way you wanted it to, either
ask the most senior person on the team, or give the same presentation to another
senior person and get feedback on the presentation itself. It is much easier to
see what went wrong from outside your own perspective. Getting this feedback on
how you convey a message is crucial for becoming a natural at it.
Communication is hard; really, really hard. You may have it down one day, and
the next you whip up a message in 10 minutes and get no engagement, or simply
confusion. You always need to be intentional with your communication. If you're
not, you're simply producing white noise.
# Maturity
At long last your idea is mature: it is widely adopted, your team uses it as
part of their daily work, it has its quirks, but it works just fine. The tool
now belongs to the team or organisation and you don't feel the same ownership
of it as you did in the beginning, but you're still somewhat proud of having
presented it, and of it being accepted.
At this point you're ready to think about the next iteration of the idea and how
it will solve problems in a whole new way. It will be janky, it will be
frustrating, you will be dejected, but you will continue with the learning that,
yes, you need to be a great tool builder to implement these solutions to your
ideas, but being a great communicator and being intentional with your
presentations is just as important as being able to put your idea into bytes.

View File

@@ -0,0 +1,82 @@
---
type: blog-post
title: Building your own personal development platform
description: I am going to start building a series called "The Workshop" where we are going to do what it takes to see what a personal development platform looks like, build our own, roll out services in production
draft: false
date: 2024-01-29
updates:
- time: 2024-01-29
description: first iteration
tags:
- "#blog"
---
A personal development platform is to a software engineer what the workshop is
to a craftsman. It is the place we come to tinker, relax, expand our skills, be
frustrated, be happy, and spend too many hours on our own refining our skills
and building our own sanctuary.
There is nothing like kicking back in the chair, watching your own services get
merged in git, built, deployed, and finally become available to you and your
users. I expect it to be the same as when a craftsman holds up a fixture that
they made themselves using the CNC they themselves assembled. You can see all
the flaws, but you're still satisfied with the tool and rely on it to do work.
Nothing is perfect; a tool never is.
To me, my personal platform is a labor of love, time and refinement. I've spent
countless hours tinkering, solving problems, and just having fun. Again I should
stress: my platform will never be yours, never truly. You might see a snapshot
of it, it is open source after all, but I constantly iterate, improve and pivot.
Unlike a professional product it is never whole, or at least not attempting to
be.
To expose my platform to you means that I'd have to curate it in a way that
makes it presentable, for even I sometimes get lost in the jungle that is my
tools. As an engineer you will constantly build, and build, and build. It can
sometimes be difficult to remember how a certain part works, even if it was me
who built it from scratch.
# Sharing my platform
I'd still like to attempt to share said snapshot of what my own workshop looks
like: give you a little peek inside, have you play around with the tools a bit,
maybe do a project or two, then leave. Take the learnings back, and develop your
own in your own image, with your preferred approach; maybe start with a few of
mine, and continuously build on them.
So for the next few articles, I'll focus on key pieces of my own platform. We'll
start with something basic, like hosting web servers and whatnot at home, until
we can gradually deploy some services automatically from git. I've built all my
tools on open source software, so you should be able to pick apart and replace
parts as we go.
Nothing should be so complicated that you couldn't just choose another
programming language or application to do the same work. I'd even encourage it:
choose whatever fits your needs and wants best. Remember to only work on the
tools you need and want to build. If you don't find it fun building applications
or tools, choose those tools from the open source library and build what you
want on top of that.
My own platform is built from the principle that I'd like a convenient way of
releasing software to others, be they tools or services. That is because at work
my role is a Platform Engineer; I spend far too many hours thinking about how to
remove work from others, such that they can work on what they do best.
You will have a different goal. You will not know it in the beginning, and you
probably won't know it by the end, but you should get a taste of what you find
most enjoyable and pivot from there. Sometimes my articles won't be picture
perfect, so I expect you to go and tinker if you find a problem, or contribute
to the template repository either as an issue or a pull request.
## The workshop series
I will continue to develop this series over time; for now feel free to follow
along or ask questions in:
[https://discord.gg/vwNCMTSVxA](https://discord.gg/vwNCMTSVxA) and on my own git
server:
[https://git.front.kjuulh.io/workshop](https://git.front.kjuulh.io/workshop)
(it will be synced to GitHub as well:
[https://github.com/kjuulh/workshop-intro](https://github.com/kjuulh/workshop-intro))
they are all fairly empty for now. More to come.
This page should serve as the index of what I endearingly call `The Workshop`
series.

View File

@@ -0,0 +1,669 @@
---
type: blog-post
title: Building services in rust
description: This post should show that it is not just possible to write production ready services in rust, but that it can be a nice experience without too much boiler plate. I also go over some does and dont's so that you hopefully have a nice experience
draft: false
date: 2024-02-19
updates:
- time: 2024-02-19
description: first iteration
tags:
- "#blog"
---
Building business services might sound like a boring topic, and in some
instances it can be. But from my point of view, building business services is a
test of how ready a language or tool is to achieve mainstream appeal. With
this article I hope to show that Rust is right around that point, if not already
there, so let's jump right in.
# But what is a business service
First of all we should define some criteria for what a business service is,
otherwise we have nothing to measure against, even if we're gonna use half-baked
fluffy metrics to decide anyway.
A business service is a long-running application, capable of running multiple
different endpoints at once: maybe an http stack on one port, grpc on another,
sending logs somewhere, calling external services, putting stuff on a queue, you
name it. It is a multifaceted application that serves an external need of some
sort and is basically a shell around some business logic.
A business service can be anything from a microservice, which serves one of the
above, to a monolith; it doesn't really matter, as those are orthogonal metrics,
i.e. they are about scale, not capabilities: how much damned stuff you can cram
in a box, and how many engineers to page once it goes down.
Most importantly of all, a business service should be testable, it should be
relatively self-serving when not receiving direct maintenance other than patch
upgrades, and it should do its absolute damnedest to fulfill its requirements:
serve a business need.
To me the most important is testability; can it serve:
- Unit tests
- Integration tests
- Acceptance tests
If the above are cumbersome to do, then the language isn't ready for
mainstream usage. It doesn't matter how capable, how fast, or how secure it is;
if normal engineers can't write code in the language without arcane knowledge,
then it isn't ready.
So to sum up the criteria:
- Ergonomics
- How easy is it to manage external dependencies
- Testability
As you can probably tell, these are not some of Rust's core values, maybe except
for ergonomics, but I'll show that it is still possible to do great work in it.
# Rust as a Service
Let's start from the top and go through a few architectural patterns that are
common in business software, such as handling dependency injection, interfaces
and concrete types, the strategy pattern, etc., and which tools you need to rely
on to achieve them.
## Dependency injection ~~hell~~
Dependency management, or injection as it is normally called for services, is
simply a way for a function to take in some abstraction from the outside and use
its functionality, without having to deal with the complexities of how it
actually implements said functionality. It is also extremely useful for testing
a piece by itself.
I come from an object-oriented background, so that is usually how I go about
solving these issues, especially as Rust's functional programming model has some
ergonomic downsides that make it difficult to do dependency injection with it
(for reasons I won't go into here).
Usually you use dependency injection via a constructor:
```rust
pub struct MealPlannerAPI {
    meal_planner: MealPlannerDatabase,
}

impl MealPlannerAPI {
    pub fn new(meal_planner: MealPlannerDatabase) -> Self {
        Self { meal_planner }
    }

    pub async fn book_meal(&self) -> Result<()> {
        self.meal_planner.book_meal(/* some input */).await?;

        Ok(())
    }
}
```
Quite simply, we take in some `struct` or `trait` in the `new` function, which
serves as our constructor, and we can now just call `book_meal` on the
`meal_planner` inner type. This has a few benefits: if the input is a trait, we
can mock it out, or we can use a macro to mock a struct and swap a concrete
value with it (even if I don't recommend it, but more on that later).
Let's say for now that `MealPlannerDatabase` is a `trait`:
```rust
#[async_trait]
pub trait MealPlannerDatabase {
    async fn book_meal(&self) -> Result<()>;
}

pub struct MealPlannerPsqlDatabase {
    psql: sqlx::PgPool,
}

impl MealPlannerPsqlDatabase {
    pub fn new(psql: sqlx::PgPool) -> Self {
        Self { psql }
    }
}

#[async_trait]
impl MealPlannerDatabase for MealPlannerPsqlDatabase {
    async fn book_meal(&self) -> Result<()> {
        sqlx::query("INSERT ... INTO ...").execute(&self.psql).await?;

        Ok(())
    }
}
```
This is a small example, which we'll make use of later, but notice that we've
split the implementation into two parts: the interface (contract) and the
concrete type (implementation). This helps us in a few ways, i.e. we can swap
out the implementation for a different database in this case, or a mock if we
want to test the `MealPlannerAPI`.
We can also add the `mockall` attribute to our trait to automatically get mocks
generated. This is quite convenient, but comes with some downsides, in that it
can reduce the feature set that you would normally have available: for example
you cannot use `impl Trait` in functions.
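As a minimal sketch of what that can look like (assuming the `mockall` crate;
note that `#[automock]` has to sit above `#[async_trait]` for async traits):

```rust
use async_trait::async_trait;
use mockall::automock;

// Generates a `MockMealPlannerDatabase` type with `expect_*` methods for tests.
#[automock]
#[async_trait]
pub trait MealPlannerDatabase {
    async fn book_meal(&self) -> anyhow::Result<()>;
}
```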
The keen-eyed among you may notice that the above code wouldn't actually
compile as used earlier, i.e. you cannot take a bare trait as input to a
function without a pointer. This is because we don't know the size of said trait
(it may be any of the possible implementations, or none), so we need some
abstraction around it. Secondly, the database might have some requirements that
it needs to be called exclusively, so it may need an `Arc` or a `Mutex`, which
we didn't deal with either.
For that we'll make use of the facade pattern, i.e. we're gonna create a facade
such that our external code doesn't have to deal at all with us having a trait,
a mutex, an arc, whatever. The only thing that matters is that it can depend on
the functionality without too much hassle.
```rust
use std::sync::Arc;

#[derive(Clone)]
pub struct MealPlannerDatabase(Arc<dyn traits::MealPlannerDatabase>);

impl MealPlannerDatabase {
    // Options we want to expose
    pub fn psql(psql: sqlx::PgPool) -> Self {
        Self(Arc::new(MealPlannerPsqlDatabase::new(psql)))
    }

    // Escape hatch
    pub fn dynamic(concrete: Arc<dyn traits::MealPlannerDatabase>) -> Self {
        Self(concrete)
    }
}

impl std::ops::Deref for MealPlannerDatabase {
    type Target = Arc<dyn traits::MealPlannerDatabase>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
```
Now you could technically have an `Arc`, `Mutex`, whatever, and the consumer
would be none the wiser; it still allows you to use the inner functions as you
normally would: `self.meal_planner.book_meal().await?`.
You can even expand on it with an actual inner pattern if you need a Mutex, or
something more complicated. The `dynamic` constructor means we can still use it
in a test, as we can replace the internals with our mock.
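To illustrate how the escape hatch and a mock compose in a test, here is a
hedged sketch reusing the names above (and assuming a `MockMealPlannerDatabase`
generated by `mockall` as shown earlier):

```rust
#[tokio::test]
async fn book_meal_hits_the_database() -> anyhow::Result<()> {
    let mut mock = MockMealPlannerDatabase::new();
    // Pin down how we expect the dependency to be used.
    mock.expect_book_meal().times(1).returning(|| Ok(()));

    // Swap the concrete psql implementation out via the escape hatch.
    let database = MealPlannerDatabase::dynamic(std::sync::Arc::new(mock));
    let api = MealPlannerAPI::new(database);

    api.book_meal().await?;

    Ok(())
}
```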
### Shared dependencies as a Service
The last pattern I want to show is shared dependency management. For that we'll
use a few more Rust features. The cornerstone of the pattern is to create a
single shared resource, which we can use to new up all the required dependencies
we need.
```rust
use std::sync::Arc;

pub struct App {}

#[derive(Clone)]
pub struct SharedApp(Arc<App>);

impl SharedApp {
    pub fn new() -> Self {
        Self(Arc::new(App {}))
    }
}

impl std::ops::Deref for SharedApp {
    type Target = Arc<App>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
```
Again we use a custom deref to make sure we can reach the inner type without
having to wrap everything in Arcs and/or Mutexes. I forgot to mention why we do
so: when you've got 10-100 dependencies, it becomes a little long in the tooth
to have to wrap each and every thing in Arcs, because the `SharedApp` is a
shared object and needs to be `Clone`.
Before we move on to how to actually use this pattern, I'd like to give a
recommendation: the App should not contain every single struct you need; it
should contain foundational IO resources, such as a database connection pool,
queue manager, grpc connection, logger instance, etc. Things that need setup
from external configuration.
```rust
pub struct App {
    psql: sqlx::PgPool,
    // ...
}
```
That means that it won't contain `MealPlannerAPI` or `MealPlannerDatabase`;
we'll get to those in another way.
To actually get to the concrete types we'll use something called extension
traits:
```rust
// file: meal_planner_api.rs

pub struct MealPlannerAPI {
    // skipped for brevity ...
}

pub mod extensions {
    use super::*;

    pub trait MealPlannerAPIExt {
        fn meal_planner_api(&self) -> MealPlannerAPI;
    }

    impl MealPlannerAPIExt for SharedApp {
        fn meal_planner_api(&self) -> MealPlannerAPI {
            MealPlannerAPI::new(self.meal_planner_database())
        }
    }
}
```
This means that we can now, from the outside, call `app.meal_planner_api()` and
we'll get an instance of the concrete type. If you've got a high-volume service,
you can either choose to move these values down into the shared struct itself,
or cache them in the `SharedApp` using an object pool. In most cases the
performance cost is negligible; in some cases Rust will even inline these
functions, even though they're traits, to make them faster.
The database extension is the same, but uses values on self instead:
```rust
// file: meal_planner_database.rs

pub struct MealPlannerDatabase {
    // skipped for brevity ...
}

pub mod extensions {
    use super::*;

    pub trait MealPlannerDatabaseExt {
        fn meal_planner_database(&self) -> MealPlannerDatabase;
    }

    impl MealPlannerDatabaseExt for SharedApp {
        fn meal_planner_database(&self) -> MealPlannerDatabase {
            MealPlannerDatabase::psql(self.psql.clone())
        }
    }
}
```
Notice that we use the `psql` constructor here, and that this acts like a normal
struct, even though it fronts for a trait. This is super convenient. It also
means that you could technically create multiple `App`s for different purposes
and only implement the extensions for those that need said dependencies.
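Putting it together, a call site only needs the extension trait in scope; a
small sketch, assuming the modules above:

```rust
use crate::meal_planner_api::extensions::MealPlannerAPIExt;

async fn handle_request(app: SharedApp) -> anyhow::Result<()> {
    // The extension trait news up the whole dependency graph behind the scenes.
    let api = app.meal_planner_api();

    api.book_meal().await
}
```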
This should cover all of our needs for handling dependencies. If you'd like,
you can see this in action at: https://git.front.kjuulh.io/kjuulh/flux-releaser,
where I heavily use this pattern both for a cli and for a service in the same
crate.
## Dependencies all of them
We may have to run multiple different hot paths in our code: code paths which
see high traffic, or where the main traffic comes through. This may be an http
runtime, grpc, messaging, etc.
For that, right now, tokio is the name of the game. This is also why I didn't
touch on the question above of why I marked nearly every function as async: if
you develop this kind of software, it is a given that nearly all functions will
touch some IO, and as such will be async; if not, you will just have to go back
afterwards and add async.
You want a fast, ergonomic, and stable runtime. In most languages these are
built in; in Rust the de facto standard is tokio, even if there are multiple
other alternatives on the market. For now, tokio is what you'd probably
choose if you build services. It may change in the future though, so don't take
my word as gospel, and figure out what fits best for you. The only thing I ask
is that you be consistent.
Tokio has the benefit of being able to spawn many virtual `threads` (tasks),
and as such, even if we only have a single core, or part of one, we can still
run asynchronous work.
This should most of the time be done by a lifecycle management library,
something that can make sure that a bunch of parallel services are running at
the same time, and if one fails they all shut down. But we can just start by
hacking our own together to illustrate how it works:
```rust
#[tokio::main]
async fn main() -> Result<()> {
    let app = SharedApp::new();

    tokio::select! {
        res = app.meal_planner_api().serve() => res?,
        res = app.meal_planner_grpc().serve() => res?,
        // .. as many as you'd like
    }

    Ok(())
}
```
This is a bit of a naive example, but it should illustrate that you can run
multiple tasks at the same time serving requests. Do note that if one exits, all
of them will terminate. But we can now share `app` between all the different
runtimes and execution flows, like you'd normally do in any service.
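If you want the shutdown to be deliberate rather than accidental, one hedged
option with plain tokio (the `signal` feature) is to race a signal handler in
the same `select!`:

```rust
#[tokio::main]
async fn main() -> Result<()> {
    let app = SharedApp::new();

    tokio::select! {
        res = app.meal_planner_api().serve() => res?,
        res = app.meal_planner_grpc().serve() => res?,
        // Ctrl-C wins the race and tears everything down in one place.
        _ = tokio::signal::ctrl_c() => {},
    }

    Ok(())
}
```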
I will go into how to actually make a nice development environment in another
article, so that you know which packages to provide as a standard development
offering. For now we'll just let our little service set up everything for
itself. So keep in mind that the database setup, apis, runtimes, etc. could be
provided by a dedicated team.
## Testability
One of the most important criteria for me is being able to test a service. I
usually prefer writing fewer, more end-to-end tests, rather than a lot of small
unit tests. This is convenient, because rust doesn't make it easy to write unit
tests.
Let's start with integration tests and afterwards move on to unit tests,
because in rust they're quite different.
### Integration tests
Integration tests I categorize as tests that span an entire service, including
its io dependencies, but not other services; such that you'd include a database
or messaging broker, but not S3 or another service, in your tests. They should
poke the application from the outside, at least as much as possible, but still
be able to introspect the state of the app using the libraries. So for me
integration tests are categorized as greybox tests: somewhere in the middle of
whitebox and blackbox.
Setting up integration tests for a service in rust is a bit different from what
you're used to. First of all, you'll want to place the test files somewhere
other than where they normally live (in the code beside the functionality is the
usual place). You'd create a folder in your crate:
```bash
tests/ # new folder
src/
```
Each file under tests will be a module, like we normally have in rust; this will
become important later.
A test file looks like this:
```rust
#[tokio::test]
async fn can_book_a_meal() -> Result<()> {
    let (endpoints, app) = setup().await?; // TODO: more on this in a bit

    let client = reqwest::Client::new();
    let resp = client.post(endpoints.meal_planner_http).send().await?;
    assert!(resp.status().is_success());

    let meal_bookings = app.meal_planner_database().get_meal_bookings().await?;
    // ... more asserts

    Ok(())
}
```
There are a few different pieces we haven't gone through before, but the first
important piece is the setup function. You'd want as few concurrent apps
running as possible, so the setup can be shared across tests (this is only
possible per file, as each file is a binary in and of itself; as such they
cannot share memory between them).
So the setup should set up the app once, let the tests do their thing, and once
all of them are done, shut down.
```rust
static INIT: std::sync::Once = std::sync::Once::new();
static mut ENDPOINTS: Option<Endpoints> = None;
static mut APP: Option<SharedApp> = None;

async fn setup() -> Result<(Endpoints, SharedApp)> {
    // You need a separate tokio runtime, as otherwise it would shut down
    // between each test. `Once` makes sure we only spawn a server once.
    INIT.call_once(|| {
        std::thread::spawn(|| {
            let rt = tokio::runtime::Runtime::new().unwrap();
            rt.block_on(async move {
                // Init the server
                let server = Server::new().await.unwrap();

                // Set global options
                unsafe {
                    ENDPOINTS = Some(server.endpoints.clone());
                    APP = Some(server.app.clone());
                }

                // Actually wait for the server; this should never terminate
                // before the tests are done, i.e. start a webserver and stay
                // blocking.
                server.start().await.unwrap();
            });
        });
    });

    // Wait for the server to come up, i.e. call a ping endpoint or something
    wait_for_server().await?;

    Ok(unsafe { (ENDPOINTS.clone().unwrap(), APP.clone().unwrap()) })
}
```
Again, lots of technicalities (see flux_releaser for a more thorough example).
Just remember that we start up a process once in a separate thread, which has
its own runtime; we let the server run, and outside of that we wait.
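For completeness, `wait_for_server` can be as dumb as polling a ping endpoint
until it answers; a hedged sketch, assuming the server exposes such an endpoint
on a known port:

```rust
async fn wait_for_server() -> anyhow::Result<()> {
    for _ in 0..50 {
        // Any cheap health/ping endpoint will do here.
        if reqwest::get("http://localhost:3000/ping").await.is_ok() {
            return Ok(());
        }

        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }

    anyhow::bail!("server never came up")
}
```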
A small disclaimer here: this is what I would constitute as arcane knowledge.
Thankfully you only have to do it once, and it can be packaged up, so that you
don't have to deal with this complexity all the time. It is just too useful and
essential for testing not to mention.
I will stop here for now with integration testing; if you'd like a follow-up,
let me know at `contact@kasperhermansen.com`.
### Unit testing
Depending on what you're doing in rust, unit testing can either be a breeze or
an absolute nightmare. Essentially, if you use structs all the way down with the
dependency injection shown in the previous section, without using traits, it is
very difficult to do proper unit testing, i.e. you have no way of slicing
functionality. If you use traits all the way down, then it will require a lot of
boilerplate, or excessive usage of macros, which I will touch on after this
section.
What I recommend is:
- Using traits for IO
- Splitting functionality to make the business logic parts isolated and
testable, this is not always applicable, but does make things easier.
#### Split dat IO
IO, oh, IO; without you we would just be a space heater, with you we're filled
with heartbreak, and somehow stupid prose.
IO doesn't come equal, and when I say IO in this case, I pretty much mean side
effects: any external part of your application that we've got no control over.
From a testing point of view this means the database, sometimes the filesystem,
other services, http requests, etc.
This is pretty much the only place, outside of the strategy pattern, where I
use traits, especially async traits:
```rust
#[async_trait]
pub trait MealPlannerDatabase {
    async fn book_meal(&self, meal_booking: &MealBooking) -> Result<()>;
}

pub type DynMealPlannerDatabase = Arc<dyn MealPlannerDatabase>;

#[async_trait]
pub trait MealPlannerEvents {
    async fn meal_booked(&self, meal_booking: &MealBooking) -> Result<()>;
}

pub type DynMealPlannerEvents = Arc<dyn MealPlannerEvents>;
```
This means, like in the previous sections, that we can mock the external
services, which allows us to focus on the business logic inside the
`MealPlannerAPI`, or rather `MealPlannerService`:
```rust
pub struct MealPlannerService {
    // Please use the wrapper pattern shown in a previous section, this is just
    // an example
    database: DynMealPlannerDatabase,
    events: DynMealPlannerEvents,
}

impl MealPlannerService {
    pub fn new(database: DynMealPlannerDatabase, events: DynMealPlannerEvents) -> Self {
        Self { database, events }
    }

    pub async fn book_meal(&self) -> Result<()> {
        let meal_booking = self.generate_meal_booking();

        self.database.book_meal(&meal_booking).await?;
        self.events.meal_booked(&meal_booking).await?;

        Ok(())
    }

    fn generate_meal_booking(&self) -> MealBooking {
        // ...
    }
}
```
As you can see, there isn't a terrible amount of meat on this logic. I'd
actually normally argue that this shouldn't even be unit tested, but for
completeness' sake, let's just say that `generate_meal_booking` is unreasonably
complicated and requires not just locking down its functionality, but also
helping guide development.
You can now choose to implement your own mocks for the `Database` and/or
`Events`, and test the `book_meal` function to make sure the database and events
are called with what you expect them to be. Currently I'd either recommend
rolling your own mocks, or using `mockall`.
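Rolling your own mock doesn't have to be much; a hedged sketch, assuming the
traits above, could be a struct that just records calls:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

#[derive(Default)]
struct RecordingDatabase {
    calls: AtomicUsize,
}

#[async_trait]
impl MealPlannerDatabase for RecordingDatabase {
    async fn book_meal(&self, _meal_booking: &MealBooking) -> Result<()> {
        // No real IO; just record that we were called.
        self.calls.fetch_add(1, Ordering::SeqCst);

        Ok(())
    }
}
```

In a test you'd hand an `Arc<RecordingDatabase>` to `MealPlannerService::new`
and assert on `calls` afterwards.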
#### Split dat class
It may be useful in rust to simply split your functionality into multiple
parts: those that call external services, and isolated business logic.
```rust
impl MealPlannerService {
    pub fn new(database: DynMealPlannerDatabase, events: DynMealPlannerEvents) -> Self {
        Self { database, events }
    }

    pub async fn book_meal(&self) -> Result<()> {
        // Orchestration: this part touches IO and is covered by integration
        // tests.
        let meal_booking = self.generate_meal_booking();

        self.database.book_meal(&meal_booking).await?;
        self.events.meal_booked(&meal_booking).await?;

        Ok(())
    }

    // Isolated business logic: no IO, so it can be unit tested directly.
    fn generate_meal_booking(&self) -> MealBooking {
        // ...
    }
}
```
Now we can simply call `generate_meal_booking` in a test, simple as that. But,
you may say, I don't get my precious 100% test coverage; and I'd like to ask
whether you're out here collecting points, or actually building software.
Enough feathers ruffled: I'd highly recommend choosing wisely what to test. If
you want 100% test coverage, you're gonna trade it for increased boilerplate
and complexity, and unless you're building a rocket, it may not be warranted.
This is it for testing; next we're gonna move on to a few general points.
## Ergonomics
### To macro, or not to macro
Macros are useful; so much so that they're tempting to use everywhere.
`Procmacro` is literally crack cocaine. I will provide a word of caution
though: macros are another language inside rust, and can do anything the heart
desires. If you use macros, you will trade complexity and developer
experience for decreased perceived complexity. Sometimes that is needed, other
times it is a convenience, so be sure to choose wisely.
For example:
- async_trait is essential; rust doesn't have object-safe async traits without
it, or at least not without arcane knowledge and increased boilerplate. This
is the only non-struct procmacro I regularly use for filling gaps in
functionality.
- mockall is quite useful for generating mocks, though be careful with it; it
can introduce unexpected code, and general limitations on your traits and
structs. I only use it for traits.
You should definitely use procmacros if they're essential for your app, such as
in rocket, clap, tracing, leptos, etc. A good rule of thumb is simply to really
think about whether a procmacro is essential for your use-case. Often it is,
but I've overused them in the past, and had a hell of a time cleaning them up.
### Defer for simplicity
Rust has enough tools and features to do a lot of things in 100 different ways.
If you're serious about building services and products, default to simplicity
and be consistent. You could take a stance and say that you won't use async, or
never use clone, etc., but you'd end up taking on a whole load of complexity
that would make the service quite unapproachable for further development. Raw
dogging channels for request/reply is a nice trick, but honestly, it is a
foundational block of functionality, not a great API.
Keep things simple, and resist the urge to create abstractions for everything.
It is okay to have the same code in a few places, and don't use macros for DRY;
I've never seen it play out right.
### Use crates, and build your own
Quite simply: if you're building services, build your own crates and tailor
them to your needs. Develop a crate that automatically sets up a database
connection, bundle your own logging setup that makes sure logs are exported as
json, implement your own context libraries for sharing variables throughout a
call, etc. There are a lot of libraries that aren't useful on crates.io for
others, but if you choose to build small individual services, it can be quite
useful to have easy-to-use, out-of-the-box functionality.
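As a hedged example of the kind of small internal crate I mean (assuming
`tracing` and `tracing-subscriber` with the `json` and `env-filter` features;
the crate name is hypothetical):

```rust
// crate: your-company-telemetry
pub fn init_logging() {
    // Every service gets the same JSON log format and env-based filtering for
    // free.
    tracing_subscriber::fmt()
        .json()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .init();
}
```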
### Workspaces, be aware
Workspaces are nice, and I actually default to them for my own services, but be
careful: I've got a tendency to make small libraries in these workspaces
alongside my app. This can make it difficult to know where a crate comes from,
and gives the service multiple responsibilities, or reasons to be deployed or
worked on. As such, remember to keep services and workspaces focused on the
topic at hand. That is unless you use a monorepo approach, but that is quite
difficult to do with Rust's compile times.
# Conclusion
I hope I've shown you some currently good practices for how to develop
services in rust. We've covered everything I think is essential for building
production-ready code, which trades some performance for increased ergonomics,
while keeping complexity at bay. It should be mentioned that this is just my
own opinion and what feels right in 2024, where we're still missing crucial
async features in rust, so it could change quite a bit over the next few years.
If you feel like something was unclear, or you'd like a topic to be expanded
upon, let me know at `contact@kasperhermansen.com`.
Thanks a lot for reading, and I hope to see you at some point at a Rust Aarhus
Meetup.

View File

@@ -0,0 +1,61 @@
---
type: blog-post
title: Fuzzy clone (fz)
description: Fuzzy clone is a small cli tool that helps you quickly jump between git repos you contribute to
draft: false
date: 2024-03-09
updates:
- time: 2024-03-09
description: first iteration
tags:
- "#blog"
---
Fuzzy clone is a tool that I've used in a variety of incarnations; I've finally
sat down and actually written a separate utility for it:
[fuzzy-clone](https://github.com/kjuulh/fuzzy-clone)
It exists to solve one problem, and one problem only: quickly clone, and jump
to, git repositories you collaborate on.
This gif should better display why this is so useful
![fuzzy clone usage](assets/fuzzy-clone-demo.gif)
Do note that `,` is my alias for it; `fz` is the actual command. The fully
qualified name is `fuzzy-clone`.
To get started simply:
```
brew install kjuulh/brew/fuzzy-clone
echo 'eval "$(fuzzy-clone init zsh)"' >> ~/.zshrc
fz
```
or follow the other options in the github repo.
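If you want the same short alias I use, something like this in your shell
config does it (zsh assumed):

```
alias ,="fz"
```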
## Why I can't live without it anymore
This might be a bit `ananas i egen juice` (Danish: pineapple in its own juice,
i.e. self-congratulation), but the alternative is horrid. I don't always
remember the name of the repo I want to clone, even if I've written it myself
at some point.
1. Go to https://github.com
2. Click repositories, because the homepage search doesn't always show all the
repos I contribute to or am a member of
3. Search `fuzzy-clone` as an example.
4. Open a terminal
5. `cd ~/git/github.com/kjuulh`
6. `gh repo clone kjuulh/fuzzy-clone`
7. `cd fuzzy-clone`
Granted, I've got a more complicated directory structure than most, but still,
there are like 7 steps. It may seem small, but it takes around 30 seconds to a
minute instead of 2 seconds using the utility. That is super valuable to me, and
helps me every day when I troubleshoot stuff in some of the repositories I don't
technically own in my organisation.
A bonus is that if the directory already exists, I can just jump between repos,
sort of like the `zoxide zi` tool, but a bit more structured.

View File

@@ -0,0 +1,126 @@
---
type: blog-post
title: The Problem with the consultant model
description: This post explores the limitations of traditional consultancy in software development, advocating for integrated, long-term solutions over repetitive, short-term fixes. It highlights the need for strategic leadership and collaborative approaches to avoid complexity and enhance efficiency.
draft: false
date: 2024-03-10
updates:
- time: 2024-03-10
description: first iteration
tags:
- "#blog"
---
I recently talked about Developer Experience in a few places and got a ton of
messages afterward, way more than usual. People were really resonating with one
particular point, so let's dive into it.
Here's what I heard from a consultant: “..., I'm stuck doing the same old stuff
again and again, even within the same client.” And from regular employees: “What
you showed was cool, but we're just redoing the same things over and over.”
So, what was my talk about? I shared how we set up shared infrastructure at my
company. This means devs can just focus on the cool stuff, their business logic,
while we handle the boring bits like platform runtime and pipelines.
The main thing I picked up from the feedback: whether you're a consultant or a
dev, you're probably tired of setting up the same infrastructure every single
time. And it's tough to step back and think about a shared approach to fix this.
## Handy but Fleeting
Consultants are like a quick fix for companies that can afford them. They fill a
gap and then they're out. It's easier than committing to a permanent hire, but
it's pricier in the short run. It's a bit like choosing cloud services:
convenient, but it can get expensive, especially if you get too hooked on them.
The trouble with consultants is they don't really become part of the team. They
pop in, do their thing, and pop out. This means whatever they build can get lost
in translation when they leave, since they take their know-how with them. Even
with the best handover, it's not the same as having them truly embedded in the
team.
## Bespoke, boutique, everywhere
Here's the deal with the current setup: consultants come in with their cloud
expertise, set things up for the team, and then bounce. This isn't bad in
itself, but it becomes a problem when every team ends up with their own custom
setup. It's great at first, but later the team struggles because they don't
really know the ins and outs of what's been built.
Every team doing its own thing means no one's thinking about how much easier
life could be with a shared system that everyone uses.
## Oil and Water
Consultancy firms aren't really into the whole 'one-size-fits-all' solution
thing, and businesses usually want quick fixes, not big-picture solutions. This
leaves consultants and devs kinda stuck: they want to create better, easier
solutions but can't always make it happen.
- Consultants typically swoop in, fix a specific problem, and then peace out.
- Devs, on the other hand, are too busy delivering business value to really dive
deep and evolve these solutions.
This often ends up piling on complexity for companies without really unlocking
the full benefits these solutions could offer.
From what I've seen, getting a consultancy to solve a problem is usually not a
great long-term move. I've mostly seen success when they're brought in for extra
hands or for training, not as a standalone squad handling features.
## Delayed Gratification
The key? Be patient for that metaphorical cookie. It takes mature leadership
willing to tackle the big issues instead of just patching up small ones here and
there.
Companies should be willing to use consultants, but they need to be integrated
into the teams maintaining the solutions. I'm not a fan of outsourcing the whole
implementation to a consultancy, though. That can lead to a messy situation
where the company becomes too dependent on the consultancy. If you're a dev or
leader and you see this starting to happen, shout it out to the higher-ups.
Otherwise, you're looking at a huge pile of technical debt and a knowledge gap
that's a nightmare to cross.
## Making it Work
I've seen a few places actually nail this, although it wasn't without its
challenges. The common thread? They brought in a domain expert backed 100% by
management.
Tackling a problem that every team has their own approach to can cause a lot of
tension. Management needs to fully trust the domain expert to handle things
smoothly. If not, it just undermines the whole effort.
This puts a lot of pressure on the new team and the domain expert. They've got
to deliver something solid, which is a whole other story, but it's definitely
possible and worth the effort.
## The Payoff
If you get these shared problems sorted, it can massively speed up development.
Ive been at places where setting up a production environment was a months-long
ordeal. At my current company? It's a 5-minute job, with next to no long-term
maintenance requirements.
Compare months of work for, say, three developers to a quick setup by two devs.
That's a huge saving. Sure, you might think it's nuts that setting up for
production could take that long, but it's a different ball game at traditional
companies with their own data centers and rules compared to just using AWS on
the side.
# Wrapping Up
I was struck by the feedback from consultants: they were just as keen to
improve and evolve their solutions as the devs. They're tired of the endless
cycle of temporary fixes.
In my view, the current consultancy model isnt cutting it. The best results
Ive seen are when consultants are part of the team, not just passing visitors.
The goals of the consultants and the company are often too different, which is
why it often ends in frustration.
Hopefully, this post gives you something to think about. Im already seeing some
companies tackling this issue head-on. Others should take note, unless they
want to keep dealing with the same old problems.

View File

@@ -0,0 +1,80 @@
---
type: blog-post
title: I like linux more
description: I like linux...
draft: false
date: 2024-04-11
updates:
- time: 2024-04-11
description: first iteration
tags:
- "#blog"
---
This may not come as a surprise to anyone. But I feel the need to state this as
a person probably spending 8 hours a day on a Mac. Linux is superior for my
profession: software engineering.
I can imagine you rolling your eyes right now: yet another nerd screaming into
the void that their niche choice, which no one understands, is somehow better
than a behemoth of engineering like MacOS.
First of all, Linux is built for and by software engineers. It definitely has
its own idiosyncrasies, but most of the choices made by my distribution (fancy
name for a curated experience on top of the `linux` kernel) simply make sense
to me.
A distribution on linux, often abbreviated as distro, is a set of software
which includes the linux kernel in its delivery, like installing Windows,
MacOS, etc. You're probably used to just those choices, but on linux it is
different. We call it linux, but you won't find a distribution simply called
'linux'. Instead you will find flavours of linux that include linux as their
base OS: Ubuntu, Debian, Arch, etc. Each is as different from the others as
Windows is from MacOS, at least from a user's perspective. So it can be
confusing which is the right one for you.
Linux, unlike the other OS', actually allows you to choose what you prefer.
Each distribution is built by people, ranging from you yourself (linux from
scratch, etc.) to Fedora (Red Hat, owned by IBM), or Ubuntu by Canonical. These
flavors are vastly different in their user experience, so much so that for a
lay person, without knowing that linux is the base kernel it is all built on,
you wouldn't even know they share the same DNA.
The linux desktop, which I am writing about in this article, is different from
the linux server that most developers are familiar with. Not in the basics, but
in the user experience: the linux desktop, with all its flavors, simply has a
lot fewer eyes on it than the linux server, the industry standard for server
operating systems.
Eyes on software and hands on keyboards are among the most important metrics in
software engineering. Simply put, software that is important to more people is
more mature and refined. (Often, anyway; Jira, phew.)
This has already drawn on long enough, but in my experience, unlike MacOS,
Linux actually allows me to be productive in the way I choose, professionally.
As a software engineer, I am, by nature or nurture, a power user. I usually
switch between a browser and an editor, which I use to edit the programs I
write. Often, I like to focus on a single thing at once, so I have my editor on
one screen, and a browser, communication app, and planning board on the other.
A need then emerges to switch between these quickly.
MacOS does allow setting keybinds to switch between these. However, there is no
keybind out of the box to send a program to another screen. And let's say I
really need that feature, which I do, don't ask.
I basically have to resort to rooting my device (rooting is a fancy term for
getting access to the dirty bits of the OS; normally you don't want to touch
this, as it really is like opening Pandora's box, you don't know what is gonna
happen). Should I do that on a company device? Probably not.
The problem here is that I have to be satisfied with what Apple, or Microsoft
for that matter, provides for me. If I don't like it, I basically have to
violate every security best practice simply to set an uncommon keybind. While
on Linux I can just choose to change my configuration, or desktop, to include
that piece of functionality, as sketched below.
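As a minimal sketch, assuming a tiling window manager such as sway (my own
choice of setup, and the exact keybind below is made up for illustration), the
missing feature is a single line of plain-text configuration:

```
# ~/.config/sway/config
# Mod+Shift+o sends the focused window to the next screen.
bindsym Mod4+Shift+o move container to output right
```

One line, no rooting required, and the same hook exists for nearly anything
else I might want to bind.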
Linux actually allows me to control my device how I prefer. I don't ask for
anything complicated, I just want to open a program on another screen. Is that
really so serious? I can do that using my mouse, why can't I do that using a
keybind?
I hate using my mouse

View File

@@ -0,0 +1,235 @@
---
type: blog-post
title: Use Rust for Web services
description: Rust is becoming a mature language, it is excellent at systems development. In this post I will explain why I think Rust is a viable choice for web services, and in the following posts build a simple web service to showcase all the required features I expect from a language.
draft: true
date: 2024-07-12
updates:
- time: 2024-07-12
description: first iteration
tags:
- "#blog"
---
Rust is increasingly becoming more mature, especially for systems development.
Rust, however, can also be an excellent choice for building web services, which
may not be obvious from the outset. In this post I'll show that Rust is a
viable choice and does lend itself to web service development.
## But, systems development
It is commonly said that Rust is built for systems development. That is, the
core, nitty-gritty details of our software stacks. That is definitely also
where Rust shines, especially because Rust's values align quite well with
systems development. That doesn't mean that Rust is unsuited for development in
areas other than systems development. It means that we should carefully align
the values Rust chooses to prioritize against our own.
Rust mentions on their website that their primary values are:
- Performance
- Reliability
- Productivity
If that fits your needs then I don't see any reason why you wouldn't choose Rust
for building your business logic and serving requests.
Systems development typically favors:
- Performance
- Control
- Reliability
- Security
Which is why Rust is such an excellent choice for building core infrastructure.
Building web services is entirely up to you; however, most companies I've seen
favor:
- Productivity
- Reliability
- Catalog of libraries
- Ease of use
I won't go into each and every item for systems development, but I do think it
is important to reason about why those four values are the ones I've chosen to
select for:
### Productivity
As software developers building business services, we're expected to deliver
results. Software engineers working on front-facing services are often valued
on how fast they can deliver results. It is simply how the business and
technology work together: business wants results, tech wants reliable, modern
and understandable systems. Rust does select for productivity, but not in the
way we usually think about it. When Rust says productivity, it means: we've got
great tooling, a robust build system, a package manager, an auto-formatter,
etc., built right into the tool you use the most when developing Rust. Rust
does have its downsides for productivity: it has a rigorous type system, which
requires fairly long compile times.
This is just my experience with Rust speaking here, but Rust really means
productivity in the long run, delayed gratification if you will. It will
sometimes feel like easy things are hard, but also that hard things are easy.
In my experience with Rust, building web services from scratch can take quite a
while, but doing refactorings later on is fairly easy and reliable, simply
because Rust is so rigorous. There is often the feeling that if it compiles, it
works, even if tests aren't run. Much more so than in other languages with
weaker guarantees in their type systems.
### Reliability
Software engineers want to depend on tools that just work; they don't want
crazy crashes, memory overflows, garbage collectors running amok, shared
memory, etc. Nobody wants to be paged at night, or spend weeks sorting out
invalid data because a software service was misused, or data was transformed in
the wrong way.
Rust leans into reliability hard; it chooses reliability over nearly all other
values. When Rust runs, it runs well, consistently, and fast. I've had services
in production seeing traffic in all sorts of conditions, and the programs keep
chugging along, some having been running for months, just sitting there doing
their thing.
### Catalog of libraries
Software engineers working on business services are often integrating with a
variety of systems. The language they choose either needs an incredibly vast
set of libraries, and/or to be excellent at interoperating with other
libraries. Hey, I need this program to send files to an SFTP server; I need a
connection with this server, but it needs to be over mTLS; I've got these
massive XML files that need to be parsed; we've got this arcane library that we
need for reasons, please make sure to call it in this way.
There are tons of requirements for libraries, for common things such as
logging, metrics, web APIs, message protocols, message queues, etc. Rust, in my
opinion, sits at the early adopter/initial mainstream stage of the adoption
cycle here. While Rust has an incredibly vast library of tools, a lot of these
tools are built by hobbyists, of excellent quality don't get me wrong, but Rust
currently is a second-class citizen for a lot of providers. We are beginning to
see services provide SDKs and so on for Rust, but often in the 2nd or 3rd wave
of tools. Languages like Javascript, Python, Golang and so on are still a
little ahead.
### Ease of use
As software engineers working on business services, we mostly spend our time
on domain logic: talking to the business, architecting the APIs for our
customers, figuring out the right data paths for models, integrating with other
internal customers. As such we want a simple language that is powerful but easy
to use; we don't want to skimp on features, but we generally favor simplicity.
Rust does clash here; for one thing, it isn't easy to learn. Rust kind of has
this unfortunate learning curve, where initially you are fighting with the
compiler to get your program built. As you lean more into the standard library,
you learn some bad practices, and use features to paper over the shortcomings
in dealing with Rust. Until, hopefully, once you've grokked Rust's model, you
can actually build quite simple software that isn't overly verbose or complex
to use.
This sadly ties simplicity to mastery of the language. While not impossible, it
does make it difficult for a beginner/intermediate Rust developer to build
simple, concise Rust code. Unlike Golang, Python and so on, which put
simplicity forward as one of their values. Golang especially makes it easy for
even beginners of the language to produce idiomatic, simple code.
## Pretty decent overlap for web services
Rust actually seems from the outset to be pretty well suited for web service
development. It is ergonomic, favors productivity, is rock solid to use, and
has a vast array of libraries. From experience, Rust does have some issues for
web service development, primarily that it isn't easy to pick up. This is a
point I hope to help alleviate in this article. In my opinion, a lot of the
current use cases showing off Rust are in too technically complex domains,
making it not as clear that Rust can actually produce quite simple code for
simple technical problems. They focus instead on showing Rust's power in
handling complex technical situations, especially by illustrating that Rust can
handle these situations with effective code. Effective code is not the same as
simple code. And while from the outset concise, effective code looks simple, it
often isn't; it requires deep knowledge of the language to operate, something
we'd like to avoid unless the situation warrants it.
With this series, I hope to show some programming patterns for building
services, which should hopefully put you on the right path. Some of the code
will look trivial for the most part, as we're, at the end of the day, handling
quite simple technical problems. It should, however, show that while Rust is
capable, and often shown wielding a sledgehammer, the sledgehammer often isn't
warranted or the thing to use.
## Why Rust is hard to learn
Rust is currently in this situation where it has a reputation for being hard
to learn, which I'd tend to agree with, as I struggled to learn it myself. I do
have a single reason as to why Rust was hard to learn for me: I didn't have a
peer to discuss problems with, no mentor guiding me on the right path, and a
lack of solid examples for doing the kind of programming I am interested in
(primarily web service development and developer tooling).
I myself had to go through the entire cycle, thorns and all, from beginner and
intermediate in Rust to having a somewhat good grasp on the language and
ecosystem, enough to be effective in the language. In a prior post, I showed
how to build services in Rust as well. If I'd had some of these kinds of guides
earlier, I'd certainly be much further along in being productive in Rust.
## Rust is obsessed with itself
The Rust community is crazy about the language; it sometimes feels a little
bit like a cult. In the last year or so, I have felt it subside a little, at
least in my local community. But in general, we're obsessed with the language:
nerding out writing blog posts going into hairy problems, finding arcane tricks
to solve the issue, crazy debugging techniques to figure out why a certain
program spent a few more cycles than it should on a given function call, etc.
While entertaining, I do think we need to take a step back, look at Rust for
the values it brings, and see how we can promote it in a more digestible
format, so that the more casual side of Rust is shown as a contrast to the
expert blogs.
## Rust should be better at putting you in the pit of success
To succeed with Rust you need to be in the pit of success. You can't be
rolling around at the sides like you can in most other languages. In Rust, if
you're not following the rules, you will come to refactor your app many times,
or do tricks to avoid ownership rules, all tricks that compound into the app
being more complex than required.
Rust has tons of rules for how you should write the language; it just doesn't
enforce them that strictly. Some are shown explicitly and implicitly in various
material, The Rust Book, etc. However, most of these rules really have to be
enforced to actually be followed. Most languages have implicit rules that you
need to follow: C++ has a really long book about it, Rust has The Book, etc.
Golang also has implicit requirements, for example that producers should close
channels, and that functions should return structs but accept interfaces.
Golang feels like a language where it is easy to fall into the pit of success,
mostly because the language is fairly easy to understand, so it becomes easier
to grasp the implicit rules.
Rust is hard to learn, as you are given a solution to the problem you face at
hand, but that solution is nearly always a sledgehammer, instead of what you
should be doing, which is being consistent. And that is super difficult when
you don't know what the right thing to do is.
The learning curve for a Rust problem goes somewhat like this (a sketch of the
end state follows the list):
1. User creates a module for handling user state.
2. User gets a compile error because memory is shared between threads.
3. User reads up on what tools are available to solve the problem.
4. User uses an Arc<Mutex<...>> to share memory, and locks access so that
   nobody can touch the data while someone is modifying it.
5. User exposes their services under Arc<Mutex<...>> everywhere. Every type now
   needs to take an Arc Mutex as input for one or more dependencies: database,
   logger, business service. The code becomes a mess to maintain and refactor.
6. User refactors the app; they find out that they should wrap state into
   consistent, idiomatic modules that only expose the API required. All
   thread-safety mechanisms should either be exposed as APIs or maintained
   internally.
7. The user has to refactor the entire app at once to make the migration
   successful.
8. The user can now share dependencies with references; the code is fairly
   concise and testable.
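To make step 6 concrete, here is a minimal sketch of that end state. The names
(`UserService`, `login`, `active_users`) are made up for illustration, not
taken from any later post:

```rust
use std::sync::{Arc, RwLock};

// The handle is cheap to clone; every clone points at the same shared state.
#[derive(Clone, Default)]
pub struct UserService {
    inner: Arc<RwLock<UserState>>,
}

#[derive(Default)]
struct UserState {
    active_users: u64,
}

impl UserService {
    // Thread safety is kept internal; callers never see the Arc or the lock.
    pub fn login(&self) {
        self.inner.write().unwrap().active_users += 1;
    }

    pub fn active_users(&self) -> u64 {
        self.inner.read().unwrap().active_users
    }
}

fn main() {
    let svc = UserService::default();
    let handle = svc.clone();
    std::thread::spawn(move || handle.login()).join().unwrap();
    assert_eq!(svc.active_users(), 1);
}
```

Dependents now store a plain `UserService` and clone it where needed, instead
of threading `Arc<Mutex<...>>` through every signature.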
Rust has tons of rules, hidden in various bits of syntax, that, while boiling
down to simple concepts, are difficult to implement and understand; some of the
rules even depend on the context they're in and which values your program
selects for.
If writing web services, you'll want to build modules that encapsulate their
state and provide a shareable reference to the outside that is cloneable and
safe to use. Structs are preferred over traits as the API. We'll go into each
of these in a more thorough manner in later posts as we encounter the need for
them. If you're a Rust developer you may contest these ideas, which obviously
is fine, as I may be wrong :,). However, I hope that the series will show how I
consistently build services in Rust that are ergonomic to write and not overly
verbose.

View File

@@ -0,0 +1,188 @@
---
type: blog-post
title: "Your Company's Superpower: Platform Engineering"
description: Platform Engineering basically has two angles in the industry, either the hype for it is overwhelming because it is the new hot thing on the block, or it is underestimated because it just looks like operations doing software engineering. I aim with this piece of content to define why you should care about Platform Engineering.
draft: false
date: 2024-09-25
updates:
- time: 2024-09-25
description: first iteration
tags:
- "#blog"
---
Platform Engineering basically has two angles in the industry: either the hype
for it is overwhelming because it is the new hot thing on the block, or it is
underestimated because it just looks like operations doing software
engineering. I aim with this piece of content to define why you should care
about Platform Engineering.
## Platform Engineering defined
Platform Engineering, true to its name, is the practice of engineering a
platform. As Software Engineers we often forget what the Engineering in our
titles actually means. Engineering means building the right thing, reliably and
securely. That we understand, but the platform is harder to define. When we
think of a platform for a company, it is the foundation that other users within
the company utilize. An important detail here is that Platform Engineering is
not just for developers; it can be for business users, data analysts,
operations people, etc. It is basically a vehicle to make someone else's job
easier within the company.
An example of a platform could be: a specific individual or team owning the
deployment pipeline that ships software into production, the metrics solution
that is used across the company, or the tool analysts interface with when they
want to query the data in the company.
These might sound mundane, and you may object: but hey, I could just set up
GitHub Actions, and boom, my service is now going to production. You would be
correct. Platform Engineering is taking a holistic view of the company's
portfolio of tools, and making decisions based on that. Often tooling starts
grassroots: a developer is missing a feature, he/she goes and implements said
feature, and they now have a pipeline to build and deploy their code. But so do
the other 15 teams in the company, and now you've got 15 bespoke solutions.
Another step up would be a DevOps department, acting as consultants to the
different teams to get some homogeneity.
A Platform Engineering team would take those requirements (we need a build and
deployment capability across the company) and come up with as simple a solution
as possible that captures the largest amount of complexity. As such, the team
might end up building a complete build pipeline, but only for Golang services,
and deliberately keep the complexity around for Python services or what have
you. The Platform Engineering team would then treat the pipeline as a product:
handle user feedback, run user interviews, track adoption data, and make sure
the right features are available.
The end product is that you've got a dedicated team which can capture the
partial complexity for 15 teams, and if the product is good enough, it can be
complete, i.e. the friction between the development teams and the platform team
is minimal. If you extrapolate this mindset, you can go from:
Here is an example of the scope of the complexity a normal organisation might
have in software products, internal to the company, but without dedicated
stewardship:
5 programming languages, 30 pipelines, 5 types of software libraries, 10
libraries per language, 30 types of deployment, 3 clouds
`5 + 30 + 5 * 10 + 30 + 3 = 118` products spread out over the organisation
Versus a stewarded Platform
1 programming language, 1 pipeline, 10 libraries, 1 deployment, 1 cloud
`1 + 1 + 10 + 1 + 1 = 14` products dedicated to a specialized team
You might say that it is unrealistic to succeed with a single programming
language, or one pipeline, but it can be done. It can be a long journey to get
there, and you may not want to go that far if you've got enough people to
maintain the software. But in general, the goal of Platform Engineering in such
an organisation is to move complexity out of feature teams to let them focus on
what they're best at: building features.
As is often the case, Platform Engineers are basically Software Engineers
working in a specialized field, with specialized tooling; as such it is more
approachable to tackle familiar problems, i.e. building out a deployment
strategy for Kubernetes, AWS, whatever. But it can also be so much more. How do
SQL analysts interface with their tools? What is slowing them down? Do they
achieve the quality they want from the products they rely on? Are their
workflows as efficient and tight as can be? Often this isn't the case; in the
same vein as a software engineer hacking together a pipeline, an analyst might
cook up a workflow that is borderline masochism. As Platform Engineers we've
got the knowledge and tools to help shape some of these workflows and tools to
fit the needs of our users.
## Platform Engineering doesn't mean invented here
Platform Engineering can be taken to its extremes, where we basically build
all the tools from scratch, define all workflows and templates by hand, and
rely on a massive team to support said complexity. But that shouldn't be our
first approach; one of the most interesting things about Platform Engineering
is the creativity it invites. Hey, I need to build a build pipeline: what tools
do I have available, and how can I turn this into a good product and
abstraction for my users? Do I really want to provide a small layer of
abstraction on top of GitHub Actions/Jenkins, etc.? Or will we build a turnkey
solution that basically builds our company's version of what a Golang service
looks like?
Are we supposed to build the entire build pipeline software ourselves, or can
we leverage either open-source offerings or SaaS solutions, and provide a small
opinionated layer on top to make it a product internally? That is really the
goal of Platform Engineering: to think creatively about problems, such that we
can build the most reliable thing, with the lowest complexity, in the most
secure manner.
## Platform Engineering: A Superpower
I gotta make up for the title, so how is Platform Engineering a superpower?
Let's say a new security requirement comes down: you now need to produce
software bills of materials across all of your software, because you want to
sell your services to an organisation that requires that level of security. You
can basically measure your proficiency in Platform Engineering by how well
you're able to execute cross-cutting concerns.
If you had 30 pipelines for 5 languages, you'd basically need to copy, paste,
and modify the same feature into each and every pipeline, which may run on
different types of CI systems and have different levels of compatibility with
the language in question.
If you however had 1 language and 1 pipeline across the entire fleet of
services, you could basically build the feature in an afternoon, append it to
the build pipeline, track all the builds, and see that all required artifacts
were produced. With Platform Engineering you've basically transformed a
challenging multi-month effort into an afternoon project, as the sketch below
illustrates.
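A minimal sketch of why this is an afternoon project, assuming the platform
team owns one shared pipeline definition (the `Pipeline` and `with_sbom` names
are hypothetical, not an existing tool): because every service flows through
one definition, the cross-cutting step is added exactly once.

```rust
// Hypothetical shared pipeline definition owned by the platform team.
struct Pipeline {
    steps: Vec<&'static str>,
}

impl Pipeline {
    // The one blessed pipeline that every Golang service runs through.
    fn golang_service() -> Self {
        Self {
            steps: vec!["lint", "build", "test", "package"],
        }
    }

    // The new cross-cutting concern: one change here rolls out fleet-wide.
    fn with_sbom(mut self) -> Self {
        self.steps.push("generate-sbom");
        self
    }
}

fn main() {
    let pipeline = Pipeline::golang_service().with_sbom();
    for step in &pipeline.steps {
        println!("running {step}");
    }
}
```

With 30 bespoke pipelines, the equivalent change is 30 pull requests against 30
repositories, each owned by a different team.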
With the first approach, developer teams will also have to own the changes,
because these are their pipelines; how difficult would it be to prioritise that
across 30 teams? From experience there are always a handful that are absolutely
strapped for time, so you probably wouldn't make it in time. The second
approach is completely automated; the teams wouldn't even know their pipelines
are producing bills of materials. The same could be said for signing artifacts,
producing artifacts for other architectures, swapping out library internals,
and whatnot.
It can be extremely fulfilling work to build a project that can basically
bootstrap a service from scratch to production in minutes, without any handoff
to other teams, or requiring complex manuals for actually being allowed to go
to production. As a Platform Engineering team, we can offload a lot of
requirements from teams, so that they can focus on delivering value for our
paying customers, and in turn make the organisation much more nimble, scalable
and so on.
## Platform Engineering is a double-edged sword
If your Platform Engineering team isn't able to build whole abstractions, which
by the way are extremely difficult to build, they will have a lot of
maintenance on their hands. And if they aren't able to fulfill requirements
from their customers, the developers, analysts and so on, you might end up with
a mutiny on your hands: people simply going rogue, because the complexity of
the platform has increased to an almost absurd level.
This can happen, and it needs to be carefully considered. As engineers we
should continuously defer products to maintenance, and pick them up again once
offerings become available via open-source, or proprietary software catches up
with our own abstractions.
You might build a state-of-the-art build system one year, let it run for five,
and suddenly it is clunky to work with, because it has been in maintenance mode
for years. But you may discover that an open-source tool has matured enough to
fill some of those requirements you had; because you're fully in control of the
platform, you might even be able to swap out the "engine" with a less complex
one, and free up some maintenance budget for your team.
I'll discuss when you should make those decisions, and what they look like, in
another piece of content.
## Teams require help to be in control of their software
At the end of the day, feature teams should still be fully responsible for
their products, including operations. So as a Platform Team you've got to
carefully consider how you allow them to stay in control. Do you send back all
the right signals for them to be in control? Do they know how many applications
they're running, and how much CPU and memory they're using? What is their SQL
latency, when did they deploy, which version is deployed, how are their gRPC
latencies, what is the HTTP error rate?
Again, you can treat these as products; it doesn't have to be fancy, but it
takes a long time to get this right. And as you build abstractions on top of
products, you'll continuously see that, as services demand more of the
platform, more and more of the internals needs to be exposed, so that the
feature teams can engineer their services to the implicit requirements of the
platform. I.e. they might need to tune memory settings, connection pools, HTTP
authentication, or which ports are open, to match what the platform expects.

View File

@@ -0,0 +1,42 @@
---
type: blog-post
title: "Rust: Are we general purpose yet?"
description:
draft: true
date: 2025-06-02
updates:
- time: 2025-06-02
description: first iteration
tags:
- "#blog"
- "#rust"
---
Rust has been steadily improving over the past few years. We've gotten more
complete support for async, still not fully there yet, but we're getting
there. Const generics, closures and more have also seen improvements. But all
these language features don't really tell us about Rust's place in the
industry, so in this post I'll try my best to explain whether I think Rust is
ready for the mainstream.
First of all, let's (try to) define what general purpose means.
> In computer software, a general-purpose programming language (GPL) is a
> programming language for building software in a wide variety of application
> domains. Conversely, a domain-specific programming language (DSL) is used
> within a specific area.
[wikipedia: general purpose programming language](https://en.wikipedia.org/wiki/General-purpose_programming_language)
In layman's terms, a language that can be written to target varied use-cases,
whether they be application development, web services, systems programming,
etc.
My own definition also includes choice, weirdly enough. This means: do people
consider _it_ a mainstream language? When they pick up a new project, do they
actively weigh Rust as a contender? And do people choose Rust for these cases?
With these definitions we're not gonna end up with a yes/no situation; there
is definitely a gray zone, a large one at that.
I'll be doing a fairly biased

View File

@@ -1,6 +1,6 @@
+++
title = "Kasper Hermansen"
path = "posts"
paginate_path = "posts"
sort_by = "date"
paginate_by = 5
+++

Binary file not shown.

After

Width:  |  Height:  |  Size: 208 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 408 KiB

View File

@@ -0,0 +1,868 @@
---
excalidraw-plugin: parsed
tags: [excalidraw]
---
==⚠ Switch to EXCALIDRAW VIEW in the MORE OPTIONS menu of this document. ⚠==
# Text Elements
Backend service ^9wbfh44t
Backend engineer ^OYnQIyhR
User ^d7TsIcMm
Event sourced model ^tpEFylci
Domain events ^BKDeWmFh
Data platform Ingest ^eBw97vYs
%%
# Drawing
```json
{
"type": "excalidraw",
"version": 2,
"source": "https://github.com/zsviczian/obsidian-excalidraw-plugin/releases/tag/1.9.19",
"elements": [
{
"type": "rectangle",
"version": 465,
"versionNonce": 1569199600,
"isDeleted": false,
"id": "sTx3V6IBMr5fmkkGDikFE",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -298.22390437714466,
"y": -177.26461889876435,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 149,
"height": 151,
"seed": 1536562448,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "9wbfh44t"
},
{
"id": "J0mitVhCskhfgbLZOMKDf",
"type": "arrow"
},
{
"id": "Gvy66Wi-Z4fYBo_6xHAo2",
"type": "arrow"
},
{
"id": "PS6qWUBa9zPZp3Y7X1IH_",
"type": "arrow"
},
{
"id": "udDfzIaj1GY-t9tp-N-Gl",
"type": "arrow"
}
],
"updated": 1694341209387,
"link": null,
"locked": false
},
{
"type": "text",
"version": 272,
"versionNonce": 837801968,
"isDeleted": false,
"id": "9wbfh44t",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -268.38386226288685,
"y": -126.76461889876435,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 89.31991577148438,
"height": 50,
"seed": 757141264,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341209387,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Backend \nservice",
"rawText": "Backend service",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "sTx3V6IBMr5fmkkGDikFE",
"originalText": "Backend service",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "diamond",
"version": 210,
"versionNonce": 1268187120,
"isDeleted": false,
"id": "vMWQeW3qRvIkUdjOLaWPz",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -345.43316720684743,
"y": -482.1972278309496,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 220,
"height": 200,
"seed": 1298980624,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"type": "text",
"id": "OYnQIyhR"
},
{
"id": "J0mitVhCskhfgbLZOMKDf",
"type": "arrow"
}
],
"updated": 1694341455611,
"link": null,
"locked": false
},
{
"type": "text",
"version": 177,
"versionNonce": 311525872,
"isDeleted": false,
"id": "OYnQIyhR",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -280.0931250925896,
"y": -407.1972278309496,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 89.31991577148438,
"height": 50,
"seed": 817042704,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341455611,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Backend \nengineer",
"rawText": "Backend engineer",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "vMWQeW3qRvIkUdjOLaWPz",
"originalText": "Backend engineer",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 942,
"versionNonce": 1338138096,
"isDeleted": false,
"id": "J0mitVhCskhfgbLZOMKDf",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -230.81771058718556,
"y": -274.92613188060886,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 0.0046177819788795205,
"height": 89.59298371386575,
"seed": 2103470064,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478296,
"link": null,
"locked": false,
"startBinding": {
"elementId": "vMWQeW3qRvIkUdjOLaWPz",
"gap": 8.61227473592443,
"focus": -0.041822171768161574
},
"endBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 8.06852926797876,
"focus": -0.0950941088262367
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
0.0046177819788795205,
89.59298371386575
]
]
},
{
"type": "diamond",
"version": 295,
"versionNonce": 1320180208,
"isDeleted": false,
"id": "v5jShMMi0xwqEukwhA_JJ",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -677.8497011004193,
"y": -202.97325254400545,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 220,
"height": 200,
"seed": 783812880,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"type": "text",
"id": "d7TsIcMm"
},
{
"id": "Gvy66Wi-Z4fYBo_6xHAo2",
"type": "arrow"
}
],
"updated": 1694341100136,
"link": null,
"locked": false
},
{
"type": "text",
"version": 279,
"versionNonce": 1567595504,
"isDeleted": false,
"id": "d7TsIcMm",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -590.1896745501264,
"y": -115.47325254400545,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 44.67994689941406,
"height": 25,
"seed": 989095696,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341100136,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "User",
"rawText": "User",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "v5jShMMi0xwqEukwhA_JJ",
"originalText": "User",
"lineHeight": 1.25,
"baseline": 18
},
{
"type": "arrow",
"version": 204,
"versionNonce": 911712752,
"isDeleted": false,
"id": "Gvy66Wi-Z4fYBo_6xHAo2",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -454.1370030401614,
"y": -103.30812564496424,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 136.58153297230444,
"height": 0.46945233740069625,
"seed": 1541315344,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478297,
"link": null,
"locked": false,
"startBinding": {
"elementId": "v5jShMMi0xwqEukwhA_JJ",
"gap": 3.7277696925090282,
"focus": 0.0005597545142185046
},
"endBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 19.331565690712296,
"focus": 0.03082885731532018
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
136.58153297230444,
-0.46945233740069625
]
]
},
{
"type": "ellipse",
"version": 145,
"versionNonce": 683659760,
"isDeleted": false,
"id": "KgN1wquMjbcnndF7mIZZE",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -121.86574511764019,
"y": 3.1277624056234004,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 148,
"height": 148,
"seed": 1134723568,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"id": "PS6qWUBa9zPZp3Y7X1IH_",
"type": "arrow"
},
{
"id": "udDfzIaj1GY-t9tp-N-Gl",
"type": "arrow"
},
{
"type": "text",
"id": "tpEFylci"
},
{
"id": "VpBDx2DIaWwIhOzzspdiO",
"type": "arrow"
}
],
"updated": 1694341208616,
"link": null,
"locked": false
},
{
"type": "text",
"version": 51,
"versionNonce": 1976533776,
"isDeleted": false,
"id": "tpEFylci",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -89.82159840249548,
"y": 39.80186059781888,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 84.25990295410156,
"height": 75,
"seed": 384337392,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341203391,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Event \nsourced \nmodel",
"rawText": "Event sourced model",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "KgN1wquMjbcnndF7mIZZE",
"originalText": "Event sourced model",
"lineHeight": 1.25,
"baseline": 68
},
{
"type": "arrow",
"version": 312,
"versionNonce": 1626203120,
"isDeleted": false,
"id": "PS6qWUBa9zPZp3Y7X1IH_",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -136.53086016976556,
"y": -49.76929969163509,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 61.85382626567878,
"height": 37.75817092419969,
"seed": 1044317968,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478298,
"link": null,
"locked": false,
"startBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 12.693044207379103,
"focus": 0.701485505647599
},
"endBinding": {
"elementId": "KgN1wquMjbcnndF7mIZZE",
"gap": 20.504530823640664,
"focus": -0.09142088821761776
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
53.568507010659005,
-1.482635738631508
],
[
61.85382626567878,
36.27553518556818
]
]
},
{
"type": "arrow",
"version": 264,
"versionNonce": 594273776,
"isDeleted": false,
"id": "udDfzIaj1GY-t9tp-N-Gl",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -121.8622170366934,
"y": 37.378326729495264,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 41.452896638401896,
"height": 53.533654118295544,
"seed": 2016661488,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478298,
"link": null,
"locked": false,
"startBinding": {
"elementId": "KgN1wquMjbcnndF7mIZZE",
"gap": 9.996996928687153,
"focus": 0.3309459780456835
},
"endBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 10.109291509964066,
"focus": -1.0057786128186066
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
-41.452896638401896,
-8.277501643773128
],
[
-29.488449945388496,
-53.533654118295544
]
]
},
{
"type": "arrow",
"version": 347,
"versionNonce": 840003056,
"isDeleted": false,
"id": "VpBDx2DIaWwIhOzzspdiO",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 24.576866221444533,
"y": 18.16307254849813,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 36.80615136981645,
"height": 27.85323529808091,
"seed": 1794506512,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478299,
"link": null,
"locked": false,
"startBinding": {
"elementId": "KgN1wquMjbcnndF7mIZZE",
"gap": 19.406459024912508,
"focus": -0.0446534289390512
},
"endBinding": {
"elementId": "Mka3QKUB7w92Jjr6-DRhz",
"gap": 18.706684742883226,
"focus": -0.4031728505525382
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
36.80615136981645,
-27.85323529808091
]
]
},
{
"type": "rectangle",
"version": 693,
"versionNonce": 56960784,
"isDeleted": false,
"id": "Mka3QKUB7w92Jjr6-DRhz",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 41.11056075971601,
"y": -179.396847492466,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 149,
"height": 151,
"seed": 1399967504,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "BKDeWmFh"
},
{
"id": "VpBDx2DIaWwIhOzzspdiO",
"type": "arrow"
},
{
"id": "lA0qGhpVklLgpJeUcdQ1L",
"type": "arrow"
}
],
"updated": 1694341267230,
"link": null,
"locked": false
},
{
"type": "text",
"version": 511,
"versionNonce": 2146905360,
"isDeleted": false,
"id": "BKDeWmFh",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 77.41060195844648,
"y": -128.896847492466,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 76.39991760253906,
"height": 50,
"seed": 1887792400,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341225855,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Domain \nevents",
"rawText": "Domain events",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "Mka3QKUB7w92Jjr6-DRhz",
"originalText": "Domain events",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "rectangle",
"version": 182,
"versionNonce": 810897904,
"isDeleted": false,
"id": "UgbAq4IFZljS9mLQwG9x_",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "dashed",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 342.41036241134805,
"y": -181.85191063672076,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 204,
"height": 179,
"seed": 1737255408,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "eBw97vYs"
},
{
"id": "lA0qGhpVklLgpJeUcdQ1L",
"type": "arrow"
}
],
"updated": 1694341273830,
"link": null,
"locked": false
},
{
"type": "text",
"version": 145,
"versionNonce": 800429040,
"isDeleted": false,
"id": "eBw97vYs",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 366.71045701584023,
"y": -117.35191063672076,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 155.39981079101562,
"height": 50,
"seed": 998413296,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341273830,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Data platform \nIngest",
"rawText": "Data platform Ingest",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "UgbAq4IFZljS9mLQwG9x_",
"originalText": "Data platform Ingest",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 122,
"versionNonce": 498267632,
"isDeleted": false,
"id": "lA0qGhpVklLgpJeUcdQ1L",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 204.27567581184144,
"y": -109.63460928149289,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 120.06734329975325,
"height": 3.656040452848913,
"seed": 580477936,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478300,
"link": null,
"locked": false,
"startBinding": {
"elementId": "Mka3QKUB7w92Jjr6-DRhz",
"gap": 14.16511505212543,
"focus": -0.039063585773621255
},
"endBinding": {
"elementId": "UgbAq4IFZljS9mLQwG9x_",
"gap": 18.06734329975336,
"focus": 0.26558547393487125
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
120.06734329975325,
-3.656040452848913
]
]
}
],
"appState": {
"theme": "dark",
"viewBackgroundColor": "#ffffff",
"currentItemStrokeColor": "#1e1e1e",
"currentItemBackgroundColor": "transparent",
"currentItemFillStyle": "hachure",
"currentItemStrokeWidth": 1,
"currentItemStrokeStyle": "dashed",
"currentItemRoughness": 1,
"currentItemOpacity": 100,
"currentItemFontFamily": 1,
"currentItemFontSize": 20,
"currentItemTextAlign": "left",
"currentItemStartArrowhead": null,
"currentItemEndArrowhead": "arrow",
"scrollX": 863.351248291904,
"scrollY": 505.83489718634723,
"zoom": {
"value": 1.9000000000000001
},
"currentItemRoundness": "round",
"gridSize": null,
"gridColor": {
"Bold": "#C9C9C9FF",
"Regular": "#EDEDEDFF"
},
"currentStrokeOptions": null,
"previousGridSize": null,
"frameRendering": {
"enabled": true,
"clip": true,
"name": true,
"outline": true
}
},
"files": {}
}
```
%%

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 94 KiB

View File

@@ -0,0 +1,864 @@
---
excalidraw-plugin: parsed
tags: [excalidraw]
---
==⚠ Switch to EXCALIDRAW VIEW in the MORE OPTIONS menu of this document. ⚠==
# Text Elements
Services & Routing layer ^72o9GXma
NATS event consumer ^s45npuhh
Datafusion pipeline (Apache Ballista) ^zTVSQ9mn
Schema applier ^amQspibu
Error lake ^aXOOUJid
Data lake ^rgyaqp3l
If schema apply fails ^ZI5ehy6p
S3 (MiniO) ^712DogOh
%%
# Drawing
```json
{
"type": "excalidraw",
"version": 2,
"source": "https://github.com/zsviczian/obsidian-excalidraw-plugin/releases/tag/1.9.19",
"elements": [
{
"type": "rectangle",
"version": 387,
"versionNonce": 1503139600,
"isDeleted": false,
"id": "EpQKc79arb5LHLs5lDD9x",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "dashed",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -366.0923251010322,
"y": -211.55834559271477,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 204,
"height": 179,
"seed": 477382640,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "72o9GXma"
},
{
"id": "Sa68IFzHb_kbhirC5ZYa9",
"type": "arrow"
}
],
"updated": 1694344865066,
"link": null,
"locked": false
},
{
"type": "text",
"version": 389,
"versionNonce": 1021616624,
"isDeleted": false,
"id": "72o9GXma",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -360.99221218599314,
"y": -147.05834559271477,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 193.79977416992188,
"height": 50,
"seed": 1753598448,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344865066,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Services & Routing \nlayer",
"rawText": "Services & Routing layer",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "EpQKc79arb5LHLs5lDD9x",
"originalText": "Services & Routing layer",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "rectangle",
"version": 374,
"versionNonce": 1789048080,
"isDeleted": false,
"id": "26On9coKm71CUyFa0NAwt",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -44.25,
"y": -175.14453125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 156,
"height": 98,
"seed": 110157808,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"id": "Sa68IFzHb_kbhirC5ZYa9",
"type": "arrow"
},
{
"type": "text",
"id": "s45npuhh"
},
{
"id": "hLelJzbrBZHSYN9lwBMaO",
"type": "arrow"
}
],
"updated": 1694344865066,
"link": null,
"locked": false
},
{
"type": "text",
"version": 337,
"versionNonce": 232449008,
"isDeleted": false,
"id": "s45npuhh",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -29.859939575195312,
"y": -151.14453125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 127.21987915039062,
"height": 50,
"seed": 1057794544,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344865066,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "NATS event \nconsumer",
"rawText": "NATS event consumer",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "26On9coKm71CUyFa0NAwt",
"originalText": "NATS event consumer",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 629,
"versionNonce": 1846427920,
"isDeleted": false,
"id": "Sa68IFzHb_kbhirC5ZYa9",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -142.99999999999994,
"y": -124.64045999275731,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 83.24999999999996,
"height": 0.7767966783556517,
"seed": 1791612400,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694345002232,
"link": null,
"locked": false,
"startBinding": {
"elementId": "EpQKc79arb5LHLs5lDD9x",
"gap": 19.09232510103223,
"focus": -0.04103860515644922
},
"endBinding": {
"elementId": "26On9coKm71CUyFa0NAwt",
"gap": 15.500000000000014,
"focus": -0.06341132414040519
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
83.24999999999996,
0.7767966783556517
]
]
},
{
"type": "rectangle",
"version": 270,
"versionNonce": 490651920,
"isDeleted": false,
"id": "fhs_gMyi6RKgf8dYaLSBT",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 390.75,
"y": -177.89453125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 200,
"height": 91,
"seed": 1239993328,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "zTVSQ9mn"
},
{
"id": "mUwnjyxDqu4NeHQAU2N2w",
"type": "arrow"
},
{
"id": "AkQqCQSq8ht48pWAAeJpr",
"type": "arrow"
}
],
"updated": 1694344912272,
"link": null,
"locked": false
},
{
"type": "text",
"version": 197,
"versionNonce": 297756944,
"isDeleted": false,
"id": "zTVSQ9mn",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 398.11011505126953,
"y": -157.39453125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 185.27976989746094,
"height": 50,
"seed": 1096904688,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344946394,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Datafusion pipeline\n(Apache Ballista)",
"rawText": "Datafusion pipeline\n(Apache Ballista)",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "fhs_gMyi6RKgf8dYaLSBT",
"originalText": "Datafusion pipeline\n(Apache Ballista)",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "rectangle",
"version": 129,
"versionNonce": 1347277072,
"isDeleted": false,
"id": "uZivVzfxhit67PbDewSup",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 198.5,
"y": -174.14453125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 104,
"height": 96,
"seed": 1124405232,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "amQspibu"
},
{
"id": "hLelJzbrBZHSYN9lwBMaO",
"type": "arrow"
},
{
"id": "mUwnjyxDqu4NeHQAU2N2w",
"type": "arrow"
},
{
"id": "bMVvpgvNyJBV4m7iofm1V",
"type": "arrow"
}
],
"updated": 1694344890598,
"link": null,
"locked": false
},
{
"type": "text",
"version": 98,
"versionNonce": 1854672656,
"isDeleted": false,
"id": "amQspibu",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 210.99004364013672,
"y": -151.14453125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 79.01991271972656,
"height": 50,
"seed": 1966093808,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344865067,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Schema \napplier",
"rawText": "Schema applier",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "uZivVzfxhit67PbDewSup",
"originalText": "Schema applier",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 212,
"versionNonce": 1240477968,
"isDeleted": false,
"id": "hLelJzbrBZHSYN9lwBMaO",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 121.50000000000004,
"y": -130.07201453488125,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 66.49999999999996,
"height": 1.8419941085324467,
"seed": 454756624,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694345002233,
"link": null,
"locked": false,
"startBinding": {
"elementId": "26On9coKm71CUyFa0NAwt",
"gap": 9.750000000000028,
"focus": -0.12427713280499124
},
"endBinding": {
"elementId": "uZivVzfxhit67PbDewSup",
"gap": 10.5,
"focus": 0.0071660427325140694
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
66.49999999999996,
1.8419941085324467
]
]
},
{
"type": "arrow",
"version": 329,
"versionNonce": 1967336720,
"isDeleted": false,
"id": "mUwnjyxDqu4NeHQAU2N2w",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 313,
"y": -130.11940510920346,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 66,
"height": 0.08441126716857639,
"seed": 1776163312,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694345002234,
"link": null,
"locked": false,
"startBinding": {
"elementId": "uZivVzfxhit67PbDewSup",
"gap": 10.5,
"focus": -0.08103228804526064
},
"endBinding": {
"elementId": "fhs_gMyi6RKgf8dYaLSBT",
"gap": 11.75,
"focus": -0.044880247318899974
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
66,
-0.08441126716857639
]
]
},
{
"type": "rectangle",
"version": 246,
"versionNonce": 304987408,
"isDeleted": false,
"id": "_5O__8RSAgyYEjj52QWQa",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "dashed",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 200.59827551530225,
"y": 17.301048334314544,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 398.79362598571333,
"height": 317.8125448468521,
"seed": 1060721136,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"id": "AkQqCQSq8ht48pWAAeJpr",
"type": "arrow"
}
],
"updated": 1694344912272,
"link": null,
"locked": false
},
{
"type": "rectangle",
"version": 205,
"versionNonce": 2025377040,
"isDeleted": false,
"id": "jF9nuAfOqlEwdFrmAFhY9",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 197.85880592564683,
"y": 18.561362062379345,
"strokeColor": "transparent",
"backgroundColor": "#e9ecef",
"width": 198,
"height": 313,
"seed": 902355440,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [
{
"id": "bMVvpgvNyJBV4m7iofm1V",
"type": "arrow"
}
],
"updated": 1694344890598,
"link": null,
"locked": false
},
{
"type": "text",
"version": 156,
"versionNonce": 1457464592,
"isDeleted": false,
"id": "aXOOUJid",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 250.25648564762275,
"y": 160.9278714862574,
"strokeColor": "#1e1e1e",
"backgroundColor": "#e9ecef",
"width": 99.63987731933594,
"height": 25,
"seed": 601563120,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344875185,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Error lake",
"rawText": "Error lake",
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "Error lake",
"lineHeight": 1.25,
"baseline": 18
},
{
"type": "text",
"version": 223,
"versionNonce": 131597072,
"isDeleted": false,
"id": "rgyaqp3l",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 451.0195150402808,
"y": 162.17937583059228,
"strokeColor": "#1e1e1e",
"backgroundColor": "#e9ecef",
"width": 102.85987854003906,
"height": 25,
"seed": 1602597136,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344875185,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Data lake",
"rawText": "Data lake",
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "Data lake",
"lineHeight": 1.25,
"baseline": 18
},
{
"type": "arrow",
"version": 170,
"versionNonce": 1829185808,
"isDeleted": false,
"id": "bMVvpgvNyJBV4m7iofm1V",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 249.4925131840489,
"y": -62.15208787739857,
"strokeColor": "#1e1e1e",
"backgroundColor": "#e9ecef",
"width": 0,
"height": 61.11779708593315,
"seed": 1804713968,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694345002234,
"link": null,
"locked": false,
"startBinding": {
"elementId": "uZivVzfxhit67PbDewSup",
"gap": 15.992443372601429,
"focus": 0.019374746460598023
},
"endBinding": {
"elementId": "jF9nuAfOqlEwdFrmAFhY9",
"gap": 19.59565285384474,
"focus": -0.47844740143028225
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
0,
61.11779708593315
]
]
},
{
"type": "text",
"version": 80,
"versionNonce": 466318832,
"isDeleted": false,
"id": "ZI5ehy6p",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 150.94006538298186,
"y": -48.40058353306358,
"strokeColor": "#1e1e1e",
"backgroundColor": "#e9ecef",
"width": 209.5997314453125,
"height": 25,
"seed": 1419887888,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694344903041,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "If schema apply fails",
"rawText": "If schema apply fails",
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "If schema apply fails",
"lineHeight": 1.25,
"baseline": 18
},
{
"type": "arrow",
"version": 53,
"versionNonce": 592327952,
"isDeleted": false,
"id": "AkQqCQSq8ht48pWAAeJpr",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 493.1997290642071,
"y": -74.37564729458518,
"strokeColor": "#1e1e1e",
"backgroundColor": "#e9ecef",
"width": 1.5279449271483827,
"height": 76.39724635741638,
"seed": 1265499120,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694345002233,
"link": null,
"locked": false,
"startBinding": {
"elementId": "fhs_gMyi6RKgf8dYaLSBT",
"gap": 12.518883955414822,
"focus": -0.012777240958267157
},
"endBinding": {
"elementId": "_5O__8RSAgyYEjj52QWQa",
"gap": 15.279449271483315,
"focus": 0.48483934228390557
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
1.5279449271483827,
76.39724635741638
]
]
},
{
"id": "712DogOh",
"type": "text",
"x": 344.8997881999917,
"y": 28.562780044225974,
"width": 98.77989196777344,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "#e9ecef",
"fillStyle": "hachure",
"strokeWidth": 0.5,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"roundness": null,
"seed": 1519137264,
"version": 159,
"versionNonce": 1898942960,
"isDeleted": false,
"boundElements": null,
"updated": 1694345025125,
"link": null,
"locked": false,
"text": "S3 (MiniO)",
"rawText": "S3 (MiniO)",
"fontSize": 20,
"fontFamily": 1,
"textAlign": "left",
"verticalAlign": "top",
"baseline": 18,
"containerId": null,
"originalText": "S3 (MiniO)",
"lineHeight": 1.25
}
],
"appState": {
"theme": "dark",
"viewBackgroundColor": "#ffffff",
"currentItemStrokeColor": "#1e1e1e",
"currentItemBackgroundColor": "#e9ecef",
"currentItemFillStyle": "hachure",
"currentItemStrokeWidth": 0.5,
"currentItemStrokeStyle": "solid",
"currentItemRoughness": 1,
"currentItemOpacity": 100,
"currentItemFontFamily": 1,
"currentItemFontSize": 20,
"currentItemTextAlign": "left",
"currentItemStartArrowhead": null,
"currentItemEndArrowhead": "arrow",
"scrollX": 641.1002118000083,
"scrollY": 161.50753245577403,
"zoom": {
"value": 2
},
"currentItemRoundness": "sharp",
"gridSize": null,
"gridColor": {
"Bold": "#C9C9C9FF",
"Regular": "#EDEDEDFF"
},
"currentStrokeOptions": null,
"previousGridSize": null,
"frameRendering": {
"enabled": true,
"clip": true,
"name": true,
"outline": true
}
},
"files": {}
}
```
%%

Binary image added (159 KiB).
Image added (110 KiB); diff suppressed because lines are too long.
File diff suppressed because it is too large.
Binary image added (251 KiB).
Image added (142 KiB); diff suppressed because lines are too long.
File diff suppressed because it is too large.
Binary image added (183 KiB).
Image added (135 KiB); diff suppressed because lines are too long.


@@ -0,0 +1,401 @@
---
excalidraw-plugin: parsed
tags: [excalidraw]
---
==⚠ Switch to EXCALIDRAW VIEW in the MORE OPTIONS menu of this document. ⚠==
# Text Elements
IoT Service ^EoQSgbi1
IoT Sensor ^HZlDcT2x
Data platform Ingest ^72o9GXma
%%
# Drawing
```json
{
"type": "excalidraw",
"version": 2,
"source": "https://github.com/zsviczian/obsidian-excalidraw-plugin/releases/tag/1.9.19",
"elements": [
{
"type": "rectangle",
"version": 701,
"versionNonce": 167105808,
"isDeleted": false,
"id": "2wHeg2N53IYzpgCwRZ_sV",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -109.90675912352219,
"y": -145.46057384034123,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 149,
"height": 151,
"seed": 746651120,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "EoQSgbi1"
},
{
"id": "jYcTtzBU6Ba3uXE02Cful",
"type": "arrow"
},
{
"id": "vzeGuvlzbu9D-RUnuM_MZ",
"type": "arrow"
}
],
"updated": 1694341385095,
"link": null,
"locked": false
},
{
"type": "text",
"version": 520,
"versionNonce": 77116176,
"isDeleted": false,
"id": "EoQSgbi1",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -93.19669900389329,
"y": -82.46057384034123,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 115.57987976074219,
"height": 25,
"seed": 25935856,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341385095,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "IoT Service",
"rawText": "IoT Service",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "2wHeg2N53IYzpgCwRZ_sV",
"originalText": "IoT Service",
"lineHeight": 1.25,
"baseline": 18
},
{
"type": "diamond",
"version": 552,
"versionNonce": 1837905680,
"isDeleted": false,
"id": "HKT3KgeY8V3T-f2SJo_3r",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -488.4076748989678,
"y": -159.80102940728523,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 220,
"height": 200,
"seed": 737034736,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"type": "text",
"id": "HZlDcT2x"
},
{
"id": "jYcTtzBU6Ba3uXE02Cful",
"type": "arrow"
}
],
"updated": 1694341514931,
"link": null,
"locked": false
},
{
"type": "text",
"version": 546,
"versionNonce": 722654480,
"isDeleted": false,
"id": "HZlDcT2x",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -409.8976422451592,
"y": -84.80102940728523,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 62.97993469238281,
"height": 50,
"seed": 716211184,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341514931,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "IoT \nSensor",
"rawText": "IoT Sensor",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "HKT3KgeY8V3T-f2SJo_3r",
"originalText": "IoT Sensor",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 642,
"versionNonce": 1628229904,
"isDeleted": false,
"id": "jYcTtzBU6Ba3uXE02Cful",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -260.6056224823998,
"y": -63.47351673236517,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 133.76296346137323,
"height": 0.9440251662323789,
"seed": 1541047792,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341514931,
"link": null,
"locked": false,
"startBinding": {
"elementId": "HKT3KgeY8V3T-f2SJo_3r",
"gap": 8.623177202385875,
"focus": -0.028358267431578415
},
"endBinding": {
"elementId": "2wHeg2N53IYzpgCwRZ_sV",
"gap": 16.935899897504328,
"focus": -0.06442193471262163
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
133.76296346137323,
-0.9440251662323789
]
]
},
{
"type": "rectangle",
"version": 331,
"versionNonce": 369345808,
"isDeleted": false,
"id": "EpQKc79arb5LHLs5lDD9x",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "dashed",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 148.9076748989678,
"y": -128.05834559271477,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 204,
"height": 179,
"seed": 477382640,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "72o9GXma"
},
{
"id": "vzeGuvlzbu9D-RUnuM_MZ",
"type": "arrow"
}
],
"updated": 1694341385095,
"link": null,
"locked": false
},
{
"type": "text",
"version": 294,
"versionNonce": 963158800,
"isDeleted": false,
"id": "72o9GXma",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 173.20776950345999,
"y": -63.55834559271477,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 155.39981079101562,
"height": 50,
"seed": 1753598448,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341385095,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Data platform \nIngest",
"rawText": "Data platform Ingest",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "EpQKc79arb5LHLs5lDD9x",
"originalText": "Data platform Ingest",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 315,
"versionNonce": 420523792,
"isDeleted": false,
"id": "vzeGuvlzbu9D-RUnuM_MZ",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "dashed",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 49.56397613476656,
"y": -68.71372587133547,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 78.50557061906932,
"height": 2.059257777181273,
"seed": 2065107952,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341501797,
"link": null,
"locked": false,
"startBinding": {
"elementId": "2wHeg2N53IYzpgCwRZ_sV",
"gap": 10.470735258288755,
"focus": -0.012678407176878778
},
"endBinding": {
"elementId": "EpQKc79arb5LHLs5lDD9x",
"gap": 20.83812814513192,
"focus": 0.2698545781974716
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
78.50557061906932,
2.059257777181273
]
]
}
],
"appState": {
"theme": "dark",
"viewBackgroundColor": "#ffffff",
"currentItemStrokeColor": "#1e1e1e",
"currentItemBackgroundColor": "transparent",
"currentItemFillStyle": "hachure",
"currentItemStrokeWidth": 1,
"currentItemStrokeStyle": "solid",
"currentItemRoughness": 1,
"currentItemOpacity": 100,
"currentItemFontFamily": 1,
"currentItemFontSize": 20,
"currentItemTextAlign": "left",
"currentItemStartArrowhead": null,
"currentItemEndArrowhead": "arrow",
"scrollX": 826.25,
"scrollY": 377.71484375,
"zoom": {
"value": 2
},
"currentItemRoundness": "round",
"gridSize": null,
"gridColor": {
"Bold": "#C9C9C9FF",
"Regular": "#EDEDEDFF"
},
"currentStrokeOptions": null,
"previousGridSize": null,
"frameRendering": {
"enabled": true,
"clip": true,
"name": true,
"outline": true
}
},
"files": {}
}
```
%%

Binary image added (28 KiB).
Image added (86 KiB); diff suppressed because lines are too long.
Binary image added (627 KiB).


@@ -1,39 +0,0 @@
+++
title = "My first post"
date = 2019-11-27
draft = true
+++

This is my first blog post.

Lorem ipsum dolor sit amet, officia excepteur ex fugiat reprehenderit enim
labore culpa sint ad nisi Lorem pariatur mollit ex esse exercitation amet. Nisi
anim cupidatat excepteur officia. Reprehenderit nostrud nostrud ipsum Lorem est
aliquip amet voluptate voluptate dolor minim nulla est proident. Nostrud officia
pariatur ut officia. Sit irure elit esse ea nulla sunt ex occaecat reprehenderit
commodo officia dolor Lorem duis laboris cupidatat officia voluptate. Culpa
proident adipisicing id nulla nisi laboris ex in Lorem sunt duis officia
eiusmod. Aliqua reprehenderit commodo ex non excepteur duis sunt velit enim.
Voluptate laboris sint cupidatat ullamco ut ea consectetur et est culpa et culpa
duis. <!-- more -->

Lorem ipsum dolor sit amet, officia excepteur ex fugiat reprehenderit enim
labore culpa sint ad nisi Lorem pariatur mollit ex esse exercitation amet. Nisi
anim cupidatat excepteur officia. Reprehenderit nostrud nostrud ipsum Lorem est
aliquip amet voluptate voluptate dolor minim nulla est proident. Nostrud officia
pariatur ut officia. Sit irure elit esse ea nulla sunt ex occaecat reprehenderit
commodo officia dolor Lorem duis laboris cupidatat officia voluptate. Culpa
proident adipisicing id nulla nisi laboris ex in Lorem sunt duis officia
eiusmod. Aliqua reprehenderit commodo ex non excepteur duis sunt velit enim.
Voluptate laboris sint cupidatat ullamco ut ea consectetur et est culpa et culpa
duis.

```rust
fn main() -> eyre::Result<()> {
    Ok(())
}
```

![image](https://images.unsplash.com/photo-1680188591202-22a2f6d7d6af?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1270&q=80)


@@ -1,39 +0,0 @@
+++
title = "My second post"
date = 2020-11-27
draft = true
+++

This is my first blog post.

Lorem ipsum dolor sit amet, officia excepteur ex fugiat reprehenderit enim
labore culpa sint ad nisi Lorem pariatur mollit ex esse exercitation amet. Nisi
anim cupidatat excepteur officia. Reprehenderit nostrud nostrud ipsum Lorem est
aliquip amet voluptate voluptate dolor minim nulla est proident. Nostrud officia
pariatur ut officia. Sit irure elit esse ea nulla sunt ex occaecat reprehenderit
commodo officia dolor Lorem duis laboris cupidatat officia voluptate. Culpa
proident adipisicing id nulla nisi laboris ex in Lorem sunt duis officia
eiusmod. Aliqua reprehenderit commodo ex non excepteur duis sunt velit enim.
Voluptate laboris sint cupidatat ullamco ut ea consectetur et est culpa et culpa
duis.<!-- more -->

Lorem ipsum dolor sit amet, officia excepteur ex fugiat reprehenderit enim
labore culpa sint ad nisi Lorem pariatur mollit ex esse exercitation amet. Nisi
anim cupidatat excepteur officia. Reprehenderit nostrud nostrud ipsum Lorem est
aliquip amet voluptate voluptate dolor minim nulla est proident. Nostrud officia
pariatur ut officia. Sit irure elit esse ea nulla sunt ex occaecat reprehenderit
commodo officia dolor Lorem duis laboris cupidatat officia voluptate. Culpa
proident adipisicing id nulla nisi laboris ex in Lorem sunt duis officia
eiusmod. Aliqua reprehenderit commodo ex non excepteur duis sunt velit enim.
Voluptate laboris sint cupidatat ullamco ut ea consectetur et est culpa et culpa
duis.

```rust
fn main() -> eyre::Result<()> {
    Ok(())
}
```

![image](https://images.unsplash.com/photo-1680188591202-22a2f6d7d6af?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1270&q=80)


@@ -1,39 +0,0 @@
+++
title = "My second post"
date = 2020-12-27
draft = true
+++

This is my first blog post.

Lorem ipsum dolor sit amet, officia excepteur ex fugiat reprehenderit enim
labore culpa sint ad nisi Lorem pariatur mollit ex esse exercitation amet. Nisi
anim cupidatat excepteur officia. Reprehenderit nostrud nostrud ipsum Lorem est
aliquip amet voluptate voluptate dolor minim nulla est proident. Nostrud officia
pariatur ut officia. Sit irure elit esse ea nulla sunt ex occaecat reprehenderit
commodo officia dolor Lorem duis laboris cupidatat officia voluptate. Culpa
proident adipisicing id nulla nisi laboris ex in Lorem sunt duis officia
eiusmod. Aliqua reprehenderit commodo ex non excepteur duis sunt velit enim.
Voluptate laboris sint cupidatat ullamco ut ea consectetur et est culpa et culpa
duis.

Lorem ipsum dolor sit amet, officia excepteur ex fugiat reprehenderit enim
labore culpa sint ad nisi Lorem pariatur mollit ex esse exercitation amet. Nisi
anim cupidatat excepteur officia. Reprehenderit nostrud nostrud ipsum Lorem est
aliquip amet voluptate voluptate dolor minim nulla est proident. Nostrud officia
pariatur ut officia. Sit irure elit esse ea nulla sunt ex occaecat reprehenderit
commodo officia dolor Lorem duis laboris cupidatat officia voluptate. Culpa
proident adipisicing id nulla nisi laboris ex in Lorem sunt duis officia
eiusmod. Aliqua reprehenderit commodo ex non excepteur duis sunt velit enim.
Voluptate laboris sint cupidatat ullamco ut ea consectetur et est culpa et culpa
duis.

```rust
fn main() -> eyre::Result<()> {
    Ok(())
}
```

![image](https://images.unsplash.com/photo-1680188591202-22a2f6d7d6af?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1270&q=80)

mise.toml (new file, 19 lines)

@@ -0,0 +1,19 @@
[tasks."ci:build"]
dir = "./ci"
run = "cargo build"
[tasks."ci:run"]
run = "./ci/target/debug/ci"
[tasks."ci:main"]
depends = ["ci:build"]
run = "mise run ci:run main"
[tasks."ci:pr"]
depends = ["ci:build"]
run = "mise run ci:run pr"
[tasks."dev"]
env = { RUN = "local" }
depends = ["ci:build"]
run = "mise run ci:run"


@@ -14,10 +14,10 @@
},
"dependencies": {
"@tailwindcss/typography": "^0.5.9",
"tailwindcss": "^4.0.0"
"tailwindcss": "^3.3.1"
},
"devDependencies": {
"@catppuccin/tailwindcss": "^0.1.1",
"@tailwindcss/cli": "^4.0.0"
"@tailwindcss/cli": "^0.1.2"
}
}

Binary image added (208 KiB).
Binary image added (408 KiB).


@@ -0,0 +1,868 @@
---
excalidraw-plugin: parsed
tags: [excalidraw]
---
==⚠ Switch to EXCALIDRAW VIEW in the MORE OPTIONS menu of this document. ⚠==
# Text Elements
Backend service ^9wbfh44t
Backend engineer ^OYnQIyhR
User ^d7TsIcMm
Event sourced model ^tpEFylci
Domain events ^BKDeWmFh
Data platform Ingest ^eBw97vYs
%%
# Drawing
```json
{
"type": "excalidraw",
"version": 2,
"source": "https://github.com/zsviczian/obsidian-excalidraw-plugin/releases/tag/1.9.19",
"elements": [
{
"type": "rectangle",
"version": 465,
"versionNonce": 1569199600,
"isDeleted": false,
"id": "sTx3V6IBMr5fmkkGDikFE",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -298.22390437714466,
"y": -177.26461889876435,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 149,
"height": 151,
"seed": 1536562448,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "9wbfh44t"
},
{
"id": "J0mitVhCskhfgbLZOMKDf",
"type": "arrow"
},
{
"id": "Gvy66Wi-Z4fYBo_6xHAo2",
"type": "arrow"
},
{
"id": "PS6qWUBa9zPZp3Y7X1IH_",
"type": "arrow"
},
{
"id": "udDfzIaj1GY-t9tp-N-Gl",
"type": "arrow"
}
],
"updated": 1694341209387,
"link": null,
"locked": false
},
{
"type": "text",
"version": 272,
"versionNonce": 837801968,
"isDeleted": false,
"id": "9wbfh44t",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -268.38386226288685,
"y": -126.76461889876435,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 89.31991577148438,
"height": 50,
"seed": 757141264,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341209387,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Backend \nservice",
"rawText": "Backend service",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "sTx3V6IBMr5fmkkGDikFE",
"originalText": "Backend service",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "diamond",
"version": 210,
"versionNonce": 1268187120,
"isDeleted": false,
"id": "vMWQeW3qRvIkUdjOLaWPz",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -345.43316720684743,
"y": -482.1972278309496,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 220,
"height": 200,
"seed": 1298980624,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"type": "text",
"id": "OYnQIyhR"
},
{
"id": "J0mitVhCskhfgbLZOMKDf",
"type": "arrow"
}
],
"updated": 1694341455611,
"link": null,
"locked": false
},
{
"type": "text",
"version": 177,
"versionNonce": 311525872,
"isDeleted": false,
"id": "OYnQIyhR",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -280.0931250925896,
"y": -407.1972278309496,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 89.31991577148438,
"height": 50,
"seed": 817042704,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341455611,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Backend \nengineer",
"rawText": "Backend engineer",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "vMWQeW3qRvIkUdjOLaWPz",
"originalText": "Backend engineer",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 942,
"versionNonce": 1338138096,
"isDeleted": false,
"id": "J0mitVhCskhfgbLZOMKDf",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -230.81771058718556,
"y": -274.92613188060886,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 0.0046177819788795205,
"height": 89.59298371386575,
"seed": 2103470064,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478296,
"link": null,
"locked": false,
"startBinding": {
"elementId": "vMWQeW3qRvIkUdjOLaWPz",
"gap": 8.61227473592443,
"focus": -0.041822171768161574
},
"endBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 8.06852926797876,
"focus": -0.0950941088262367
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
0.0046177819788795205,
89.59298371386575
]
]
},
{
"type": "diamond",
"version": 295,
"versionNonce": 1320180208,
"isDeleted": false,
"id": "v5jShMMi0xwqEukwhA_JJ",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -677.8497011004193,
"y": -202.97325254400545,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 220,
"height": 200,
"seed": 783812880,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"type": "text",
"id": "d7TsIcMm"
},
{
"id": "Gvy66Wi-Z4fYBo_6xHAo2",
"type": "arrow"
}
],
"updated": 1694341100136,
"link": null,
"locked": false
},
{
"type": "text",
"version": 279,
"versionNonce": 1567595504,
"isDeleted": false,
"id": "d7TsIcMm",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -590.1896745501264,
"y": -115.47325254400545,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 44.67994689941406,
"height": 25,
"seed": 989095696,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341100136,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "User",
"rawText": "User",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "v5jShMMi0xwqEukwhA_JJ",
"originalText": "User",
"lineHeight": 1.25,
"baseline": 18
},
{
"type": "arrow",
"version": 204,
"versionNonce": 911712752,
"isDeleted": false,
"id": "Gvy66Wi-Z4fYBo_6xHAo2",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -454.1370030401614,
"y": -103.30812564496424,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 136.58153297230444,
"height": 0.46945233740069625,
"seed": 1541315344,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478297,
"link": null,
"locked": false,
"startBinding": {
"elementId": "v5jShMMi0xwqEukwhA_JJ",
"gap": 3.7277696925090282,
"focus": 0.0005597545142185046
},
"endBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 19.331565690712296,
"focus": 0.03082885731532018
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
136.58153297230444,
-0.46945233740069625
]
]
},
{
"type": "ellipse",
"version": 145,
"versionNonce": 683659760,
"isDeleted": false,
"id": "KgN1wquMjbcnndF7mIZZE",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -121.86574511764019,
"y": 3.1277624056234004,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 148,
"height": 148,
"seed": 1134723568,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [
{
"id": "PS6qWUBa9zPZp3Y7X1IH_",
"type": "arrow"
},
{
"id": "udDfzIaj1GY-t9tp-N-Gl",
"type": "arrow"
},
{
"type": "text",
"id": "tpEFylci"
},
{
"id": "VpBDx2DIaWwIhOzzspdiO",
"type": "arrow"
}
],
"updated": 1694341208616,
"link": null,
"locked": false
},
{
"type": "text",
"version": 51,
"versionNonce": 1976533776,
"isDeleted": false,
"id": "tpEFylci",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -89.82159840249548,
"y": 39.80186059781888,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 84.25990295410156,
"height": 75,
"seed": 384337392,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341203391,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Event \nsourced \nmodel",
"rawText": "Event sourced model",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "KgN1wquMjbcnndF7mIZZE",
"originalText": "Event sourced model",
"lineHeight": 1.25,
"baseline": 68
},
{
"type": "arrow",
"version": 312,
"versionNonce": 1626203120,
"isDeleted": false,
"id": "PS6qWUBa9zPZp3Y7X1IH_",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -136.53086016976556,
"y": -49.76929969163509,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 61.85382626567878,
"height": 37.75817092419969,
"seed": 1044317968,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478298,
"link": null,
"locked": false,
"startBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 12.693044207379103,
"focus": 0.701485505647599
},
"endBinding": {
"elementId": "KgN1wquMjbcnndF7mIZZE",
"gap": 20.504530823640664,
"focus": -0.09142088821761776
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
53.568507010659005,
-1.482635738631508
],
[
61.85382626567878,
36.27553518556818
]
]
},
{
"type": "arrow",
"version": 264,
"versionNonce": 594273776,
"isDeleted": false,
"id": "udDfzIaj1GY-t9tp-N-Gl",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -121.8622170366934,
"y": 37.378326729495264,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 41.452896638401896,
"height": 53.533654118295544,
"seed": 2016661488,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478298,
"link": null,
"locked": false,
"startBinding": {
"elementId": "KgN1wquMjbcnndF7mIZZE",
"gap": 9.996996928687153,
"focus": 0.3309459780456835
},
"endBinding": {
"elementId": "sTx3V6IBMr5fmkkGDikFE",
"gap": 10.109291509964066,
"focus": -1.0057786128186066
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
-41.452896638401896,
-8.277501643773128
],
[
-29.488449945388496,
-53.533654118295544
]
]
},
{
"type": "arrow",
"version": 347,
"versionNonce": 840003056,
"isDeleted": false,
"id": "VpBDx2DIaWwIhOzzspdiO",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 24.576866221444533,
"y": 18.16307254849813,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 36.80615136981645,
"height": 27.85323529808091,
"seed": 1794506512,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478299,
"link": null,
"locked": false,
"startBinding": {
"elementId": "KgN1wquMjbcnndF7mIZZE",
"gap": 19.406459024912508,
"focus": -0.0446534289390512
},
"endBinding": {
"elementId": "Mka3QKUB7w92Jjr6-DRhz",
"gap": 18.706684742883226,
"focus": -0.4031728505525382
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
36.80615136981645,
-27.85323529808091
]
]
},
{
"type": "rectangle",
"version": 693,
"versionNonce": 56960784,
"isDeleted": false,
"id": "Mka3QKUB7w92Jjr6-DRhz",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 41.11056075971601,
"y": -179.396847492466,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 149,
"height": 151,
"seed": 1399967504,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "BKDeWmFh"
},
{
"id": "VpBDx2DIaWwIhOzzspdiO",
"type": "arrow"
},
{
"id": "lA0qGhpVklLgpJeUcdQ1L",
"type": "arrow"
}
],
"updated": 1694341267230,
"link": null,
"locked": false
},
{
"type": "text",
"version": 511,
"versionNonce": 2146905360,
"isDeleted": false,
"id": "BKDeWmFh",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 77.41060195844648,
"y": -128.896847492466,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 76.39991760253906,
"height": 50,
"seed": 1887792400,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341225855,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Domain \nevents",
"rawText": "Domain events",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "Mka3QKUB7w92Jjr6-DRhz",
"originalText": "Domain events",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "rectangle",
"version": 182,
"versionNonce": 810897904,
"isDeleted": false,
"id": "UgbAq4IFZljS9mLQwG9x_",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "dashed",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 342.41036241134805,
"y": -181.85191063672076,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 204,
"height": 179,
"seed": 1737255408,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "eBw97vYs"
},
{
"id": "lA0qGhpVklLgpJeUcdQ1L",
"type": "arrow"
}
],
"updated": 1694341273830,
"link": null,
"locked": false
},
{
"type": "text",
"version": 145,
"versionNonce": 800429040,
"isDeleted": false,
"id": "eBw97vYs",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 366.71045701584023,
"y": -117.35191063672076,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 155.39981079101562,
"height": 50,
"seed": 998413296,
"groupIds": [],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1694341273830,
"link": null,
"locked": false,
"fontSize": 20,
"fontFamily": 1,
"text": "Data platform \nIngest",
"rawText": "Data platform Ingest",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "UgbAq4IFZljS9mLQwG9x_",
"originalText": "Data platform Ingest",
"lineHeight": 1.25,
"baseline": 43
},
{
"type": "arrow",
"version": 122,
"versionNonce": 498267632,
"isDeleted": false,
"id": "lA0qGhpVklLgpJeUcdQ1L",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 204.27567581184144,
"y": -109.63460928149289,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 120.06734329975325,
"height": 3.656040452848913,
"seed": 580477936,
"groupIds": [],
"frameId": null,
"roundness": {
"type": 2
},
"boundElements": [],
"updated": 1694341478300,
"link": null,
"locked": false,
"startBinding": {
"elementId": "Mka3QKUB7w92Jjr6-DRhz",
"gap": 14.16511505212543,
"focus": -0.039063585773621255
},
"endBinding": {
"elementId": "UgbAq4IFZljS9mLQwG9x_",
"gap": 18.06734329975336,
"focus": 0.26558547393487125
},
"lastCommittedPoint": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"points": [
[
0,
0
],
[
120.06734329975325,
-3.656040452848913
]
]
}
],
"appState": {
"theme": "dark",
"viewBackgroundColor": "#ffffff",
"currentItemStrokeColor": "#1e1e1e",
"currentItemBackgroundColor": "transparent",
"currentItemFillStyle": "hachure",
"currentItemStrokeWidth": 1,
"currentItemStrokeStyle": "dashed",
"currentItemRoughness": 1,
"currentItemOpacity": 100,
"currentItemFontFamily": 1,
"currentItemFontSize": 20,
"currentItemTextAlign": "left",
"currentItemStartArrowhead": null,
"currentItemEndArrowhead": "arrow",
"scrollX": 863.351248291904,
"scrollY": 505.83489718634723,
"zoom": {
"value": 1.9000000000000001
},
"currentItemRoundness": "round",
"gridSize": null,
"gridColor": {
"Bold": "#C9C9C9FF",
"Regular": "#EDEDEDFF"
},
"currentStrokeOptions": null,
"previousGridSize": null,
"frameRendering": {
"enabled": true,
"clip": true,
"name": true,
"outline": true
}
},
"files": {}
}
```
%%

Binary image added (74 KiB).
Image added (94 KiB); diff suppressed because lines are too long.



@@ -56,7 +56,7 @@
</div>
</div>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.7.2/js/all.min.js"
<script src="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.2/js/all.min.js"
crossorigin="anonymous"></script>
</footer>
</body>


@@ -41,4 +41,4 @@
{% endif %}
</div>
-{% endblock content %}
+{% endblock content %}


@@ -47,4 +47,4 @@
{% endif %}
</div>
-{% endblock content %}
+{% endblock content %}

yarn.lock (1284 lines changed): file diff suppressed because it is too large.