initial commit

Karim Naufal
2021-07-30 10:12:37 +02:00
commit f615c994af
19 changed files with 864 additions and 0 deletions


@@ -0,0 +1,87 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: hcloud-cloud-controller-manager
  template:
    metadata:
      labels:
        app: hcloud-cloud-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: cloud-controller-manager
      dnsPolicy: Default
      tolerations:
        # this taint is set by all kubelets running `--cloud-provider=external`,
        # so we should tolerate it to schedule the cloud controller manager
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        # cloud controller managers should be able to run on masters
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
        - key: "node.kubernetes.io/not-ready"
          effect: "NoSchedule"
      hostNetwork: true
      containers:
        - image: hetznercloud/hcloud-cloud-controller-manager:latest
          name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            - "--cloud-provider=hcloud"
            - "--leader-elect=false"
            - "--allow-untagged-cloud"
            - "--allocate-node-cidrs=true"
            - "--cluster-cidr=10.42.0.0/16"
          resources:
            requests:
              cpu: 100m
              memory: 50Mi
            limits:
              cpu: 500m
              memory: 500Mi
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: token
            - name: HCLOUD_NETWORK
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: network
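
Note: the Deployment above reads its credentials from a Secret named hcloud in kube-system (keys token and network), which is created elsewhere. A minimal sketch of what that Secret would look like, with placeholder values:

apiVersion: v1
kind: Secret
metadata:
  name: hcloud
  namespace: kube-system
type: Opaque
stringData:
  # placeholders: substitute a real Hetzner Cloud API token and the
  # name (or ID) of the private network the cluster nodes run in
  token: "<hcloud-api-token>"
  network: "<hcloud-network-name>"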


@@ -0,0 +1,126 @@
upgradeCompatibility: '1.10'

debug:
  # -- Enable debug logging
  enabled: false
  # verbose:

# gke:
#   enabled: true

ipam:
  # -- Configure IP Address Management mode.
  # ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/
  mode: kubernetes

# -- Configure the encapsulation configuration for communication between nodes.
# Possible values:
#   - disabled (breaks csi-controller)
#   - vxlan (default)
#   - geneve
tunnel: geneve

# -- Specify the IPv4 CIDR for native routing (i.e. the range IP masquerade
# is avoided for). This value corresponds to the configured cluster-cidr.
nativeRoutingCIDR: 10.0.0.0/8

# When enabled, causes legacy routing
# endpointRoutes:
  # -- Enable use of per endpoint routes instead of routing via
  # the cilium_host interface.
  # enabled: false

# -- Enable installation of PodCIDR routes between worker
# nodes if worker nodes share a common L2 network segment.
autoDirectNodeRoutes: false

bpf:
  # -- Allow cluster external access to ClusterIP services.
  lbExternalClusterIP: false
  # -- Enable native IP masquerade support in eBPF
  masquerade: true

endpointHealthChecking:
  # -- Enable connectivity health checking between virtual endpoints.
  enabled: true

# -- Configure ClusterIP service handling in the host namespace (the node).
hostServices:
  # -- Enable host reachable services.
  enabled: true
  # -- Supported list of protocols to apply ClusterIP translation to.
  protocols: tcp,udp

externalIPs:
  # -- Enable ExternalIPs service support.
  enabled: true

hostPort:
  # -- Enable hostPort service support.
  enabled: true

# -- Configure N-S k8s service load balancing
nodePort:
  # -- Enable the Cilium NodePort service implementation.
  enabled: true
  # -- Enable connectivity health checking.
  healthChecking: true

ipv4:
  # -- Enable IPv4 support.
  enabled: true

ipv6:
  # -- Enable IPv6 support.
  enabled: false

# -- Configure Kubernetes specific configuration
k8s:
  # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
  # range via the Kubernetes node resource
  requireIPv4PodCIDR: true

# -- Configure the kube-proxy replacement in Cilium BPF datapath
# Valid options are "disabled", "probe", "partial", "strict".
# ref: https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free/
kubeProxyReplacement: strict

# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
enableIPv4Masquerade: true

monitor:
  # -- Enable the cilium-monitor sidecar.
  enabled: false

# -- Configure service load balancing
loadBalancer:
  # -- standalone enables the standalone L4LB which does not connect to
  # kube-apiserver.
  # standalone: false

  # -- algorithm is the name of the load balancing algorithm for backend
  # selection, e.g. random or maglev
  # algorithm: random

  # -- mode is the operation mode of load balancing for remote backends,
  # e.g. snat, dsr, hybrid
  mode: snat

  # -- acceleration is the option to accelerate service handling via XDP,
  # e.g. native, disabled
  # Gives "Error: virtio_net: Too few free TX rings available."
  # acceleration: native

  # Breaks csi
  # devices: eth1

# -- The agent can be put into one of the three policy enforcement modes:
# default, always and never.
# ref: https://docs.cilium.io/en/stable/policy/intro/#policy-enforcement-modes
policyEnforcementMode: never

# -- Enables the enforcement of host policies in the eBPF datapath.
hostFirewall: false
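
The block above is a Helm values file for the Cilium chart (the keys match the 1.10 chart layout that the upgradeCompatibility: '1.10' hint refers to). One way to consume such a values file on k3s is the built-in Helm controller; a sketch assuming the upstream chart repository, with an illustrative version and abbreviated values:

apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: cilium
  namespace: kube-system
spec:
  repo: https://helm.cilium.io/
  chart: cilium
  version: 1.10.3              # illustrative version, not pinned by this commit
  targetNamespace: kube-system
  # valuesContent would carry the full values file above verbatim;
  # only two keys are repeated here for brevity
  valuesContent: |-
    tunnel: geneve
    kubeProxyReplacement: strict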


@@ -0,0 +1,8 @@
controller:
  kind: DaemonSet
  service:
    annotations:
      load-balancer.hetzner.cloud/location: "fsn1"
      load-balancer.hetzner.cloud/use-private-ip: "true"
      load-balancer.hetzner.cloud/type: "lb11"
    type: LoadBalancer
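
These values run the ingress controller as a DaemonSet and expose it through a Hetzner lb11 load balancer in fsn1, reached over the private network; the annotations are consumed by the hcloud cloud controller manager deployed above. They work the same on any Service of type LoadBalancer; a minimal hypothetical example (name, selector, and ports are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: example
  namespace: default
  annotations:
    load-balancer.hetzner.cloud/location: "fsn1"
    load-balancer.hetzner.cloud/use-private-ip: "true"
    load-balancer.hetzner.cloud/type: "lb11"
spec:
  type: LoadBalancer
  selector:
    app: example
  ports:
    - name: http
      port: 80
      targetPort: 8080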


@@ -0,0 +1,50 @@
# Doc: https://rancher.com/docs/k3s/latest/en/upgrades/automated/
# agent plan
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-agent
  namespace: system-upgrade
  labels:
    k3s_upgrade: agent
spec:
  concurrency: 1
  channel: https://update.k3s.io/v1-release/channels/stable
  nodeSelector:
    matchExpressions:
      - {key: k3s_upgrade, operator: Exists}
      - {key: k3s_upgrade, operator: NotIn, values: ["disabled", "false"]}
      - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
  serviceAccountName: system-upgrade
  prepare:
    image: rancher/k3s-upgrade
    args: ["prepare", "k3s-server"]
  drain:
    force: true
    skipWaitForDeleteTimeout: 60
  upgrade:
    image: rancher/k3s-upgrade
---
# server plan
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-server
  namespace: system-upgrade
  labels:
    k3s_upgrade: server
spec:
  concurrency: 1
  channel: https://update.k3s.io/v1-release/channels/stable
  nodeSelector:
    matchExpressions:
      - {key: k3s_upgrade, operator: Exists}
      - {key: k3s_upgrade, operator: NotIn, values: ["disabled", "false"]}
      - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
  tolerations:
    - {key: node-role.kubernetes.io/master, effect: NoSchedule, operator: Exists}
    - {key: CriticalAddonsOnly, effect: NoExecute, operator: Exists}
  serviceAccountName: system-upgrade
  cordon: true
  upgrade:
    image: rancher/k3s-upgrade
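
Both plans gate on a k3s_upgrade node label, so only explicitly labeled nodes are upgraded; the server plan targets masters (cordoning them), while the agent plan drains and upgrades the rest. Instead of tracking the stable channel, a Plan can also pin an exact release via the version field; a sketch (the version string is illustrative, not from this commit):

apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-server-pinned
  namespace: system-upgrade
spec:
  concurrency: 1
  # `version` is used in place of `channel`; pick a release you have validated
  version: v1.21.3+k3s1
  nodeSelector:
    matchExpressions:
      - {key: k3s_upgrade, operator: Exists}
      - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
  tolerations:
    - {key: node-role.kubernetes.io/master, effect: NoSchedule, operator: Exists}
  serviceAccountName: system-upgrade
  cordon: true
  upgrade:
    image: rancher/k3s-upgrade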