feat: add module3-6

This commit is contained in:
Jan Schnurpfeil 2025-04-28 13:11:47 +02:00
parent 6d6273a33b
commit a4b002dc61
95 changed files with 1848 additions and 1 deletion

View file

@ -0,0 +1,16 @@
# Grafana Alloy
Docs: https://grafana.com/docs/alloy/latest/
GitHub Repo: https://github.com/grafana/alloy
```shell
cp values.yaml.example values.yaml
helm repo add grafana https://grafana.github.io/helm-charts --force-update
helm upgrade \
--install \
alloy grafana/alloy \
--namespace o11y \
--create-namespace \
--version 1.0.2 \
--values values.yaml
```

View file

@ -0,0 +1,154 @@
# Helm values for the grafana/alloy chart.
# The embedded Alloy (River) config below ships cluster metrics to a
# Prometheus remote-write endpoint and pod logs / cluster events to Loki.
alloy:
  configMap:
    content: |-
      // Write your Alloy config here:
      logging {
        level = "info"
        format = "logfmt"
      }
      // Replace every <terraform output ...> placeholder with the actual
      // value (e.g. from `terraform output -raw ...`) before deploying;
      // the config does not parse as-is.
      prometheus.remote_write "default" {
        endpoint {
          url = <terraform output o11y_metrics_push_url>
          basic_auth {
            username = <terraform output o11y_cluster_user>
            password = <terraform output o11y_cluster_password>
          }
        }
      }
      loki.write "default" {
        endpoint {
          url = <terraform output o11y_logs_push_url>
          basic_auth {
            username = <terraform output o11y_cluster_user>
            password = <terraform output o11y_cluster_password>
          }
        }
      }
      // Scrape all PodMonitor / ServiceMonitor objects in the cluster and
      // forward the samples to the remote-write endpoint above.
      prometheus.operator.podmonitors "services" {
        forward_to = [prometheus.remote_write.default.receiver]
      }
      prometheus.operator.servicemonitors "services" {
        forward_to = [prometheus.remote_write.default.receiver]
      }
      // discovery.kubernetes allows you to find scrape targets from Kubernetes resources.
      // It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.
      discovery.kubernetes "pod" {
        role = "pod"
      }
      // discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.
      // If no rules are defined, then the input targets are exported as-is.
      discovery.relabel "pod_logs" {
        targets = discovery.kubernetes.pod.targets
        // Label creation - "namespace" field from "__meta_kubernetes_namespace"
        rule {
          source_labels = ["__meta_kubernetes_namespace"]
          action = "replace"
          target_label = "namespace"
        }
        // Label creation - "pod" field from "__meta_kubernetes_pod_name"
        rule {
          source_labels = ["__meta_kubernetes_pod_name"]
          action = "replace"
          target_label = "pod"
        }
        // Label creation - "container" field from "__meta_kubernetes_pod_container_name"
        rule {
          source_labels = ["__meta_kubernetes_pod_container_name"]
          action = "replace"
          target_label = "container"
        }
        // Label creation - "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name"
        rule {
          source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"]
          action = "replace"
          target_label = "app"
        }
        // Label creation - "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name"
        // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name
        rule {
          source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"]
          action = "replace"
          target_label = "job"
          separator = "/"
          replacement = "$1"
        }
        // Label creation - "container" field from "__meta_kubernetes_pod_uid" and "__meta_kubernetes_pod_container_name"
        // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log
        rule {
          source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
          action = "replace"
          target_label = "__path__"
          separator = "/"
          replacement = "/var/log/pods/*$1/*.log"
        }
        // Label creation - "container_runtime" field from "__meta_kubernetes_pod_container_id"
        rule {
          source_labels = ["__meta_kubernetes_pod_container_id"]
          action = "replace"
          target_label = "container_runtime"
          regex = "^(\\S+):\\/\\/.+$"
          replacement = "$1"
        }
      }
      // loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API.
      loki.source.kubernetes "pod_logs" {
        targets = discovery.relabel.pod_logs.output
        forward_to = [loki.process.pod_logs.receiver]
      }
      // loki.process receives log entries from other Loki components, applies one or more processing stages,
      // and forwards the results to the list of receivers in the component's arguments.
      loki.process "pod_logs" {
        // Attach a static "cluster" label to every pod log line.
        stage.static_labels {
          values = {
            cluster = "scrumlr",
          }
        }
        forward_to = [loki.write.default.receiver]
      }
      // loki.source.kubernetes_events tails events from the Kubernetes API and converts them
      // into log lines to forward to other Loki components.
      loki.source.kubernetes_events "cluster_events" {
        job_name = "integrations/kubernetes/eventhandler"
        log_format = "logfmt"
        forward_to = [
          loki.process.cluster_events.receiver,
        ]
      }
      // loki.process receives log entries from other loki components, applies one or more processing stages,
      // and forwards the results to the list of receivers in the component's arguments.
      loki.process "cluster_events" {
        forward_to = [loki.write.default.receiver]
        // Attach a static "cluster" label to every event log line.
        stage.static_labels {
          values = {
            cluster = "scrumlr",
          }
        }
        stage.labels {
          values = {
            kubernetes_cluster_events = "job",
          }
        }
      }

View file

@ -0,0 +1,21 @@
# cert-manager
Docs: https://cert-manager.io/docs/
GitHub Repo: https://github.com/cert-manager/cert-manager
```sh
helm repo add jetstack https://charts.jetstack.io --force-update
helm upgrade \
--install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.17.2 \
--values values.yaml
```
Install [ClusterIssuer](https://cert-manager.io/docs/concepts/issuer/) for [Let's Encrypt](https://letsencrypt.org/)
```sh
kubectl apply -f clusterissuer-letsencrypt-production.yaml
```

View file

@ -0,0 +1,13 @@
# ACME ClusterIssuer that obtains certificates from the Let's Encrypt
# production environment, solving HTTP-01 challenges through the
# nginx ingress class.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    # NOTE(review): no `email` is configured, so Let's Encrypt cannot send
    # certificate-expiry or incident notices. Consider adding
    # `email: <ops address>` here.
    privateKeySecretRef:
      # Secret in which cert-manager stores the ACME account private key.
      name: letsencrypt-production
    solvers:
      - http01:
          ingress:
            ingressClassName: nginx

View file

@ -0,0 +1,2 @@
# Helm values for the jetstack/cert-manager chart.
# Let the chart install and manage the cert-manager CRDs itself.
crds:
  enabled: true

View file

@ -0,0 +1,15 @@
# ingress-nginx
Docs: https://kubernetes.github.io/ingress-nginx/
GitHub Repo: https://github.com/kubernetes/ingress-nginx
```sh
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx --force-update
helm upgrade \
ingress-nginx ingress-nginx/ingress-nginx \
--install \
--namespace ingress-nginx \
--create-namespace \
--version 4.12.1 \
--values values.yaml
```

View file

@ -0,0 +1,5 @@
# Helm values for the ingress-nginx chart.
# Expose controller metrics and create a ServiceMonitor so they can be
# picked up by Prometheus-operator-style scraping.
controller:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true

View file

@ -0,0 +1,15 @@
# NATS
Docs: https://docs.nats.io/
GitHub Repo: https://github.com/nats-io/k8s
```sh
helm repo add nats https://nats-io.github.io/k8s/helm/charts/ --force-update
helm upgrade \
--install \
nats nats/nats \
--namespace nats \
--create-namespace \
--version 1.3.3 \
--values values.yaml
```

View file

@ -0,0 +1,15 @@
# Helm values for the nats/nats chart.
# Run a 3-node NATS cluster, spread across nodes, with Prometheus metrics.
config:
  cluster:
    enabled: true
    replicas: 3
podTemplate:
  # Prefer spreading the NATS pods across distinct nodes (best-effort:
  # ScheduleAnyway does not block scheduling if the skew can't be met).
  topologySpreadConstraints:
    kubernetes.io/hostname:
      maxSkew: 1
      whenUnsatisfiable: ScheduleAnyway
promExporter:
  enabled: true
  podMonitor:
    enabled: true

View file

@ -0,0 +1,15 @@
# Prometheus Operator CRDs
Docs: https://prometheus-operator.dev/docs/getting-started/introduction/
GitHub Repo: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-operator-crds
```sh
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm upgrade \
--install \
prometheus-operator-crds prometheus-community/prometheus-operator-crds \
--namespace o11y \
--create-namespace \
--version 19.1.0 \
--values values.yaml
```

View file

@ -0,0 +1,31 @@
# Helm values for prometheus-community/prometheus-operator-crds.
# Only enable Service & Pod Monitor — the only CRDs the Alloy config's
# prometheus.operator.* components need; everything else is disabled.
crds:
  servicemonitors:
    enabled: true
  podmonitors:
    enabled: true
  alertmanagerconfigs:
    enabled: false
  alertmanagers:
    enabled: false
  probes:
    enabled: false
  prometheusagents:
    enabled: false
  prometheuses:
    enabled: false
  prometheusrules:
    enabled: false
  scrapeconfigs:
    enabled: false
  thanosrulers:
    enabled: false

View file

@ -0,0 +1,3 @@
# Non-secret environment for the scrumlr backend; loaded into the
# "scrumlr-backend" ConfigMap by the kustomize configMapGenerator.
# Lines starting with '#' are ignored by kustomize's env-file parser.
SCRUMLR_SERVER_PORT=8080
SCRUMLR_BASE_PATH=/api
SCRUMLR_SERVER_NATS_URL=nats.nats.svc.cluster.local:4222

View file

@ -0,0 +1,70 @@
# Deployment for the scrumlr API server (3 replicas, spread across nodes).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scrumlr-backend
  labels:
    app.kubernetes.io/name: "scrumlr"
    app.kubernetes.io/component: "backend"
    app.kubernetes.io/part-of: "scrumlr"
spec:
  replicas: 3
  selector:
    matchLabels:
      app.kubernetes.io/name: "scrumlr"
      app.kubernetes.io/component: "backend"
      app.kubernetes.io/part-of: "scrumlr"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: "scrumlr"
        app.kubernetes.io/component: "backend"
        app.kubernetes.io/part-of: "scrumlr"
    spec:
      # Best-effort spread of replicas across nodes (ScheduleAnyway does
      # not block scheduling when the skew cannot be satisfied).
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: "scrumlr"
              app.kubernetes.io/component: "backend"
              app.kubernetes.io/part-of: "scrumlr"
      containers:
        - name: backend
          image: ghcr.io/inovex/scrumlr.io/scrumlr-server:3.10.3
          args:
            - "/app/main"
            - "-disable-check-origin"
          resources:
            requests:
              cpu: "50m"
              memory: "200Mi"
            # Memory limit only; no CPU limit is set.
            limits:
              memory: "200Mi"
          # Allow up to 30 * 10s = 300s for the server to come up before
          # liveness/readiness checks take effect.
          startupProbe:
            httpGet:
              path: /api/health
              port: 8080
            failureThreshold: 30
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /api/health
              port: 8080
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8080
          # Plain config + secrets generated by the backend kustomization.
          envFrom:
            - configMapRef:
                name: scrumlr-backend
            - secretRef:
                name: scrumlr-backend
          env:
            # ECDSA key used for JWT signing, mounted from the
            # scrumlr-ecdsa-key secret created by the root kustomization.
            - name: SCRUMLR_PRIVATE_KEY
              valueFrom:
                secretKeyRef:
                  name: scrumlr-ecdsa-key
                  key: jwt.key
          ports:
            - containerPort: 8080

View file

@ -0,0 +1,24 @@
# Kustomization for the scrumlr backend: deployment, service, PDB, and
# generated ConfigMap/Secret built from the local env files.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment-scrumlr-backend.yaml
  - service-scrumlr-backend.yaml
  - poddisruptionbudget-backend.yaml
# Non-secret env vars -> ConfigMap "scrumlr-backend".
configMapGenerator:
  - name: scrumlr-backend
    options:
      labels:
        app.kubernetes.io/name: "scrumlr"
        app.kubernetes.io/component: "backend"
        app.kubernetes.io/part-of: "scrumlr"
    envs:
      - configmap-scrumlr-backend.env
# Secret env vars (database URL) -> Secret "scrumlr-backend".
secretGenerator:
  - name: scrumlr-backend
    options:
      labels:
        app.kubernetes.io/name: "scrumlr"
        app.kubernetes.io/component: "backend"
        app.kubernetes.io/part-of: "scrumlr"
    envs:
      - secret-scrumlr-backend.env

View file

@ -0,0 +1,11 @@
# Keep at least one backend pod available during voluntary disruptions
# (node drains, upgrades).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: backend
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: "scrumlr"
      app.kubernetes.io/component: "backend"
      app.kubernetes.io/part-of: "scrumlr"

View file

@ -0,0 +1 @@
# Secret environment for the scrumlr backend; loaded into the
# "scrumlr-backend" Secret by the kustomize secretGenerator.
# Replace user / PASSWORD / INSTANCE-ID with the real database
# credentials — and never commit the filled-in values.
SCRUMLR_SERVER_DATABASE_URL=postgres://user:PASSWORD@INSTANCE-ID.postgresql.eu01.onstackit.cloud:5432/scrumlr

View file

@ -0,0 +1,12 @@
# ClusterIP service fronting the backend pods on port 8080.
apiVersion: v1
kind: Service
metadata:
  name: scrumlr-backend
spec:
  selector:
    app.kubernetes.io/name: "scrumlr"
    app.kubernetes.io/component: "backend"
    app.kubernetes.io/part-of: "scrumlr"
  ports:
    - port: 8080
      targetPort: 8080

View file

@ -0,0 +1,3 @@
# Non-secret environment for the scrumlr frontend; loaded into the
# "scrumlr-frontend" ConfigMap by the kustomize configMapGenerator.
SCRUMLR_SERVER_URL=/api
SCRUMLR_SERVER_PORT=8080
SCRUMLR_SHOW_LEGAL_DOCUMENTS=true

View file

@ -0,0 +1,45 @@
# Deployment for the scrumlr web frontend (3 replicas, spread across nodes).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scrumlr-frontend
  labels:
    app.kubernetes.io/name: "scrumlr"
    app.kubernetes.io/component: "frontend"
    app.kubernetes.io/part-of: "scrumlr"
spec:
  replicas: 3
  selector:
    matchLabels:
      app.kubernetes.io/name: "scrumlr"
      app.kubernetes.io/component: "frontend"
      app.kubernetes.io/part-of: "scrumlr"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: "scrumlr"
        app.kubernetes.io/component: "frontend"
        app.kubernetes.io/part-of: "scrumlr"
    spec:
      # Best-effort spread of replicas across nodes.
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: "scrumlr"
              app.kubernetes.io/component: "frontend"
              app.kubernetes.io/part-of: "scrumlr"
      containers:
        - name: frontend
          image: ghcr.io/inovex/scrumlr.io/scrumlr-frontend:3.10.3
          resources:
            requests:
              cpu: "25m"
              memory: "100Mi"
            # Memory limit only; no CPU limit is set.
            limits:
              memory: "100Mi"
          # NOTE(review): unlike the backend deployment, no startup/liveness/
          # readiness probes are defined here — confirm whether the frontend
          # image exposes a health endpoint worth probing.
          envFrom:
            - configMapRef:
                name: scrumlr-frontend
          ports:
            - containerPort: 8080

View file

@ -0,0 +1,15 @@
# Kustomization for the scrumlr frontend: deployment, service, PDB, and a
# generated ConfigMap built from the local env file.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment-scrumlr-frontend.yaml
  - service-scrumlr-frontend.yaml
  - poddisruptionbudget-frontend.yaml
# Non-secret env vars -> ConfigMap "scrumlr-frontend".
configMapGenerator:
  - name: scrumlr-frontend
    options:
      labels:
        app.kubernetes.io/name: "scrumlr"
        app.kubernetes.io/component: "frontend"
        app.kubernetes.io/part-of: "scrumlr"
    envs:
      - configmap-scrumlr-frontend.env

View file

@ -0,0 +1,11 @@
# Keep at least one frontend pod available during voluntary disruptions
# (node drains, upgrades).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: frontend
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: "scrumlr"
      app.kubernetes.io/component: "frontend"
      app.kubernetes.io/part-of: "scrumlr"

View file

@ -0,0 +1,12 @@
# ClusterIP service fronting the frontend pods: external port 80 maps to
# the container's port 8080.
apiVersion: v1
kind: Service
metadata:
  name: scrumlr-frontend
spec:
  selector:
    app.kubernetes.io/name: "scrumlr"
    app.kubernetes.io/component: "frontend"
    app.kubernetes.io/part-of: "scrumlr"
  ports:
    - port: 80
      targetPort: 8080

View file

@ -0,0 +1,37 @@
# Ingress routing /api to the backend service and everything else to the
# frontend, with a Let's Encrypt TLS certificate issued by cert-manager.
# Replace both CHANGE-ME.domain.tld occurrences with the real hostname.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: scrumlr
  labels:
    app.kubernetes.io/name: "scrumlr"
    app.kubernetes.io/part-of: "scrumlr"
  annotations:
    nginx.ingress.kubernetes.io/limit-connections: "100"
    # Websocket optimization https://kubernetes.github.io/ingress-nginx/user-guide/miscellaneous/#websockets
    nginx.ingress.kubernetes.io/proxy-send-timeout: "7200"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "7200"
    # Have cert-manager issue the certificate into the scrumlr-tls secret.
    cert-manager.io/cluster-issuer: "letsencrypt-production"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - CHANGE-ME.domain.tld
      secretName: scrumlr-tls
  rules:
    - host: CHANGE-ME.domain.tld
      http:
        paths:
          # More specific /api prefix is matched before the catch-all /.
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: scrumlr-backend
                port:
                  number: 8080
          - path: /
            pathType: Prefix
            backend:
              service:
                name: scrumlr-frontend
                port:
                  number: 80

View file

@ -0,0 +1,14 @@
# Top-level kustomization: places everything in the "scrumlr" namespace
# and generates the JWT-signing key secret from a local jwt.key file.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: scrumlr
resources:
  - namespace-scrumlr.yaml
  - backend/
  - frontend/
  - ingress-scrumlr.yaml
# Create key: openssl ecparam -genkey -name secp521r1 -noout -out jwt.key
secretGenerator:
  - name: scrumlr-ecdsa-key
    files:
      - jwt.key

View file

@ -0,0 +1,4 @@
# Namespace all scrumlr workloads are deployed into (selected as the
# default namespace by the top-level kustomization).
apiVersion: v1
kind: Namespace
metadata:
  name: scrumlr