988a25ca by Adam Heath

Letsencrypt is working, and backup/restore of the generated secrets as well.
1 parent f3c25da7
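
The pieces fit together through labels: cert-manager's secretTemplate stamps the issued secrets, and the backup script selects on those same labels. A quick way to see what a backup run would pick up in the app's namespace (a sketch, not part of the commit; foo-app is the label value used in the example values further down):

kubectl get secret -l 'backup.cert-manager.brainfood.com=true,app.backup.cert-manager.brainfood.com=foo-app'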
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-secrets-do-backup
spec:
schedule: "13 * * * *"
startingDeadlineSeconds: 3600
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 10
failedJobsHistoryLimit: 10
jobTemplate:
spec:
template:
spec:
serviceAccountName: backup-secrets
restartPolicy: OnFailure
volumes:
- name: work-space
emptyDir: {}
- name: config
configMap:
name: backup-secrets-config
- name: auth
secret:
secretName: backup-secrets-auth
- name: scripts
configMap:
name: backup-secrets-scripts
defaultMode: 0755
- name: aws-config
configMap:
name: backup-secrets-config
items:
- key: config
path: config
- name: aws-credentials
secret:
secretName: backup-secrets-auth
items:
- key: credentials
path: credentials
initContainers:
- name: list-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["list-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: calculate-delete-s3
image: debian
command: ["/scripts/backup"]
args: ["calculate-delete-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: create-secrets-tarball
image: bitnami/kubectl
command: ["/scripts/backup"]
args: ["create-secrets-tarball"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: copy-to-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["copy-to-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
containers:
- name: show-backup
image: debian
command: ["/scripts/backup"]
args: ["show-backup"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: scripts
mountPath: /scripts
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./backup.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-secrets-do-backup
spec:
schedule: "13 * * * *"
startingDeadlineSeconds: 3600
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 10
failedJobsHistoryLimit: 10
jobTemplate:
spec:
template:
spec:
serviceAccountName: backup-secrets
restartPolicy: OnFailure
volumes:
- name: work-space
emptyDir: {}
- name: config
configMap:
name: backup-secrets-config
- name: auth
secret:
secretName: backup-secrets-auth
- name: scripts
configMap:
name: backup-secrets-scripts
defaultMode: 0755
- name: aws-config
configMap:
name: backup-secrets-config
items:
- key: config
path: config
- name: aws-credentials
secret:
secretName: backup-secrets-auth
items:
- key: credentials
path: credentials
initContainers:
- name: list-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["list-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: calculate-delete-s3
image: debian
command: ["/scripts/backup"]
args: ["calculate-delete-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: create-secrets-tarball
image: bitnami/kubectl
command: ["/scripts/backup"]
args: ["create-secrets-tarball"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: copy-to-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["copy-to-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
containers:
- name: show-backup
image: debian
command: ["/scripts/backup"]
args: ["show-backup"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: scripts
mountPath: /scripts
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backup-secrets
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: backup-secrets
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
verbs: ["get", "create", "list", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: backup-secrets
subjects:
# You can specify more than one "subject"
- kind: ServiceAccount
name: backup-secrets
apiGroup: ""
roleRef:
kind: Role
name: backup-secrets
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-secrets-config
data:
APP_NAME: test
AWS_PROFILE: "default"
S3_ENABLED: "true"
S3_BUCKET: "brainfood-bikeshed"
S3_PREFIX: "um-adam/secret-backup"
KEEP_COUNT: "30"
SECRETS_TARBALL: ""
config: ""
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-secrets-scripts
---
apiVersion: v1
kind: Secret
metadata:
name: backup-secrets-auth
stringData:
credentials: ""
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./config.yaml
- ./backup.yaml
configMapGenerator:
- name: backup-secrets-scripts
behavior: merge
options:
disableNameSuffixHash: true
files:
- ../../scripts/backup
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./restore.yaml
apiVersion: batch/v1
kind: Job
metadata:
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation
# annotations:
# helm.sh/hook: pre-install
# helm.sh/hook-delete-policy: before-hook-creation
# helm.sh/hook-weight: "-5"
name: backup-secrets-do-restore
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: backup-secrets
containers:
- name: show-backup
command: ["/scripts/backup"]
args: ["show-backup"]
image: debian
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /scripts
name: scripts
initContainers:
- name: list-s3
command: ["/scripts/backup"]
args: ["list-s3"]
image: peakcom/s5cmd
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /config
name: config
- mountPath: /auth
name: auth
- mountPath: /scripts
name: scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: calculate-restore-s3
command: ["/scripts/backup"]
args: ["calculate-restore-s3"]
image: debian
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /config
name: config
- mountPath: /auth
name: auth
- mountPath: /scripts
name: scripts
- name: copy-from-s3
command: ["/scripts/backup"]
args: ["copy-from-s3"]
image: peakcom/s5cmd
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /config
name: config
- mountPath: /auth
name: auth
- mountPath: /scripts
name: scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: restore-secrets
command: ["/scripts/backup"]
args: ["restore-secrets"]
image: bitnami/kubectl
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /scripts
name: scripts
volumes:
- name: work-space
emptyDir: {}
- name: aws-config
configMap:
name: backup-secrets-config
items:
- key: config
path: config
- name: aws-credentials
secret:
secretName: backup-secrets-auth
items:
- key: credentials
path: credentials
- name: config
configMap:
name: backup-secrets-config
- name: auth
secret:
secretName: backup-secrets-auth
- name: scripts
configMap:
defaultMode: 0755
name: backup-secrets-scripts
name: test
namespace: default
aws:
config: |-
[default]
region=us-west-2
credentials: |-
[default]
aws_access_key_id="xxx"
aws_secret_access_key="xxx"
bases:
- ../common/environments.yaml
---
releases:
- name: {{ .Values.name }}-backup-secrets-base
chart: charts/base
namespace: {{ .Values.namespace }}
values:
- namePrefix: "{{ .Values.name }}-"
strategicMergePatches:
- apiVersion: v1
kind: ConfigMap
metadata:
namespace: {{ .Values.namespace }}
name: {{ .Values.name }}-backup-secrets-config
data:
APP_NAME: {{ .Values.name }}-gateway
config: {{ .Values.aws.config | quote }}
- apiVersion: v1
kind: Secret
metadata:
namespace: {{ .Values.namespace }}
name: {{ .Values.name }}-backup-secrets-auth
stringData:
credentials: {{ .Values.aws.credentials | quote }}
- name: {{ .Values.name }}-backup-secrets-restore
chart: charts/restore
namespace: {{ .Values.namespace }}
needs:
- {{ .Values.name }}-backup-secrets-base
values:
- namePrefix: "{{ .Values.name }}-"
strategicMergePatches:
# Have to adjust these prefixed names manually because helmfile/chartify doesn't handle the kustomize namePrefix correctly when helm hooks are involved
- apiVersion: batch/v1
kind: Job
metadata:
namespace: {{ .Values.namespace }}
name: {{ .Values.name }}-backup-secrets-do-restore
spec:
template:
spec:
serviceAccountName: {{ .Values.name }}-backup-secrets
volumes:
- name: aws-config
configMap:
name: {{ .Values.name }}-backup-secrets-config
- name: aws-credentials
secret:
secretName: {{ .Values.name }}-backup-secrets-auth
- name: config
configMap:
name: {{ .Values.name }}-backup-secrets-config
- name: auth
secret:
secretName: {{ .Values.name }}-backup-secrets-auth
- name: scripts
configMap:
name: {{ .Values.name }}-backup-secrets-scripts
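
With the test values above (name: test, namespace: default), the namePrefix plus the manual strategicMergePatches should leave every generated resource named test-backup-secrets-*; a quick post-deploy check (a sketch, not part of the commit):

kubectl -n default get cronjob,job,serviceaccount,configmap,secret | grep test-backup-secrets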
#!/bin/sh
set -ex
setup_s3() {
AWS_PROFILE="$(cat /config/AWS_PROFILE)"
S3_BUCKET="$(cat /config/S3_BUCKET)"
S3_PREFIX="$(cat /config/S3_PREFIX)"
export AWS_PROFILE
}
case "$1" in
# restore/backup
(list-s3)
# s5cmd image doesn't have many tools installed
S3_ENABLED="$(cat /config/S3_ENABLED)"
if [ true = "$S3_ENABLED" ]; then
setup_s3
if /s5cmd ls "s3://$S3_BUCKET/$S3_PREFIX/*" > /work-space/s3-file-listing.tmp; then
mv /work-space/s3-file-listing.tmp /work-space/s3-file-listing
echo "$S3_BUCKET" > /work-space/S3_BUCKET
echo "$S3_PREFIX" > /work-space/S3_PREFIX
cat /work-space/s3-file-listing
fi
fi
;;
# backup
(calculate-delete-s3)
# NOTE: still broken - KEEP_COUNT is read but nothing is ever pruned or deleted yet
KEEP_COUNT="$(cat /config/KEEP_COUNT)"
if [ -e /work-space/s3-file-listing ] && [ "z$KEEP_COUNT" != "z" ]; then
S3_BUCKET="$(cat /work-space/S3_BUCKET)"
S3_PREFIX="$(cat /work-space/S3_PREFIX)"
sort /work-space/s3-file-listing
if [ "z$SECRETS_TARBALL" = z ]; then
# match the date-stamped secret tarballs (same pattern as the restore path); gawk is not in the stock debian image
SECRETS_TARBALL="$(grep -E '[0-9]{4}\/[0-9]{2}\/[0-9]{8}-[0-9]{6}.tar.gz' /work-space/s3-file-listing | awk '{print $4}' | sort | tail -n 1)"
if [ "z$SECRETS_TARBALL" != z ]; then
SECRETS_TARBALL="s3://$S3_BUCKET/$S3_PREFIX/$SECRETS_TARBALL"
fi
fi
fi
if [ "z$SECRETS_TARBALL" != z ]; then
echo "$SECRETS_TARBALL" > /work-space/secrets-tarball.file
fi
;;
# backup
(create-secrets-tarball)
APP_NAME="$(cat /config/APP_NAME)"
secret_template="{{ \$item.metadata.namespace }}:{{ \$item.metadata.name }}{{ \"\n\" }}"
secret_template_list="{{ range \$index, \$item := .items }}$secret_template{{ end }}"
kubectl auth can-i --list
_get_backup_secrets() {
# both label requirements must go in one selector; a repeated -l flag is not additive
kubectl get secret -l "backup.cert-manager.brainfood.com=true,app.backup.cert-manager.brainfood.com=$APP_NAME" -o go-template --template "$secret_template_list"
}
date="$(date)"
timestamp_year_month="$(date -d "$date" '+%Y/%m')"
timestamp_name="$(date -d "$date" '+%Y%m%d-%H%M%S')"
mkdir -p "/work-space/$timestamp_name"
_get_backup_secrets | while IFS=":" read -r namespace name; do
mkdir -p "/work-space/$timestamp_name/$namespace"
kubectl get -n "$namespace" secret "$name" -o yaml > "/work-space/$timestamp_name/$namespace/$name.yaml"
done
tar -C "/work-space/$timestamp_name" -f - -c . | gzip -9 > "/work-space/$timestamp_name.tar.gz"
echo "$timestamp_name.tar.gz" > /work-space/secrets-tarball.file
echo "$timestamp_year_month" > /work-space/timestamp_year_month
;;
# backup
(copy-to-s3)
# s5cmd image doesn't have many tools installed
S3_ENABLED="$(cat /config/S3_ENABLED)"
if [ true = "$S3_ENABLED" ]; then
SECRETS_TARBALL="$(cat /work-space/secrets-tarball.file)"
timestamp_year_month="$(cat /work-space/timestamp_year_month)"
if [ "z$SECRETS_TARBALL" != z ]; then
setup_s3
/s5cmd cp "/work-space/$SECRETS_TARBALL" "s3://$S3_BUCKET/$S3_PREFIX/$timestamp_year_month/$SECRETS_TARBALL"
fi
:
fi
;;
# restore
(calculate-restore-s3)
if [ -e /work-space/s3-file-listing ]; then
S3_BUCKET="$(cat /work-space/S3_BUCKET)"
S3_PREFIX="$(cat /work-space/S3_PREFIX)"
sort /work-space/s3-file-listing
ls -alR /work-space
if [ "z$SECRETS_TARBALL" = z ]; then
SECRETS_TARBALL="$(grep -E '[0-9]{4}\/[0-9]{2}\/[0-9]{8}-[0-9]{6}.tar.gz' /work-space/s3-file-listing | awk '{print $4}' | sort | tail -n 1)"
if [ "z$SECRETS_TARBALL" != z ]; then
SECRETS_TARBALL="s3://$S3_BUCKET/$S3_PREFIX/$SECRETS_TARBALL"
fi
fi
fi
if [ "z$SECRETS_TARBALL" != z ]; then
echo "$SECRETS_TARBALL" > /work-space/secrets-tarball.file
fi
;;
# restore
(copy-from-s3)
# s5cmd image doesn't have many tools installed
S3_ENABLED="$(cat /config/S3_ENABLED)"
if [ true = "$S3_ENABLED" ]; then
setup_s3
if [ -s /work-space/secrets-tarball.file ]; then
/s5cmd cp "$(cat /work-space/secrets-tarball.file)" /work-space/secrets.tar.gz
fi
fi
;;
# restore
(restore-secrets)
if [ -e /work-space/secrets.tar.gz ]; then
rm -rf /work-space/secret-restore
mkdir /work-space/secret-restore
tar -zxC /work-space/secret-restore -f /work-space/secrets.tar.gz
# -R recurses into the per-namespace directories extracted from the tarball
kubectl create --dry-run=client -o yaml -R -f /work-space/secret-restore | kubectl apply -f -
fi
;;
esac
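
To exercise the backup outside the hourly schedule, a one-off Job can be spawned from the CronJob and the final container tailed (a sketch, assuming the unprefixed names from the kustomize base):

kubectl create job backup-secrets-manual --from=cronjob/backup-secrets-do-backup
kubectl logs -f job/backup-secrets-manual -c show-backup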
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-secrets-do-backup
spec:
schedule: "13 * * * *"
startingDeadlineSeconds: 3600
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 10
failedJobsHistoryLimit: 10
jobTemplate:
spec:
template:
spec:
serviceAccountName: backup-secrets
restartPolicy: OnFailure
volumes:
- name: work-space
emptyDir: {}
- name: config
configMap:
name: backup-secrets-config
- name: auth
secret:
secretName: backup-secrets-auth
- name: scripts
configMap:
name: backup-secrets-scripts
defaultMode: 0755
- name: aws-config
configMap:
name: backup-secrets-config
items:
- key: config
path: config
- name: aws-credentials
secret:
secretName: backup-secrets-auth
items:
- key: credentials
path: credentials
initContainers:
- name: list-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["list-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: calculate-delete-s3
image: debian
command: ["/scripts/backup"]
args: ["calculate-delete-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: create-secrets-tarball
image: bitnami/kubectl
command: ["/scripts/backup"]
args: ["create-secrets-tarball"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: copy-to-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["copy-to-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
containers:
- name: show-backup
image: debian
command: ["/scripts/backup"]
args: ["show-backup"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: scripts
mountPath: /scripts
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backup-secrets
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: backup-secrets
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
verbs: ["get", "create", "list", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: backup-secrets
subjects:
# You can specify more than one "subject"
- kind: ServiceAccount
name: backup-secrets
apiGroup: ""
roleRef:
kind: Role
name: backup-secrets
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-secrets-config
data:
APP_NAME: test
AWS_PROFILE: "default"
S3_ENABLED: "true"
S3_BUCKET: "brainfood-bikeshed"
S3_PREFIX: "um-adam/secret-backup"
KEEP_COUNT: "30"
SECRETS_TARBALL: ""
config: ""
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-secrets-scripts
---
apiVersion: v1
kind: Secret
metadata:
name: backup-secrets-auth
stringData:
credentials: ""
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./config.yaml
- ./backup.yaml
configMapGenerator:
- name: backup-secrets-scripts
behavior: merge
options:
disableNameSuffixHash: true
files:
- ../../scripts/backup
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-secrets-do-backup
annotations:
helm.sh/hook: post-install,post-upgrade
spec:
schedule: "13 * * * *"
startingDeadlineSeconds: 3600
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 10
failedJobsHistoryLimit: 10
jobTemplate:
spec:
template:
spec:
serviceAccountName: backup-secrets
restartPolicy: OnFailure
volumes:
- name: work-space
emptyDir: {}
- name: config
configMap:
name: backup-secrets-config
- name: auth
secret:
secretName: backup-secrets-auth
- name: scripts
configMap:
name: backup-secrets-scripts
defaultMode: 0755
initContainers:
- name: list-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["list-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
- name: calculate-delete-s3
image: debian
command: ["/scripts/backup"]
args: ["calculate-delete-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: scripts
mountPath: /scripts
- name: create-secrets-tarball
image: bitnami/kubectl
command: ["/scripts/backup"]
args: ["create-secrets-tarball"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: scripts
mountPath: /scripts
- name: copy-to-s3
image: peakcom/s5cmd
command: ["/scripts/backup"]
args: ["copy-to-s3"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: config
mountPath: /config
- name: auth
mountPath: /auth
- name: scripts
mountPath: /scripts
containers:
- name: show-backup
image: debian
command: ["/scripts/backup"]
args: ["show-backup"]
volumeMounts:
- name: work-space
mountPath: /work-space
- name: scripts
mountPath: /scripts
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backup-secrets
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "-6"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: backup-secrets
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "-6"
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
verbs: ["get", "create", "list", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: backup-secrets
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "-6"
subjects:
# You can specify more than one "subject"
- kind: ServiceAccount
name: backup-secrets
apiGroup: ""
roleRef:
kind: Role
name: backup-secrets
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-secrets-config
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "-6"
data:
S3_ENABLED: "true"
S3_REGION: "us-west-2"
S3_BUCKET: "brainfood-bikeshed"
S3_PREFIX: "um-adam/secret-backup"
KEEP_COUNT: "30"
SECRETS_TARBALL: ""
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backup-secrets-scripts
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "-6"
---
apiVersion: v1
kind: Secret
metadata:
name: backup-secrets-auth
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "-6"
stringData:
AWS_ACCESS_KEY: "AKIAYF7A6NI4N2CAG6V4"
AWS_SECRET_ACCESS_KEY: "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"
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./restore.yaml
apiVersion: batch/v1
kind: Job
metadata:
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation
# annotations:
# helm.sh/hook: pre-install
# helm.sh/hook-delete-policy: before-hook-creation
# helm.sh/hook-weight: "-5"
name: backup-secrets-do-restore
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: backup-secrets
containers:
- name: show-backup
command: ["/scripts/backup"]
args: ["show-backup"]
image: debian
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /scripts
name: scripts
initContainers:
- name: list-s3
command: ["/scripts/backup"]
args: ["list-s3"]
image: peakcom/s5cmd
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /config
name: config
- mountPath: /auth
name: auth
- mountPath: /scripts
name: scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: calculate-restore-s3
command: ["/scripts/backup"]
args: ["calculate-restore-s3"]
image: debian
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /config
name: config
- mountPath: /auth
name: auth
- mountPath: /scripts
name: scripts
- name: copy-from-s3
command: ["/scripts/backup"]
args: ["copy-from-s3"]
image: peakcom/s5cmd
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /config
name: config
- mountPath: /auth
name: auth
- mountPath: /scripts
name: scripts
- mountPath: /root/.aws/config
name: aws-config
subPath: config
- mountPath: /root/.aws/credentials
name: aws-credentials
subPath: credentials
- name: restore-secrets
command: ["/scripts/backup"]
args: ["restore-secrets"]
image: bitnami/kubectl
volumeMounts:
- mountPath: /work-space
name: work-space
- mountPath: /scripts
name: scripts
volumes:
- name: work-space
emptyDir: {}
- name: aws-config
configMap:
name: backup-secrets-config
items:
- key: config
path: config
- name: aws-credentials
secret:
secretName: backup-secrets-auth
items:
- key: credentials
path: credentials
- name: config
configMap:
name: backup-secrets-config
- name: auth
secret:
secretName: backup-secrets-auth
- name: scripts
configMap:
defaultMode: 0755
name: backup-secrets-scripts
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
......
@@ -5,3 +5,4 @@ resources:
 - ./gateway.yaml
 - ./issuers.yaml
+- ./certificates.yaml
......
 name: test
 namespace: default
 gatewayClassName: istio
-baseDomain: example.com
-subDomains:
-- auth.example.com
-- www.example.com
+base:
+secretTemplate:
+labels:
+"app.backup.cert-manager.brainfood.com": "foo-app"
+"backup.cert-manager.brainfood.com": "true"
+domain: app.local
+sub:
+secretTemplate:
+labels:
+"app.backup.cert-manager.brainfood.com": "foo-app"
+"backup.cert-manager.brainfood.com": "true"
+domains:
+- auth.app.local
+- www.app.local
 letsEncrypt:
-email: name@example.com
+email: user@example.com
+acmeAccountSecretTemplate:
+labels:
+"app.backup.cert-manager.brainfood.com": "foo-app"
+"backup.cert-manager.brainfood.com": "true"
......
@@ -22,7 +22,7 @@ releases:
 - op: replace
 path: /spec/dnsNames
 value:
-- {{ .Values.baseDomain }}
+- {{ .Values.base.domain }}
 - target:
 version: v1
 group: cert-manager.io
@@ -32,7 +32,7 @@ releases:
 patch:
 - op: replace
 path: /spec/dnsNames
-value: {{ .Values.subDomains | toYaml | nindent 14 }}
+value: {{ .Values.sub.domains | toYaml | nindent 14 }}
 - target:
 version: v1beta1
 group: gateway.networking.k8s.io
@@ -45,13 +45,13 @@ releases:
 value: {{ .Values.gatewayClassName }}
 - op: replace
 path: /spec/listeners/1/hostname
-value: {{ .Values.baseDomain }}
+value: {{ .Values.base.domain }}
 - op: replace
 path: /spec/listeners/1/tls/certificateRefs/0/name
 value: {{ .Values.name }}-exact-cert
 - op: replace
 path: /spec/listeners/2/hostname
-value: "*.{{ .Values.baseDomain }}"
+value: "*.{{ .Values.base.domain }}"
 - op: replace
 path: /spec/listeners/2/tls/certificateRefs/0/name
 value: {{ .Values.name }}-subs-cert
@@ -85,6 +85,7 @@ releases:
 issuerRef:
 name: {{ .Values.name }}-letsencrypt-staging
 secretName: {{ .Values.name }}-exact-cert
+secretTemplate: {{ .Values.base.secretTemplate | toYaml | nindent 12 }}
 - apiVersion: cert-manager.io/v1
 kind: Certificate
 metadata:
@@ -94,6 +95,7 @@ releases:
 issuerRef:
 name: {{ .Values.name }}-letsencrypt-staging
 secretName: {{ .Values.name }}-subs-cert
+secretTemplate: {{ .Values.sub.secretTemplate | toYaml | nindent 12 }}
 - apiVersion: cert-manager.io/v1
 kind: Issuer
 metadata:
@@ -114,4 +116,3 @@ releases:
 email: {{ .Values.letsEncrypt.email }}
 privateKeySecretRef:
 name: {{ .Values.name }}-letsencrypt-account
......
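
Once cert-manager re-issues the certificates with the secretTemplate patches above, the generated TLS secrets carry the backup labels, which is what ties them into the backup CronJob; a quick check (a sketch, assuming .Values.name is test and the default namespace from the test values):

kubectl -n default get secret test-exact-cert test-subs-cert --show-labels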
#!/bin/sh
set -ex
setup_s3() {
AWS_PROFILE="$(cat /config/AWS_PROFILE)"
S3_BUCKET="$(cat /config/S3_BUCKET)"
S3_PREFIX="$(cat /config/S3_PREFIX)"
export AWS_PROFILE
}
case "$1" in
# restore/backup
(list-s3)
# s5cmd image doesn't have many tools installed
S3_ENABLED="$(cat /config/S3_ENABLED)"
if [ true = "$S3_ENABLED" ]; then
setup_s3
if /s5cmd ls "s3://$S3_BUCKET/$S3_PREFIX/*" > /work-space/s3-file-listing.tmp; then
mv /work-space/s3-file-listing.tmp /work-space/s3-file-listing
echo "$S3_BUCKET" > /work-space/S3_BUCKET
echo "$S3_PREFIX" > /work-space/S3_PREFIX
cat /work-space/s3-file-listing
fi
fi
;;
# backup
(calculate-delete-s3)
# NOTE: still broken - KEEP_COUNT is read but nothing is ever pruned or deleted yet
KEEP_COUNT="$(cat /config/KEEP_COUNT)"
if [ -e /work-space/s3-file-listing ] && [ "z$KEEP_COUNT" != "z" ]; then
S3_BUCKET="$(cat /work-space/S3_BUCKET)"
S3_PREFIX="$(cat /work-space/S3_PREFIX)"
sort /work-space/s3-file-listing
if [ "z$SECRETS_TARBALL" = z ]; then
# match the date-stamped secret tarballs (same pattern as the restore path); gawk is not in the stock debian image
SECRETS_TARBALL="$(grep -E '[0-9]{4}\/[0-9]{2}\/[0-9]{8}-[0-9]{6}.tar.gz' /work-space/s3-file-listing | awk '{print $4}' | sort | tail -n 1)"
if [ "z$SECRETS_TARBALL" != z ]; then
SECRETS_TARBALL="s3://$S3_BUCKET/$S3_PREFIX/$SECRETS_TARBALL"
fi
fi
fi
if [ "z$SECRETS_TARBALL" != z ]; then
echo "$SECRETS_TARBALL" > /work-space/secrets-tarball.file
fi
;;
# backup
(create-secrets-tarball)
APP_NAME="$(cat /config/APP_NAME)"
secret_template="{{ \$item.metadata.namespace }}:{{ \$item.metadata.name }}{{ \"\n\" }}"
secret_template_list="{{ range \$index, \$item := .items }}$secret_template{{ end }}"
kubectl auth can-i --list
_get_backup_secrets() {
# both label requirements must go in one selector; a repeated -l flag is not additive
kubectl get secret -l "backup.cert-manager.brainfood.com=true,app.backup.cert-manager.brainfood.com=$APP_NAME" -o go-template --template "$secret_template_list"
}
date="$(date)"
timestamp_year_month="$(date -d "$date" '+%Y/%m')"
timestamp_name="$(date -d "$date" '+%Y%m%d-%H%M%S')"
mkdir -p "/work-space/$timestamp_name"
_get_backup_secrets | while IFS=":" read -r namespace name; do
mkdir -p "/work-space/$timestamp_name/$namespace"
kubectl get -n "$namespace" secret "$name" -o yaml > "/work-space/$timestamp_name/$namespace/$name.yaml"
done
tar -C "/work-space/$timestamp_name" -f - -c . | gzip -9 > "/work-space/$timestamp_name.tar.gz"
echo "$timestamp_name.tar.gz" > /work-space/secrets-tarball.file
echo "$timestamp_year_month" > /work-space/timestamp_year_month
;;
# backup
(copy-to-s3)
# s5cmd image doesn't have many tools installed
S3_ENABLED="$(cat /config/S3_ENABLED)"
if [ true = "$S3_ENABLED" ]; then
SECRETS_TARBALL="$(cat /work-space/secrets-tarball.file)"
timestamp_year_month="$(cat /work-space/timestamp_year_month)"
if [ "z$SECRETS_TARBALL" != z ]; then
setup_s3
/s5cmd cp "/work-space/$SECRETS_TARBALL" "s3://$S3_BUCKET/$S3_PREFIX/$timestamp_year_month/$SECRETS_TARBALL"
fi
:
fi
;;
# restore
(calculate-restore-s3)
if [ -e /work-space/s3-file-listing ]; then
S3_BUCKET="$(cat /work-space/S3_BUCKET)"
S3_PREFIX="$(cat /work-space/S3_PREFIX)"
sort /work-space/s3-file-listing
ls -alR /work-space
if [ "z$SECRETS_TARBALL" = z ]; then
SECRETS_TARBALL="$(grep -E '[0-9]{4}\/[0-9]{2}\/[0-9]{8}-[0-9]{6}.tar.gz' /work-space/s3-file-listing | awk '{print $4}' | sort | tail -n 1)"
if [ "z$SECRETS_TARBALL" != z ]; then
SECRETS_TARBALL="s3://$S3_BUCKET/$S3_PREFIX/$SECRETS_TARBALL"
fi
fi
fi
if [ "z$SECRETS_TARBALL" != z ]; then
echo "$SECRETS_TARBALL" > /work-space/secrets-tarball.file
fi
;;
# restore
(copy-from-s3)
# s5cmd image doesn't have many tools installed
S3_ENABLED="$(cat /config/S3_ENABLED)"
if [ true = "$S3_ENABLED" ]; then
setup_s3
if [ -s /work-space/secrets-tarball.file ]; then
/s5cmd cp "$(cat /work-space/secrets-tarball.file)" /work-space/secrets.tar.gz
fi
fi
;;
# restore
(restore-secrets)
if [ -e /work-space/secrets.tar.gz ]; then
rm -rf /work-space/secret-restore
mkdir /work-space/secret-restore
tar -zxC /work-space/secret-restore -f /work-space/secrets.tar.gz
# -R recurses into the per-namespace directories extracted from the tarball
kubectl create --dry-run=client -o yaml -R -f /work-space/secret-restore | kubectl apply -f -
fi
;;
esac