# postgres-start-stop-blueprint.yaml
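#
# Kanister Blueprint for PostgreSQL backups using backup mode (start/stop):
# a helper pod keeps a long-lived psql session open through a named pipe,
# pg_backup_start puts the database into backup mode, CopyVolumeData copies
# the data volume to object storage, and pg_backup_stop ends backup mode.
# The restore action scales the StatefulSet down, restores the volume data,
# and scales it back up; the delete action removes the backup artifact.
#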
apiVersion: cr.kanister.io/v1alpha1
kind: Blueprint
metadata:
  name: postgres-bp
actions:
  backup:
    kind: StatefulSet
    outputArtifacts:
      cloudObject:
        keyValue:
          backupArtifactLocation: "{{ .Phases.copyFiles.Output.backupArtifactLocation }}"
          backupID: "{{ .Phases.copyFiles.Output.backupID }}"
          backupTag: "{{ .Phases.copyFiles.Output.backupTag }}"
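          ## .StatefulSet.PersistentVolumeClaims maps each pod name to its
          ## PVCs; the template below picks the first PVC name of pod 0.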
          pvc: '{{- range $key, $_ := index .StatefulSet.PersistentVolumeClaims (index .StatefulSet.Pods 0) -}}{{$key}}{{break}}{{end}}'
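    ## deferPhase runs after all phases, whether they succeed or fail,
    ## and cleans up the session pod created by createBackupPod.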
    deferPhase:
      func: KubeOps
      args:
        operation: delete
        objectReference:
          apiVersion: v1
          resource: "pods"
          name: "{{ .Phases.createBackupPod.Output.name }}"
          namespace: '{{ .StatefulSet.Namespace }}'
    phases:
    - name: createBackupPod
      func: KubeOps
      objects:
        pgSecret:
          kind: Secret
          name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql'
          namespace: '{{ .StatefulSet.Namespace }}'
      args:
        operation: create
        namespace: '{{ .StatefulSet.Namespace }}'
        spec: |-
          apiVersion: v1
          kind: Pod
          metadata:
            generateName: postgres-backup-session-
          spec:
            restartPolicy: Never
            containers:
            - name: container
              image: bitnami/postgresql:16
              command:
              - bash
              - -o
              - errexit
              - -o
              - pipefail
              - -c
              - |
                export PGHOST='{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql.{{ .StatefulSet.Namespace }}.svc.cluster.local'
                export PGUSER='postgres'
                export PGPASSWORD='{{ index .Phases.createBackupPod.Secrets.pgSecret.Data "postgres-password" | toString }}'
                ## Create a named pipe (FIFO) to send commands to psql
                mkfifo /tmp/pg_in
                ## Create "holder" process to keep pg_in open
                while sleep 1; do :; done >/tmp/pg_in &
                ## Save "holder" PID to a file to kill it later
                echo $! > /tmp/holder_pid
                ## Run psql session reading from pg_in and writing to pg_out
                ## Using tee here to keep the pod logs (might need to replace with just `> /tmp/pg_out`)
                ## TODO: should we track stderr here?
                cat /tmp/pg_in | psql -U ${PGUSER} | tee /tmp/pg_out
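    ## Wait for the session pod's containers to become ready before exec-ing
    ## into it; the condition is true once ContainersReady reports "True".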
    - func: WaitV2
      name: waitForPodReady
      args:
        timeout: 5m
        conditions:
          anyOf:
          - condition: '{{ $available := false }}{{ range $condition := $.status.conditions }}{{ if and (eq .type "ContainersReady") (eq .status "True") }}{{ $available = true }}{{ end }}{{ end }}{{ $available }}'
            objectReference:
              apiVersion: "v1"
              name: "{{ .Phases.createBackupPod.Output.name }}"
              namespace: '{{ .StatefulSet.Namespace }}'
              resource: "pods"
    - name: startBackup
      func: KubeExec
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        pod: "{{ .Phases.createBackupPod.Output.name }}"
        command:
        - bash
        - -o
        - errexit
        - -o
        - pipefail
        - -c
        - |
          ## Send pg_backup_start command to the psql session
          echo "SELECT pg_backup_start(label => 'kanister_backup', fast => false);" > /tmp/pg_in
          ## Block until the result appears in pg_out to confirm the operation completed
          ## TODO: maybe there's a better way to fail/log here?
          grep -q pg_backup_start <(tail -f /tmp/pg_out)
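    ## While backup mode is active, copy the data volume to object storage.
    ## Note: CopyVolumeData reads object store credentials from the Profile
    ## referenced by the ActionSet.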
    - name: copyFiles
      func: CopyVolumeData
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        ## TODO: maybe there's a better way of doing that in go templates
        volume: '{{- range $key, $_ := index .StatefulSet.PersistentVolumeClaims (index .StatefulSet.Pods 0) -}}{{$key}}{{break}}{{end}}'
        # volume: '{{ index .StatefulSet.PersistentVolumeClaims 0 }}'
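        ## NOTE: placeholder; point this at your object store. The same prefix
        ## is referenced by the restore and delete actions below.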
        dataArtifactPrefix: s3-bucket/path/artifactPrefix
    - name: stopBackup
      func: KubeExec
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        pod: "{{ .Phases.createBackupPod.Output.name }}"
        command:
        - bash
        - -o
        - errexit
        - -o
        - pipefail
        - -c
        - |
          ## Send pg_backup_stop command to the psql session
          echo "SELECT * FROM pg_backup_stop(wait_for_archive => true);" > /tmp/pg_in
          ## Block until the backup label appears in pg_out to confirm the operation completed
          ## TODO: maybe there's a better way to fail/log here?
          grep -q "LABEL: kanister_backup" <(tail -f /tmp/pg_out)
  restore:
    kind: StatefulSet
    inputArtifactNames:
    - cloudObject
    phases:
    - func: ScaleWorkload
      name: ShutdownApplication
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        name: '{{ .StatefulSet.Name }}'
        kind: StatefulSet
        replicas: 0
    - func: RestoreData
      name: RestoreFromObjectStore
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        # pod: '{{ index .StatefulSet.Pods 0 }}'
        volumes:
          '{{ .ArtifactsIn.cloudObject.KeyValue.pvc }}': '/mnt/vol_data/{{ .ArtifactsIn.cloudObject.KeyValue.pvc }}'
        image: ghcr.io/kanisterio/kanister-tools:0.110.0
        backupArtifactPrefix: s3-bucket/path/artifactPrefix
        backupTag: '{{ .ArtifactsIn.cloudObject.KeyValue.backupTag }}'
    deferPhase:
      func: ScaleWorkload
      name: StartupApplication
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        name: '{{ .StatefulSet.Name }}'
        kind: StatefulSet
        replicas: '{{ len .StatefulSet.Pods }}'
  delete:
    inputArtifactNames:
    - cloudObject
    phases:
    - func: DeleteData
      name: deleteFromObjectStore
      args:
        namespace: '{{ .StatefulSet.Namespace }}'
        backupArtifactPrefix: s3-bucket/path/artifactPrefix
        backupID: "{{ .ArtifactsIn.cloudObject.KeyValue.backupID }}"
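#
# Example usage (a minimal sketch, not part of the blueprint): an ActionSet
# that invokes the backup action. The "kanister" namespace, "s3-profile"
# Profile, and "postgres-postgresql" StatefulSet below are assumed names;
# substitute your own.
#
# apiVersion: cr.kanister.io/v1alpha1
# kind: ActionSet
# metadata:
#   generateName: backup-postgres-
#   namespace: kanister
# spec:
#   actions:
#   - name: backup
#     blueprint: postgres-bp
#     object:
#       kind: StatefulSet
#       name: postgres-postgresql
#       namespace: postgres
#     profile:
#       name: s3-profile
#       namespace: kanister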