# Available parameters and their default values for the Vault chart.

global:
  # enabled is the master enabled switch. Setting this to true or false
  # will enable or disable all the components within this chart by default.
  enabled: true
  # Image is the name (and tag) of the Vault Docker image.
  image: "vault:1.2.2"
  # Overrides the default image pull policy.
  imagePullPolicy: IfNotPresent
  # Image pull secret to use for registry authentication.
  imagePullSecrets: []
  # imagePullSecrets:
  #   - name: image-pull-secret
  # TLS for end-to-end encrypted transport
  tlsDisable: true
server:
  securityContext:
    readOnlyRootFilesystem: true

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec.
  # By default no direct resource request is made.
  resources:
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 250m
  #   limits:
  #     memory: 256Mi
  #     cpu: 250m

  # Ingress settings
  # When enabled, an Ingress resource is created to manage external access to
  # the cluster. This is very useful if you want to expose the Vault UI.
  ingress:
    enabled: false
    labels: {}
      # traffic: external
    annotations: {}
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    hosts:
      - host: chart-example.local
        paths: []
    tls: []
    #  - secretName: chart-example-tls
    #    hosts:
    #      - chart-example.local

  # authDelegator enables a cluster role binding to be attached to the service
  # account. This cluster role binding can be used to set up the Kubernetes auth
  # method. https://www.vaultproject.io/docs/auth/kubernetes.html
  authDelegator:
    enabled: false

  # extraEnvironmentVars is a list of extra environment variables to set with the
  # stateful set. These could be used to include variables required for auto-unseal.
  extraEnvironmentVars: {}
    # GOOGLE_REGION: global
    # GOOGLE_PROJECT: myproject
    # GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json

  # extraSecretEnvironmentVars is a list of extra environment variables to set with the
  # stateful set. These variables take their values from existing Secret objects.
  extraSecretEnvironmentVars: []
    # - envName: AWS_SECRET_ACCESS_KEY
    #   secretName: vault
    #   secretKey: AWS_SECRET_ACCESS_KEY

  # extraVolumes is a list of extra volumes to mount. These will be exposed
  # to Vault in the path `/vault/userconfig/<name>/`. The value below is
  # an array of objects; examples are shown below.
  extraVolumes: []
    # - type: secret (or "configMap")
    #   name: my-secret
    #   load: false # if true, will add to `-config` to load by Vault
    #   path: null # default is `/vault/userconfig`

  # Affinity Settings
  # Commenting out or setting the affinity variable to empty allows
  # deployment to single-node clusters such as Minikube.
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: {{ template "vault.name" . }}
              app.kubernetes.io/instance: "{{ .Release.Name }}"
              component: server
          topologyKey: kubernetes.io/hostname

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: {}
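  # For illustration only (not a chart default): a hypothetical toleration that
  # lets server pods schedule onto nodes carrying a matching taint; the key and
  # value below are placeholders.
  # tolerations: |
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "vault"
  #     effect: "NoSchedule"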

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: {}

  # Extra labels to attach to the server pods.
  # This should be a multi-line string mapping directly to a map of
  # the labels to apply to the server pods.
  extraLabels: {}
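  # For illustration only (not a chart default); the label below is a placeholder:
  # extraLabels: |
  #   team: security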

  # Extra annotations to attach to the server pods.
  # This should be a multi-line string mapping directly to a map of
  # the annotations to apply to the server pods.
  annotations: {}
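  # For illustration only (not a chart default); the annotation below is a placeholder:
  # annotations: |
  #   example.com/sample-annotation: "value"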

  # Enables a headless service to be used by the Vault StatefulSet.
  service:
    enabled: true
    # clusterIP controls whether a Cluster IP address is attached to the
    # Vault service within Kubernetes. By default the Vault service will
    # be given a Cluster IP address; set this to None to disable. When disabled,
    # Kubernetes will create a "headless" service. Headless services can be
    # used to communicate with pods directly through DNS instead of a round-robin
    # load balancer.
    # clusterIP: None
    # Port on which the Vault server is listening.
    port: 8200
    # Target port to which the service should be mapped.
    targetPort: 8200
    # Extra annotations for the service definition.
    annotations: {}

  # This configures the Vault StatefulSet to create a PVC for data
  # storage when using the file backend.
  # See https://www.vaultproject.io/docs/configuration/storage/filesystem.html for more information.
  dataStorage:
    enabled: true
    # Size of the PVC created.
    size: 10Gi
    # Name of the storage class to use. If null it will use the
    # configured default Storage Class.
    storageClass: null
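    # For illustration only (not a chart default); "standard" is a placeholder
    # storage class name that must already exist in your cluster:
    # storageClass: standard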
    # Access Mode of the storage device being used for the PVC.
    accessMode: ReadWriteOnce

  # This configures the Vault StatefulSet to create a PVC for audit
  # logs. Once Vault is deployed, initialized, and unsealed, Vault must
  # be configured to use this for audit logs. This will be mounted to
  # /vault/audit.
  # See https://www.vaultproject.io/docs/audit/index.html for more information.
  auditStorage:
    enabled: false
    # Size of the PVC created.
    size: 10Gi
    # Name of the storage class to use. If null it will use the
    # configured default Storage Class.
    storageClass: null
    # Access Mode of the storage device being used for the PVC.
    accessMode: ReadWriteOnce

  # Run Vault in "dev" mode. This requires no further setup, no state management,
  # and no initialization. This is useful for experimenting with Vault without
  # needing to unseal, store keys, etc. All data is lost on restart - do not
  # use dev mode for anything other than experimenting.
  # See https://www.vaultproject.io/docs/concepts/dev-server.html for more information.
  dev:
    enabled: false

  # Run Vault in "standalone" mode. This is the default mode that will deploy if
  # no arguments are given to helm. This requires a PVC for data storage to use
  # the "file" backend. This mode is not highly available and should not be scaled
  # past a single replica.
  standalone:
    enabled: "-"

    # config is a raw string of default configuration when using a stateful
    # deployment. The default is to use a PersistentVolumeClaim mounted at /vault/data
    # and store data there. This is only used when using a replica count of 1 and
    # a stateful set. This should be HCL.
    config: |
      ui = true

      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "file" {
        path = "/vault/data"
      }

      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #   project    = "vault-helm-dev"
      #   region     = "global"
      #   key_ring   = "vault-helm-unseal-kr"
      #   crypto_key = "vault-helm-unseal-key"
      #}

  # Run Vault in "HA" mode. There are no storage requirements unless audit log
  # persistence is required. In HA mode Vault will configure itself to use Consul
  # for its storage backend. The default configuration provided will work with the
  # Consul Helm project by default. It is possible to manually configure Vault to
  # use a different HA backend.
  ha:
    enabled: false
    replicas: 3

    # config is a raw string of default configuration when using a stateful
    # deployment. The default is to use Consul for the HA storage backend.
    # This should be HCL.
    config: |
      ui = true

      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "consul" {
        path = "vault"
        address = "HOST_IP:8500"
      }

      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #   project    = "vault-helm-dev-246514"
      #   region     = "global"
      #   key_ring   = "vault-helm-unseal-kr"
      #   crypto_key = "vault-helm-unseal-key"
      #}

    # A disruption budget limits the number of pods of a replicated application
    # that are down simultaneously from voluntary disruptions.
    disruptionBudget:
      enabled: true

      # maxUnavailable will default to (n/2)-1 where n is the number of
      # replicas. If you'd like a custom value, you can specify an override here.
      maxUnavailable: null
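      # For example, with the default of 3 replicas, (3/2)-1 evaluates to 0 under
      # integer division, so no server pod may be removed by a voluntary disruption
      # unless an explicit override is set, e.g.:
      # maxUnavailable: 1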

  # Definition of the serviceaccount used to run Vault.
  serviceaccount:
    annotations: {}

# Vault UI
ui:
  # True if you want to create a Service entry for the Vault UI.
  #
  # serviceType can be used to control the type of service created. For
  # example, setting this to "LoadBalancer" will create an external load
  # balancer (for supported K8S installations) to access the UI.
  enabled: false
  serviceType: "ClusterIP"
  serviceNodePort: null
  externalPort: 8200
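  # For illustration only (not a chart default): to expose the UI on a fixed node
  # port instead, one might set the following (the port number is a placeholder):
  # serviceType: "NodePort"
  # serviceNodePort: 30200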

  # loadBalancerSourceRanges:
  #   - 10.0.0.0/16
  #   - 1.78.23.3/32
  # loadBalancerIP:

  # Extra annotations to attach to the ui service.
  # This should be a multi-line string mapping directly to a map of
  # the annotations to apply to the ui service.
  annotations: {}