forked from rancher/terraform-provider-rke
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathexample.tf
308 lines (266 loc) · 9.54 KB
/
example.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
# This example is the same as https://github.com/rancher/rke/blob/master/cluster.yml
resource "rke_cluster" "cluster" {
  # If set to true, RKE skips the port-reachability validation between nodes
  disable_port_check = false

  ###############################################
  # Kubernetes nodes
  ###############################################
  # Node reachable by IP, using an SSH key file and a non-default SSH port
  nodes {
    address      = "1.1.1.1"
    user         = "ubuntu"
    role         = ["controlplane", "etcd"]
    ssh_key_path = "~/.ssh/id_rsa"
    port         = 2222
  }
  # Node using an inline SSH private key instead of a key file
  nodes {
    address = "2.2.2.2"
    user    = "ubuntu"
    role    = ["worker"]
    ssh_key = <<EOL
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----
EOL
    # or
    #ssh_key = "${file("~/.ssh/id_rsa")}"
  }
  # Node reachable by hostname, with an overridden node name, a private
  # address for intra-cluster traffic, and custom node labels
  nodes {
    address = "example.com"
    user    = "ubuntu"
    role    = ["controlplane", "etcd", "worker"]
    # or
    # roles = "controlplane,etcd,worker"
    hostname_override = "node3"
    internal_address  = "192.168.1.6"
    labels = {
      app = "ingress"
    }
  }

  # If set to true, RKE will not fail when unsupported Docker versions are found
  ignore_docker_version = false

  ################################################
  # SSH configuration
  ################################################
  # Cluster level SSH private key
  # Used if no ssh information is set for the node
  ssh_key_path = "~/.ssh/test"
  # Enable use of SSH agent to use SSH private keys with passphrase
  # This requires the environment `SSH_AUTH_SOCK` configured pointing to your SSH agent which has the private key added
  ssh_agent_auth = false

  ################################################
  # Bastion/Jump host configuration
  ################################################
  #bastion_host {
  #  address      = "1.1.1.1"
  #  user         = "ubuntu"
  #  ssh_key_path = "~/.ssh/id_rsa"
  #  # or
  #  # ssh_key = file("~/.ssh/id_rsa")
  #  port = 2222
  #}

  ################################################
  # Private Registries
  ################################################
  # List of registry credentials, if you are using a Docker Hub registry,
  # you can omit the `url` or set it to `docker.io`
  private_registries {
    url      = "registry1.com"
    user     = "Username"
    password = "password1"
  }
  private_registries {
    url      = "registry2.com"
    user     = "Username"
    password = "password1"
  }

  ################################################
  # Cluster Name
  ################################################
  # Set the name of the Kubernetes cluster
  #cluster_name = ""

  ################################################
  # Versions
  ################################################
  # The kubernetes version used.
  # For now, this should match the version defined in rancher/types defaults map:
  # https://github.com/rancher/types/blob/master/apis/management.cattle.io/v3/k8s_defaults.go#L14
  #
  # In case the kubernetes_version and kubernetes image in system_images are defined,
  # the system_images configuration will take precedence over kubernetes_version.
  #
  # Allowed values: [v1.12.10-rancher1-2, v1.13.10-rancher1-2, v1.14.6-rancher1-1(default), v1.15.3-rancher1-1]
  kubernetes_version = "v1.14.6-rancher1-1"

  ################################################
  # System Images
  ################################################
  # System Image Tags are defaulted to a tag tied with specific kubernetes Versions
  # Default Tags:
  # https://github.com/rancher/types/blob/master/apis/management.cattle.io/v3/k8s_defaults.go)
  #
  system_images {
    kubernetes                  = "rancher/hyperkube:v1.10.3-rancher2"
    etcd                        = "rancher/coreos-etcd:v3.1.12"
    alpine                      = "rancher/rke-tools:v0.1.9"
    nginx_proxy                 = "rancher/rke-tools:v0.1.9"
    cert_downloader             = "rancher/rke-tools:v0.1.9"
    kubernetes_services_sidecar = "rancher/rke-tools:v0.1.9"
    kube_dns                    = "rancher/k8s-dns-kube-dns-amd64:1.14.8"
    dnsmasq                     = "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8"
    kube_dns_sidecar            = "rancher/k8s-dns-sidecar-amd64:1.14.8"
    kube_dns_autoscaler         = "rancher/cluster-proportional-autoscaler-amd64:1.0.0"
    pod_infra_container         = "rancher/pause-amd64:3.1"
  }

  ###############################################
  # Kubernetes services
  ###############################################
  services_etcd {
    # if external etcd used
    #path    = "/etcdcluster"
    #ca_cert = file("ca_cert")
    #cert    = file("cert")
    #key     = file("key")
    # for etcd snapshots
    #backup_config {
    #  interval_hours = 12
    #  retention      = 6
    #  # s3 specific parameters
    #  #s3_backup_config {
    #  #  access_key  = "access-key"
    #  #  secret_key  = "secret_key"
    #  #  bucket_name = "bucket-name"
    #  #  region      = "region"
    #  #  endpoint    = "s3.amazonaws.com"
    #  #}
    #}
  }
  services_kube_api {
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range = "10.43.0.0/16"
    # Expose a different port range for NodePort services
    service_node_port_range = "30000-32767"
    pod_security_policy     = false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args = {
      audit-log-path            = "-"
      delete-collection-workers = 3
      v                         = 4
    }
  }
  services_kube_controller {
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr = "10.42.0.0/16"
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range = "10.43.0.0/16"
  }
  services_scheduler {
  }
  services_kubelet {
    # Base domain for the cluster
    cluster_domain = "cluster.local"
    # IP address for the DNS service endpoint
    cluster_dns_server = "10.43.0.10"
    # Fail if swap is on
    fail_swap_on = false
    # Optionally define additional volume binds to a service
    extra_binds = [
      "/usr/libexec/kubernetes/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins",
    ]
  }
  services_kubeproxy {
  }

  ################################################
  # Authentication
  ################################################
  # Currently, only authentication strategy supported is x509.
  # You can optionally create additional SANs (hostnames or IPs) to add to the API server PKI certificate.
  # This is useful if you want to use a load balancer for the control plane servers.
  authentication {
    strategy = "x509"
    sans = [
      "10.18.160.10",
      "my-loadbalancer-1234567890.us-west-2.elb.amazonaws.com",
    ]
  }

  ################################################
  # Authorization
  ################################################
  # Kubernetes authorization mode
  # - Use `mode: "rbac"` to enable RBAC
  # - Use `mode: "none"` to disable authorization
  authorization {
    mode = "rbac"
  }

  ################################################
  # Cloud Provider
  ################################################
  # If you want to set a Kubernetes cloud provider, you specify the name and configuration
  cloud_provider {
    name = "aws"
  }

  # Add-ons are deployed using kubernetes jobs. RKE will give up on trying to get the job status after this timeout in seconds.
  addon_job_timeout = 30

  #########################################################
  # Network(CNI) - supported: flannel/calico/canal/weave
  #########################################################
  # There are several network plug-ins that work, but we default to canal
  network {
    plugin = "canal"
  }

  ################################################
  # Ingress
  ################################################
  # Currently only nginx ingress provider is supported.
  # To disable ingress controller, set `provider: none`
  ingress {
    provider = "nginx"
  }

  ################################################
  # Addons
  ################################################
  # all addon manifests MUST specify a namespace
  addons = <<EOL
---
apiVersion: v1
kind: Pod
metadata:
  name: my-nginx
  namespace: default
spec:
  containers:
  - name: my-nginx
    image: nginx
    ports:
    - containerPort: 80
EOL
  addons_include = [
    "https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/rook-operator.yaml",
    "https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/rook-cluster.yaml",
    "/path/to/manifest",
  ]
}
###############################################################################
# If you need kubeconfig.yml for use with kubectl, uncomment the following.
###############################################################################
#resource "local_file" "kube_cluster_yaml" {
# filename = "${path.root}/kube_config_cluster.yml"
# content = rke_cluster.cluster.kube_config_yaml
#}
###############################################################################
# If you need ca_crt/client_cert/client_key, uncomment the following.
###############################################################################
#resource "local_file" "ca_crt" {
# filename = "${path.root}/ca_cert"
# content = rke_cluster.cluster.ca_crt
#}
#
#resource "local_file" "client_cert" {
# filename = "${path.root}/client_cert"
# content = rke_cluster.cluster.client_cert
#}
#
#resource "local_file" "client_key" {
# filename = "${path.root}/client_key"
# content = rke_cluster.cluster.client_key
#}