###############################################################################################
# -General Options- #
###############################################################################################
# These parameters are required. #
###############################################################################################
# qcow2 image to use for your Virtual Machines:
qcow2_image: 'https://cdimage.debian.org/cdimage/openstack/current-10/debian-10-openstack-amd64.qcow2'
# Location to use for storing the qcow2 image. Be sure to end the absolute path with a '/'.
qcow2_download_location: '/tmp/'
# Resource Pool for your virtual machines.
k8s_resource_pool: 'Kubernetes'
# Path to the SSH Public Key on your Proxmox Server.
k8s_ssh_key: '/root/.ssh/sol.milkyway.kubernetes.pub'
# VM IDs
k8s_master_id: '4010'
k8s_node1_id: '40100'
k8s_node2_id: '40101'
k8s_node3_id: '40102'
# Hostnames
k8s_master_hn: 'Pluto'
k8s_node1_hn: 'Ceres'
k8s_node2_hn: 'Eris'
k8s_node3_hn: 'Haumea'
# Number of CPUs
k8s_master_cpu: '2'
k8s_node1_cpu: '4'
k8s_node2_cpu: '4'
k8s_node3_cpu: '4'
# Amount of memory expressed in megabytes
k8s_master_mem: '5120'
k8s_node1_mem: '10240'
k8s_node2_mem: '10240'
k8s_node3_mem: '10240'
# Disk Sizes
k8s_master_size: '50G'
k8s_node1_size: '50G'
k8s_node2_size: '50G'
k8s_node3_size: '50G'
# IP Addresses
k8s_master_ip: '192.168.40.10'
k8s_node1_ip: '192.168.40.100'
k8s_node2_ip: '192.168.40.101'
k8s_node3_ip: '192.168.40.102'
# Subnet Sizes
k8s_master_sn: '/24'
k8s_node1_sn: '/24'
k8s_node2_sn: '/24'
k8s_node3_sn: '/24'
# Network Gateway
k8s_master_gw: '192.168.40.1'
k8s_node1_gw: '192.168.40.1'
k8s_node2_gw: '192.168.40.1'
k8s_node3_gw: '192.168.40.1'
# DNS Servers
k8s_master_ns: '192.168.1.100'
k8s_node1_ns: '192.168.1.100'
k8s_node2_ns: '192.168.1.100'
k8s_node3_ns: '192.168.1.100'
# Search Domains
k8s_master_sd: 'sol.milkyway'
k8s_node1_sd: 'sol.milkyway'
k8s_node2_sd: 'sol.milkyway'
k8s_node3_sd: 'sol.milkyway'
# Network Bridges
k8s_master_bridge: 'vmbr0'
k8s_node1_bridge: 'vmbr0'
k8s_node2_bridge: 'vmbr0'
k8s_node3_bridge: 'vmbr0'
# Storage volumes
k8s_master_stg: 'SaturnPool'
k8s_node1_stg: 'SaturnPool'
k8s_node2_stg: 'SaturnPool'
k8s_node3_stg: 'SaturnPool'
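# For orientation, the per-VM values above feed the VM creation on Proxmox. A hypothetical
# sketch for the master node (the playbooks drive this automatically; the exact commands and
# disk handling may differ):
#   qm create 4010 --name Pluto --cores 2 --memory 5120 --net0 virtio,bridge=vmbr0
#   qm importdisk 4010 /tmp/debian-10-openstack-amd64.qcow2 SaturnPool
#   qm set 4010 --ipconfig0 ip=192.168.40.10/24,gw=192.168.40.1 --nameserver 192.168.1.100 \
#     --searchdomain sol.milkyway --sshkeys /root/.ssh/sol.milkyway.kubernetes.pub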
# CIDR for the Calico Pod Network - MUST be different from your Kubernetes CIDR to avoid an IP conflict. Subnet size will automatically be set to /16.
calico_cidr: '172.16.0.0'
# URL for the Calico Network Policy:
# Can be found here: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#tabs-pod-install-2
calico_policy_url: 'https://docs.projectcalico.org/v3.8/manifests/calico.yaml'
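# For reference, the two values above typically translate into bootstrap steps along these
# lines (a sketch, not the literal playbook tasks):
#   kubeadm init --pod-network-cidr=172.16.0.0/16
#   kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml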
# The URL for the Kubernetes Dashboard Manifest. Not locally hosted as it changes frequently.
# Can be found in the `Getting Started` section of this page: https://github.com/kubernetes/dashboard/blob/master/README.md
k8s_dashboard_url: 'https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml'
# Service user created for the dashboard.
k8s_user_name: 'tj'
###############################################################################################
# -Dashboard Options- #
###############################################################################################
# These parameters are only required if you intend to deploy the Kubernetes Dashboard to your #
# cluster. If you wish to bind the Dashboard service to a Load Balanced IP Address, then #
# uncomment and provide the values below. If you wish to deploy the Dashboard like normal, #
# and use `kubectl proxy` or `kubectl port-forward` to expose it, leave them commented out.   #
# #
# If you exposed the Dashboard with MetalLB, you can navigate to: https://HOSTNAME:8443/ #
# If you are exposing the Dashboard with `kubectl proxy`, you can find instructions on how to #
# access the dashboard here: #
# https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/ #
# #
# TKS has already provisioned a Service Account that you can use to authenticate against your #
# Dashboard. You can retrieve this token like so. Be sure to replace K8S_USER_NAME with the #
# value that you set for `k8s_user_name` above.                                               #
# kubectl describe -n kube-system secret K8S_USER_NAME-token | grep token: | awk '{print $2}' #
###############################################################################################
# The MetalLB Address Pool from which you want to obtain an IP address.
dashboard_load_balancer_address_pool: 'VLAN50'
# The IP address within the Address Pool to which you want to bind the service.
dashboard_load_balancer_ip: '192.168.50.100'
# The hostname with which you want to test connectivity to the IP Address after the deployment has completed.
dashboard_hostname: 'amalthea.sol.milkyway'
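# For reference, once deployed you would typically reach the Dashboard in one of two ways (a
# sketch; the proxy path assumes the recommended manifest's default `kubernetes-dashboard`
# namespace and Service name):
#   Via MetalLB, using the hostname above:  https://amalthea.sol.milkyway:8443/
#   Via `kubectl proxy`:
#     http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/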
###############################################################################################
# -VLAN Options- #
###############################################################################################
# These parameters are only required if your network is configured with VLANs and you wish to #
# specify which VLAN each of your Kubernetes nodes is allocated to. Leave these variables #
# blank if you do not intend to use VLANs for your cluster. #
###############################################################################################
# VLAN Tags
k8s_master_vlan: '40'
k8s_node1_vlan: '40'
k8s_node2_vlan: '40'
k8s_node3_vlan: '40'
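# For reference, a VLAN tag typically ends up as a `tag=` option on the VM's Proxmox network
# device, e.g. (a sketch): qm set 4010 --net0 virtio,bridge=vmbr0,tag=40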
###############################################################################################
# -Unattended Upgrades- #
###############################################################################################
# These parameters are only required if you want to enable Unattended Upgrades on the #
# operating systems that comprise your Kubernetes master and worker nodes. None of these #
# variables are required to enable the feature. However, you may wish to enable email #
# notifications via SMTP or tweak the way in which Unattended Upgrades are applied to your #
# system. To do so, simply uncomment the relevant configuration setting. #
# #
# NOTE: If you set any of the following variables, then you must also set the others or SMTP  #
# notifications will not work:                                                                #
# - smtp_server #
# - smtp_port #
# - smtp_username #
# - smtp_password #
# - email_address #
###############################################################################################
# SMTP Server
smtp_server: 'smtp.gmail.com'
# SMTP Port
smtp_port: '587'
# SMTP Server username
smtp_username: '[email protected]'
# SMTP Server password
smtp_password: 'PASSWORD'
# The email address to which notifications will be delivered.
email_address: '[email protected]'
# Set to 'off' if you want to be naughty and use SMTP without TLS.
#smtp_use_tls: 'off'
# Only receive emails when an error occurs.
mail_only_on_error: "true"
# Only perform upgrades when a graceful OS shutdown occurs.
#update_only_on_shutdown: "true"
# Automatically remove unused kernel packages.
remove_unused_kernel_packages: "true"
# Automatically remove unused dependencies.
remove_unused_dependencies: "true"
# Automatically perform a reboot after upgrading if required.
#automatic_reboot_if_required: "true"
# If automatic reboots are enabled, configure the time at which they occur.
#automatic_reboot_time: "02:00"
# Enable logging to syslog.
#enable_syslog_logging: "true"
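# For reference, most of the toggles above correspond to standard apt unattended-upgrades
# options, roughly as below (a sketch of /etc/apt/apt.conf.d/50unattended-upgrades; the SMTP
# server, port, and credentials presumably configure the host's mail relay and are not
# unattended-upgrades options themselves):
#   Unattended-Upgrade::Mail "<email_address>";
#   Unattended-Upgrade::MailOnlyOnError "true";
#   Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
#   Unattended-Upgrade::Remove-Unused-Dependencies "true";
#   Unattended-Upgrade::Automatic-Reboot "true";
#   Unattended-Upgrade::Automatic-Reboot-Time "02:00";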
###############################################################################################
# -NFS Options- #
###############################################################################################
# These parameters are only required if you have an NFS server and would like to enable the #
# optional support for dynamic provisioning of Persistent Storage volumes for your pods. #
###############################################################################################
# NFS server
nfs_hostname: 'saturn.sol.milkyway'
# NFS Mount Point
nfs_mount_point: '/SaturnPool/Kubernetes'
# NFS Provisioner Name
nfs_provisioner: 'saturnpool'
# Namespace to deploy the NFS Provisioner to.
nfs_namespace: 'nfs-provisioner'
# Default reclaim policy. https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy
nfs_reclaim_policy: 'Retain'
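# For reference, once the provisioner is running, pods request storage through an ordinary
# PersistentVolumeClaim (a sketch; it assumes the resulting StorageClass is named after
# `nfs_provisioner` above):
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: example-claim
#   spec:
#     storageClassName: saturnpool
#     accessModes:
#       - ReadWriteMany
#     resources:
#       requests:
#         storage: 1Gi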
###############################################################################################
# -MetalLB Options- #
###############################################################################################
# #
# WARNING: MetalLB itself is currently supported; however, dynamic provisioning of its        #
# configuration is NOT, despite the explanation below. This will come in a later release.     #
# Until then, if you are interested in using this feature, you will need to modify the        #
# ConfigMap located in the MetalLB files directory yourself to match the variables you        #
# declare here.                                                                               #
# #
# These parameters are only required if you want to enable on-prem load balancing with #
# MetalLB. The MetalLB configuration file is dynamically generated based on how many          #
# variable sets you declare below, allowing you to provision one or more address pools        #
# depending on your network configuration. For example:                                       #
# #
# If you wish to deploy a single address pool, you would declare these three variables. #
# #
# metallb_ap1_name: NAME #
# metallb_ap1_protocol: PROTOCOL #
# metallb_ap1_cidr: CIDR #
# #
# #
# However, if you wished to declare two address pools, you would include a second set of #
# variables where the `ap1` portion of the variable name is incremented by one.               #
# #
# metallb_ap1_name: NAME #
# metallb_ap2_name: NAME #
# metallb_ap1_protocol: PROTOCOL #
# metallb_ap2_protocol: PROTOCOL #
# metallb_ap1_cidr: CIDR #
# metallb_ap2_cidr: CIDR #
###############################################################################################
# The IP Address of the router you wish to peer with.
metallb_peer_router: '192.168.1.1'
# The AS Number of the router you wish to peer with.
metallb_peer_asn: '64512'
# The names of your address pools.
metallb_ap1_name: 'VLAN10'
metallb_ap2_name: 'VLAN50'
metallb_ap3_name: 'VLAN60'
metallb_ap4_name: 'VLAN70'
# The protocols of your address pools. (bgp|layer2)
metallb_ap1_protocol: 'bgp'
metallb_ap2_protocol: 'bgp'
metallb_ap3_protocol: 'bgp'
metallb_ap4_protocol: 'bgp'
# The CIDRs of your address pools.
metallb_ap1_cidr: '192.168.10.1/24'
metallb_ap2_cidr: '192.168.50.1/24'
metallb_ap3_cidr: '192.168.60.1/24'
metallb_ap4_cidr: '192.168.70.1/24'
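# For reference, the variables above describe a MetalLB ConfigMap roughly like the following
# (a sketch in MetalLB's pre-0.13 ConfigMap format; `my-asn` is an assumption since only the
# peer ASN is declared above, and only the first address pool is shown):
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     namespace: metallb-system
#     name: config
#   data:
#     config: |
#       peers:
#       - peer-address: 192.168.1.1
#         peer-asn: 64512
#         my-asn: 64512
#       address-pools:
#       - name: VLAN10
#         protocol: bgp
#         addresses:
#         - 192.168.10.1/24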
###############################################################################################
# -NGINX Ingress Controller Options- #
###############################################################################################
# These parameters are only required if you want to enable the NGINX ingress controller. #
# REQUIREMENT: The NGINX Ingress controller integration requires a Load Balancer integration. #
###############################################################################################
# The IP Address to use
nginx_load_balancer_ip: '192.168.0.100'
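# For reference, this address typically ends up on the ingress controller's Service (a sketch
# of the relevant fields only):
#   spec:
#     type: LoadBalancer
#     loadBalancerIP: 192.168.0.100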
###############################################################################################
# -DataDog Options- #
###############################################################################################
# These parameters are only required if you have an account with DataDog and would like to #
# enable metrics collection for your cluster. #
###############################################################################################
# Your DataDog API Key
dd_api_key: 'APIKEY'
# Namespace to deploy DataDog to
dd_namespace: 'default'
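# For reference, the API key is typically handed to the agent through a Secret in the target
# namespace (a sketch; the secret and key names here are hypothetical and may not match what
# the TKS manifests expect):
#   kubectl -n default create secret generic datadog-secret --from-literal=api-key=APIKEY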