diff --git a/documentation/Installation.md b/documentation/Installation.md
index 557162d0b..720244e65 100644
--- a/documentation/Installation.md
+++ b/documentation/Installation.md
@@ -989,6 +989,9 @@ By default, autodetection is enabled.
 
 In order to assign VRRP IP you need to create a `vrrp_ips` section in the inventory and specify the appropriate configuration.
 You can specify several VRRP IP addresses.
+This configuration is applied to the keepalived configuration on the balancer nodes.
+If needed, you can specify global parameters or override the whole keepalived configuration.
+For more information, refer to the keepalived section in the [loadbalancer specification](#loadbalancer).
 
 The following parameters are supported:
 
@@ -3220,7 +3223,7 @@ However, it is possible to add or modify any deployment parameters of the invent
 
 #### loadbalancer
 
-`loadbalancer` configures the balancers for the Kubernetes cluster. Currently, only the Haproxy configuration can be customized.
+`loadbalancer` configures the balancers for the Kubernetes cluster. Currently, only the Haproxy and Keepalived configurations can be customized.
 
 ###### target_ports
 
@@ -3433,6 +3436,168 @@ This parameter use the following context options for template rendering:
 
 As an example of a template, you can look at [default template](/kubemarine/templates/haproxy.cfg.j2).
 
+##### keepalived
+
+This section describes the configuration parameters that are applied to the **keepalived.conf** config file in addition
+to those related to [vrrp ips](#vrrp_ips).
+By default, the following configuration is used:
+
+```yaml
+services:
+  loadbalancer:
+    keepalived:
+      global: {}
+```
+
+These settings can be overridden in the **cluster.yaml**. Currently, the following settings of **keepalived.conf** are supported:
+
+| Parameter | Type | Default value | Description |
+| :---: | :---: | :---: | --- |
+| `global.vrrp_garp_master_refresh` | integer |  | Sets `vrrp_garp_master_refresh` in `global_defs`: the minimum time interval (in seconds) for refreshing gratuitous ARPs while MASTER. Not applied by default. |
+| `keep_configs_updated` | boolean | `True` | Allows Kubemarine to update the keepalived configs every time the cluster is (re)installed or its schema is updated (nodes added or removed). |
+| `config` | string |  | Custom keepalived config to be used instead of the default one. This can be useful when each installation/add_node procedure adds only one balancer: in that case, a custom configuration can be specified for every balancer without Jinja templates. |
+| `config_file` | string |  | Path to a Jinja template file with a custom keepalived config to be used instead of the default one. |
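+
+For example, to set a global keepalived option while keeping automatic config updates enabled, a configuration like the following can be used (the value `60` is illustrative):
+
+```yaml
+services:
+  loadbalancer:
+    keepalived:
+      keep_configs_updated: True
+      global:
+        vrrp_garp_master_refresh: 60
+```
+
+Setting `keep_configs_updated: False` makes Kubemarine skip regenerating the keepalived configs during subsequent installation and add/remove node procedures.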
+
+**Note**: You can use either `config` or `config_file` if you need to use a custom config instead of the default one.
+
+The `config` parameter allows you to specify your custom config content. The priority of this option is higher than that of `config_file`, and if both are specified, `config` is used. Example:
+
+```yaml
+services:
+  loadbalancer:
+    keepalived:
+      keep_configs_updated: True
+      config: |
+        global_defs {
+            vrrp_garp_master_refresh 60
+            vrrp_garp_master_refresh_repeat 10
+            vrrp_garp_interval 1
+        }
+        vrrp_script script_27a2eb32e5 {
+            script "/usr/local/bin/check_haproxy.sh"
+            interval 2
+            fall 2
+            rise 2
+        }
+
+        vrrp_instance balancer_27a2eb32e5 {
+            state BACKUP
+            interface enp0s8
+            virtual_router_id 130
+            priority 254
+            nopreempt
+            virtual_ipaddress {
+                10.0.2.2 dev enp0s8 label vip_27a2eb32e5
+            }
+
+            track_script {
+                script_27a2eb32e5
+            }
+
+            authentication {
+                auth_type PASS
+                auth_pass 6f3a13e1
+            }
+        }
+```
+
+The `config_file` parameter allows you to specify the path to a Jinja template with a custom keepalived config. Example:
+
+```yaml
+services:
+  loadbalancer:
+    keepalived:
+      keep_configs_updated: True
+      config_file: '/root/my_keepalived_config.conf.j2'
+```
+
+This parameter uses the following context options for template rendering (see the simplified template sketch after the table below):
+* `globals` (values from `services.loadbalancer.keepalived.global`);
+* modified [vrrp_ips](#vrrp_ips) with the following properties for each of them:
+
+| Parameter | Type | Description |
+| :---: | :---: | --- |
+| `id` | string | The ID of the VRRP IP, specified in `vrrp_ips[].id`. |
+| `router_id` | string | The router ID of the VRRP IP, specified in `vrrp_ips[].router_id`. |
+| `ip` | string | The IP address of the virtual IP, specified in `vrrp_ips[].ip`. |
+| `password` | string | The password for the VRRP IP set, specified in `vrrp_ips[].password` or randomly generated. |
+| `interface` | string | The interface on which the address is listened, specified in `vrrp_ips[].interface` or autodetected. |
+| `priority` | string | The priority of the VRRP IP host, specified in `vrrp_ips[].priority`. |
+| `source` | string | The `internal_address` of the node where the target configuration is applied. |
+| `peers` | list of strings | The `internal_address` of the other nodes where the VRRP IP should be set. |
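+
+For instance, a simplified custom template could consume this context as follows. This is only a sketch that renders the `global_defs` section and one `vrrp_instance` per VRRP IP; a real template must produce a complete, valid keepalived configuration (the default template linked below also adds a health-check `vrrp_script`):
+
+```
+{% if globals['vrrp_garp_master_refresh'] is defined %}
+global_defs {
+    vrrp_garp_master_refresh {{ globals['vrrp_garp_master_refresh'] }}
+}
+{% endif %}
+{% for item in vrrp_ips %}
+vrrp_instance balancer_{{ item['id'] }} {
+    state BACKUP
+    interface {{ item['interface'] }}
+    virtual_router_id {{ item['router_id'] }}
+    priority {{ item['priority'] }}
+    virtual_ipaddress {
+        {{ item['ip'] }} dev {{ item['interface'] }} label vip_{{ item['id'] }}
+    }
+    authentication {
+        auth_type PASS
+        auth_pass {{ item['password'] }}
+    }
+{% if item['peers'] | length > 0 %}
+    unicast_src_ip {{ item['source'] }}
+    unicast_peer {
+{% for ip in item['peers'] %}
+        {{ ip }}
+{% endfor %}
+    }
+{% endif %}
+}
+{% endfor %}
+```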
+ +As an example of a template, you can look at [default template](/kubemarine/templates/keepalived.conf.j2). + #### maintenance mode Kubemarine supports maintenance mode for HAproxy balancer. HAproxy balancer has additional configuration file for that purpose. The following configuration enable maintenance mode for balancer: diff --git a/kubemarine/keepalived.py b/kubemarine/keepalived.py index 8dd1ac96a..f657e1200 100644 --- a/kubemarine/keepalived.py +++ b/kubemarine/keepalived.py @@ -229,40 +229,40 @@ def disable(group: NodeGroup) -> None: def generate_config(cluster: KubernetesCluster, node: NodeConfig) -> str: - config = '' - - inventory = cluster.inventory - for i, item in enumerate(inventory['vrrp_ips']): - - if i > 0: - # this is required for double newline in config, but avoid double newline in the end of file - config += "\n" - - ips = { - 'source': node['internal_address'], - 'peers': [] - } - - for record in item['hosts']: - if record['name'] == node['name']: - priority = record['priority'] - interface = record['interface'] - break - else: + config_options: dict = cluster.inventory['services']['loadbalancer']['keepalived'] + config_string: Optional[str] = config_options.get('config') + if config_string is not None: + return config_string + + vrrps_ips = [] + keepalived_nodes = cluster.nodes['keepalived'].get_final_nodes().get_ordered_members_configs_list() + for item in cluster.inventory['vrrp_ips']: + host = next((record for record in item['hosts'] if record['name'] == node['name']), None) + if not host: # This VRRP IP should not be configured on this node. # There is still at least one VRRP IP to configure on this node # due to the way how 'keepalived' group is calculated. continue - - for i_node in cluster.nodes['keepalived'].get_final_nodes().get_ordered_members_configs_list(): - for record in item['hosts']: - if i_node['name'] == record['name'] and i_node['internal_address'] != ips['source']: - ips['peers'].append(i_node['internal_address']) - + vrrps_ips.append({ + 'id': item['id'], + 'router_id': item['router_id'], + 'ip': item['ip'], + 'password': item['password'], + 'interface': host['interface'], + 'priority': host['priority'], + 'source': node['internal_address'], + 'peers': [ + i_node['internal_address'] for i_node in keepalived_nodes + if any(i_node['name'] == record['name'] and i_node['internal_address'] != node['internal_address'] + for record in item['hosts']) + ] + }) + + if config_options.get('config_file'): + config_source = utils.read_external(config_options['config_file']) + else: config_source = utils.read_internal('templates/keepalived.conf.j2') - config += Template(config_source).render(inventory=inventory, item=item, node=node, - interface=interface, - priority=priority, **ips) + "\n" + config = Template(config_source).render(vrrp_ips=vrrps_ips, globals=config_options['global']) return config diff --git a/kubemarine/procedures/install.py b/kubemarine/procedures/install.py index e63a4619e..256c2e52f 100755 --- a/kubemarine/procedures/install.py +++ b/kubemarine/procedures/install.py @@ -424,6 +424,10 @@ def deploy_loadbalancer_keepalived_install(group: NodeGroup) -> None: def deploy_loadbalancer_keepalived_configure(cluster: KubernetesCluster) -> None: + if not cluster.inventory['services'].get('loadbalancer', {}) \ + .get('keepalived', {}).get('keep_configs_updated', True): + cluster.log.debug('Skipped - keepalived balancers configs update manually disabled') + return # For install procedure, configure all keepalives. 
# If balancer with VRPP IP is added or removed, reconfigure all keepalives keepalived_nodes = cluster.make_group_from_roles(['keepalived']) diff --git a/kubemarine/resources/configurations/defaults.yaml b/kubemarine/resources/configurations/defaults.yaml index cf67aee23..d83d81b8e 100644 --- a/kubemarine/resources/configurations/defaults.yaml +++ b/kubemarine/resources/configurations/defaults.yaml @@ -434,6 +434,9 @@ services: target_ports: http: '{% if nodes | select("has_role", "balancer") | reject("has_role", "remove_node") | first %}20080{% else %}80{% endif %}' https: '{% if nodes | select("has_role", "balancer") | reject("has_role", "remove_node") | first %}20443{% else %}443{% endif %}' + keepalived: + keep_configs_updated: True + global: {} packages: cache_versions: true diff --git a/kubemarine/resources/schemas/definitions/services/loadbalancer.json b/kubemarine/resources/schemas/definitions/services/loadbalancer.json index 0aee8ac41..9788c8ffd 100644 --- a/kubemarine/resources/schemas/definitions/services/loadbalancer.json +++ b/kubemarine/resources/schemas/definitions/services/loadbalancer.json @@ -39,7 +39,8 @@ "default": 10000, "description": "Set the total number of connections allowed, process-wide." } - } + }, + "additionalProperties": false }, "defaults": { "type": "object", @@ -75,9 +76,11 @@ "default": 10000, "description": "Limits the sockets to this number of concurrent connections" } - } + }, + "additionalProperties": false } - } + }, + "additionalProperties": false }, "target_ports": { "type": "object", @@ -93,7 +96,39 @@ "default": 443, "description": "Target https port" } - } + }, + "additionalProperties": false + }, + "keepalived": { + "type": "object", + "description": "The section contains the configuration parameters that are applied to the keepalived.conf config file", + "properties": { + "global": { + "type": "object", + "description": "Parameters that are passed directly to the 'global_defs ' section of keepalived.conf file.", + "properties": { + "vrrp_garp_master_refresh": { + "type": "integer", + "description": "minimum time interval (in seconds) for refreshing gratuitous ARPs while MASTER." 
+ } + }, + "additionalProperties": false + }, + "config": { + "type": "string", + "description": "Custom keepalived config value to be used instead of the default one" + }, + "config_file": { + "type": "string", + "description": "Path to the Jinja-template file with custom keepalived config to be used instead of the default one" + }, + "keep_configs_updated": { + "type": "boolean", + "default": true, + "description": "Allows Kubemarine update keepalived configs every time, when cluster (re)installed or it's schema updated (added/removed nodes)" + } + }, + "additionalProperties": false } }, "additionalProperties": false diff --git a/kubemarine/templates/keepalived.conf.j2 b/kubemarine/templates/keepalived.conf.j2 index 281176732..2a35dfc22 100644 --- a/kubemarine/templates/keepalived.conf.j2 +++ b/kubemarine/templates/keepalived.conf.j2 @@ -1,3 +1,10 @@ +{% if globals|length %} +global_defs { + {% if globals['vrrp_garp_master_refresh'] is defined %}vrrp_garp_master_refresh {{ globals['vrrp_garp_master_refresh'] }}{% endif %} +} +{% endif %} + +{%- for item in vrrp_ips %} vrrp_script script_{{ item['id'] }} { script "/usr/local/bin/check_haproxy.sh" interval 2 @@ -8,12 +15,12 @@ vrrp_script script_{{ item['id'] }} { vrrp_instance balancer_{{ item['id'] }} { state BACKUP - interface {{ interface }} + interface {{ item['interface'] }} virtual_router_id {{ item['router_id'] }} - priority {{ priority }} + priority {{ item['priority'] }} nopreempt virtual_ipaddress { - {{ item['ip'] }} dev {{ interface }} label vip_{{ item['id'] }} + {{ item['ip'] }} dev {{ item['interface'] }} label vip_{{ item['id'] }} } track_script { @@ -24,12 +31,15 @@ vrrp_instance balancer_{{ item['id'] }} { auth_type PASS auth_pass {{ item['password'] }} } -{%- if peers | length > 0 %} - unicast_src_ip {{ source }} + +{%- if item['peers'] | length > 0 %} + unicast_src_ip {{ item['source'] }} unicast_peer { -{%- for ip in peers %} +{%- for ip in item['peers'] %} {{ ip }} {%- endfor %} } {%- endif %} } + +{%- endfor %} diff --git a/test/unit/test_keepalived.py b/test/unit/test_keepalived.py index d59080c5d..f4029cb3a 100755 --- a/test/unit/test_keepalived.py +++ b/test/unit/test_keepalived.py @@ -427,6 +427,38 @@ def test_skip_removed_peers(self): config_3 = keepalived.generate_config(cluster, balancers[2]) self.assertIn(only_left_peer_template.format(peer=balancers[1]['internal_address']), config_3) + def test_default_global_defs(self): + inventory = demo.generate_inventory(master=3, worker=3, balancer=1, keepalived=1) + first_balancer = next(node for node in inventory['nodes'] if 'balancer' in node['roles']) + + cluster = demo.new_cluster(inventory) + + config_1 = keepalived.generate_config(cluster, first_balancer) + self.assertNotIn("global_defs", config_1) + + def test_default_overriden_global_defs(self): + inventory = demo.generate_inventory(master=3, worker=3, balancer=1, keepalived=1) + first_balancer = next(node for node in inventory['nodes'] if 'balancer' in node['roles']) + + vrrp_garp_master_refresh = 60 + inventory['services'] = { + "loadbalancer": { + "keepalived": { + "global": { + "vrrp_garp_master_refresh": vrrp_garp_master_refresh + } + } + } + } + + cluster = demo.new_cluster(inventory) + only_vrrp_garp_template = """\ +global_defs {{ + vrrp_garp_master_refresh {vrrp_garp_master_refresh} +}}""" + config_1 = keepalived.generate_config(cluster, first_balancer) + self.assertIn(only_vrrp_garp_template.format(vrrp_garp_master_refresh=vrrp_garp_master_refresh), config_1) + class 
TestKeepalivedConfigApply(unittest.TestCase):