---
# You can use this file to override _any_ variable throughout Kolla.
# Additional options can be found in the
# 'kolla-ansible/ansible/group_vars/all.yml' file. Default values of all the
# commented parameters are shown here. To override a default value, uncomment
# the parameter and change its value.

###################
# Ansible options
###################

# This variable is used as the "filter" argument for the setup module. For
# instance, if one wants to remove/ignore all Neutron interface facts:
# kolla_ansible_setup_filter: "ansible_[!qt]*"
# By default, we do not provide a filter.
#kolla_ansible_setup_filter: "{{ omit }}"

# This variable is used as the "gather_subset" argument for the setup module.
# For instance, if one wants to avoid collecting facts via facter:
# kolla_ansible_setup_gather_subset: "all,!facter"
# By default, we do not provide a gather subset.
#kolla_ansible_setup_gather_subset: "{{ omit }}"

# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes

# This variable is used as "any_errors_fatal" setting for the setup (gather
# facts) plays.
# This is useful for weeding out failing hosts early to avoid late failures
# due to missing facts (especially cross-host).
# Do note this still supports host fact caching and it will not affect
# scenarios with all facts cached (as there is no task to fail).
#kolla_ansible_setup_any_errors_fatal: false

# This variable may be used to set the maximum failure percentage for all
# plays. More fine-grained control is possible via per-service variables, e.g.
# nova_max_fail_percentage. The default behaviour is to set a max fail
# percentage of 100, which is equivalent to not setting it.
#kolla_max_fail_percentage:
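# As an illustration only (the values below are examples, not defaults), a
# global limit with a stricter per-service override for Nova might look like:
#kolla_max_fail_percentage: 20
#nova_max_fail_percentage: 10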

###############
# Kolla options
###############
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"

# Valid options are ['centos', 'debian', 'rocky', 'ubuntu']
kolla_base_distro: "rocky"

# Do not override this unless you know what you are doing.
openstack_release: "2024.1"

# Docker image tag used by default.
#openstack_tag: "{{ openstack_release ~ openstack_tag_suffix }}"

# Suffix applied to openstack_release to generate openstack_tag.
#openstack_tag_suffix: ""

# Location of configuration overrides
#node_custom_config: "{{ node_config }}/config"

# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. If you want to run an
# All-In-One without haproxy and keepalived, you can set enable_haproxy to no
# in the "OpenStack options" section, and set this value to the IP of your
# 'network_interface' as set in the Networking section below.
kolla_internal_vip_address: "172.16.5.96"

# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
# default it is the same as kolla_internal_vip_address.
#kolla_internal_fqdn: "internal.yousuckat.lol"

# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. It defaults to the
# kolla_internal_vip_address, allowing internal and external communication to
# share the same address. Specify a kolla_external_vip_address to separate
# internal and external requests between two VIPs.
kolla_external_vip_address: "172.16.2.96"

# The public address used to communicate with OpenStack as set in the public_url
# for the endpoints that will be created. This DNS name should map to
# kolla_external_vip_address.
#kolla_external_fqdn: "external.yousuckat.lol"

# Optionally change the path to sysctl.conf modified by Kolla Ansible plays.
#kolla_sysctl_conf_path: /etc/sysctl.conf

##################
# Container engine
##################

# Valid options are [ docker, podman ]
#kolla_container_engine: docker


################
# Docker options
################

# Custom docker registry settings:
#docker_registry:
# Please read the docs carefully before applying docker_registry_insecure.
#docker_registry_insecure: "no"
#docker_registry_username:
# docker_registry_password is set in the passwords.yml file.
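# As a sketch, a deployment pulling from a private registry might set values
# like these (the registry address and username are hypothetical):
#docker_registry: "registry.example.com:5000"
#docker_registry_insecure: "no"
#docker_registry_username: "kolla"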

# Namespace of images:
#docker_namespace: "kolla"

# Docker client timeout in seconds.
#docker_client_timeout: 120

#docker_configure_for_zun: "no"
#containerd_configure_for_zun: "no"
#containerd_grpc_gid: 42463

###################
# Messaging options
###################
# Whether to enable TLS for oslo.messaging communication with RabbitMQ.
#om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}"
# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS.
#om_rabbitmq_cacert: "{{ rabbitmq_cacert }}"

##############################
# Neutron - Networking Options
##############################
# This interface is what all your api services will be bound to by default.
# Additionally, all vxlan/tunnel and storage network traffic will go over this
# interface by default. This interface must contain an IP address.
# It is possible for hosts to have non-matching names of interfaces - these can
# be set in an inventory file per host or per group or stored separately, see
# http://docs.ansible.com/ansible/latest/intro_inventory.html
# Yet another way to work around the naming problem is to create a bond for the
# interface on all hosts and give the bond name here. A similar strategy can be
# followed for other types of interfaces.
network_interface: "enp2s0"

# These can be adjusted for even more customization. The default is the same as
# the 'network_interface'. These interfaces must contain an IP address.
#kolla_external_vip_interface: "{{ network_interface }}"
#api_interface: "{{ network_interface }}"
#swift_storage_interface: "{{ network_interface }}"
#swift_replication_interface: "{{ swift_storage_interface }}"
#tunnel_interface: "{{ network_interface }}"
#dns_interface: "{{ network_interface }}"
#octavia_network_interface: "{{ api_interface }}"

# Configure the address family (AF) per network.
# Valid options are [ ipv4, ipv6 ]
#network_address_family: "ipv4"
#api_address_family: "{{ network_address_family }}"
#storage_address_family: "{{ network_address_family }}"
#swift_storage_address_family: "{{ storage_address_family }}"
#swift_replication_address_family: "{{ swift_storage_address_family }}"
#migration_address_family: "{{ api_address_family }}"
#tunnel_address_family: "{{ network_address_family }}"
#octavia_network_address_family: "{{ api_address_family }}"
#bifrost_network_address_family: "{{ network_address_family }}"
#dns_address_family: "{{ network_address_family }}"

# This is the raw interface given to neutron as its external network port. Even
# though an IP address can exist on this interface, it will be unusable in most
# configurations. It is recommended this interface not be configured with any IP
# addresses for that reason.
neutron_external_interface: "wlp3s0"

# Valid options are [ openvswitch, ovn, linuxbridge, vmware_nsxv, vmware_nsxv3, vmware_nsxp, vmware_dvs ]
# if vmware_nsxv3 or vmware_nsxp is selected, enable_openvswitch MUST be set to "no" (default is yes)
# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable.
# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html
#neutron_plugin_agent: "openvswitch"

# Valid options are [ internal, infoblox ]
#neutron_ipam_driver: "internal"

# Configure the Neutron upgrade option. Kolla currently supports two upgrade
# strategies for Neutron: legacy_upgrade and rolling_upgrade.
# Setting "neutron_enable_rolling_upgrade: yes" selects the rolling upgrade;
# setting it to "no" selects the legacy upgrade.
# Rolling upgrade is enabled by default.
neutron_enable_rolling_upgrade: "yes"

# Configure the neutron logging framework to log ingress/egress connections to
# instances for security group rules. More information can be found here:
# https://docs.openstack.org/neutron/latest/admin/config-logging.html
enable_neutron_packet_logging: "yes"

####################
# keepalived options
####################
# Arbitrary unique number from 0..255
# This should be changed from the default in the event of a multi-region deployment
# where the VIPs of different regions reside on a common subnet.
#keepalived_virtual_router_id: "51"

###################
# Dimension options
###################
# This is to provide an extra option to deploy containers with resource
# constraints. We call it dimensions here.
# The dimensions for each container are defined by a mapping, where each
# dimension value should be a string.
# Reference docs:
# https://docs.docker.com/config/containers/resource_constraints/
# eg:
# <container_name>_dimensions:
#    blkio_weight:
#    cpu_period:
#    cpu_quota:
#    cpu_shares:
#    cpuset_cpus:
#    cpuset_mems:
#    mem_limit:
#    mem_reservation:
#    memswap_limit:
#    kernel_memory:
#    ulimits:
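# For example, a hypothetical limit for the nova_libvirt container could look
# like this (the container name and values are illustrative only):
#nova_libvirt_dimensions:
#  mem_limit: "4g"
#  cpu_shares: "512"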

#####################
# Healthcheck options
#####################
#enable_container_healthchecks: "yes"
# Healthcheck options for Docker containers
# interval/timeout/start_period are in seconds
#default_container_healthcheck_interval: 30
#default_container_healthcheck_timeout: 30
#default_container_healthcheck_retries: 3
#default_container_healthcheck_start_period: 5

##################
# Firewall options
##################
# Configures firewalld on both ubuntu and centos systems
# for enabled services.
# firewalld should be installed beforehand.
# disable_firewall: "true"
# enable_external_api_firewalld: "false"
# external_api_firewalld_zone: "public"

#############
# TLS options
#############
# To provide encryption and authentication on the kolla_external_vip_interface,
# TLS can be enabled. When TLS is enabled, certificates must be provided to
# allow clients to perform authentication.
kolla_enable_tls_internal: "yes"
kolla_enable_tls_external: "yes"
kolla_certificates_dir: "/etc/kolla/certificates"
kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem"
kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem"
kolla_admin_openrc_cacert: "/etc/ssl/certs/ca-certificates.crt"
kolla_copy_ca_into_containers: "yes"
openstack_cacert: "/etc/ssl/certs/ca-certificates.crt"

#haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
#haproxy_backend_cacert_dir: "/etc/ssl/certs"

##################
# Backend options
##################
#kolla_httpd_keep_alive: "60"
#kolla_httpd_timeout: "60"

#####################
# Backend TLS options
#####################
kolla_enable_tls_backend: "yes"
kolla_verify_tls_backend: "yes"
kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem"
kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem"

#####################
# ACME client options
#####################
# A list of haproxy backend server directives pointing to addresses used by the
# ACME client to complete http-01 challenge.
# Please read the docs for more details.
#acme_client_servers: []
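# A minimal sketch of one entry, following the haproxy "server" directive
# syntax (the host name and address are hypothetical):
#acme_client_servers:
#  - server certbot01 192.0.2.1:8000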

####################
# LetsEncrypt options
####################
# These options are required for the letsencrypt role to work properly.
enable_letsencrypt: "yes"
letsencrypt_email: "jacksondilla@tutanota.com"

####################
# LetsEncrypt certificate server options
####################
letsencrypt_cert_server: "https://acme-v02.api.letsencrypt.org/directory"
# Attempt to renew the Let's Encrypt certificate every 5 days at midnight.
letsencrypt_cron_renew_schedule: "0 0 */5 * *"

################
# Region options
################
# Use this option to change the name of this region.
#openstack_region_name: "RegionOne"

# Use this option to define a list of region names - only needs to be configured
# in a multi-region deployment, and then only in the *first* region.
#multiple_regions_names: ["{{ openstack_region_name }}"]
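# For instance, a two-region deployment (the second region name is
# hypothetical) would list both regions in the first region's globals:
#multiple_regions_names:
#  - "{{ openstack_region_name }}"
#  - "RegionTwo"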

###################
# OpenStack options
###################
# Use these options to set the various log levels across all OpenStack projects
# Valid options are [ True, False ]
openstack_logging_debug: "True"

# Enable core OpenStack services. This includes:
# glance, keystone, neutron, nova, heat, and horizon.
enable_openstack_core: "yes"

# These roles are required for Kolla to be operational; however, a savvy
# deployer could disable some of these required roles and run their own
# services.
#enable_glance: "{{ enable_openstack_core | bool }}"
#enable_hacluster: "no"
#enable_haproxy: "yes"
#enable_keepalived: "{{ enable_haproxy | bool }}"
#enable_keystone: "{{ enable_openstack_core | bool }}"
#enable_mariadb: "yes"
#enable_memcached: "yes"
#enable_neutron: "{{ enable_openstack_core | bool }}"
#enable_nova: "{{ enable_openstack_core | bool }}"
#enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}"

# OpenStack services can be enabled or disabled with these options
#enable_aodh: "no"
#enable_barbican: "no"
#enable_blazar: "no"
#enable_ceilometer: "no"
#enable_ceilometer_ipmi: "no"
#enable_cells: "no"
#enable_central_logging: "no"
#enable_ceph_rgw: "no"
#enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
enable_cinder: "yes"
enable_cinder_backup: "yes"
#enable_cinder_backend_hnas_nfs: "no"
#enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}"
#enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "yes"
#enable_cinder_backend_quobyte: "no"
#enable_cinder_backend_pure_iscsi: "no"
#enable_cinder_backend_pure_fc: "no"
#enable_cinder_backend_pure_roce: "no"
#enable_cloudkitty: "no"
#enable_collectd: "no"
#enable_cyborg: "no"
#enable_designate: "no"
#enable_destroy_images: "no"
#enable_etcd: "no"
#enable_fluentd: "yes"
#enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}"
#enable_gnocchi: "no"
#enable_gnocchi_statsd: "no"
#enable_grafana: "no"
#enable_grafana_external: "{{ enable_grafana | bool }}"
#enable_heat: "{{ enable_openstack_core | bool }}"
#enable_horizon: "{{ enable_openstack_core | bool }}"
#enable_horizon_blazar: "{{ enable_blazar | bool }}"
#enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
#enable_horizon_designate: "{{ enable_designate | bool }}"
#enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}"
#enable_horizon_heat: "{{ enable_heat | bool }}"
#enable_horizon_ironic: "{{ enable_ironic | bool }}"
#enable_horizon_magnum: "{{ enable_magnum | bool }}"
#enable_horizon_manila: "{{ enable_manila | bool }}"
#enable_horizon_masakari: "{{ enable_masakari | bool }}"
#enable_horizon_mistral: "{{ enable_mistral | bool }}"
#enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
#enable_horizon_octavia: "{{ enable_octavia | bool }}"
#enable_horizon_tacker: "{{ enable_tacker | bool }}"
#enable_horizon_trove: "{{ enable_trove | bool }}"
#enable_horizon_watcher: "{{ enable_watcher | bool }}"
#enable_horizon_zun: "{{ enable_zun | bool }}"
#enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
#enable_ironic: "no"
#enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
#enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}"
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
#enable_kuryr: "no"
#enable_magnum: "no"
#enable_manila: "no"
#enable_manila_backend_generic: "no"
#enable_manila_backend_hnas: "no"
#enable_manila_backend_cephfs_native: "no"
#enable_manila_backend_cephfs_nfs: "no"
#enable_manila_backend_glusterfs_nfs: "no"
#enable_mariabackup: "no"
#enable_masakari: "no"
#enable_mistral: "no"
#enable_multipathd: "no"
#enable_neutron_vpnaas: "no"
#enable_neutron_sriov: "no"
#enable_neutron_dvr: "no"
#enable_neutron_fwaas: "no"
#enable_neutron_qos: "no"
#enable_neutron_agent_ha: "no"
#enable_neutron_bgp_dragent: "no"
#enable_neutron_provider_networks: "no"
#enable_neutron_segments: "no"
#enable_neutron_sfc: "no"
#enable_neutron_trunk: "no"
#enable_neutron_metering: "no"
#enable_neutron_infoblox_ipam_agent: "no"
#enable_neutron_port_forwarding: "no"
#enable_nova_serialconsole_proxy: "no"
#enable_nova_ssh: "yes"
#enable_octavia: "no"
#enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
#enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}"
#enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') }}"
#enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
#enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
#enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
#enable_ovs_dpdk: "no"
#enable_osprofiler: "no"
#enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
#enable_prometheus: "no"
#enable_proxysql: "no"
#enable_redis: "no"
#enable_skyline: "no"
#enable_swift: "no"
#enable_swift_s3api: "no"
#enable_tacker: "no"
#enable_telegraf: "no"
#enable_trove: "no"
#enable_trove_singletenant: "no"
#enable_venus: "no"
#enable_watcher: "no"
#enable_zun: "no"

#############
# S3 options
#############
# Common options for S3 Cinder Backup and Glance S3 backend.
#s3_url:
#s3_bucket:
#s3_access_key:
#s3_secret_key:
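# For illustration only - all of the following values are placeholders for a
# hypothetical S3-compatible endpoint:
#s3_url: "http://object-store.example.com:9000"
#s3_bucket: "kolla"
#s3_access_key: "changeme"
#s3_secret_key: "changeme"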

##################
# RabbitMQ options
##################
# Options passed to RabbitMQ server startup script via the
# RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS environment var.
# See Kolla Ansible docs RabbitMQ section for details.
# These are appended to args already provided by Kolla Ansible
# to configure IPv6 in RabbitMQ server.
# More details can be found in the RabbitMQ docs:
# https://www.rabbitmq.com/runtime.html#scheduling
# https://www.rabbitmq.com/runtime.html#busy-waiting
# The default tells RabbitMQ to always use two cores (+S 2:2),
# and not to busy wait (+sbwt none +sbwtdcpu none +sbwtdio none):
#rabbitmq_server_additional_erl_args: "+S 2:2 +sbwt none +sbwtdcpu none +sbwtdio none"
# Whether to enable TLS encryption for RabbitMQ client-server communication.
rabbitmq_enable_tls: "yes"
# CA certificate bundle in RabbitMQ container.
#rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"

#################
# MariaDB options
#################
# List of additional WSREP options
#mariadb_wsrep_extra_provider_options: []
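# A minimal sketch (the option shown is an example value, not a default);
# entries are "name=value" strings passed to the Galera wsrep provider:
#mariadb_wsrep_extra_provider_options:
#  - "gmcast.peer_timeout=PT15S"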

#######################
# External Ceph options
#######################
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
#external_ceph_cephx_enabled: "yes"

# Glance
#ceph_glance_user: "glance"
#ceph_glance_keyring: "client.{{ ceph_glance_user }}.keyring"
#ceph_glance_pool_name: "images"
# Cinder
#ceph_cinder_user: "cinder"
#ceph_cinder_keyring: "client.{{ ceph_cinder_user }}.keyring"
#ceph_cinder_pool_name: "volumes"
#ceph_cinder_backup_user: "cinder-backup"
#ceph_cinder_backup_keyring: "client.{{ ceph_cinder_backup_user }}.keyring"
#ceph_cinder_backup_pool_name: "backups"
# Nova
#ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
#ceph_nova_user: "{{ ceph_cinder_user }}"
#ceph_nova_pool_name: "vms"
# Gnocchi
#ceph_gnocchi_user: "gnocchi"
#ceph_gnocchi_keyring: "client.{{ ceph_gnocchi_user }}.keyring"
#ceph_gnocchi_pool_name: "gnocchi"
# Manila
#ceph_manila_user: "manila"
#ceph_manila_keyring: "client.{{ ceph_manila_user }}.keyring"

#############################
# Keystone - Identity Options
#############################

#keystone_admin_user: "admin"

#keystone_admin_project: "admin"

# Interval to rotate fernet keys by (in seconds). Must be an interval of
# 60(1 min), 120(2 min), 180(3 min), 240(4 min), 300(5 min), 360(6 min),
# 600(10 min), 720(12 min), 900(15 min), 1200(20 min), 1800(30 min),
# 3600(1 hour), 7200(2 hour), 10800(3 hour), 14400(4 hour), 21600(6 hour),
# 28800(8 hour), 43200(12 hour), 86400(1 day), 604800(1 week).
#fernet_token_expiry: 86400

# Whether or not to apply changes to service user passwords when services are
# reconfigured
#update_keystone_service_user_passwords: "true"

########################
# Glance - Image Options
########################
# Configure image backend.
#glance_backend_ceph: "no"
#glance_backend_file: "yes"
#glance_backend_swift: "no"
#glance_backend_vmware: "no"
#glance_backend_s3: "no"
#enable_glance_image_cache: "no"
#glance_enable_property_protection: "no"
#glance_enable_interoperable_image_import: "no"
# Configure glance upgrade option.
# Due to this feature being experimental in glance,
# the default value is "no".
#glance_enable_rolling_upgrade: "no"

####################
# Glance S3 Backend
####################
#glance_backend_s3_url: "{{ s3_url }}"
#glance_backend_s3_bucket: "{{ s3_bucket }}"
#glance_backend_s3_access_key: "{{ s3_access_key }}"
#glance_backend_s3_secret_key: "{{ s3_secret_key }}"

####################
# Osprofiler options
####################
# valid values: ["elasticsearch", "redis"]
#osprofiler_backend: "elasticsearch"

##################
# Barbican options
##################
# Valid options are [ simple_crypto, p11_crypto ]
#barbican_crypto_plugin: "simple_crypto"
#barbican_library_path: "/usr/lib/libCryptoki2_64.so"

#################
# Gnocchi options
#################
# Valid options are [ file, ceph, swift ]
#gnocchi_backend_storage: "{% if enable_swift | bool %}swift{% else %}file{% endif %}"

# Valid options are [redis, '']
#gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"

################################
# Cinder - Block Storage Options
################################
# Enable / disable Cinder backends
#cinder_backend_ceph: "no"
#cinder_backend_vmwarevc_vmdk: "no"
#cinder_backend_vmware_vstorage_object: "no"
#cinder_volume_group: "cinder-volumes"
# Valid options are [ '', redis, etcd ]
#cinder_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}"

# Valid options are [ nfs, swift, ceph, s3 ]
#cinder_backup_driver: "ceph"
#cinder_backup_share: ""
cinder_backup_mount_options_nfs: ""
cinder_backup_driver: "nfs"
cinder_backup_share: "controller1:/kolla_nfs"


# Cinder backup S3 options
#cinder_backup_s3_url: "{{ s3_url }}"
#cinder_backup_s3_bucket: "{{ s3_bucket }}"
#cinder_backup_s3_access_key: "{{ s3_access_key }}"
#cinder_backup_s3_secret_key: "{{ s3_secret_key }}"

#######################
# Cloudkitty options
#######################
# Valid option is gnocchi
#cloudkitty_collector_backend: "gnocchi"
# Valid options are 'sqlalchemy' or 'influxdb'. The default value is
# 'influxdb', which matches the default in Cloudkitty since the Stein release.
# When the backend is "influxdb", we also enable Influxdb.
# Also, when using 'influxdb' as the backend, we trigger the configuration/use
# of Cloudkitty storage backend version 2.
#cloudkitty_storage_backend: "influxdb"

###################
# Designate options
###################
# Valid options are [ bind9 ]
#designate_backend: "bind9"
#designate_ns_record:
#  - "ns1.example.org"
# Valid options are [ '', redis ]
#designate_coordination_backend: "{{ 'redis' if enable_redis|bool else '' }}"

########################
# Nova - Compute Options
########################
#nova_backend_ceph: "no"

# Valid options are [ qemu, kvm, vmware ]
#nova_compute_virt_type: "kvm"

# The number of fake drivers per compute node
#num_nova_fake_per_node: 5

# The flag "nova_safety_upgrade" needs to be considered when
# "nova_enable_rolling_upgrade" is enabled. "nova_safety_upgrade" controls
# whether all nova services are stopped before rolling upgrade to the new
# version, for safety and availability.
# If "nova_safety_upgrade" is "yes", all nova services (except nova-compute)
# are stopped before the upgrade so that no API operations fail during it;
# if "no", they keep running.
#nova_safety_upgrade: "no"

# Valid options are [ none, novnc, spice ]
#nova_console: "novnc"

##############################
# Neutron - networking options
##############################
# Enable distributed floating ip for OVN deployments
#neutron_ovn_distributed_fip: "no"

# Enable DHCP agent(s) to use with OVN
#neutron_ovn_dhcp_agent: "no"

#############################
# Horizon - Dashboard Options
#############################
#horizon_backend_database: false

#############################
# Ironic options
#############################
# dnsmasq bind interface for Ironic Inspector; by default it is network_interface
#ironic_dnsmasq_interface: "{{ network_interface }}"
# The following value must be set when enabling ironic; the value format is a
# list of ranges - at least one must be configured, for example:
#   - range: 192.168.0.10,192.168.0.100
# See Kolla Ansible docs on Ironic for details.
#ironic_dnsmasq_dhcp_ranges:
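# A minimal sketch with a single range (the addresses are illustrative only):
#ironic_dnsmasq_dhcp_ranges:
#  - range: 192.168.0.10,192.168.0.100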
# PXE bootloader file for Ironic Inspector, relative to /var/lib/ironic/tftpboot.
#ironic_dnsmasq_boot_file: "pxelinux.0"

# Configure the ironic upgrade option. Kolla currently supports two upgrade
# strategies for ironic: legacy_upgrade and rolling_upgrade.
# Setting "ironic_enable_rolling_upgrade: yes" selects the rolling upgrade;
# setting it to "no" selects the legacy upgrade.
# Rolling upgrade is enabled by default.
#ironic_enable_rolling_upgrade: "yes"

# List of extra kernel parameters passed to the kernel used during inspection
#ironic_inspector_kernel_cmdline_extras: []
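# For example (these parameters are illustrative, not defaults):
#ironic_inspector_kernel_cmdline_extras: ['ipa-lldp-timeout=90.0', 'ipa-collect-lldp=1']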

# Valid options are [ '', redis, etcd ]
#ironic_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}"

######################################
# Manila - Shared File Systems Options
######################################
# HNAS backend configuration
#hnas_ip:
#hnas_user:
#hnas_password:
#hnas_evs_id:
#hnas_evs_ip:
#hnas_file_system_name:

# CephFS backend configuration.
# External Ceph FS name.
# By default this is empty to allow Manila to auto-find the first FS available.
#manila_cephfs_filesystem_name:

# Gluster backend configuration
# The GlusterFS share layout can be 'directory' or 'volume'.
# The default share layout is 'volume'.
#manila_glusterfs_share_layout:
# The default NFS server type is 'Gluster'.
#manila_glusterfs_nfs_server_type:

# Volume layout options (required)
# If the GlusterFS server requires remote ssh, then you need to fill in
# 'manila_glusterfs_servers', the ssh user 'manila_glusterfs_ssh_user', and
# the ssh password 'manila_glusterfs_ssh_password'.
# 'manila_glusterfs_servers' is a list of GlusterFS servers which provide
# volumes, for example:
#   - 10.0.1.1
#   - 10.0.1.2
#manila_glusterfs_servers:
#manila_glusterfs_ssh_user:
#manila_glusterfs_ssh_password:
# Used to filter GlusterFS volumes for share creation.
# Examples: manila-share-volume-\\d+$, manila-share-volume-#{size}G-\\d+$;
#manila_glusterfs_volume_pattern:

# Directory layout options
# If the GlusterFS server is on the local node of the manila share,
# the target is of the format <glustervolserver>:/<glustervolid>
# If the GlusterFS server is on a remote node,
# the target is of the format <username>@<glustervolserver>:/<glustervolid>,
# and 'manila_glusterfs_ssh_password' must be defined.
#manila_glusterfs_target:
#manila_glusterfs_mount_point_base:

################################
# Swift - Object Storage Options
################################
# Swift expects block devices to be available for storage. Two types of storage
# are supported: 1 - storage device with a special partition name and filesystem
# label, 2 - unpartitioned disk with a filesystem. The label of this filesystem
# is used to detect the disk which Swift will be using.

# Swift supports two matching modes, valid options are [ prefix, strict ]
#swift_devices_match_mode: "strict"

# This parameter defines the matching pattern: if "strict" mode is selected
# for swift_devices_match_mode, then swift_devices_name should specify the name
# of the special swift partition, for example "KOLLA_SWIFT_DATA"; if "prefix"
# mode is selected, then swift_devices_name should specify a pattern which
# matches the filesystem labels prepared for swift.
#swift_devices_name: "KOLLA_SWIFT_DATA"

# Configure the swift upgrade option. Kolla currently supports two upgrade
# strategies for swift: legacy_upgrade and rolling_upgrade.
# Setting "swift_enable_rolling_upgrade: yes" selects the rolling upgrade;
# setting it to "no" selects the legacy upgrade.
# Rolling upgrade is enabled by default.
#swift_enable_rolling_upgrade: "yes"

###################################
# VMware - OpenStack VMware support
###################################
#vmware_vcenter_host_ip:
#vmware_vcenter_host_username:
#vmware_vcenter_host_password:
#vmware_datastore_name:
#vmware_vcenter_name:
#vmware_vcenter_cluster_name:

############
# Prometheus
############
#enable_prometheus_server: "{{ enable_prometheus | bool }}"
#enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
#enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
#enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
#enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
#enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}"
#enable_prometheus_memcached: "{{ enable_prometheus | bool }}"
#enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
#enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}"
#enable_prometheus_ceph_mgr_exporter: "no"
#enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
#enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
#enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
#enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"
#enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}"
#enable_prometheus_msteams: "no"

# The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager).
# prometheus_external_labels:
#   <labelname>: <labelvalue>
# By default, prometheus_external_labels is empty
#prometheus_external_labels:
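# A hypothetical example attaching a region label to everything this
# deployment exports:
#prometheus_external_labels:
#  region: "{{ openstack_region_name }}"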

# List of extra parameters passed to prometheus. Add as many as needed.
#prometheus_cmdline_extras:

# List of extra parameters passed to cAdvisor. By default system cgroups
# and container labels are not exposed to reduce time series cardinality.
#prometheus_cadvisor_cmdline_extras: "--docker_only --store_container_labels=false --disable_metrics=percpu,referenced_memory,cpu_topology,resctrl,udp,advtcp,sched,hugetlb,memory_numa,tcp,process"

# Extra parameters passed to Prometheus exporters.
#prometheus_blackbox_exporter_cmdline_extras:
#prometheus_elasticsearch_exporter_cmdline_extras:
#prometheus_memcached_exporter_cmdline_extras:
#prometheus_mysqld_exporter_cmdline_extras:
#prometheus_node_exporter_cmdline_extras:
#prometheus_openstack_exporter_cmdline_extras:

# Example of setting endpoints for prometheus ceph mgr exporter.
# You should add all ceph mgr's in your external ceph deployment.
#prometheus_ceph_mgr_exporter_endpoints:
#  - host1:port1
#  - host2:port2

##########
# Telegraf
##########
# Configure telegraf to use the docker daemon itself as an input for
# telemetry data.
#telegraf_enable_docker_input: "no"

##########################################
# Octavia - openstack loadbalancer Options
##########################################
# Whether to run Kolla Ansible's automatic configuration for Octavia.
# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no`
# and keep your other Octavia config like before.
#octavia_auto_configure: yes

# Octavia amphora flavor.
# See os_nova_flavor for details. Supported parameters:
# - flavorid (optional)
# - is_public (optional)
# - name
# - vcpus
# - ram
# - disk
# - ephemeral (optional)
# - swap (optional)
# - extra_specs (optional)
#octavia_amp_flavor:
#  name: "amphora"
#  is_public: no
#  vcpus: 1
#  ram: 1024
#  disk: 5

# Octavia security groups. lb-mgmt-sec-grp is for amphorae.
#octavia_amp_security_groups:
#  mgmt-sec-grp:
#    name: "lb-mgmt-sec-grp"
#    rules:
#      - protocol: icmp
#      - protocol: tcp
#        src_port: 22
#        dst_port: 22
#      - protocol: tcp
#        src_port: "{{ octavia_amp_listen_port }}"
#        dst_port: "{{ octavia_amp_listen_port }}"

# Octavia management network.
# See os_network and os_subnet for details. Supported parameters:
# - external (optional)
# - mtu (optional)
# - name
# - provider_network_type (optional)
# - provider_physical_network (optional)
# - provider_segmentation_id (optional)
# - shared (optional)
# - subnet
# The subnet parameter has the following supported parameters:
# - allocation_pool_start (optional)
# - allocation_pool_end (optional)
# - cidr
# - enable_dhcp (optional)
# - gateway_ip (optional)
# - name
# - no_gateway_ip (optional)
# - ip_version (optional)
# - ipv6_address_mode (optional)
# - ipv6_ra_mode (optional)
#octavia_amp_network:
#  name: lb-mgmt-net
#  shared: false
#  subnet:
#    name: lb-mgmt-subnet
#    cidr: "{{ octavia_amp_network_cidr }}"
#    no_gateway_ip: yes
#    enable_dhcp: yes

# Octavia management network subnet CIDR.
#octavia_amp_network_cidr: 10.1.0.0/24

#octavia_amp_image_tag: "amphora"

# Load balancer topology options are [ SINGLE, ACTIVE_STANDBY ]
#octavia_loadbalancer_topology: "SINGLE"

# The following variables are ignored as long as `octavia_auto_configure` is set to `yes`.
#octavia_amp_image_owner_id:
#octavia_amp_boot_network_list:
#octavia_amp_secgroup_list:
#octavia_amp_flavor_id:

####################
# Corosync options
####################

# This is a UDP port.
#hacluster_corosync_port: 5405

##############
# etcd options
##############
# If `etcd_remove_deleted_members` is enabled, Kolla Ansible will automatically
# remove etcd members from the cluster that are no longer in the inventory.
#etcd_remove_deleted_members: "no"