Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Consolidate IRIS and stackhpc branches #160

Open
wants to merge 6 commits into
base: stackhpc/victoria
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 56 additions & 0 deletions ansible/group_vars/all.yml
Original file line number Diff line number Diff line change
Expand Up @@ -285,6 +285,12 @@ barbican_api_listen_port: "{{ barbican_api_port }}"

blazar_api_port: "1234"

caso_tcp_output_port: "24224"

ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}"
ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}"
ceph_rgw_port: "6780"

cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
cinder_api_port: "8776"
Expand Down Expand Up @@ -447,6 +453,7 @@ placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"

prometheus_port: "9091"
prometheus_libvirt_exporter_port: "9177"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
Expand Down Expand Up @@ -584,6 +591,7 @@ enable_glance: "{{ enable_openstack_core | bool }}"
enable_haproxy: "yes"
enable_keepalived: "{{ enable_haproxy | bool }}"
enable_keystone: "{{ enable_openstack_core | bool }}"
enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "{{ enable_openstack_core | bool }}"
Expand All @@ -600,10 +608,13 @@ enable_haproxy_memcached: "no"
enable_aodh: "no"
enable_barbican: "no"
enable_blazar: "no"
enable_caso: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_cells: "no"
enable_central_logging: "no"
enable_ceph_rgw: "no"
enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
enable_chrony: "yes"
enable_cinder: "no"
enable_cinder_backup: "yes"
Expand Down Expand Up @@ -1053,6 +1064,7 @@ enable_nova_horizon_policy_file: "{{ enable_nova }}"
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"

horizon_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port }}"
horizon_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_external | bool else horizon_port }}"

#################
# Qinling options
Expand Down Expand Up @@ -1112,6 +1124,7 @@ use_common_mariadb_user: "no"
# Prometheus
############
enable_prometheus_server: "{{ enable_prometheus | bool }}"
enable_prometheus_libvirt_exporter: "no"
enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
Expand All @@ -1125,6 +1138,7 @@ enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"

prometheus_alertmanager_user: "admin"
prometheus_libvirt_exporter_interval: "60s"
prometheus_openstack_exporter_interval: "60s"
prometheus_openstack_exporter_timeout: "10s"
prometheus_elasticsearch_exporter_interval: "60s"
Expand Down Expand Up @@ -1207,3 +1221,45 @@ swift_public_endpoint: "{{ public_protocol }}://{{ swift_external_fqdn | put_add
octavia_admin_endpoint: "{{ admin_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
octavia_internal_endpoint: "{{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
octavia_public_endpoint: "{{ public_protocol }}://{{ octavia_external_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"

###################################
# Identity federation configuration
###################################
# Here we configure all of the IdP metadata that will be required to implement identity federation with OpenStack Keystone.
# We require the administrator to enter the following metadata:
# * name (internal name of the IdP in Keystone);
# * openstack_domain (the domain in Keystone that the IdP belongs to)
# * protocol (the federated protocol used by the IdP; e.g. openid or saml);
# * identifier (the IdP identifier; e.g. https://accounts.google.com);
# * public_name (the public name that will be shown for users in Horizon);
# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration);
# * metadata_folder (folder containing all the identity provider metadata as jsons named as the identifier without the protocol
# and with '/' escaped as %2F followed with '.provider' or '.client' or '.conf'; e.g. accounts.google.com.provider; PS, all .conf,
# .provider and .client jsons must be in the folder, even if you don't override any conf in the .conf json; you must leave it as an empty json '{}');
# * certificate_file (the path to the Identity Provider certificate file, the file must be named as 'certificate-key-id.pem';
# e.g. LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; You can find the key-id in the Identity provider '.well-known/openid-configuration' jwks_uri as kid);
#
# The IdP metadata is to be presented to Kolla-Ansible as in the following example:
# keystone_identity_providers:
# - name: "myidp1"
# openstack_domain: "my-domain"
# protocol: "openid"
# identifier: "https://accounts.google.com"
# public_name: "Authenticate via myidp1"
# attribute_mapping: "mappingId1"
# metadata_folder: "path/to/metadata/folder"
# certificate_file: "path/to/certificate/file.pem"
#
# We also need to configure the attribute mapping that is used by IdPs.
# The configuration of attribute mappings is a list of objects, where each
# object must have a 'name' (that maps to the 'attribute_mapping' of the IdP
# object in the IdPs set), and a 'file' with a fully qualified path to a mapping file.
# keystone_identity_mappings:
# - name: "mappingId1"
# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1"
# - name: "mappingId2"
# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2"
# - name: "mappingId3"
# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3"
keystone_identity_providers: []
keystone_identity_mappings: []
7 changes: 7 additions & 0 deletions ansible/inventory/all-in-one
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,10 @@ localhost ansible_connection=local
[deployment]
localhost ansible_connection=local

# Caso
[caso:children]
monitoring

# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.

Expand Down Expand Up @@ -728,6 +732,9 @@ compute
network
storage

[prometheus-libvirt-exporter:children]
compute

[prometheus-mysqld-exporter:children]
mariadb

Expand Down
7 changes: 7 additions & 0 deletions ansible/inventory/multinode
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,10 @@ monitoring
[tls-backend:children]
control

# Caso
[caso:children]
monitoring

# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.

Expand Down Expand Up @@ -746,6 +750,9 @@ compute
network
storage

[prometheus-libvirt-exporter:children]
compute

[prometheus-mysqld-exporter:children]
mariadb

Expand Down
40 changes: 40 additions & 0 deletions ansible/roles/caso/defaults/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
---
# Service definition consumed by the config/deploy tasks and the restart
# handler of this role.
caso_services:
  caso:
    container_name: caso
    group: caso
    enabled: true
    image: "{{ caso_image_full }}"
    volumes:
      - "{{ node_config_directory }}/caso/:{{ container_config_directory }}/"
      - "/etc/localtime:/etc/localtime:ro"
      - "caso_spool:/var/lib/caso"
      - "caso_ssm_outgoing:/var/spool/apel/outgoing/openstack"
      - "kolla_logs:/var/log/kolla/"
    dimensions: "{{ caso_dimensions }}"

####################
# caso
####################
caso_site_name: "kolla_caso"
# Projects for which accounting records are extracted (and for which the
# cASO Keystone user is registered).
caso_projects: []
caso_logging_debug: "{{ openstack_logging_debug }}"
caso_log_dir: "/var/log/kolla/caso"
# Cron schedule for the caso-extract job: minute hour dom month dow.
caso_cron_table: "10 * * * *"
caso_messengers:
  - caso.messenger.logstash.LogstashMessenger

####################
# OpenStack
####################
caso_openstack_auth: "{{ openstack_auth }}"
caso_keystone_user: "caso"

####################
# Docker
####################
caso_install_type: "{{ kolla_install_type }}"
caso_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ caso_install_type }}-caso"
# NOTE: default to openstack_tag (not openstack_release) so the image tag
# honours any configured openstack_tag_suffix, like other kolla-ansible roles.
caso_tag: "{{ openstack_tag }}"
caso_image_full: "{{ caso_image }}:{{ caso_tag }}"
caso_dimensions: "{{ default_container_dimensions }}"
24 changes: 24 additions & 0 deletions ansible/roles/caso/handlers/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
---
# Recreate (or restart) the caso container when its configuration or the
# container definition has changed.  The registered results referenced in
# the 'when' clause (caso_config_jsons, caso_conf, caso_vom_conf,
# caso_crontab, check_caso_containers) are produced by tasks/config.yml.
- name: Restart caso container
  vars:
    service_name: "caso"
    service: "{{ caso_services[service_name] }}"
    # Select this service's entry from the with_dict-looped register results.
    config_json: "{{ caso_config_jsons.results|selectattr('item.key', 'equalto', service_name)|first }}"
    caso_container: "{{ check_caso_containers.results|selectattr('item.key', 'equalto', service_name)|first }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    volumes: "{{ service.volumes }}"
    dimensions: "{{ service.dimensions }}"
  when:
    # Never restart containers when only (re)generating configuration.
    - kolla_action != "config"
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
    # Restart only if at least one config file or the container changed.
    - config_json.changed | bool
      or caso_conf.changed | bool
      or caso_vom_conf.changed | bool
      or caso_crontab.changed | bool
      or caso_container.changed | bool
3 changes: 3 additions & 0 deletions ansible/roles/caso/meta/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
---
# NOTE: kolla-ansible dropped per-role meta dependencies on the common role
# in Victoria; the common role is applied from the playbook level instead,
# so no dependencies are declared here.
dependencies: []

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We dropped these role deps in victoria.

1 change: 1 addition & 0 deletions ansible/roles/caso/tasks/check.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
---
# No post-deploy smoke tests are implemented for cASO yet.
90 changes: 90 additions & 0 deletions ansible/roles/caso/tasks/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
---
# Generate all cASO configuration under node_config_directory and detect
# whether the running container needs to be recreated.  Every task loops
# over caso_services so it follows the standard kolla-ansible role shape.
- name: Ensuring config directories exist
  file:
    path: "{{ node_config_directory }}/{{ item.key }}"
    state: "directory"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
    mode: "0770"
  become: true
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"

- name: Copying over config.json files for services
  template:
    src: "{{ item.key }}.json.j2"
    dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
    mode: "0660"
  become: true
  register: caso_config_jsons
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"
  notify:
    - Restart caso container

- name: Copying over caso config
  merge_configs:
    sources:
      # Role default, then operator-wide override, then per-host override.
      - "{{ role_path }}/templates/caso.conf.j2"
      # NOTE: fixed a doubled slash ("//caso/") in the original path.
      - "{{ node_custom_config }}/caso/caso.conf"
      - "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/caso.conf"
    dest: "{{ node_config_directory }}/{{ item.key }}/caso.conf"
    mode: "0660"
  become: true
  register: caso_conf
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"
  notify:
    - Restart caso container

- name: Copying over caso crontab
  template:
    src: "{{ role_path }}/templates/caso.crontab.j2"
    dest: "{{ node_config_directory }}/{{ item.key }}/caso.crontab"
    mode: "0660"
  become: true
  register: caso_crontab
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"
  notify:
    - Restart caso container

- name: Copying over caso voms file
  template:
    src: "{{ role_path }}/templates/voms.json.j2"
    dest: "{{ node_config_directory }}/{{ item.key }}/voms.json"
    mode: "0660"
  become: true
  register: caso_vom_conf
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"
  notify:
    - Restart caso container

# Compare the running container against the desired definition so the
# restart handler fires when the image, volumes or dimensions change.
- name: Check caso containers
  become: true
  kolla_docker:
    action: "compare_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
    image: "{{ item.value.image }}"
    volumes: "{{ item.value.volumes }}"
    dimensions: "{{ item.value.dimensions }}"
  register: check_caso_containers
  when:
    - kolla_action != "config"
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"
  notify:
    - Restart caso container
12 changes: 12 additions & 0 deletions ansible/roles/caso/tasks/deploy.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
---
# Deploy flow: register the Keystone user, generate configuration, then
# flush handlers so the container is (re)started before the final check.
- include_tasks: register.yml
  when: inventory_hostname in groups['caso']

- include_tasks: config.yml
  when: inventory_hostname in groups['caso']

- name: Flush handlers
  meta: flush_handlers

- include_tasks: check.yml
  when: inventory_hostname in groups['caso']
2 changes: 2 additions & 0 deletions ansible/roles/caso/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
---
# Dispatch to the task file matching the requested kolla action
# (deploy, reconfigure, upgrade, pull, ...).
- include_tasks: "{{ kolla_action }}.yml"
1 change: 1 addition & 0 deletions ansible/roles/caso/tasks/precheck.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
---
# No prechecks are implemented for cASO yet.
11 changes: 11 additions & 0 deletions ansible/roles/caso/tasks/pull.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
---
# Pre-fetch the cASO image on every host that will run the service.
- name: Pulling caso images
  become: true
  kolla_docker:
    action: "pull_image"
    common_options: "{{ docker_common_options }}"
    image: "{{ item.value.image }}"
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ caso_services }}"
2 changes: 2 additions & 0 deletions ansible/roles/caso/tasks/reconfigure.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
---
# Reconfigure is identical to deploy for this role.
- include_tasks: deploy.yml
14 changes: 14 additions & 0 deletions ansible/roles/caso/tasks/register.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
---
# NOTE(review): the kolla_keystone_user module was removed from the
# kolla-toolbox image in Train, so user registration must go through the
# service-ks-register role, as other kolla-ansible roles do.
- name: Configure cASO user
  include_role:
    name: service-ks-register
  vars:
    service_ks_register_auth: "{{ caso_openstack_auth }}"
    # The original task granted the admin role in every project listed in
    # caso_projects; apply the role once per project.
    # TODO(review): confirm the variable name service-ks-register uses to
    # select the target project on this branch.
    service_ks_register_project: "{{ project }}"
    service_ks_register_users:
      - name: "{{ caso_keystone_user }}"
        password: "{{ caso_keystone_password }}"
        role: admin
  loop: "{{ caso_projects }}"
  loop_control:
    loop_var: project
  run_once: True
5 changes: 5 additions & 0 deletions ansible/roles/caso/tasks/upgrade.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
- include_tasks: config.yml

- name: Flush handlers
meta: flush_handlers
23 changes: 23 additions & 0 deletions ansible/roles/caso/templates/caso.conf.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
{# cASO accounting extractor configuration (rendered into caso.conf). #}
[DEFAULT]
messengers = {{ caso_messengers|join(', ') }}
site_name = {{ caso_site_name }}
projects = {{ caso_projects|join(', ') }}
debug = {{ caso_logging_debug }}
log_file = caso.log
log_dir = {{ caso_log_dir }}
log_rotation_type = none
spooldir = /var/lib/caso

[keystone_auth]
auth_type = password
{# NOTE: use keystone_admin_url rather than rebuilding the URL from
   admin_protocol/kolla_internal_fqdn/keystone_admin_port by hand. #}
auth_url = {{ keystone_admin_url }}
project_domain_id = {{ default_project_domain_id }}
username = {{ caso_keystone_user }}
user_domain_id = {{ default_user_domain_id }}
password = {{ caso_keystone_password }}

[logstash]
port = {{ caso_tcp_output_port }}

[ssm]
output_path = /var/spool/apel/outgoing/openstack
1 change: 1 addition & 0 deletions ansible/roles/caso/templates/caso.crontab.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{#- Cron entry that periodically runs the cASO extractor; the schedule is
    operator-configurable via caso_cron_table. -#}
{{ caso_cron_table }} caso-extract --config-file /etc/caso/caso.conf
Loading