diff --git a/docs/PROVISIONING_RHV.adoc b/docs/PROVISIONING_RHV.adoc
new file mode 100644
index 00000000..fb1c34e2
--- /dev/null
+++ b/docs/PROVISIONING_RHV.adoc
@@ -0,0 +1,92 @@
+= OpenShift on RHV using CASL
+
+TODO: Configure and test OpenShift Container Storage portions
+
+== Local Setup (one time only)
+
+NOTE: These steps serve as an example and may differ in your environment.
+
+Before following this guide, you'll need the following:
+
+* Access to the RHV Manager with the proper policies to create resources (see details below)
+* Docker installed
+ ** RHEL/CentOS: `yum install -y docker`
+ ** Fedora: `dnf install -y docker`
+ ** **NOTE:** If you plan to run docker as yourself (non-root), your username must be added to the `docker` user group.
+* Ansible 2.5 or later installed
+ ** link:https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html[See Installation Guide]
+* Clone the casl-ansible repository:
+
+[source,bash]
+----
+cd ~/src/
+git clone https://github.com/redhat-cop/casl-ansible.git
+----
+
+* Run `ansible-galaxy` to pull in the necessary requirements for the CASL provisioning of OpenShift on RHV:
+
+NOTE: The target directory (`galaxy`) is **important**, as the playbooks expect to source roles and playbooks from that location.
+
+[source,bash]
+----
+cd ~/src/casl-ansible
+ansible-galaxy install -r casl-requirements.yml -p galaxy
+----
+
+== RHV Setup
+
+The following needs to be set up in your RHV Manager before provisioning:
+
+* Access to the `admin@internal` account or a similarly privileged account.
+* A RHEL template must be created
+** The Satellite CA certificate (if using Satellite) should be installed in the template
+** A user with sudo access and a known password
+* A storage domain must be available
+* DNS entries for all OCP nodes, the public and private cluster URLs, and the wildcard application URL must be in place PRIOR to running this playbook
+** During the run, a list of all expected DNS entries will be provided
+** If any of the required DNS entries are not in place, the role will fail
+
+Cool! Now you're ready to provision OpenShift clusters on RHV.
+
+== Provision an OpenShift Cluster
+
+As an example, we'll provision the `sample.rhv.example.com` cluster defined in the `~/src/casl-ansible/inventory` directory.
+
+NOTE: Unless you already have a working inventory, it is recommended that you make a copy of the above-mentioned sample inventory and keep it somewhere outside of the casl-ansible directory. This allows you to update/remove/change your casl-ansible source directory without losing your inventory. Also note that it may take some effort to get the inventory just right, so it is very beneficial to keep it around for future use without having to redo everything.
+
+The following is an example of how the `sample.rhv.example.com` inventory can be used:
+
+1. Update the variable settings in `group_vars/all.yml` with your environment-specific settings (see the illustrative example below).
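+
+A minimal sketch of the environment-specific portion of `group_vars/all.yml` is shown below. The variable names come from the sample inventory in this repository; the values are illustrative placeholders that you must replace with the details of your own RHV environment and DNS domain:
+
+[source,yaml]
+----
+dns_domain: "rhv.example.com"
+dns_nameservers:
+- 192.168.2.53
+env_id: "sample"
+default_name_prefix: "ocp"
+default_rhv_cluster: "Default"
+default_template_name: "rhel-server-7.6-template"
+default_network: "ovirtmgmt"
+default_network_netmask: 255.255.255.0
+default_network_gateway: 192.168.2.1
+default_network_nic_name: eth0
+default_cpu_count: 4
+default_memory: 16384Mib
+default_root_volume_size: 40Gib
+default_rhv_storage_domain: data-store-01
+default_docker_volume_size: 100Gib
+default_etcd_volume_size: 40Gib
+default_origin_volume_size: 40Gib
+----
+
+2. Run the `provision.yml` playbook via the `casl-ansible` container image, adjusting the mounts and credentials for your environment, for example: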
+ +[source,bash] +---- +docker run': +docker run -u `id -u` \ + -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \ + -v :/tmp/src:Z \ + -e INVENTORY_DIR=/casl-ansible/inventory/sample.rhv.example.com.d/inventory \ + -e PLAYBOOK_FILE=/casl-ansible/playbooks/openshift/rhv/provision.yml \ + -e OVIRT_URL='https://rhvm.example.com/ovirt-engine/api/v4' \ + -e OVIRT_USERNAME='admin@internal' \ + -e OVIRT_PASSWORD='rhvm_password' \ + -e OVIRT_CA='/casl-ansible/ca.crt' \ + -e ANSIBLE_USER='cloud-user' \ + -e ANSIBLE_PASS='template_password' \ + -i redhat-cop/casl-ansible + +---- + +== Updating a Cluster + +Once provisioned, a cluster may be adjusted/reconfigured as needed by updating the inventory and re-running the `end-to-end.yml` playbook. + +== Scaling Up and Down + +A cluster's Infra and App nodes may be scaled up and down by editing the following parameters in the `all.yml` file and then re-running the `end-to-end.yml` playbook as shown above. + +[source,yaml] +---- +appnodes: + count: +infranodes: + count: +---- diff --git a/inventory/sample.rhv.example.com.d/inventory/group_vars/OSEv3.yml b/inventory/sample.rhv.example.com.d/inventory/group_vars/OSEv3.yml new file mode 100644 index 00000000..4b53b23a --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/group_vars/OSEv3.yml @@ -0,0 +1,111 @@ +--- + +# The username Ansible should use to access the instances with +ansible_user: "{{ lookup('env','ANSIBLE_USER') }}" +ansible_password: "{{ lookup('env','ANSIBLE_PASS') }}" + +# Should Ansible use "become" to gain elevated privileges (i.e.: root) +ansible_become: true + +# CNS relative vars - Uncommented to automatically deploy CNS - 'cns_deploy' from all.yml must be 'true' in that case +# openshift_storage_glusterfs_namespace: glusterfs +# openshift_storage_glusterfs_name: cns + +openshift_clusterid: "{{ env_id }}" + + +# OpenShift Specific Configuration Options +# - Check the official OpenShift documentation for more details +deployment_type: openshift-enterprise +openshift_deployment_type: openshift-enterprise +containerized: false + +### OCP version to install +openshift_release: v3.11 + +osm_default_node_selector: 'node-role.kubernetes.io/compute=true' +osm_use_cockpit: true +osm_cockpit_plugins: +- 'cockpit-kubernetes' + +# Enable the Multi-Tenant plugin +os_sdn_network_plugin_name: 'redhat/openshift-ovs-multitenant' + +# OpenShift FQDNs, DNS, App domain specific configurations +openshift_master_cluster_method: native +openshift_master_default_subdomain: "apps.{{ env_id }}.{{ dns_domain }}" +openshift_master_cluster_hostname: "console.internal.{{ env_id }}.{{ dns_domain }}" +openshift_master_cluster_public_hostname: "console.{{ env_id }}.{{ dns_domain }}" + +# Registry URL & Credentials +# For more info: https://access.redhat.com/terms-based-registry/ +oreg_url: 'registry.redhat.io/openshift3/ose-${component}:${version}' +#oreg_auth_user: "{{ lookup('env', 'OREG_AUTH_USER' )}}" +#oreg_auth_password: "{{ lookup('env', 'OREG_AUTH_PASSWORD' )}}" + +# Deploy Logging with dynamic storage +#openshift_logging_install_logging: false +#openshift_logging_es_pvc_dynamic: true +#openshift_logging_es_pvc_size: 40G +#openshift_logging_curator_default_days: 1 + +# Deploy Metrics with dynamic storage +#openshift_metrics_install_metrics: false +#openshift_metrics_cassandra_storage_type: dynamic +#openshift_metrics_cassandra_pvc_size: 40G +#openshift_metrics_duration: 2 + +# HTPASSWD Identity Provider +# - update to other types of auth providers if necessary (i.e: LDAP, OAuth, ...) 
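+# For reference only: if you want an LDAP provider instead, an entry along
+# these lines (illustrative hostname and attribute mapping) would replace the
+# htpasswd provider defined below:
+#openshift_master_identity_providers:
+#  - 'name': 'ldap_auth'
+#    'challenge': 'true'
+#    'login': 'true'
+#    'kind': 'LDAPPasswordIdentityProvider'
+#    'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}
+#    'insecure': 'true'
+#    'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'
+#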
+openshift_master_identity_providers: + - 'name': 'htpasswd_auth' + 'login': 'true' + 'challenge': 'true' + 'kind': 'HTPasswdPasswordIdentityProvider' + +# Uncommented to automatically create a set of test users with the above +# HTPASSWD Identity Provider +#create_users: +# num_users: 5 +# prefix: 'rdu-user' +# passwd_file: '/etc/origin/master/htpasswd' +# password: 'rdu-sample' + +# OpenShift Node specific parameters +openshift_node_groups: +- name: node-config-master + labels: + - 'node-role.kubernetes.io/master=true' + edits: + - key: kubeletArguments.kube-reserved + value: + - 'cpu={{ ansible_processor_vcpus * 50 }}m' + - 'memory={{ ansible_processor_vcpus * 50 }}M' + - key: kubeletArguments.system-reserved + value: + - 'cpu={{ ansible_processor_vcpus * 50 }}m' + - 'memory={{ ansible_processor_vcpus * 100 }}M' +- name: node-config-infra + labels: + - 'node-role.kubernetes.io/infra=true' + edits: + - key: kubeletArguments.kube-reserved + value: + - 'cpu={{ ansible_processor_vcpus * 50 }}m' + - 'memory={{ ansible_processor_vcpus * 50 }}M' + - key: kubeletArguments.system-reserved + value: + - 'cpu={{ ansible_processor_vcpus * 50 }}m' + - 'memory={{ ansible_processor_vcpus * 100 }}M' +- name: node-config-compute + labels: + - 'node-role.kubernetes.io/compute=true' + edits: + - key: kubeletArguments.kube-reserved + value: + - 'cpu={{ ansible_processor_vcpus * 50 }}m' + - 'memory={{ ansible_processor_vcpus * 50 }}M' + - key: kubeletArguments.system-reserved + value: + - 'cpu={{ ansible_processor_vcpus * 50 }}m' + - 'memory={{ ansible_processor_vcpus * 100 }}M' diff --git a/inventory/sample.rhv.example.com.d/inventory/group_vars/all.yml b/inventory/sample.rhv.example.com.d/inventory/group_vars/all.yml new file mode 100644 index 00000000..84dd3394 --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/group_vars/all.yml @@ -0,0 +1,217 @@ +--- + +# 'hosting_infrastructure' is used to drive the correct behavior based +# on the hosting infrastructure, cloud provider, etc. 
Valid values are: +# - 'openstack' +# - 'rhv' +# - 'azure' (Coming Soon) +# - 'gcp' +hosting_infrastructure: rhv + +# DNS configurations +# the 'dns_domain' will be used as the base domain for the deployment +# the 'dns_nameservers' is a list of DNS resolvers the cluster should use +dns_domain: "" +dns_nameservers: +- + +# Cluster Environment ID to uniquely identify the environment +env_id: "" +default_name_prefix: + +# Default RHV environment settings to use +# Update the following values for your environment +default_rhv_cluster: +default_template_name: +default_network: +default_network_netmask: +default_network_gateway: +default_network_nic_name: + +# Defaults for CPU and memory, you will almost certainly want to customize these below under each type (master/infra/appnode) +default_cpu_count: +default_memory: + +default_root_volume_size: +default_rhv_storage_domain: +default_docker_volume_size: +default_etcd_volume_size: +default_origin_volume_size: +default_cns_volume_size: '' +default_gluster_storage_domain: '' + +# Define custom Master(s) API and Console Port +# Comment out these entries to use a different port for both the Web Console and OpenShift API - Default port is 8443 +#openshift_master_api_port: 443 +#openshift_master_console_port: 443 + +# Define custom console and apps public DNS Name +# NOTE: These values need to be a subset of {{ dns_domain }} +openshift_master_default_subdomain: "apps.{{ env_id }}.{{ dns_domain }}" +openshift_master_cluster_hostname: "console.internal.{{ env_id }}.{{ dns_domain }}" +openshift_master_cluster_public_hostname: "console.{{ env_id }}.{{ dns_domain }}" + +# Define infrastructure skeleton +# These try to leverage defaults to simplify the initial deployment. For more granular control change the settings below +cloud_infrastructure: + masters: + count: + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + etcd_storage_domain: "{{ default_rhv_storage_domain }}" + etcd_volume_size: "{{ default_etcd_volume_size }}" + etcdnodes: + count: + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + etcd_storage_domain: "{{ default_rhv_storage_domain }}" + etcd_volume_size: "{{ default_etcd_volume_size }}" + appnodes: + count: + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + 
network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + origin_storage_domain: "{{ default_rhv_storage_domain }}" + origin_volume_size: "{{ default_etcd_volume_size }}" + infranodes: + count: + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + origin_storage_domain: "{{ default_rhv_storage_domain }}" + origin_volume_size: "{{ default_etcd_volume_size }}" + cnsnodes: + count: + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + gluster_storage_domain: "{{ default_gluster_storage_domain }}" + gluster_volume_size: "{{ default_cns_volume_size }}" + + +## Temporary set for Docker and Gluster Storage block device names +## NOTE: do not modify this info. Will be handle on a handle on a different way in the future +docker_storage_block_device: '/dev/sdb' +docker_storage_mount_point: '/var/lib/containers/docker' +cns_node_glusterfs_volume: '/dev/sdb' + +# Specify the version of docker to use +#docker_version: "1.12.*" + +# This example uses ENV variables (recommended), but the values +# can also be specified here +ovirt_url: "{{ lookup('env','OVIRT_URL') }}" +ovirt_username: "{{ lookup('env','OVIRT_USERNAME') }}" +ovirt_password: "{{ lookup('env','OVIRT_PASSWORD') }}" +ovirt_ca_file: "{{ lookup('env','OVIRT_CA') }}" + +# These are the security groups created when rhv_create_vpc is 'true'. Modify accordingly to your environment in case using existing VPC and SGs +# NOTE: The use of custom VPC is not supported yet +rhv_master_sgroups: ['ocp-ssh', 'ocp-master', 'ocp-app-node'] +rhv_etcd_sgroups: ['ocp-ssh', 'ocp-etcd', 'ocp-app-node'] +rhv_infra_node_sgroups: ['ocp-ssh', 'ocp-infra-node', 'ocp-app-node'] +rhv_app_node_sgroups: ['ocp-ssh', 'ocp-app-node'] +rhv_cns_node_sgroups: ['ocp-ssh', 'ocp-app-node', 'ocp-cns'] + +## These are the tag used to create different dynamic groups based on these names +## NOTE: modifying these default values will affect on how the different type of nodes are discovered and configured on the required groups. 
You will need to update `hosts` inventory file accordingly if these values are modified +group_masters_tag: masters_rhv +group_masters_etcd_tag: masters_etcd_rhv +group_etcd_nodes_tag: etcd_nodes_rhv +group_infra_nodes_tag: infra_nodes_rhv +group_app_nodes_tag: app_nodes_rhv +group_cns_nodes_tag: cns_nodes_rhv + +## These tags will define the labels your OCP Nodes will be assigned with as part of the OCP deployment process +labels_masters_tag: '{"region": "default"}' +labels_etcd_nodes_tag: '{"region": "primary"}' +labels_infra_nodes_tag: '{"region": "infra"}' +labels_app_nodes_tag: '{"region": "primary"}' +labels_cns_nodes_tag: '{"region": "primary"}' + + +# Subscription Management Details +rhsm_register: True +rhsm_repos: + - "rhel-7-server-rpms" + - "rhel-7-server-ose-3.11-rpms" + - "rhel-7-server-extras-rpms" + - "rhel-7-server-ansible-2.6-rpms" + +# Uncomment the following to use Red Hat Satellite: +#rhsm_server_hostname: 'sat-6.example.com' +#rhsm_org_id: 'CASL_ORG' +#rhsm_activationkey: 'casl-latest' + +# Uncomment the following to use RHSM username, password from environment variable: +#rhsm_username: "{{ lookup('env', 'RHSM_USER' )}}" +#rhsm_password: "{{ lookup('env', 'RHSM_PASSWD' )}}" + +# leave commented out if you want to `--auto-attach` a pool +#rhsm_pool: "{{ lookup('env', 'RHSM_POOL' )}}" + +# WARNING: By default the tools will update RPMs during provisioning. If any packages are +# updated, the host(s) will reboot to ensure the correct versions are in use. This may +# NOT be desirable during an consecutive runs to just apply minor changes. If you would +# like to avoid "surprise" reboots, make sure to uncomment the following variable. +# Do NOTE that a reboot should most likely happen on initial install, so it's important +# that this variable is commented out or set to `True` for initial runs. 
+#update_cluster_hosts: False + +# Uncomment the following `additional_list_of_packages_to_install` to list additional +# packages/RPMs to install during install +#additional_list_of_packages_to_install: +# - rpm-1 +# - rpm-2 diff --git a/inventory/sample.rhv.example.com.d/inventory/group_vars/app_hosts.yml b/inventory/sample.rhv.example.com.d/inventory/group_vars/app_hosts.yml new file mode 100644 index 00000000..eea66e84 --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/group_vars/app_hosts.yml @@ -0,0 +1,7 @@ +--- + +openshift_node_group_name: 'node-config-compute' + +openshift_node_open_ports: +- service: "prometheus node exporter" + port: "9100/tcp" diff --git a/inventory/sample.rhv.example.com.d/inventory/group_vars/glusterfs.yml b/inventory/sample.rhv.example.com.d/inventory/group_vars/glusterfs.yml new file mode 100644 index 00000000..0fa99a45 --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/group_vars/glusterfs.yml @@ -0,0 +1 @@ +glusterfs_devices: '[ "{{ cns_node_glusterfs_volume }}" ]' \ No newline at end of file diff --git a/inventory/sample.rhv.example.com.d/inventory/group_vars/infra_hosts.yml b/inventory/sample.rhv.example.com.d/inventory/group_vars/infra_hosts.yml new file mode 100644 index 00000000..945fc41d --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/group_vars/infra_hosts.yml @@ -0,0 +1,10 @@ +--- + +openshift_node_group_name: 'node-config-infra' + +openshift_node_open_ports: +- service: "router stats port" + port: "1936/tcp" +- service: "prometheus node exporter" + port: "9100/tcp" + diff --git a/inventory/sample.rhv.example.com.d/inventory/group_vars/masters.yml b/inventory/sample.rhv.example.com.d/inventory/group_vars/masters.yml new file mode 100644 index 00000000..ebb54561 --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/group_vars/masters.yml @@ -0,0 +1,7 @@ +--- + +openshift_node_group_name: 'node-config-master' + +openshift_node_open_ports: +- service: "prometheus node exporter" + port: "9100/tcp" diff --git a/inventory/sample.rhv.example.com.d/inventory/host_vars/localhost.yml b/inventory/sample.rhv.example.com.d/inventory/host_vars/localhost.yml new file mode 100644 index 00000000..cb8d9aa1 --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/host_vars/localhost.yml @@ -0,0 +1,3 @@ +--- + +ansible_connection: local diff --git a/inventory/sample.rhv.example.com.d/inventory/hosts b/inventory/sample.rhv.example.com.d/inventory/hosts new file mode 100644 index 00000000..2490e6ef --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/hosts @@ -0,0 +1,52 @@ + +#[all:vars] +# For all group_vars, see ./group_vars/all.yml + +# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. +# The lb group lets Ansible configure HAProxy as the load balancing solution. +# Comment lb out if your load balancer is pre-configured. 
+[cluster_hosts:children] +OSEv3 + +[OSEv3:children] +masters +nodes +etcd +glusterfs + +# Set variables common for all OSEv3 hosts +#[OSEv3:vars] + +# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml + +# Host Groups + +[masters:children] +tag_masters_rhv +tag_masters_etcd_rhv + +[etcd:children] +tag_etcd_nodes_rhv +tag_masters_etcd_rhv + +[nodes:children] +masters +infra_hosts +app_hosts +glusterfs + +[infra_hosts:children] +tag_infra_nodes_rhv + +[app_hosts:children] +tag_app_nodes_rhv + +[glusterfs:children] +tag_cns_nodes_rhv + +[tag_masters_rhv] +[tag_masters_etcd_rhv] +[tag_etcd_nodes_rhv] +[tag_infra_nodes_rhv] +[tag_app_nodes_rhv] +[tag_cns_nodes_rhv] diff --git a/inventory/sample.rhv.example.com.d/inventory/ovirt4.py b/inventory/sample.rhv.example.com.d/inventory/ovirt4.py new file mode 100755 index 00000000..fb62afc8 --- /dev/null +++ b/inventory/sample.rhv.example.com.d/inventory/ovirt4.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +""" +oVirt dynamic inventory script +================================= + +Generates dynamic inventory file for oVirt. + +Script will return following attributes for each virtual machine: + - id + - name + - host + - cluster + - status + - description + - fqdn + - os_type + - template + - tags + - statistics + - devices + +When run in --list mode, virtual machines are grouped by the following categories: + - cluster + - tag + - status + + Note: If there is some virtual machine which has has more tags it will be in both tag + records. + +Examples: + # Execute update of system on webserver virtual machine: + + $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest" + + # Get webserver virtual machine information: + + $ contrib/inventory/ovirt4.py --host webserver + +Author: Ondra Machacek (@machacekondra) +""" + +import argparse +import os +import sys + +from collections import defaultdict + +try: + import ConfigParser as configparser +except ImportError: + import configparser + +try: + import json +except ImportError: + import simplejson as json + +try: + import ovirtsdk4 as sdk + import ovirtsdk4.types as otypes +except ImportError: + print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0') + sys.exit(1) + + +def parse_args(): + """ + Create command line parser for oVirt dynamic inventory script. 
+ """ + parser = argparse.ArgumentParser( + description='Ansible dynamic inventory script for oVirt.', + ) + parser.add_argument( + '--list', + action='store_true', + default=True, + help='Get data of all virtual machines (default: True).', + ) + parser.add_argument( + '--host', + help='Get data of virtual machines running on specified host.', + ) + parser.add_argument( + '--pretty', + action='store_true', + default=False, + help='Pretty format (default: False).', + ) + return parser.parse_args() + + +def create_connection(): + """ + Create a connection to oVirt engine API. + """ + """ + DISABLE CONFIG FILE + # Get the path of the configuration file, by default use + # 'ovirt.ini' file in script directory: + default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 'ovirt.ini', + ) + config_path = os.environ.get('OVIRT_INI_PATH', default_path) + + # Create parser and add ovirt section if it doesn't exist: + config = configparser.SafeConfigParser( + defaults={ + 'ovirt_url': None, + 'ovirt_username': None, + 'ovirt_password': None, + 'ovirt_ca_file': None, + } + ) + if not config.has_section('ovirt'): + config.add_section('ovirt') + config.read(config_path) + """ + # Create a connection with options defined in ENV variables: + return sdk.Connection( + url=os.environ.get('OVIRT_URL', 'ovirt_url'), + username=os.environ.get('OVIRT_USERNAME', 'ovirt_username'), + password=os.environ.get('OVIRT_PASSWORD', 'ovirt_password'), + ca_file=os.environ.get('OVIRT_CA', 'ovirt_ca_file'), + insecure=os.environ.get('OVIRT_SECURE', 'ovirt_ca_file') is None, + ) + + +def get_dict_of_struct(connection, vm): + """ + Transform SDK Vm Struct type to Python dictionary. + """ + if vm is None: + return dict() + + vms_service = connection.system_service().vms_service() + clusters_service = connection.system_service().clusters_service() + vm_service = vms_service.vm_service(vm.id) + devices = vm_service.reported_devices_service().list() + tags = vm_service.tags_service().list() + stats = vm_service.statistics_service().list() + labels = vm_service.affinity_labels_service().list() + groups = clusters_service.cluster_service( + vm.cluster.id + ).affinity_groups_service().list() + + return { + 'id': vm.id, + 'name': vm.name, + 'host': connection.follow_link(vm.host).name if vm.host else None, + 'cluster': connection.follow_link(vm.cluster).name, + 'status': str(vm.status), + 'description': vm.description, + 'fqdn': vm.fqdn, + 'os_type': vm.os.type, + 'template': connection.follow_link(vm.template).name, + 'tags': [tag.name for tag in tags], + 'affinity_labels': [label.name for label in labels], + 'affinity_groups': [ + group.name for group in groups + if vm.name in [vm.name for vm in connection.follow_link(group.vms)] + ], + 'statistics': dict( + (stat.name, stat.values[0].datum) for stat in stats + ), + 'devices': dict( + (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips + ), + 'ansible_host': next((device.ips[0].address for device in devices if device.ips and device.name == "eth0"), None) + } + + +def get_data(connection, vm_name=None): + """ + Obtain data of `vm_name` if specified, otherwise obtain data of all vms. 
+ """ + vms_service = connection.system_service().vms_service() + clusters_service = connection.system_service().clusters_service() + + if vm_name: + vm = vms_service.list(search='name=%s' % vm_name) or [None] + data = get_dict_of_struct( + connection=connection, + vm=vm[0], + ) + else: + vms = dict() + data = defaultdict(list) + for vm in vms_service.list(): + name = vm.name + vm_service = vms_service.vm_service(vm.id) + cluster_service = clusters_service.cluster_service(vm.cluster.id) + + # Add vm to vms dict: + vms[name] = get_dict_of_struct(connection, vm) + + # Add vm to cluster group: + cluster_name = connection.follow_link(vm.cluster).name + data['cluster_%s' % cluster_name].append(name) + + # Add vm to tag group: + tags_service = vm_service.tags_service() + for tag in tags_service.list(): + data['tag_%s' % tag.name].append(name) + + # Add vm to status group: + data['status_%s' % vm.status].append(name) + + # Add vm to affinity group: + for group in cluster_service.affinity_groups_service().list(): + if vm.name in [ + v.name for v in connection.follow_link(group.vms) + ]: + data['affinity_group_%s' % group.name].append(vm.name) + + # Add vm to affinity label group: + affinity_labels_service = vm_service.affinity_labels_service() + for label in affinity_labels_service.list(): + data['affinity_label_%s' % label.name].append(name) + + data["_meta"] = { + 'hostvars': vms, + } + + return data + + +def main(): + args = parse_args() + connection = create_connection() + + print( + json.dumps( + obj=get_data( + connection=connection, + vm_name=args.host, + ), + sort_keys=args.pretty, + indent=args.pretty * 2, + ) + ) + +if __name__ == '__main__': + main() diff --git a/inventory/scripts/ovirt4.py b/inventory/scripts/ovirt4.py new file mode 100755 index 00000000..b0c235fc --- /dev/null +++ b/inventory/scripts/ovirt4.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +""" +oVirt dynamic inventory script +================================= + +Generates dynamic inventory file for oVirt. + +Script will return following attributes for each virtual machine: + - id + - name + - host + - cluster + - status + - description + - fqdn + - os_type + - template + - tags + - statistics + - devices + +When run in --list mode, virtual machines are grouped by the following categories: + - cluster + - tag + - status + + Note: If there is some virtual machine which has has more tags it will be in both tag + records. 
+ +Examples: + # Execute update of system on webserver virtual machine: + + $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest" + + # Get webserver virtual machine information: + + $ contrib/inventory/ovirt4.py --host webserver + +Author: Ondra Machacek (@machacekondra) +""" + +import argparse +import os +import sys + +from collections import defaultdict + +try: + import ConfigParser as configparser +except ImportError: + import configparser + +try: + import json +except ImportError: + import simplejson as json + +try: + import ovirtsdk4 as sdk + import ovirtsdk4.types as otypes +except ImportError: + print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0') + sys.exit(1) + + +def parse_args(): + """ + Create command line parser for oVirt dynamic inventory script. + """ + parser = argparse.ArgumentParser( + description='Ansible dynamic inventory script for oVirt.', + ) + parser.add_argument( + '--list', + action='store_true', + default=True, + help='Get data of all virtual machines (default: True).', + ) + parser.add_argument( + '--host', + help='Get data of virtual machines running on specified host.', + ) + parser.add_argument( + '--pretty', + action='store_true', + default=False, + help='Pretty format (default: False).', + ) + return parser.parse_args() + + +def create_connection(): + """ + Create a connection to oVirt engine API. + """ + # Get the path of the configuration file, by default use + # 'ovirt.ini' file in script directory: + default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 'ovirt.ini', + ) + config_path = os.environ.get('OVIRT_INI_PATH', default_path) + + # Create parser and add ovirt section if it doesn't exist: + config = configparser.SafeConfigParser( + defaults={ + 'ovirt_url': None, + 'ovirt_username': None, + 'ovirt_password': None, + 'ovirt_ca_file': None, + } + ) + if not config.has_section('ovirt'): + config.add_section('ovirt') + config.read(config_path) + + # Create a connection with options defined in ini file: + return sdk.Connection( + url=config.get('ovirt', 'ovirt_url'), + username=config.get('ovirt', 'ovirt_username'), + password=config.get('ovirt', 'ovirt_password'), + ca_file=config.get('ovirt', 'ovirt_ca_file'), + insecure=config.get('ovirt', 'ovirt_ca_file') is None, + ) + + +def get_dict_of_struct(connection, vm): + """ + Transform SDK Vm Struct type to Python dictionary. 
+ """ + if vm is None: + return dict() + + vms_service = connection.system_service().vms_service() + clusters_service = connection.system_service().clusters_service() + vm_service = vms_service.vm_service(vm.id) + devices = vm_service.reported_devices_service().list() + tags = vm_service.tags_service().list() + stats = vm_service.statistics_service().list() + labels = vm_service.affinity_labels_service().list() + groups = clusters_service.cluster_service( + vm.cluster.id + ).affinity_groups_service().list() + + return { + 'id': vm.id, + 'name': vm.name, + 'host': connection.follow_link(vm.host).name if vm.host else None, + 'cluster': connection.follow_link(vm.cluster).name, + 'status': str(vm.status), + 'description': vm.description, + 'fqdn': vm.fqdn, + 'os_type': vm.os.type, + 'template': connection.follow_link(vm.template).name, + 'tags': [tag.name for tag in tags], + 'affinity_labels': [label.name for label in labels], + 'affinity_groups': [ + group.name for group in groups + if vm.name in [vm.name for vm in connection.follow_link(group.vms)] + ], + 'statistics': dict( + (stat.name, stat.values[0].datum) for stat in stats + ), + 'devices': dict( + (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips + ), + 'ansible_host': next((device.ips[0].address for device in devices if device.ips and device.name == "eth0"), None) + } + + +def get_data(connection, vm_name=None): + """ + Obtain data of `vm_name` if specified, otherwise obtain data of all vms. + """ + vms_service = connection.system_service().vms_service() + clusters_service = connection.system_service().clusters_service() + + if vm_name: + vm = vms_service.list(search='name=%s' % vm_name) or [None] + data = get_dict_of_struct( + connection=connection, + vm=vm[0], + ) + else: + vms = dict() + data = defaultdict(list) + for vm in vms_service.list(): + name = vm.name + vm_service = vms_service.vm_service(vm.id) + cluster_service = clusters_service.cluster_service(vm.cluster.id) + + # Add vm to vms dict: + vms[name] = get_dict_of_struct(connection, vm) + + # Add vm to cluster group: + cluster_name = connection.follow_link(vm.cluster).name + data['cluster_%s' % cluster_name].append(name) + + # Add vm to tag group: + tags_service = vm_service.tags_service() + for tag in tags_service.list(): + data['tag_%s' % tag.name].append(name) + + # Add vm to status group: + data['status_%s' % vm.status].append(name) + + # Add vm to affinity group: + for group in cluster_service.affinity_groups_service().list(): + if vm.name in [ + v.name for v in connection.follow_link(group.vms) + ]: + data['affinity_group_%s' % group.name].append(vm.name) + + # Add vm to affinity label group: + affinity_labels_service = vm_service.affinity_labels_service() + for label in affinity_labels_service.list(): + data['affinity_label_%s' % label.name].append(name) + + data["_meta"] = { + 'hostvars': vms, + } + + return data + + +def main(): + args = parse_args() + connection = create_connection() + + print( + json.dumps( + obj=get_data( + connection=connection, + vm_name=args.host, + ), + sort_keys=args.pretty, + indent=args.pretty * 2, + ) + ) + +if __name__ == '__main__': + main() diff --git a/playbooks/openshift/rhv/provision.yml b/playbooks/openshift/rhv/provision.yml new file mode 100644 index 00000000..fee51c4f --- /dev/null +++ b/playbooks/openshift/rhv/provision.yml @@ -0,0 +1,29 @@ +--- +- hosts: localhost + roles: + - { role: manage-rhv-infra, action: 'present' } + + tasks: + +- name: Refresh Server inventory + hosts: localhost + 
connection: local + gather_facts: False + tasks: + - meta: refresh_inventory + + - name: List known inventory + debug: + msg: "{{ item }}" + with_inventory_hostnames: + - all +- name: Wait for systems to become reachable + hosts: cluster_hosts + gather_facts: False + tasks: + - name: Debug hostvar + debug: + msg: "{{ hostvars[inventory_hostname] }}" + verbosity: 2 + - wait_for_connection: + timeout: 300 diff --git a/playbooks/openshift/rhv/roles b/playbooks/openshift/rhv/roles new file mode 120000 index 00000000..20c4c58c --- /dev/null +++ b/playbooks/openshift/rhv/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/roles/manage-rhv-infra/README.md b/roles/manage-rhv-infra/README.md new file mode 100644 index 00000000..0b6dd1c0 --- /dev/null +++ b/roles/manage-rhv-infra/README.md @@ -0,0 +1,117 @@ +manage-rhv-infra +================ + +This role deploys and manages the underlying OCP required Infrastructure in RHV based in the variables defined in the inventory. + +As this is a shared environment, specific tags not related to OCP are added as well to every ec2 and ebs created objects so they can be easily identified. These can be found and modified under 'instance_tags' option on every ec2 instance creation. + +Requirements +------------ + +Ansible version >= 2.4 + +Role Variables +-------------- + +The majority of the variables required to use the role are defined under the **cloud_infrastructure** object. This object definition is the representation for the underlaying infrastructure and all the required components to deploy an OpenShift Cluster on top of it. + +Many of these required variables have default values. These are the **mandatory** variables you need to specify in order the role to work. + +Infrastructure skeleton variables +--------------------------------- + +```yaml +cloud_infrastructure: + masters: + count: **mandatory** + cpu_count: **defaulted** + memory: **defaulted** + network: **defaulted** + template_name: **defaulted** + network_netmask: **defaulted** + network_gateway: **defaulted** + network_nic_name: **defaulted** + name_prefix: **defaulted** + rhv_cluster: **defaulted** + root_volume_size: **defaulted** + rhv_storage_domain: **defaulted** + docker_storage_domain: **defaulted** + docker_volume_size: **defaulted** + etcd_storage_domain: **defaulted** + etcd_volume_size: **defaulted** + etcdnodes: + count: **mandatory** + cpu_count: **defaulted** + memory: **defaulted** + network: **defaulted** + template_name: **defaulted** + network_netmask: **defaulted** + network_gateway: **defaulted** + network_nic_name: **defaulted** + name_prefix: **defaulted** + rhv_cluster: **defaulted** + root_volume_size: **defaulted** + rhv_storage_domain: **defaulted** + docker_storage_domain: **defaulted** + docker_volume_size: **defaulted** + etcd_storage_domain: **defaulted** + etcd_volume_size: **defaulted** + appnodes: + count: **mandatory** + cpu_count: **defaulted** + memory: **defaulted** + network: **defaulted** + template_name: **defaulted** + network_netmask: **defaulted** + network_gateway: **defaulted** + network_nic_name: **defaulted** + name_prefix: **defaulted** + rhv_cluster: **defaulted** + root_volume_size: **defaulted** + rhv_storage_domain: **defaulted** + docker_storage_domain: **defaulted** + docker_volume_size: **defaulted** + origin_storage_domain: **defaulted** + origin_volume_size: **defaulted** + infranodes: + count: **mandatory** + cpu_count: **defaulted** + memory: **defaulted** + network: **defaulted** + template_name: 
**defaulted** + network_netmask: **defaulted** + network_gateway: **defaulted** + network_nic_name: **defaulted** + name_prefix: **defaulted** + rhv_cluster: **defaulted** + root_volume_size: **defaulted** + rhv_storage_domain: **defaulted** + docker_storage_domain: **defaulted** + docker_volume_size: **defaulted** + origin_storage_domain: **defaulted** + origin_volume_size: **defaulted** + cnsnodes: + count: **mandatory** + cpu_count: **defaulted** + memory: **defaulted** + network: **defaulted** + template_name: **defaulted** + network_netmask: **defaulted** + network_gateway: **defaulted** + network_nic_name: **defaulted** + name_prefix: **defaulted** + rhv_cluster: **defaulted** + root_volume_size: **defaulted** + rhv_storage_domain: **defaulted** + docker_storage_domain: **defaulted** + docker_volume_size: **defaulted** + gluster_storage_domain: **mandatory** + gluster_volume_size: **mandatory** + +``` + +Other variables +--------------- + +| Variable | Description | +|:---------------:|:-------------------------------------:| diff --git a/roles/manage-rhv-infra/defaults/main.yml b/roles/manage-rhv-infra/defaults/main.yml new file mode 100644 index 00000000..7d086bc7 --- /dev/null +++ b/roles/manage-rhv-infra/defaults/main.yml @@ -0,0 +1,199 @@ +--- +# Default RHV environment settings to use +# Update the following values for your environment +default_rhv_cluster: "Default" +default_template_name: "rhel-server-7.6-template" +default_network: "ovirtmgmt" +default_network_netmask: 255.255.255.0 +default_network_gateway: 192.168.2.1 +default_network_nic_name: eth0 + +# Defaults for CPU and memory, you will almost certainly want to customize these below under each type (master/infra/appnode) +default_cpu_count: 4 +default_memory: 16384Mib + +default_root_volume_size: 40Gib +default_rhv_storage_domain: data-store-01 +default_docker_volume_size: 100Gib +default_etcd_volume_size: 40Gib +default_origin_volume_size: 40Gib +default_cns_volume_size: '200Gib' +default_gluster_storage_domain: vm-store + +# Default port for OpenShift API and Web Console +openshift_master_api_port: 8443 +openshift_master_console_port: 8443 + + +## Default infra skeleton variables +default_cloud_infrastructure: + masters: + count: 1 + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + etcd_storage_domain: "{{ default_rhv_storage_domain }}" + etcd_volume_size: "{{ default_etcd_volume_size }}" + etcdnodes: + count: 0 + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ 
default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + etcd_storage_domain: "{{ default_rhv_storage_domain }}" + etcd_volume_size: "{{ default_etcd_volume_size }}" + appnodes: + count: 0 + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + origin_storage_domain: "{{ default_rhv_storage_domain }}" + origin_volume_size: "{{ default_etcd_volume_size }}" + infranodes: + count: 1 + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + origin_storage_domain: "{{ default_rhv_storage_domain }}" + origin_volume_size: "{{ default_etcd_volume_size }}" + cnsnodes: + count: 0 + cpu_count: "{{ default_cpu_count }}" + memory: "{{ default_memory }}" + network: "{{ default_network }}" + template_name: "{{ default_template_name }}" + network_netmask: "{{ default_network_netmask }}" + network_gateway: "{{ default_network_gateway }}" + network_nic_name: "{{ default_network_nic_name }}" + name_prefix: "{{ default_name_prefix }}" + rhv_cluster: "{{ default_rhv_cluster }}" + root_volume_size: "{{ default_root_volume_size }}" + rhv_storage_domain: "{{ default_rhv_storage_domain }}" + docker_storage_domain: "{{ default_rhv_storage_domain }}" + docker_volume_size: "{{ default_docker_volume_size }}" + gluster_storage_domain: "{{ default_gluster_storage_domain }}" + gluster_volume_size: "{{ default_cns_volume_size }}" + +## Default volume device names +default_root_volume: '/dev/sda1' + +## NOTE: These devices names are fixed now. 
Will be customizable in the future +## Keeping the values here for future references +#default_docker_volume: '/dev/xvdb' +#default_cns_volume: '/dev/xvdg' + +## Default instance numbers +#default_rhv_num_masters: 1 +#default_rhv_num_etcd: 0 +#default_rhv_num_app_nodes: 3 +#default_rhv_num_infra_nodes: 1 +#default_rhv_num_cns_nodes: 0 + +## Default instance flavors +#default_master_flavor: "m4.xlarge" +#default_etcd_flavor: "i3.xlarge" +#default_app_node_flavor: "m4.xlarge" +#default_infra_node_flavor: "m4.xlarge" +#default_cns_node_flavor: "i3.xlarge" + +## Default instance prefix names +#default_rhv_masters_name_prefix: "master" +#default_rhv_etcd_name_prefix: "etcd" +#default_rhv_appnodes_name_prefix: "node" +#default_rhv_infranodes_name_prefix: "infra" +#default_rhv_cnsnodes_name_prefix: "cns" + +## Feed AZs for each instance from `cloud_infrastructure` list on the inventory +rhv_masters_zones: "{{ cloud_infrastructure.masters.zones }}" +rhv_etcd_zones: "{{ cloud_infrastructure.etcdnodes.zones }}" +rhv_appnodes_zones: "{{ cloud_infrastructure.appnodes.zones }}" +rhv_infranodes_zones: "{{ cloud_infrastructure.infranodes.zones }}" +rhv_cnsnodes_zones: "{{ cloud_infrastructure.cnsnodes.zones }}" + +## Feed name prefix for each instance from `cloud_infrastructure` list on the inventory +rhv_masters_name_prefix: "{{ cloud_infrastructure.masters.name_prefix }}" +rhv_etcd_name_prefix: "{{ cloud_infrastructure.etcdnodes.name_prefix }}" +rhv_appnodes_name_prefix: "{{ cloud_infrastructure.appnodes.name_prefix }}" +rhv_infranodes_name_prefix: "{{ cloud_infrastructure.infranodes.name_prefix }}" +rhv_cnsnodes_name_prefix: "{{ cloud_infrastructure.cnsnodes.name_prefix }}" + +## Feed instance numbers for each instance from `cloud_infrastructure` list on the inventory +rhv_num_masters: "{{ cloud_infrastructure.masters.count }}" +rhv_num_etcd: "{{ cloud_infrastructure.etcdnodes.count }}" +rhv_num_app_nodes: "{{ cloud_infrastructure.appnodes.count }}" +rhv_num_infra_nodes: "{{ cloud_infrastructure.infranodes.count }}" +rhv_num_cns_nodes: "{{ cloud_infrastructure.cnsnodes.count }}" + +## Feed instance flavors for each instance from `cloud_infrastructure` list on the inventory +master_flavor: "{{ cloud_infrastructure.masters.flavor }}" +etcd_flavor: "{{ cloud_infrastructure.etcdnodes.flavor }}" +app_node_flavor: "{{ cloud_infrastructure.appnodes.flavor }}" +infra_node_flavor: "{{ cloud_infrastructure.infranodes.flavor }}" +cns_node_flavor: "{{ cloud_infrastructure.cnsnodes.flavor }}" + +## Feed volume device names for each instance from `cloud_infrastructure` list on the inventory +master_root_volume: "{{ default_root_volume }}" +etcd_root_volume: "{{ default_root_volume }}" +infra_node_root_volume: "{{ default_root_volume }}" +app_node_root_volume: "{{ default_root_volume }}" +cns_node_root_volume: "{{ default_root_volume }}" +## Temporary set for Docker and Gluster Storage block device names +## Leaving here as a reference. 
Will be handle on a handle on a different way in the future +#cns_node_glusterfs_volume: "{{ default_cns_volume }}" +#docker_storage_block_device: "{{ default_docker_volume }}" + +## Feed root volume device sizes for each instance from `cloud_infrastructure` list on the inventory +master_root_volume_size: "{{ cloud_infrastructure.masters.root_volume_size }}" +etcd_root_volume_size: "{{ cloud_infrastructure.etcdnodes.root_volume_size }}" +infra_node_root_volume_size: "{{ cloud_infrastructure.infranodes.root_volume_size }}" +app_node_root_volume_size: "{{ cloud_infrastructure.appnodes.root_volume_size }}" +cns_node_root_volume_size: "{{ cloud_infrastructure.cnsnodes.root_volume_size }}" + +## Feed root volume device sizes for each instance from `cloud_infrastructure` list on the inventory +master_docker_volume_size: "{{ cloud_infrastructure.masters.docker_volume_size }}" +etcd_docker_volume_size: "{{ cloud_infrastructure.etcdnodes.docker_volume_size }}" +infra_node_docker_volume_size: "{{ cloud_infrastructure.infranodes.docker_volume_size }}" +app_node_docker_volume_size: "{{ cloud_infrastructure.appnodes.docker_volume_size }}" +cns_node_docker_volume_size: "{{ cloud_infrastructure.cnsnodes.docker_volume_size }}" + +## Feed gluster volume device sizes for CNS instances from `cloud_infrastructure` list on the inventory +cns_node_glusterfs_volume_size: "{{ cloud_infrastructure.cnsnodes.gluster_volume_size }}" diff --git a/roles/manage-rhv-infra/tasks/create-vms.yml b/roles/manage-rhv-infra/tasks/create-vms.yml new file mode 100644 index 00000000..f42d0e8c --- /dev/null +++ b/roles/manage-rhv-infra/tasks/create-vms.yml @@ -0,0 +1,370 @@ +--- +# tasks file for rhv-ocp + +#- include_vars: ovirt_password.yml + +# Verify expected DNS and store associated IPs +- name: Validate and store OpenShift Master IPs + command: "dig +short {{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.masters.count }} + when: cloud_infrastructure.masters.count > 0 + register: masters_ip + failed_when: masters_ip.stdout == "" or masters_ip.rc == 1 + +- name: Debug masters_ip + debug: + var: masters_ip + +#- name: Print masters_ip test +# debug: +# msg: "{{ masters_ip['results'][1]['stdout'] }}" + + +- name: Validate and store OpenShift Etcd IPs + command: "dig +short {{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.etcdnodes.count }} + when: cloud_infrastructure.etcdnodes.count > 0 + register: etcdnodes_ip + failed_when: etcdnodes_ip.stdout == "" or etcdnodes_ip.rc == 1 + +- name: Validate and store OpenShift Infra IPs + command: "dig +short {{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.infranodes.count }} + when: cloud_infrastructure.infranodes.count > 0 + register: infranodes_ip + failed_when: infranodes_ip.stdout == "" or infranodes_ip.rc == 1 + +- name: Validate and store OpenShift App Nodes IPs + command: "dig +short {{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.appnodes.count }} + when: cloud_infrastructure.appnodes.count > 0 + register: appnodes_ip + failed_when: appnodes_ip.stdout == "" or appnodes_ip.rc == 1 + +- name: Validate and store OpenShift CNS Nodes IPs + command: "dig +short {{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ 
dns_domain }}" + with_sequence: count={{ cloud_infrastructure.cnsnodes.count }} + when: cloud_infrastructure.cnsnodes.count > 0 + register: cnsnodes_ip + failed_when: cnsnodes_ip.stdout == "" or cnsnodes_ip.rc == 1 + +## Create the VMs +- name: Obtain SSO token + ovirt_auth: + state: present +# url: "{{ ovirt_url }}" +# username: "{{ ovirt_username }}" +# password: "{{ ovirt_password }}" +# ca_file: "{{ ovirt_ca_file }}" + +### Creates the Masters +- name: Create OCP Master Virtual Machines from template + ovirt_vms: + name: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + template: "{{ cloud_infrastructure.masters.template_name }}" + state: running + cluster: "{{ cloud_infrastructure.masters.rhv_cluster }}" + cpu_cores: "{{ cloud_infrastructure.masters.cpu_count }}" + memory: "{{ cloud_infrastructure.masters.memory }}" + wait: True + cloud_init: + host_name: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + dns_search: "{{ dns_domain }}" + nic_boot_protocol: static + nic_ip_address: "{{ masters_ip['results'][item|int -1]['stdout'] }}" + nic_netmask: "{{ cloud_infrastructure.masters.network_netmask }}" + nic_gateway: "{{ cloud_infrastructure.masters.network_gateway }}" + nic_name: "{{ cloud_infrastructure.masters.network_nic_name }}" + nic_on_boot: true + with_sequence: count={{ cloud_infrastructure.masters.count }} + +- name: Create OCP Master Docker disk + ovirt_disk: + name: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}-{{ env_id }}-docker-disk" + vm_name: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + storage_domain: "{{ cloud_infrastructure.masters.docker_storage_domain }}" + size: "{{ cloud_infrastructure.masters.docker_volume_size }}" + format: cow + interface: virtio + with_sequence: count={{ cloud_infrastructure.masters.count }} + +- name: Create OCP Master Etcd disk + ovirt_disk: + name: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}-{{ env_id }}-etcd-disk" + vm_name: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + storage_domain: "{{ cloud_infrastructure.masters.etcd_storage_domain }}" + size: "{{ cloud_infrastructure.masters.etcd_volume_size }}" + format: cow + interface: virtio + with_sequence: count={{ cloud_infrastructure.masters.count }} + +- name: Tag as master + ovirt_tags: + name: "{{ group_masters_tag }}" + state: attached + vms: + - "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.masters.count }} + when: cloud_infrastructure.etcdnodes.count > 0 + +- name: Tag as a master and etcd + ovirt_tags: + name: "{{ group_masters_etcd_tag }}" + state: attached + vms: + - "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.masters.count }} + when: cloud_infrastructure.etcdnodes.count == 0 + +########### ETCD NODES ############## + +- name: Create OCP Etcd Virtual Machines from template + ovirt_vms: + name: "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + template: "{{ cloud_infrastructure.etcdnodes.template_name }}" + state: running + cluster: "{{ cloud_infrastructure.etcdnodes.rhv_cluster }}" + cpu_cores: "{{ cloud_infrastructure.etcdnodes.cpu_count }}" + memory: "{{ cloud_infrastructure.etcdnodes.memory }}" + wait: True + cloud_init: + host_name: "{{ 
cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + dns_search: "{{ dns_domain }}" + nic_boot_protocol: static + nic_ip_address: "{{ etcdnodes_ip['results'][item|int -1]['stdout'] }}" + nic_netmask: "{{ cloud_infrastructure.etcdnodes.network_netmask }}" + nic_gateway: "{{ cloud_infrastructure.etcdnodes.network_gateway }}" + nic_name: "{{ cloud_infrastructure.etcdnodes.network_nic_name }}" + nic_on_boot: true + with_sequence: count={{ cloud_infrastructure.etcdnodes.count }} + +- name: Create OCP Etcdnode Docker disk + ovirt_disk: + name: "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}-{{ env_id }}-docker-disk" + vm_name: "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + storage_domain: "{{ cloud_infrastructure.etcdnodes.docker_storage_domain }}" + size: "{{ cloud_infrastructure.etcdnodes.docker_volume_size }}" + format: cow + interface: virtio + with_sequence: count={{ cloud_infrastructure.etcdnodes.count }} + +- name: Create OCP Etcdnode Etcd disk + ovirt_disk: + name: "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}-{{ env_id }}-etcd-disk" + vm_name: "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + storage_domain: "{{ cloud_infrastructure.etcdnodes.etcd_storage_domain }}" + size: "{{ cloud_infrastructure.etcdnodes.etcd_volume_size }}" + format: cow + interface: virtio + with_sequence: count={{ cloud_infrastructure.etcdnodes.count }} + +- name: Tag the etcdnodes + ovirt_tags: + name: "{{ group_etcd_nodes_tag }}" + state: attached + vms: + - "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + with_sequence: count={{ cloud_infrastructure.etcdnodes.count }} + +########### INFRA NODES ############## + +- name: Create OCP Infranodes Virtual Machines from template + ovirt_vms: + name: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + template: "{{ cloud_infrastructure.infranodes.template_name }}" + state: running + cluster: "{{ cloud_infrastructure.infranodes.rhv_cluster }}" + cpu_cores: "{{ cloud_infrastructure.infranodes.cpu_count }}" + memory: "{{ cloud_infrastructure.infranodes.memory }}" + wait: True + cloud_init: + host_name: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + dns_search: "{{ dns_domain }}" + nic_boot_protocol: static + nic_ip_address: "{{ infranodes_ip['results'][item|int -1]['stdout'] }}" + nic_netmask: "{{ cloud_infrastructure.infranodes.network_netmask }}" + nic_gateway: "{{ cloud_infrastructure.infranodes.network_gateway }}" + nic_name: "{{ cloud_infrastructure.infranodes.network_nic_name }}" + nic_on_boot: true + with_sequence: count={{ cloud_infrastructure.infranodes.count }} + +- name: Create OCP Infranodes Docker disk + ovirt_disk: + name: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}-{{ env_id }}-docker-disk" + vm_name: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}" + storage_domain: "{{ cloud_infrastructure.infranodes.docker_storage_domain }}" + size: "{{ cloud_infrastructure.infranodes.docker_volume_size }}" + format: cow + interface: virtio + with_sequence: count={{ cloud_infrastructure.infranodes.count }} + +- name: Create OCP Infranodes Origin disk + ovirt_disk: + name: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}-{{ env_id }}-etcd-disk" + vm_name: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ 
+    storage_domain: "{{ cloud_infrastructure.infranodes.origin_storage_domain }}"
+    size: "{{ cloud_infrastructure.infranodes.origin_volume_size }}"
+    format: cow
+    interface: virtio
+  with_sequence: count={{ cloud_infrastructure.infranodes.count }}
+
+- name: Tag the infranode
+  ovirt_tags:
+    name: "{{ group_infra_nodes_tag }}"
+    state: attached
+    vms:
+      - "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.infranodes.count }}
+
+########### APP NODES ##############
+
+- name: Create OCP Appnode Virtual Machines from template
+  ovirt_vms:
+    name: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+    template: "{{ cloud_infrastructure.appnodes.template_name }}"
+    state: running
+    cluster: "{{ cloud_infrastructure.appnodes.rhv_cluster }}"
+    cpu_cores: "{{ cloud_infrastructure.appnodes.cpu_count }}"
+    memory: "{{ cloud_infrastructure.appnodes.memory }}"
+    wait: True
+    cloud_init:
+      host_name: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+      dns_search: "{{ dns_domain }}"
+      nic_boot_protocol: static
+      nic_ip_address: "{{ appnodes_ip['results'][item|int -1]['stdout'] }}"
+      nic_netmask: "{{ cloud_infrastructure.appnodes.network_netmask }}"
+      nic_gateway: "{{ cloud_infrastructure.appnodes.network_gateway }}"
+      nic_name: "{{ cloud_infrastructure.appnodes.network_nic_name }}"
+      nic_on_boot: true
+  with_sequence: count={{ cloud_infrastructure.appnodes.count }}
+
+- name: Create OCP Appnode Docker disk
+  ovirt_disk:
+    name: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}-{{ env_id }}-docker-disk"
+    vm_name: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+    storage_domain: "{{ cloud_infrastructure.appnodes.docker_storage_domain }}"
+    size: "{{ cloud_infrastructure.appnodes.docker_volume_size }}"
+    format: cow
+    interface: virtio
+  with_sequence: count={{ cloud_infrastructure.appnodes.count }}
+
+- name: Create OCP Appnode Origin disk
+  ovirt_disk:
+    name: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}-{{ env_id }}-origin-disk"
+    vm_name: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+    storage_domain: "{{ cloud_infrastructure.appnodes.origin_storage_domain }}"
+    size: "{{ cloud_infrastructure.appnodes.origin_volume_size }}"
+    format: cow
+    interface: virtio
+  with_sequence: count={{ cloud_infrastructure.appnodes.count }}
+
+- name: Tag the appnode
+  ovirt_tags:
+    name: "{{ group_app_nodes_tag }}"
+    state: attached
+    vms:
+      - "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.appnodes.count }}
+
+########### OCS NODES ##############
+
+- name: Create OCP OpenShift Container Storage Virtual Machines from template
+  ovirt_vms:
+    name: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+    template: "{{ cloud_infrastructure.cnsnodes.template_name }}"
+    state: running
+    cluster: "{{ cloud_infrastructure.cnsnodes.rhv_cluster }}"
+    cpu_cores: "{{ cloud_infrastructure.cnsnodes.cpu_count }}"
+    memory: "{{ cloud_infrastructure.cnsnodes.memory }}"
+    wait: True
+    cloud_init:
+      host_name: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+      dns_search: "{{ dns_domain }}"
+      nic_boot_protocol: static
+      nic_ip_address: "{{ cnsnodes_ip['results'][item|int -1]['stdout'] }}"
+      nic_netmask: "{{ cloud_infrastructure.cnsnodes.network_netmask }}"
+      nic_gateway: "{{ cloud_infrastructure.cnsnodes.network_gateway }}"
+      nic_name: "{{ cloud_infrastructure.cnsnodes.network_nic_name }}"
+      nic_on_boot: true
+  with_sequence: count={{ cloud_infrastructure.cnsnodes.count }}
+
+- name: Create OCP OpenShift Container Storage Docker disk
+  ovirt_disk:
+    name: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}-{{ env_id }}-docker-disk"
+    vm_name: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+    storage_domain: "{{ cloud_infrastructure.cnsnodes.docker_storage_domain }}"
+    size: "{{ cloud_infrastructure.cnsnodes.docker_volume_size }}"
+    format: cow
+    interface: virtio
+  with_sequence: count={{ cloud_infrastructure.cnsnodes.count }}
+
+- name: Create OCP OpenShift Container Storage Origin disk
+  ovirt_disk:
+    name: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}-{{ env_id }}-origin-disk"
+    vm_name: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+    storage_domain: "{{ cloud_infrastructure.cnsnodes.origin_storage_domain }}"
+    size: "{{ cloud_infrastructure.cnsnodes.origin_volume_size }}"
+    format: cow
+    interface: virtio
+  with_sequence: count={{ cloud_infrastructure.cnsnodes.count }}
+
+- name: Tag the OpenShift Container Storage node
+  ovirt_tags:
+    name: "{{ group_cns_nodes_tag }}"
+    state: attached
+    vms:
+      - "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.cnsnodes.count }}
+
+## Reboot all the nodes to clear run-once
+
+#- name: Pause for 2 minutes to ensure that cloud-init has finished
+#  pause:
+#    minutes: 2
+
+#- name: Power off OCP Ansible Host
+#  ovirt_vms:
+#    state: stopped
+#    name: "{{ ansible_bastion_name }}"
+
+#- name: Power on OCP Ansible Host
+#  ovirt_vms:
+#    state: running
+#    name: "{{ ansible_bastion_name }}"
+#    cluster: Default
+
+#- name: Stop OCP masters Virtual Machines
+#  ovirt_vms:
+#    state: stopped
+#    name: "{{ master_name }}-{{ item }}"
+#  with_sequence: count={{ master_count }}
+
+#- name: Start OCP masters Virtual Machines
+#  ovirt_vms:
+#    state: running
+#    name: "{{ master_name }}-{{ item }}"
+#  with_sequence: count={{ master_count }}
+
+#- name: Stop OCP Infra Virtual Machines
+#  ovirt_vms:
+#    state: stopped
+#    name: "{{ infra_name }}-{{ item }}"
+#  with_sequence: count={{ infra_count }}
+
+#- name: Start OCP Infra Virtual Machines
+#  ovirt_vms:
+#    state: running
+#    name: "{{ infra_name }}-{{ item }}"
+#  with_sequence: count={{ infra_count }}
+
+#- name: Stop OCP App Node Virtual Machines
+#  ovirt_vms:
+#    state: stopped
+#    name: "{{ appnode_name }}-{{ item }}"
+#  with_sequence: count={{ appnode_count }}
+
+#- name: Start OCP App Node Virtual Machines
+#  ovirt_vms:
+#    state: running
+#    name: "{{ appnode_name }}-{{ item }}"
+#  with_sequence: count={{ appnode_count }}
diff --git a/roles/manage-rhv-infra/tasks/display-expected-dns.yml b/roles/manage-rhv-infra/tasks/display-expected-dns.yml
new file mode 100644
index 00000000..d359dbd5
--- /dev/null
+++ b/roles/manage-rhv-infra/tasks/display-expected-dns.yml
@@ -0,0 +1,44 @@
+---
+# RHV doesn't have a DNS component, so DNS entries must be created prior to running
+# CASL. This gives a list of all the DNS entries needed for the nodes.
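+# If any of these records do not resolve, the node IP lookups later in this role
+# will fail before any virtual machines are created.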
+- name: Display expected OpenShift Master Default subdomain that must exist in DNS
+  debug:
+    msg: "{{ openshift_master_default_subdomain }}"
+
+- name: Display expected OpenShift Public Cluster LB that must exist in DNS
+  debug:
+    msg: "{{ openshift_master_cluster_public_hostname }}"
+
+- name: Display expected OpenShift Internal Cluster LB that must exist in DNS
+  debug:
+    msg: "{{ openshift_master_cluster_hostname }}"
+
+- name: Display expected Master DNS entries that must exist prior to execution
+  debug:
+    msg: "{{ cloud_infrastructure.masters.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.masters.count }}
+  when: cloud_infrastructure.masters.count > 0
+
+- name: Display expected Etcd DNS entries that must exist prior to execution
+  debug:
+    msg: "{{ cloud_infrastructure.etcdnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.etcdnodes.count }}
+  when: cloud_infrastructure.etcdnodes.count > 0
+
+- name: Display expected Infra DNS entries that must exist prior to execution
+  debug:
+    msg: "{{ cloud_infrastructure.infranodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.infranodes.count }}
+  when: cloud_infrastructure.infranodes.count > 0
+
+- name: Display expected App Nodes DNS entries that must exist prior to execution
+  debug:
+    msg: "{{ cloud_infrastructure.appnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.appnodes.count }}
+  when: cloud_infrastructure.appnodes.count > 0
+
+- name: Display expected CNS Nodes DNS entries that must exist prior to execution
+  debug:
+    msg: "{{ cloud_infrastructure.cnsnodes.name_prefix }}-{{ item }}.{{ env_id }}.{{ dns_domain }}"
+  with_sequence: count={{ cloud_infrastructure.cnsnodes.count }}
+  when: cloud_infrastructure.cnsnodes.count > 0
diff --git a/roles/manage-rhv-infra/tasks/main.yml b/roles/manage-rhv-infra/tasks/main.yml
new file mode 100644
index 00000000..eed69689
--- /dev/null
+++ b/roles/manage-rhv-infra/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+
+- import_tasks: pre-reqs.yml
+
+- import_tasks: display-expected-dns.yml
+
+- import_tasks: create-vms.yml
diff --git a/roles/manage-rhv-infra/tasks/pre-reqs.yml b/roles/manage-rhv-infra/tasks/pre-reqs.yml
new file mode 100644
index 00000000..2b547672
--- /dev/null
+++ b/roles/manage-rhv-infra/tasks/pre-reqs.yml
@@ -0,0 +1,11 @@
+---
+- name: Fail when RHV URL, USER, PASSWORD and CA certificate environment variables are not defined
+  debug:
+    msg: "'OVIRT_URL', 'OVIRT_USERNAME', 'OVIRT_PASSWORD' and 'OVIRT_CA' environment variables must be defined"
+  failed_when: (lookup('env','OVIRT_URL') == "") or
+               (lookup('env','OVIRT_USERNAME') == "") or
+               (lookup('env','OVIRT_PASSWORD') == "") or
+               (lookup('env','OVIRT_CA') == "")
+
+- name: Merge infrastructure dictionary
+  set_fact: cloud_infrastructure="{{ default_cloud_infrastructure |combine(cloud_infrastructure, recursive=True) }}"
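+# NOTE: combine(..., recursive=True) deep-merges the user-supplied cloud_infrastructure
+# values over the role's default_cloud_infrastructure, so individual nested keys
+# (for example, masters.count) can be overridden while unspecified keys keep their defaults.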