Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

SSH Firewall, OSX and Parameterization Updates #1

Open
wants to merge 16 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,7 @@ ansible-playbook -v -i hosts /usr/share/ansible/openshift-ansible/playbooks/byo/
From the bastion host run the following
```
ansible 'masters' -i hosts -b -m shell -a "htpasswd -b /etc/origin/master/htpasswd <username> <password>"
ansible 'masters' -i hosts -b -m shell -a "oadm policy add-cluster-role-to-user cluster-admin admin"
```
## Clean up

Expand Down
10 changes: 6 additions & 4 deletions cleanup-gcp.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,16 @@
set -e
gcloud config set project $GCLOUD_PROJECT

#echo "delete google storage buckets"
#delete google storage buckets
for i in $(gsutil ls); do
gsutil rm -r $i &
done;
wait

#echo "delete firewall rules"
#delete firewall rules
for i in $(gcloud compute firewall-rules list -r ^oc-.* | awk 'NR>1 {print $1}'); do
for i in $(gcloud compute firewall-rules list --filter="name~'^oc-.*'" | awk 'NR>1 {print $1}'); do
gcloud compute firewall-rules delete -q $i &
done;
wait
Expand Down Expand Up @@ -47,15 +49,15 @@ wait

#delete instance-groups
for k in us-central1-a us-central1-b us-central1-f; do
for i in $(gcloud compute instance-groups unmanaged list --zones $k | awk 'NR>1 {print $1}'); do
for i in $(gcloud compute instance-groups unmanaged list --filter="zone:( $k )" | awk 'NR>1 {print $1}'); do
gcloud compute instance-groups unmanaged delete -q $i --zone $k &
done;
done;
wait

#delete VMs
for k in us-central1-a us-central1-b us-central1-f; do
for i in $(gcloud compute instances list --zones $k| awk 'NR>1 {print $1}'); do
for i in $(gcloud compute instances list --filter="zone:( $k )" | awk 'NR>1 {print $1}'); do
gcloud compute instances delete $i -q --zone "$k" &
done;
done;
Expand All @@ -64,7 +66,7 @@ wait

#delete disks
for k in us-central1-a us-central1-b us-central1-f; do
for i in $(gcloud compute disks list --zones $k | awk 'NR>1 {print $1}'); do
for i in $(gcloud compute disks list --filter="zone:( $k )" | awk 'NR>1 {print $1}'); do
gcloud compute disks delete -q $i --zone $k &
done;
done
Expand Down
72 changes: 47 additions & 25 deletions hosts
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,15 @@ nodes
etcd
nfs
#{% if {{ env[GLUSTER] }} equals 'yes' %}
glusterfs
#glusterfs
#{% endif %}

# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
ansible_ssh_user=BASTION_USERNAME
ansible_ssh_user=scottes

# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
Expand Down Expand Up @@ -80,11 +80,11 @@ osm_cockpit_plugins=['cockpit-kubernetes']
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
openshift_master_cluster_method=native
openshift_master_cluster_hostname=master.10.128.0.10.xip.io
openshift_master_cluster_public_hostname=master.104.197.199.131.xip.io
openshift_master_cluster_hostname=mi.ocp.scottes.io
openshift_master_cluster_public_hostname=master.ocp.scottes.io

# default subdomain to use for exposed routes
openshift_master_default_subdomain=apps.104.198.35.122.xip.io
openshift_master_default_subdomain=apps.ocp.scottes.io

# OpenShift Router Options
#
Expand All @@ -110,31 +110,53 @@ openshift_hosted_manage_registry=true
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
openshift_hosted_metrics_deploy=true
#openshift_hosted_metrics_deploy=true
#openshift_hosted_metrics_deployer_version=3.3.0
openshift_hosted_metrics_storage_kind=dynamic
openshift_hosted_metrics_storage_volume_size= 10Gi
openshift_metrics_hawkular_replicas=2
openshift_metrics_cassandra_replicas=3
openshift_metrics_hawkular_nodeselector='{"region":"infra"}'
openshift_metrics_cassandra_nodeselector='{"region":"infra"}'
openshift_metrics_heapster_nodeselector='{"region":"infra"}'
openshift_metrics_selector="region=infra"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_volume_size=10Gi
#openshift_metrics_hawkular_replicas=2
#openshift_metrics_cassandra_replicas=3
#openshift_metrics_hawkular_nodeselector='{"region":"infra"}'
#openshift_metrics_cassandra_nodeselector='{"region":"infra"}'
#openshift_metrics_heapster_nodeselector='{"region":"infra"}'
#openshift_metrics_selector="region=infra"


# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
openshift_hosted_logging_deploy=false
#openshift_hosted_logging_deploy=false
openshift_hosted_logging_deploy=true
#openshift_hosted_logging_deployer_version=3.3.0
#openshift_hosted_metrics_storage_kind=dynamic
#openshift_logging_es_pvc_size= 100Gi
#openshift_logging_es_cluster_size= 3
openshift_logging_image_version=v3.6
#openshift_logging_es_pvc_size=100Gi
openshift_logging_es_cluster_size=3
#openshift_logging_es_number_of_replicas=2
#openshift_logging_kibana_replica_count=2
#openshift_logging_es_nodeselector='{"region":"infra"}'
#openshift_logging_kibana_nodeselector='{"region":"infra"}'
#openshift_logging_curator_nodeselector='{"region":"infra"}'
openshift_logging_es_nodeselector='{"region":"infra"}'
openshift_logging_kibana_nodeselector='{"region":"infra"}'
openshift_logging_curator_nodeselector='{"region":"infra"}'

#openshift_hosted_logging_storage_host=host
openshift_hosted_logging_storage_kind=nfs
openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
openshift_hosted_logging_storage_nfs_directory=/exports
openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_logging_storage_volume_name=logging
openshift_hosted_logging_storage_volume_size=10Gi
openshift_hosted_logging_storage_labels={'storage': 'logging'}
openshift_logging_es_pvc_dynamic=false
openshift_logging_es_ops_pvc_dynamic=false
#openshift_logging_kibana_hostname=
#openshift_hosted_logging_hostname=

#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_host=ose-bastion.c.ocp-demo.internal
#openshift_hosted_logging_storage_nfs_directory=/NotBackedUp/nfs/ose36
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#openshift_hosted_logging_storage_labels={'storage': 'logging'}

# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
Expand Down Expand Up @@ -198,8 +220,8 @@ openshift_disable_check=docker_storage,memory_availability,disk_availability

#{% if {{ env[GLUSTER] }} equals = 'yes' %}
#gluster
openshift_storage_glusterfs_namespace=glusterfs
openshift_storage_glusterfs_name=storage
#openshift_storage_glusterfs_namespace=glusterfs
#openshift_storage_glusterfs_name=storage
#{% endif %}


Expand All @@ -223,7 +245,7 @@ infranode[1:3] openshift_node_labels="{'region': 'infra'}"


#{% if {{ env[GLUSTER] }} equals 'yes' %}
[glusterfs]
node[1:3] glusterfs_ip="{{ ansible_default_ipv4.address }}" glusterfs_devices='[ "/dev/sdc" ]'
#[glusterfs]
#node[1:3] glusterfs_ip="{{ ansible_default_ipv4.address }}" glusterfs_devices='[ "/dev/sdc" ]'
#{% endif %}

15 changes: 15 additions & 0 deletions post-cluster-creation/delete-pvs.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/bin/bash
# Delete the NFS-backed persistent volumes pv1..pv30 that were registered by
# new-configure-pvs.sh. Requires an authenticated `oc` session with
# cluster-admin privileges. NOTE(review): under `set -e` this aborts on the
# first PV that does not exist — confirm that is the intended behavior.
set -e

# create necessary dirs on the nfs server (kept commented for reference;
# the export/dir setup is performed elsewhere)

#ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'for i in {1..30}; do echo "/exports/pv$i *(rw,root_squash)" | sudo tee -a /etc/exports.d/openshift-ansible.exports > /dev/null; done;'
#ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'for i in {1..30}; do sudo mkdir "/exports/pv$i" && sudo chown nfsnobody:nfsnobody "/exports/pv$i" && sudo chmod 777 "/exports/pv$i"; done;'

#restart nfs
#ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` sudo systemctl restart nfs

# delete pvs (the original comment said "create" — this loop deletes them)
for i in {1..30}; do
  oc delete pv "pv$i"
done
15 changes: 15 additions & 0 deletions post-cluster-creation/new-configure-pvs.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/bin/bash
# Register 30 NFS-backed persistent volumes (pv1..pv30) in the cluster by
# rendering pv_template.yaml once per volume. Requires an authenticated `oc`
# session and pv_template.yaml in the current working directory.
set -e

# create necessary dirs on the nfs server (kept commented for reference;
# the export/dir setup is performed elsewhere)

#ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'for i in {1..30}; do echo "/exports/pv$i *(rw,root_squash)" | sudo tee -a /etc/exports.d/openshift-ansible.exports > /dev/null; done;'
#ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'for i in {1..30}; do sudo mkdir "/exports/pv$i" && sudo chown nfsnobody:nfsnobody "/exports/pv$i" && sudo chmod 777 "/exports/pv$i"; done;'

#restart nfs
#ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` sudo systemctl restart nfs

# create pvs: render the template with matching export/PV names and apply it
for i in {1..30}; do
  oc process -f pv_template.yaml -p NFS_EXPORT="pv$i" -p PV_NAME="pv$i" | oc create -f -
done
4 changes: 2 additions & 2 deletions post-cluster-creation/pv_template.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ objects:
accessModes:
- ReadWriteOnce
capacity:
storage: 10Gi
storage: 100Gi
nfs:
path: /exports/${NFS_EXPORT}
server: ose-bastion
Expand All @@ -20,4 +20,4 @@ parameters:
required: true
- name: NFS_EXPORT
description: name of the nfs export
required: true
required: true
10 changes: 7 additions & 3 deletions prepare-bastion.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,13 @@ a=`whoami`
sed -i "s/^/$a:/" ./my_id.pub
export BASTION_USERNAME=$a


[ -z "$OCP_VERSION" ] && OCP_VERSION=3.10

gcloud compute project-info add-metadata --metadata-from-file sshKeys=./my_id.pub

# prepare bastion to receive variables
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'sudo yum -y --disablerepo=rhui* install google-rhui-client-rhel7'
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'echo AcceptEnv RHN_USERNAME RHN_PASSWORD DNS_DOMAIN BASTION_USERNAME RHN_SUB_POOL GLUSTER | sudo tee -a /etc/ssh/sshd_config > /dev/null'
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` sudo systemctl restart sshd
# disable host check on ssh connections
Expand All @@ -21,9 +25,9 @@ ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'su
#subscribe
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` sudo subscription-manager register --username=$RHN_USERNAME --password=$RHN_PASSWORD
# configure subscriptions
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` -o SendEnv=RHN_USERNAME -o SendEnv=RHN_PASSWORD -o SendEnv=DNS_DOMAIN -o SendEnv=RHN_SUB_POOL -o SendEnv=BASTION_USERNAME 'sudo subscription-manager attach --pool=$RHN_SUB_POOL && sudo subscription-manager refresh && sudo subscription-manager repos --disable="*" && sudo subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-optional-rpms" --enable="rhel-7-server-extras-rpms" --enable="rhel-7-server-ose-3.6-rpms" --enable="rhel-7-fast-datapath-rpms"'
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` -o SendEnv=RHN_USERNAME -o SendEnv=RHN_PASSWORD -o SendEnv=DNS_DOMAIN -o SendEnv=RHN_SUB_POOL -o SendEnv=BASTION_USERNAME 'sudo subscription-manager attach --pool=$RHN_SUB_POOL && sudo subscription-manager refresh && sudo subscription-manager repos --disable="*" && sudo subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-optional-rpms" --enable="rhel-7-server-extras-rpms" --enable="rhel-7-server-ose-$OCP_VERSION-rpms" --enable="rhel-7-fast-datapath-rpms"'
#update install packages
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'sudo yum update -y && sudo yum install -y git ansible atomic-openshift-utils screen bind-utils atomic-openshift-clients'
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'sudo yum update -y && sudo yum install -y git ansible atomic-openshift-utils screen bind-utils atomic-openshift-clients openshift-ansible'
# generate and add keys
ssh `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` 'ssh-keygen -t rsa -f .ssh/id_rsa -N ""'
# set the key in gcloud metadata
Expand All @@ -34,7 +38,7 @@ gcloud compute project-info add-metadata --metadata-from-file sshKeys=./my_id.pu


# download git
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` git clone https://github.com/raffaelespazzoli/openshift-enablement-exam
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` git clone https://github.com/sully6768/openshift-enablement-exam

# prepare hostfile
ssh -t `gcloud compute addresses list | grep ose-bastion | awk '{print $3}'` -o SendEnv=RHN_USERNAME -o SendEnv=RHN_PASSWORD -o SendEnv=DNS_DOMAIN -o SendEnv=RHN_SUB_POOL -o SendEnv=BASTION_USERNAME 'sed -i "s/master.10.128.0.10.xip.io/mi.$DNS_DOMAIN/g" /home/$BASTION_USERNAME/openshift-enablement-exam/hosts'
Expand Down
9 changes: 5 additions & 4 deletions prepare-cluster.sh
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
#!/bin/bash
set -e

# Prepare Cluster
ansible nodes -b -i hosts -m shell -a "yum install -y subscription-manager && subscription-manager clean"
ansible nodes -b -i hosts -m shell -a "subscription-manager register --username=$RHN_USERNAME --password=$RHN_PASSWORD && subscription-manager attach --pool=$RHN_SUB_POOL && subscription-manager refresh"
# Prepare Cluster
ansible nodes -b -i hosts -m shell -a "yum -y --disablerepo=rhui* install google-rhui-client-rhel7"
ansible nodes -b -i hosts -m shell -a "yum install -y subscription-manager && subscription-manager clean"
ansible nodes -b -i hosts -m shell -a "subscription-manager register --username=$RHN_USERNAME --password=$RHN_PASSWORD && subscription-manager attach --pool=$RHN_SUB_POOL && subscription-manager refresh"
ansible nodes -b -i hosts -m shell -a "subscription-manager repos --disable='*' && subscription-manager repos --enable=rhel-7-server-rpms --enable=rhel-7-server-optional-rpms --enable=rhel-7-server-extras-rpms --enable=rhel-7-server-ose-3.6-rpms --enable=rhel-7-fast-datapath-rpms"
ansible nodes -b -i hosts -m shell -a "yum update -y && yum install -y docker wget git net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct"
ansible 'nodes:!masters' -i hosts -b -m copy -a "src=docker-storage-setup dest=/etc/sysconfig/docker-storage-setup"
#this is non-idempotent
ansible 'nodes:!masters' -i hosts -b -m shell -a "yum install -y docker && docker-storage-setup"
ansible nodes -b -i hosts -m service -a "name=docker enabled=true state=started"
ansible nodes -b -i hosts -m shell -a "reboot"
ansible nodes -b -i hosts -m shell -a "reboot"
Loading