Please note this project is just a spike to play around a bit with Concourse, a simple Node.js app on AWS, and Kubernetes.
It might not follow all security best practices, so use it at your own risk.
fly -t test set-pipeline --load-vars-from credentials.yml -p smoke -c pipeline.yml
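For reference, credentials.yml is just a flat YAML map whose values fill the placeholders in pipeline.yml; the real pipeline lives in this repo, but a minimal sketch of the mechanism looks like this (the key and job names here are made up, and depending on your Concourse version the placeholder syntax is ((var)) or {{var}}):

credentials.yml
greeting-target: world

pipeline.yml (excerpt)
jobs:
- name: smoke
  plan:
  - task: say-hello
    config:
      platform: linux
      image_resource:
        type: docker-image
        source: {repository: alpine}
      run:
        path: echo
        args: ["hello, ((greeting-target))"]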
aws s3api create-bucket \
  --bucket kops-k8s-de-state-store \
  --region eu-central-1 \
  --create-bucket-configuration LocationConstraint=eu-central-1
aws s3api put-bucket-versioning --bucket kops-k8s-de-state-store --versioning-configuration Status=Enabled
Run kubectl proxy and open ${URL_AND_PORT}/ui in the browser.
export NAME=myfirstprivatecluster.kops-k8s.de
export KOPS_STATE_STORE=s3://kops-k8s-de-state-store
kops create cluster \
  --zones us-west-2a \
  --ssh-public-key ~/.ssh/kops-k8s.pub \
  --alsologtostderr \
  --log_dir ~/projects/kops/logs \
  ${NAME}
kops create cluster \
  --node-count 3 \
  --zones eu-central-1a,eu-central-1b,eu-central-1c \
  --master-zones eu-central-1a \
  --topology private \
  --networking weave \
  --node-size t2.medium \
  --master-size t2.large \
  --associate-public-ip=false \
  --bastion=true \
  --authorization RBAC \
  --ssh-public-key ~/.ssh/id_rsa.pub \
  ${NAME}
kops update cluster --yes ${NAME}
helm init
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
helm install stable/concourse
export POD_NAME=$(kubectl get pods --namespace default -l "app=knobby-tiger-web" -o jsonpath="{.items[0].metadata.name}")
kubectl port-forward --namespace default $POD_NAME 8080:8080
(knobby-tiger is the release name Helm generated for the Concourse chart in this run; replace it with the release name from your own helm install output.)
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
kubectl create -f dashboard-admin.yaml  # you can get this yaml file in the fun-with-concourse repo
kubectl proxy
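dashboard-admin.yaml is not reproduced here; a typical version just binds the dashboard's service account to cluster-admin so you can skip the token login, which is fine for a spike but far too permissive for anything serious:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system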
kubectl create serviceaccount concourse
kubectl get serviceaccounts concourse -o yaml
kubectl create -f deployer-role.yaml
kubectl create -f deployer-role-binding.yaml
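Both files are in this repo; roughly, they give the concourse service account just enough rights to deploy the app into the default namespace, along these lines (a sketch, the resource and verb lists may differ from the repo's actual files):

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: deployer
  namespace: default
rules:
- apiGroups: ["", "apps", "extensions"]
  resources: ["deployments", "services", "pods"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: deployer-binding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: deployer
subjects:
- kind: ServiceAccount
  name: concourse
  namespace: default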
kubectl port-forward <pod-name> 3000:3000
helm install --name nginx-ingress1 stable/nginx-ingress --set rbac.create=true
Wait for the pods and services to be ready, then create the ingress for the service with the following command:
kubectl apply -f fun-ingress.yaml
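fun-ingress.yaml is in this repo as well; in essence it routes a hostname to the node app's service through the nginx ingress controller, roughly like this (the host and service name below are placeholders, not necessarily what the repo uses):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: fun-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: fun.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: fun-with-concourse
          servicePort: 3000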
Wait again for the route to be available and reach it.
HURRAAYYYYYYYYYYY!