diff --git a/CHANGELOG-0.4.md b/CHANGELOG-0.4.md
index 2c0e1559cf..fc6442d447 100644
--- a/CHANGELOG-0.4.md
+++ b/CHANGELOG-0.4.md
@@ -1,9 +1,23 @@
 # Changelog 0.4
 
-## [0.4.1] 2019-10-17
+## [0.4.2] 2019-11-20
+
+### Added
+
+- Online/offline upgrade of K8s, Docker and common packages
+
+### Changed
+
+- Removed legacy Epiphany from the repository
+- [#617](https://github.com/epiphany-platform/epiphany/issues/617) - Docker images are loaded only on the image registry host
 
 ### Fixed
 
+- [#694](https://github.com/epiphany-platform/epiphany/issues/694) - 'epicli apply' does not remove files from build that were removed from sources
+
+## [0.4.1] 2019-10-17
+
+### Fixed
+
 - [#612](https://github.com/epiphany-platform/epiphany/issues/612) - 'epicli delete' - cannot delete a partially built infrastructure
 - [#613](https://github.com/epiphany-platform/epiphany/pull/613) - Hotfixes for Ubuntu offline installation in air-gap mode
@@ -35,4 +49,4 @@
 
 ### Known issues
 
--
\ No newline at end of file
+-
diff --git a/core/ansible.cfg b/core/ansible.cfg
deleted file mode 100644
index e6b11cea0f..0000000000
--- a/core/ansible.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Ansible config file for the project
-# Force the inventory to be 'development', so you will need to use -i on all ansible commands. This is for safety reasons.
-
-[defaults]
-# inventory = development
-# gathering = smart
diff --git a/core/bin/README.md b/core/bin/README.md
deleted file mode 100644
index a3c5770ea0..0000000000
--- a/core/bin/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Bin Folder
-
-This folder holds all of the binaries and scripts that are maintained outside of Epiphany but used by Epiphany. This folder does *NOT* hold output from the /build folder; the /build folder holds generated code and rendered output from the build process.
-
-## template_engine
-
-The `template_engine` is a Python app that takes 3 parameters:
-
-- Input template
-- Output file
-- Data file
-
-```bash
-./template_engine \
-    -i /data/terraform/pipeline/main.tf.j2 \
-    -o /build/terraform/pipeline/main.tf \
-    -d /data/terraform/pipeline/pipeline.yaml
-```
-
-It uses `Jinja2` templating syntax, so you can embed templates in templates for complex needs, or do simple code/data generation. The data is in `yaml` format. The input template should have a `.j2` file extension, but it's not required.
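The `template_engine` described in the deleted README boils down to: load a YAML data file, render a Jinja2 template against it, and write the result. A minimal sketch of that flow, assuming Python 3 with `jinja2` and `PyYAML` installed (the `render` helper is illustrative, not the script itself; the paths reuse the README's examples):

```python
import yaml
from jinja2 import Environment, FileSystemLoader

def render(data_file, in_file, out_file):
    # Values from the YAML data file drive the Jinja2 template.
    with open(data_file) as f:
        data = yaml.safe_load(f)
    # A loader rooted at '/' lets absolute template paths be passed directly.
    env = Environment(loader=FileSystemLoader('/'))
    template = env.get_template(in_file)
    with open(out_file, 'w') as f:
        f.write(template.render(data))

# Example paths from the deleted README:
render('/data/terraform/pipeline/pipeline.yaml',
       '/data/terraform/pipeline/main.tf.j2',
       '/build/terraform/pipeline/main.tf')
```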
diff --git a/core/bin/replacer b/core/bin/replacer
deleted file mode 100644
index 3790f0017e..0000000000
--- a/core/bin/replacer
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python
-
-from jinja2 import Environment, FileSystemLoader
-import traceback
-import os
-import yaml
-import argparse
-import sys
-import json
-
-
-def load_files(base_file, replacer_file):
-    dictbase = dict()
-    dictreplace = dict()
-    try:
-        with open(base_file) as base_data:
-            dictbase = yaml.safe_load(base_data)
-
-        with open(replacer_file) as replacer_data:
-            dictreplace = yaml.safe_load(replacer_data)
-
-    except Exception as e:
-        traceback.print_exc(file=sys.stdout)
-        exit(1)
-
-    # The 'core: azure: standard: vms:' part of the data.yaml is matched separately to allow users
-    # to define a different number of vms than 2 in the simplified config file.
-    dictbase_replaced_vms = vm_matching(dictbase, dictreplace)
-
-    return replacer(dictbase_replaced_vms, dictreplace)
-
-
-def vm_matching(base, replace):
-    # Assumes the current structure of the data.yaml is preserved in the future - it distinguishes
-    # between the worker vm config and the master vm config.
-    random_vm_master = base['core']['azure']['standard']['vms'][0].copy()
-    random_vm_worker = base['core']['azure']['standard']['vms'][1].copy()
-
-    base['core']['azure']['standard']['vms'] = []
-
-    # This copies the default master/worker vm configs the desired number of times.
-    for vm_number in range(0, len(replace['core']['azure']['standard']['vms'])):
-        if 'master' in replace['core']['azure']['standard']['vms'][vm_number]['roles']:
-            random_vm_object = random_vm_master.copy()
-            base['core']['azure']['standard']['vms'].append(random_vm_object)
-        else:
-            random_vm_object = random_vm_worker.copy()
-            base['core']['azure']['standard']['vms'].append(random_vm_object)
-
-    # Replacing values in the vms configs.
-    for vm_number in range(0, len(replace['core']['azure']['standard']['vms'])):
-        replacer(base['core']['azure']['standard']['vms'][vm_number],
-                 replace['core']['azure']['standard']['vms'][vm_number])
-    return base
-
-
-# Replacing the rest of the data.yaml file
-def replacer(base, replace):
-    for key_old in base:
-        for key_new in replace:
-            # Ignore vms as this part is already done
-            if key_old == 'vms' or key_new == 'vms':
-                pass
-
-            # There are 15 different 'name' properties in the data.yaml,
-            # so it is still possible that this will get seriously broken if the structure of the data.yaml changes.
-            elif type(base[key_old]) is dict:
-                if key_new == key_old and type(replace[key_new]) is dict:
-                    replacer(base[key_old], replace[key_new])
-
-            # Replacing on the matched keys - the only nested list of dicts is 'vms', which we handled before.
-            # Lists have to be replaced whole.
-            elif key_old == key_new:
-                base[key_old] = replace[key_new]
-    return base
-
-
-if __name__ == '__main__':
-    p = argparse.ArgumentParser(description='Replacer Engine', prog='Replacer_Engine')
-    p.add_argument('--base_file', '-b', help='The base data file, updated in place.')
-    p.add_argument('--replacer_file', '-r', help='The file with the override values.')
-
-    options = p.parse_args()
-
-    replaced_data_yaml = load_files(options.base_file, options.replacer_file)
-
-    a = yaml.safe_dump(replaced_data_yaml, default_flow_style=False, indent=2, allow_unicode=True)
-
-    b = a.replace("null", "")
-
-    with open(options.base_file, 'w') as f:
-        f.write(b)
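The merge rule the deleted `replacer` applied to data.yaml is easiest to see on a small input. Below is a simplified sketch with hypothetical keys (invented for illustration, and omitting the script's special-casing of the 'vms' list): nested dicts are merged recursively, while scalars and whole lists at matching keys are overwritten.

```python
# Simplified restatement of replacer()'s merge rule (illustrative data;
# the real script also rebuilds the 'vms' list separately).
def merge(base, override):
    for key in base:
        if key in override:
            if isinstance(base[key], dict) and isinstance(override[key], dict):
                merge(base[key], override[key])   # recurse into nested dicts
            else:
                base[key] = override[key]         # scalars/lists replaced whole
    return base

base = {'name': 'epiphany',
        'admin_user': {'name': 'operations', 'key_path': '~/.ssh/id_rsa'},
        'tags': ['defaults']}
override = {'admin_user': {'key_path': '~/.ssh/epiphany_rsa'},
            'tags': ['prod', 'eu-west']}

print(merge(base, override))
# -> {'name': 'epiphany',
#     'admin_user': {'name': 'operations', 'key_path': '~/.ssh/epiphany_rsa'},
#     'tags': ['prod', 'eu-west']}
```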
diff --git a/core/bin/template_engine b/core/bin/template_engine
deleted file mode 100755
index 3cc4617413..0000000000
--- a/core/bin/template_engine
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# Author: Hans Chris Jones
-# Copyright 2018, LambdaStack
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# NOTE: You need to pass in the full paths to the file references below. The data file should be private and it's
-# not in the project. The reason is that you should create a YAML file that fits how you want to configure your
-# environment. For example, you can have a small YAML data file for configuring the kickstart/ISO process and then
-# maybe one for building out the missing USER and/or SYSTEM data used in the CHEF Environment files. A sample
-# environment file ships with the project for vagrant, called vagrant.json. However, a production.json should
-# really be a jinja2 template like base_environment.json.j2, with as much default data as possible and with template {{ }} placeholders
-# for the actual data. The output of this process should be the TRUE production.json file. Also, it's a good idea
-# to name your production.json file something more descriptive of the environment it actually belongs to. For example,
-# prod-dc101.json or something like it.
-
-from jinja2 import Environment, FileSystemLoader
-# import jinja2.ext.loopcontrols
-import traceback
-import os
-import yaml
-import argparse
-import sys
-import json
-
-
-# All three file paths must be full paths.
-def render_template(data_file, in_file, out_file, json_arg, yaml_arg):
-    dict = ""
-
-    # If the -j flag was passed, convert the yaml to pretty json in sorted order
-    if json_arg:
-        with open(data_file) as data:
-            dict = yaml.load(data, Loader=yaml.FullLoader)
-        print json.dumps(dict, indent=4, sort_keys=True)
-        exit(0)
-
-    if yaml_arg:
-        with open(data_file) as data:
-            dict = json.load(data)
-        print yaml.safe_dump(dict, indent=2, allow_unicode=True, default_flow_style=False)
-        exit(0)
-
-    # Start the template processing
-    try:
-        # env = Environment(autoescape=False, loader=FileSystemLoader('/'), trim_blocks=True)
-        env = Environment(loader=FileSystemLoader('/'))
-        env.filters['jsonify'] = json.dumps
-
-        with open(data_file) as data:
-            dict = yaml.load(data, Loader=yaml.FullLoader)
-
-        # Render the template and write the generated output to out_file
-        template = env.get_template(in_file)
-
-        with open(out_file, 'w') as f:
-            output = template.render(dict)
-            f.write(output)
-
-    except Exception as e:
-        # Print out the error, traceback and debug info...
-        print
-        print '='*60
-        print "Template Engine stopped due to the following error ===> ", e
-        print '-'*60
-        print 'Debugging Output:'
-        traceback.print_exc(file=sys.stdout)
-        print '-'*60
-        print 'Data dictionary:'
-        print
-        print json.dumps(dict, indent=4, sort_keys=True)
-        print '='*60
-        print "Template Engine stopped due to the following error ===> ", e
-        print 'Scan up to see the traceback and JSON data (the traceback is printed at both the top and bottom of this output)'
-        print '-'*60
-        print 'Debugging Output:'
-        traceback.print_exc(file=sys.stdout)
-        print '='*60
-        print
-        exit(1)
-
-
-# Used to pass a string instead of an input file as a template.
-# dict is a json dictionary of the values to substitute.
-def render_string(in_string, dict):
-    return Environment().from_string(in_string).render(dict)
-
-
-# Standard way of calling...
-# ./template_engine -i